Gitea source code: the WorkerPoolQueue worker group (modules/queue)

// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package queue

import (
	"context"
	"runtime/pprof"
	"sync"
	"sync/atomic"
	"time"

	"code.gitea.io/gitea/modules/log"
)

var (
	infiniteTimerC         = make(chan time.Time)
	batchDebounceDuration  = 100 * time.Millisecond
	workerIdleDuration     = 1 * time.Second
	shutdownDefaultTimeout = 2 * time.Second

	unhandledItemRequeueDuration atomic.Int64 // to avoid data race during test
)

func init() {
	unhandledItemRequeueDuration.Store(int64(time.Second))
}

// workerGroup is a group of workers to work with a WorkerPoolQueue
type workerGroup[T any] struct {
	q  *WorkerPoolQueue[T]
	wg sync.WaitGroup

	ctxWorker       context.Context
	ctxWorkerCancel context.CancelFunc

	batchBuffer []T
	popItemChan chan []byte
	popItemErr  chan error
}

func (wg *workerGroup[T]) doPrepareWorkerContext() {
	wg.ctxWorker, wg.ctxWorkerCancel = context.WithCancel(wg.q.ctxRun)
}

// doDispatchBatchToWorker dispatches a batch of items to the worker channel.
// If the channel is full, it tries to start a new worker if possible.
func (q *WorkerPoolQueue[T]) doDispatchBatchToWorker(wg *workerGroup[T], flushChan chan flushType) {
	batch := wg.batchBuffer
	wg.batchBuffer = nil

	if len(batch) == 0 {
		return
	}

	full := false
	select {
	case q.batchChan <- batch:
	default:
		full = true
	}

	// TODO: the logic could be improved in the future, to avoid a data-race between "doStartNewWorker" and "workerNum".
	// The root problem is that if we skip "doStartNewWorker" here, the "workerNum" might be decreased by other workers later,
	// so ideally it should check whether there are enough workers by some approach, and start new workers if necessary.
	// This data-race is not serious: as long as a new worker is started soon to make sure there are enough workers,
	// there is no need for a huge refactor at the moment.
	q.workerNumMu.Lock()
	noWorker := q.workerNum == 0
	if full || noWorker {
		if q.workerNum < q.workerMaxNum || noWorker && q.workerMaxNum <= 0 {
			q.workerNum++
			q.doStartNewWorker(wg)
		}
	}
	q.workerNumMu.Unlock()

	if full {
		select {
		case q.batchChan <- batch:
		case flush := <-flushChan:
			q.doWorkerHandle(batch)
			q.doFlush(wg, flush)
		case <-q.ctxRun.Done():
			wg.batchBuffer = batch // return the batch to the buffer, the "doRun" function will handle it
		}
	}
}
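doDispatchBatchToWorker uses a common Go idiom: attempt a non-blocking send with select/default, react to the "channel full" case (here, by starting another worker under the lock), and only then fall back to a blocking send. Below is a minimal standalone sketch of the same idiom, independent of Gitea's types; `jobChan` and `startWorker` are illustrative names, not part of the source above.

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	jobChan := make(chan int, 1)
	var wg sync.WaitGroup

	startWorker := func() {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := range jobChan {
				fmt.Println("handled", j)
			}
		}()
	}

	startWorker()
	for job := 0; job < 5; job++ {
		select {
		case jobChan <- job: // non-blocking send succeeded
		default:
			// channel is full: start another worker (as doDispatchBatchToWorker does), then block
			startWorker()
			jobChan <- job
		}
	}
	close(jobChan)
	wg.Wait()
}
```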
// doWorkerHandle calls the safeHandler to handle a batch of items, and it increases/decreases the active worker number.
// If the context has been canceled, it should not be called, because "Push" still needs the context; in such a case, call q.safeHandler directly.
func (q *WorkerPoolQueue[T]) doWorkerHandle(batch []T) {
	q.workerNumMu.Lock()
	q.workerActiveNum++
	q.workerNumMu.Unlock()

	defer func() {
		q.workerNumMu.Lock()
		q.workerActiveNum--
		q.workerNumMu.Unlock()
	}()

	unhandled := q.safeHandler(batch...)
	// if none of the items were handled, it should back off for a few seconds;
	// in this case the handler (eg: document indexer) may have encountered some errors/failures
	if len(unhandled) == len(batch) && unhandledItemRequeueDuration.Load() != 0 {
		if q.isFlushing.Load() {
			return // do not requeue items when flushing: since all items failed, requeuing them would just keep failing
		}
		log.Error("Queue %q failed to handle batch of %d items, backoff for a few seconds", q.GetName(), len(batch))
		// TODO: ideally it shouldn't "sleep" here (it blocks the worker, which then blocks flush).
		// It could debounce the requeue operation and try to requeue the items in the future.
		select {
		case <-q.ctxRun.Done():
		case <-time.After(time.Duration(unhandledItemRequeueDuration.Load())):
		}
	}
	for _, item := range unhandled {
		if err := q.Push(item); err != nil {
			if !q.basePushForShutdown(item) {
				log.Error("Failed to requeue item for queue %q when calling handler: %v", q.GetName(), err)
			}
		}
	}
}
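The only thing this file shows about the handler is its call site, `q.safeHandler(batch...)`, which returns the items that were not handled. The sketch below is a hypothetical handler following that "return what you could not handle" contract; the `func(items ...T) (unhandled []T)` shape is an assumption based on the call site, and `exampleHandler`/`processOne` are illustrative names, not part of the Gitea source.

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// processOne is an illustrative helper that fails for some items.
func processOne(item string) error {
	if strings.HasPrefix(item, "bad") {
		return errors.New("cannot handle this item")
	}
	fmt.Println("handled:", item)
	return nil
}

// exampleHandler returns the items it could not handle;
// doWorkerHandle would requeue (or back off on) whatever is returned.
func exampleHandler(items ...string) (unhandled []string) {
	for _, it := range items {
		if err := processOne(it); err != nil {
			unhandled = append(unhandled, it)
		}
	}
	return unhandled
}

func main() {
	fmt.Println("unhandled:", exampleHandler("a", "bad-b", "c"))
}
```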
// basePushForShutdown tries to requeue items into the base queue when the WorkerPoolQueue is shutting down.
// If the queue is shutting down, it returns true and tries to push the items.
// Otherwise it does nothing and returns false.
func (q *WorkerPoolQueue[T]) basePushForShutdown(items ...T) bool {
	shutdownTimeout := time.Duration(q.shutdownTimeout.Load())
	if shutdownTimeout == 0 {
		return false
	}
	ctxShutdown, ctxShutdownCancel := context.WithTimeout(context.Background(), shutdownTimeout)
	defer ctxShutdownCancel()
	for _, item := range items {
		// if there is still an error, the queue can do nothing except lose the items
		if err := q.baseQueue.PushItem(ctxShutdown, q.marshal(item)); err != nil {
			log.Error("Failed to requeue item for queue %q when shutting down: %v", q.GetName(), err)
		}
	}
	return true
}
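Note that basePushForShutdown derives its context from context.Background() rather than from the already-canceled run context, and bounds it with the configured shutdown timeout, so best-effort requeueing can still make progress during shutdown without hanging forever. Here is a minimal sketch of that pattern, assuming a hypothetical `saveItem` as a stand-in for `baseQueue.PushItem`.

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// saveItem is a hypothetical stand-in for a context-aware store operation.
func saveItem(ctx context.Context, item string) error {
	select {
	case <-time.After(10 * time.Millisecond): // pretend the write takes a moment
		fmt.Println("saved:", item)
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	// use context.Background() so the work is still allowed during shutdown,
	// but bound it with a timeout so shutdown cannot hang forever
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	for _, item := range []string{"a", "b", "c"} {
		if err := saveItem(ctx, item); err != nil {
			fmt.Println("failed to save:", item, err)
		}
	}
}
```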
func resetIdleTicker(t *time.Ticker, dur time.Duration) {
	t.Reset(dur)
	select {
	case <-t.C:
	default:
	}
}
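resetIdleTicker resets the ticker and then drains a possibly pending tick: without the drain, a tick that fired just before the Reset would still sit in t.C, and the next receive would make the idle timeout appear to expire immediately. A small self-contained sketch of the same reset-and-drain idiom in a worker-style loop; the channel names and durations are illustrative, not from the source.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	work := make(chan int)
	idle := 200 * time.Millisecond

	go func() {
		for i := 0; i < 3; i++ {
			work <- i
			time.Sleep(50 * time.Millisecond)
		}
	}()

	t := time.NewTicker(idle)
	defer t.Stop()
	for {
		select {
		case v := <-work:
			fmt.Println("got", v)
			// reset the idle timeout and drain a possibly pending stale tick,
			// otherwise <-t.C below could fire immediately after the reset
			t.Reset(idle)
			select {
			case <-t.C:
			default:
			}
		case <-t.C:
			fmt.Println("idle, exiting")
			return
		}
	}
}
```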
// doStartNewWorker starts a new worker for the queue; the worker reads from the worker channel and handles the items.
func (q *WorkerPoolQueue[T]) doStartNewWorker(wp *workerGroup[T]) {
	wp.wg.Go(func() {
		log.Debug("Queue %q starts new worker", q.GetName())
		defer log.Debug("Queue %q stops idle worker", q.GetName())

		t := time.NewTicker(workerIdleDuration)
		defer t.Stop()

		keepWorking := true
		stopWorking := func() {
			q.workerNumMu.Lock()
			keepWorking = false
			q.workerNum--
			q.workerNumMu.Unlock()
		}
		for keepWorking {
			select {
			case <-wp.ctxWorker.Done():
				stopWorking()
			case batch, ok := <-q.batchChan:
				if !ok {
					stopWorking()
					continue
				}
				q.doWorkerHandle(batch)
				// reset the idle ticker, and drain the tick after reset in case a tick is already triggered
				resetIdleTicker(t, workerIdleDuration) // key code for TestWorkerPoolQueueWorkerIdleReset
			case <-t.C:
				q.workerNumMu.Lock()
				keepWorking = q.workerNum <= 1 // keep the last worker running
				if !keepWorking {
					q.workerNum--
				}
				q.workerNumMu.Unlock()
			}
		}
	})
}
// doFlush flushes the queue: it tries to read all items from the queue and handles them.
// It is for testing purposes only. It's not designed to work for a cluster.
func (q *WorkerPoolQueue[T]) doFlush(wg *workerGroup[T], flush flushType) {
	q.isFlushing.Store(true)
	defer q.isFlushing.Store(false)

	log.Debug("Queue %q starts flushing", q.GetName())
	defer log.Debug("Queue %q finishes flushing", q.GetName())

	// stop all workers, and prepare a new worker context to start new workers
	wg.ctxWorkerCancel()
	wg.wg.Wait()

	defer func() {
		close(flush.c)
		wg.doPrepareWorkerContext()
	}()

	if flush.timeout < 0 {
		// discard everything
		wg.batchBuffer = nil
		for {
			select {
			case <-wg.popItemChan:
			case <-wg.popItemErr:
			case <-q.batchChan:
			case <-q.ctxRun.Done():
				return
			default:
				return
			}
		}
	}

	// drain the batch channel first
loop:
	for {
		select {
		case batch := <-q.batchChan:
			q.doWorkerHandle(batch)
		default:
			break loop
		}
	}

	// drain the popItem channel
	emptyCounter := 0
	for {
		select {
		case <-q.ctxRun.Done():
			log.Debug("Queue %q is shutting down", q.GetName())
			return
		case data, dataOk := <-wg.popItemChan:
			if !dataOk {
				return
			}
			emptyCounter = 0
			if v, jsonOk := q.unmarshal(data); !jsonOk {
				continue
			} else {
				q.doWorkerHandle([]T{v})
			}
		case err := <-wg.popItemErr:
			if !q.isCtxRunCanceled() {
				log.Error("Failed to pop item from queue %q (doFlush): %v", q.GetName(), err)
			}
			return
		case <-time.After(20 * time.Millisecond):
			// There is no reliable way to make sure all queue items are consumed by the Flush: there might always be some items stored in buffers or temporary variables.
			// If Gitea runs as a cluster, we can't even guarantee that all items are consumed by a deterministic instance.
			// Luckily, the "Flush" trick is only used in tests, so far so good.
			if cnt, _ := q.baseQueue.Len(q.ctxRun); cnt == 0 && len(wg.popItemChan) == 0 {
				emptyCounter++
			}
			if emptyCounter >= 2 {
				return
			}
		}
	}
}

func (q *WorkerPoolQueue[T]) isCtxRunCanceled() bool {
	select {
	case <-q.ctxRun.Done():
		return true
	default:
		return false
	}
}
var skipFlushChan = make(chan flushType) // an empty flush chan, used to skip reading other flush requests

// doRun is the main loop of the queue. All related "doXxx" functions are executed in its context.
func (q *WorkerPoolQueue[T]) doRun() {
	pprof.SetGoroutineLabels(q.ctxRun)

	log.Debug("Queue %q starts running", q.GetName())
	defer log.Debug("Queue %q stops running", q.GetName())

	wg := &workerGroup[T]{q: q}
	wg.doPrepareWorkerContext()
	wg.popItemChan, wg.popItemErr = popItemByChan(q.ctxRun, q.baseQueue.PopItem)

	defer func() {
		q.ctxRunCancel()

		// drain all data on the fly
		// since the queue is shutting down, the items can't be dispatched to workers because the context is canceled;
		// it can't call doWorkerHandle either, because there is no chance to push unhandled items back to the queue
		var unhandled []T
		close(q.batchChan)
		for batch := range q.batchChan {
			unhandled = append(unhandled, batch...)
		}
		unhandled = append(unhandled, wg.batchBuffer...)
		for data := range wg.popItemChan {
			if v, ok := q.unmarshal(data); ok {
				unhandled = append(unhandled, v)
			}
		}

		shutdownTimeout := time.Duration(q.shutdownTimeout.Load())
		if shutdownTimeout != 0 {
			// if there is a shutdown context, try to push the items back to the base queue
			q.basePushForShutdown(unhandled...)
			workerDone := make(chan struct{})
			// this is the only way to wait for the workers, because the handlers do not have a context to wait for
			go func() { wg.wg.Wait(); close(workerDone) }()
			select {
			case <-workerDone:
			case <-time.After(shutdownTimeout):
				log.Error("Queue %q is shutting down, but workers are still running after timeout", q.GetName())
			}
		} else {
			// if there is no shutdown context, just call the handler to try to handle the items; if the handler fails again, the items are lost
			q.safeHandler(unhandled...)
		}

		close(q.shutdownDone)
	}()

	var batchDispatchC <-chan time.Time = infiniteTimerC
	for {
		select {
		case flush := <-q.flushChan:
			// before flushing, it needs to try to dispatch the batch to a worker first, in case there is no worker running;
			// after the dispatching, there is at least one worker running, so "doFlush" can wait for the workers to finish.
			// since we are already in a "flush" operation, the dispatching function shouldn't read the flush chan.
			q.doDispatchBatchToWorker(wg, skipFlushChan)
			q.doFlush(wg, flush)
		case <-q.ctxRun.Done():
			log.Debug("Queue %q is shutting down", q.GetName())
			return
		case data, dataOk := <-wg.popItemChan:
			if !dataOk {
				return
			}
			if v, jsonOk := q.unmarshal(data); !jsonOk {
				testRecorder.Record("pop:corrupted:%s", data) // in rare cases the levelqueue (leveldb) might be corrupted
				continue
			} else {
				wg.batchBuffer = append(wg.batchBuffer, v)
			}
			if len(wg.batchBuffer) >= q.batchLength {
				q.doDispatchBatchToWorker(wg, q.flushChan)
			} else if batchDispatchC == infiniteTimerC {
				batchDispatchC = time.After(batchDebounceDuration)
			} // else: batchDispatchC is already a debounce timer, it will be triggered soon
		case <-batchDispatchC:
			batchDispatchC = infiniteTimerC
			q.doDispatchBatchToWorker(wg, q.flushChan)
		case err := <-wg.popItemErr:
			if !q.isCtxRunCanceled() {
				log.Error("Failed to pop item from queue %q (doRun): %v", q.GetName(), err)
			}
			return
		}
	}
}
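doRun debounces batch dispatch by swapping the channel used in one select case: infiniteTimerC is never written to, so receiving from it blocks forever, and it is replaced by a one-shot time.After timer only while a batch is pending. Below is a minimal standalone sketch of that debounce idiom; `neverC`, the item channel, and the durations are illustrative, not taken from the source.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	neverC := make(chan time.Time) // nothing is ever sent: receiving blocks forever
	var debounceC <-chan time.Time = neverC

	items := make(chan string)
	go func() {
		for _, s := range []string{"a", "b", "c"} {
			items <- s
			time.Sleep(20 * time.Millisecond)
		}
	}()

	var buffer []string
	for {
		select {
		case it := <-items:
			buffer = append(buffer, it)
			if debounceC == neverC {
				// arm the debounce timer only once per pending batch
				debounceC = time.After(100 * time.Millisecond)
			}
		case <-debounceC:
			debounceC = neverC // disarm until new items arrive
			fmt.Println("dispatching batch:", buffer)
			return
		}
	}
}
```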