Gitea source code

workerqueue_test.go 9.0KB

// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package queue

import (
	"slices"
	"strconv"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/test"

	"github.com/stretchr/testify/assert"
)
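// runWorkerPoolQueue starts the queue's Run loop in a goroutine and returns a
// stop function that shuts the queue down, waiting up to one second.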
func runWorkerPoolQueue[T any](q *WorkerPoolQueue[T]) func() {
	go q.Run()
	return func() {
		q.ShutdownWait(1 * time.Second)
	}
}
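// TestWorkerPoolQueueUnhandled verifies that items returned as "unhandled" by the
// handler are requeued and handled again: even items are rejected on the first pass
// and therefore handled twice, while odd items are handled exactly once.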
func TestWorkerPoolQueueUnhandled(t *testing.T) {
	oldUnhandledItemRequeueDuration := unhandledItemRequeueDuration.Load()
	unhandledItemRequeueDuration.Store(0)
	defer unhandledItemRequeueDuration.Store(oldUnhandledItemRequeueDuration)

	mu := sync.Mutex{}

	test := func(t *testing.T, queueSetting setting.QueueSettings) {
		queueSetting.Length = 100
		queueSetting.Type = "channel"
		queueSetting.Datadir = t.TempDir() + "/test-queue"

		m := map[int]int{}

		// odds are handled once, evens are handled twice
		handler := func(items ...int) (unhandled []int) {
			testRecorder.Record("handle:%v", items)
			for _, item := range items {
				mu.Lock()
				if item%2 == 0 && m[item] == 0 {
					unhandled = append(unhandled, item)
				}
				m[item]++
				mu.Unlock()
			}
			return unhandled
		}

		q, _ := newWorkerPoolQueueForTest("test-workpoolqueue", queueSetting, handler, false)
		stop := runWorkerPoolQueue(q)
		for i := 0; i < queueSetting.Length; i++ {
			testRecorder.Record("push:%v", i)
			assert.NoError(t, q.Push(i))
		}
		assert.NoError(t, q.FlushWithContext(t.Context(), 0))
		stop()

		ok := true
		for i := 0; i < queueSetting.Length; i++ {
			if i%2 == 0 {
				ok = ok && assert.Equal(t, 2, m[i], "test %s: item %d", t.Name(), i)
			} else {
				ok = ok && assert.Equal(t, 1, m[i], "test %s: item %d", t.Name(), i)
			}
		}
		if !ok {
			t.Logf("m: %v", m)
			t.Logf("records: %v", testRecorder.Records())
		}
		testRecorder.Reset()
	}

	runCount := 2 // these tests can be run hundreds of times to check their stability
	t.Run("1/1", func(t *testing.T) {
		for range runCount {
			test(t, setting.QueueSettings{BatchLength: 1, MaxWorkers: 1})
		}
	})
	t.Run("3/1", func(t *testing.T) {
		for range runCount {
			test(t, setting.QueueSettings{BatchLength: 3, MaxWorkers: 1})
		}
	})
	t.Run("4/5", func(t *testing.T) {
		for range runCount {
			test(t, setting.QueueSettings{BatchLength: 4, MaxWorkers: 5})
		}
	})
}
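// TestWorkerPoolQueuePersistence runs the persistence scenario below with several
// batch-length / worker-count combinations.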
func TestWorkerPoolQueuePersistence(t *testing.T) {
	runCount := 2 // these tests can be run hundreds of times to check their stability
	t.Run("1/1", func(t *testing.T) {
		for range runCount {
			testWorkerPoolQueuePersistence(t, setting.QueueSettings{BatchLength: 1, MaxWorkers: 1, Length: 100})
		}
	})
	t.Run("3/1", func(t *testing.T) {
		for range runCount {
			testWorkerPoolQueuePersistence(t, setting.QueueSettings{BatchLength: 3, MaxWorkers: 1, Length: 100})
		}
	})
	t.Run("4/5", func(t *testing.T) {
		for range runCount {
			testWorkerPoolQueuePersistence(t, setting.QueueSettings{BatchLength: 4, MaxWorkers: 5, Length: 100})
		}
	})
}
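// testWorkerPoolQueuePersistence pushes items into a "level" (persistent) queue,
// shuts it down partway through, then restarts it and checks that the two runs
// together handle exactly "testCount" items.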
func testWorkerPoolQueuePersistence(t *testing.T, queueSetting setting.QueueSettings) {
	testCount := queueSetting.Length
	queueSetting.Type = "level"
	queueSetting.Datadir = t.TempDir() + "/test-queue"

	mu := sync.Mutex{}

	var tasksQ1, tasksQ2 []string
	q1 := func() {
		startWhenAllReady := make(chan struct{}) // only start consuming data when all "testCount" tasks have been pushed into the queue
		stopAt20Shutdown := make(chan struct{})  // stop and shut down at the 20th item

		testHandler := func(data ...string) []string {
			<-startWhenAllReady
			time.Sleep(10 * time.Millisecond)
			for _, s := range data {
				mu.Lock()
				tasksQ1 = append(tasksQ1, s)
				mu.Unlock()
				if s == "task-20" {
					close(stopAt20Shutdown)
				}
			}
			return nil
		}

		q, _ := newWorkerPoolQueueForTest("pr_patch_checker_test", queueSetting, testHandler, true)
		stop := runWorkerPoolQueue(q)
		for i := range testCount {
			_ = q.Push("task-" + strconv.Itoa(i))
		}
		close(startWhenAllReady)
		<-stopAt20Shutdown // it's possible that more than 20 tasks get executed
		stop()
	}

	q1() // run some tasks and shut down at an intermediate point

	time.Sleep(100 * time.Millisecond) // because the handler in q1 has a slight delay, we need to wait for it to finish

	q2 := func() {
		testHandler := func(data ...string) []string {
			for _, s := range data {
				mu.Lock()
				tasksQ2 = append(tasksQ2, s)
				mu.Unlock()
			}
			return nil
		}

		q, _ := newWorkerPoolQueueForTest("pr_patch_checker_test", queueSetting, testHandler, true)
		stop := runWorkerPoolQueue(q)
		assert.NoError(t, q.FlushWithContext(t.Context(), 0))
		stop()
	}

	q2() // restart the queue to continue executing the tasks left in it

	assert.NotEmpty(t, tasksQ1)
	assert.NotEmpty(t, tasksQ2)
	assert.Equal(t, testCount, len(tasksQ1)+len(tasksQ2))
}
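// TestWorkerPoolQueueActiveWorkers verifies that workers are spawned up to MaxWorkers
// while there is work to do, become idle once the queue drains, and are scaled back
// down to a single worker after workerIdleDuration.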
func TestWorkerPoolQueueActiveWorkers(t *testing.T) {
	defer test.MockVariableValue(&workerIdleDuration, 300*time.Millisecond)()

	handler := func(items ...int) (unhandled []int) {
		time.Sleep(100 * time.Millisecond)
		return nil
	}

	q, _ := newWorkerPoolQueueForTest("test-workpoolqueue", setting.QueueSettings{Type: "channel", BatchLength: 1, MaxWorkers: 1, Length: 100}, handler, false)
	stop := runWorkerPoolQueue(q)
	for i := range 5 {
		assert.NoError(t, q.Push(i))
	}

	time.Sleep(50 * time.Millisecond)
	assert.Equal(t, 1, q.GetWorkerNumber())
	assert.Equal(t, 1, q.GetWorkerActiveNumber())
	time.Sleep(500 * time.Millisecond)
	assert.Equal(t, 1, q.GetWorkerNumber())
	assert.Equal(t, 0, q.GetWorkerActiveNumber())
	time.Sleep(workerIdleDuration)
	assert.Equal(t, 1, q.GetWorkerNumber()) // there is at least one worker after the queue begins working
	stop()

	q, _ = newWorkerPoolQueueForTest("test-workpoolqueue", setting.QueueSettings{Type: "channel", BatchLength: 1, MaxWorkers: 3, Length: 100}, handler, false)
	stop = runWorkerPoolQueue(q)
	for i := range 15 {
		assert.NoError(t, q.Push(i))
	}

	time.Sleep(50 * time.Millisecond)
	assert.Equal(t, 3, q.GetWorkerNumber())
	assert.Equal(t, 3, q.GetWorkerActiveNumber())
	time.Sleep(500 * time.Millisecond)
	assert.Equal(t, 3, q.GetWorkerNumber())
	assert.Equal(t, 0, q.GetWorkerActiveNumber())
	time.Sleep(workerIdleDuration)
	assert.Equal(t, 1, q.GetWorkerNumber()) // there is at least one worker after the queue begins working
	stop()
}
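// TestWorkerPoolQueueShutdown verifies that shutting down a "level" queue while a slow
// handler keeps rejecting every item leaves all items in the persistent store, so a new
// queue instance sees the full backlog again.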
func TestWorkerPoolQueueShutdown(t *testing.T) {
	oldUnhandledItemRequeueDuration := unhandledItemRequeueDuration.Load()
	unhandledItemRequeueDuration.Store(int64(100 * time.Millisecond))
	defer unhandledItemRequeueDuration.Store(oldUnhandledItemRequeueDuration)

	// simulate a slow handler that doesn't handle any item (all items will be pushed back into the queue)
	handlerCalled := make(chan struct{})
	handler := func(items ...int) (unhandled []int) {
		if items[0] == 0 {
			close(handlerCalled)
		}
		time.Sleep(400 * time.Millisecond)
		return items
	}

	qs := setting.QueueSettings{Type: "level", Datadir: t.TempDir() + "/queue", BatchLength: 3, MaxWorkers: 4, Length: 20}
	q, _ := newWorkerPoolQueueForTest("test-workpoolqueue", qs, handler, false)
	stop := runWorkerPoolQueue(q)
	for i := 0; i < qs.Length; i++ {
		assert.NoError(t, q.Push(i))
	}
	<-handlerCalled
	time.Sleep(200 * time.Millisecond) // wait for a while to make sure all workers are active
	assert.Equal(t, 4, q.GetWorkerActiveNumber())
	stop() // stop triggers shutdown
	assert.Equal(t, 0, q.GetWorkerActiveNumber())

	// no item was ever handled, so we still get all of them again
	q, _ = newWorkerPoolQueueForTest("test-workpoolqueue", qs, handler, false)
	assert.Equal(t, 20, q.GetQueueItemNumber())
}
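// TestWorkerPoolQueueWorkerIdleReset verifies that a slow handler on one worker does
// not cause the pool to shrink to a single worker while the queue still has items.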
func TestWorkerPoolQueueWorkerIdleReset(t *testing.T) {
	defer test.MockVariableValue(&workerIdleDuration, 10*time.Millisecond)()
	defer mockBackoffDuration(5 * time.Millisecond)()

	var q *WorkerPoolQueue[int]
	var handledCount atomic.Int32
	var hasOnlyOneWorkerRunning atomic.Bool
	handler := func(items ...int) (unhandled []int) {
		handledCount.Add(int32(len(items)))
		// make each task take a different duration, and check the active worker number periodically
		var activeNums []int
		for i := 0; i < 5-items[0]%2; i++ {
			time.Sleep(workerIdleDuration * 2)
			activeNums = append(activeNums, q.GetWorkerActiveNumber())
		}
		// when the queue never becomes empty, the existing workers should keep working
		// this is not 100% guaranteed at the moment because the data race in workergroup.go is not resolved, see the TODO there
		// if the "active worker numbers" look like [2 2 ... 1 1], it means an existing worker exited and no new worker was started
		if slices.Equal([]int{1, 1}, activeNums[len(activeNums)-2:]) {
			hasOnlyOneWorkerRunning.Store(true)
		}
		return nil
	}
	q, _ = newWorkerPoolQueueForTest("test-workpoolqueue", setting.QueueSettings{Type: "channel", BatchLength: 1, MaxWorkers: 2, Length: 100}, handler, false)
	stop := runWorkerPoolQueue(q)
	for i := range 100 {
		assert.NoError(t, q.Push(i))
	}
	time.Sleep(500 * time.Millisecond)
	assert.Greater(t, int(handledCount.Load()), 4) // make sure enough items are handled during the test
	assert.False(t, hasOnlyOneWorkerRunning.Load(), "a slow handler should not block other workers from starting")
	stop()
}