Gitea source code: the graceful shutdown manager (package graceful)

// Copyright 2019 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package graceful

import (
	"context"
	"runtime/pprof"
	"sync"
	"time"

	"code.gitea.io/gitea/modules/gtprof"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/process"
	"code.gitea.io/gitea/modules/setting"
)

type state uint8

const (
	stateInit state = iota
	stateRunning
	stateShuttingDown
	stateTerminate
)

type RunCanceler interface {
	Run()
	Cancel()
}

// There are some places that could inherit sockets:
//
// * HTTP or HTTPS main listener
// * HTTP or HTTPS install listener
// * HTTP redirection fallback
// * Builtin SSH listener
//
// If you add a new place you must increment this number
// and add a function to call manager.InformCleanup if it's not going to be used
const numberOfServersToCreate = 4

var (
	manager  *Manager
	initOnce sync.Once
)

// GetManager returns the Manager
func GetManager() *Manager {
	initManager(context.Background())
	return manager
}

// InitManager creates the graceful manager in the provided context
func InitManager(ctx context.Context) {
	if manager != nil {
		log.Error("graceful.InitManager called more than once")
	}
	initManager(ctx) // FIXME: this design is not right, it conflicts with the "Background" context used in GetManager
}

func initManager(ctx context.Context) {
	initOnce.Do(func() {
		manager = newGracefulManager(ctx)
		// Set the process default context to the HammerContext
		process.DefaultContext = manager.HammerContext()
	})
}
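
Note the tension the FIXME above describes: GetManager falls back to context.Background() through initOnce, so whichever of InitManager or GetManager runs first wins. A sketch of the intended call order from an application entry point (the main package is an illustration, not part of this file):

	package main

	import (
		"context"

		"code.gitea.io/gitea/modules/graceful"
	)

	func main() {
		// Initialise the singleton once with the process root context,
		// before anything calls GetManager and locks in Background().
		graceful.InitManager(context.Background())

		// Later call sites all receive the same manager.
		g := graceful.GetManager()

		// ... start servers, then block until termination completes ...
		<-g.Done()
	}
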
// RunWithCancel helps to run a function with a custom context, the Cancel function will be called at shutdown
// The Cancel function should stop the Run function in predictable time.
func (g *Manager) RunWithCancel(rc RunCanceler) {
	g.RunAtShutdown(context.Background(), rc.Cancel)
	g.runningServerWaitGroup.Add(1)
	defer g.runningServerWaitGroup.Done()
	defer func() {
		if err := recover(); err != nil {
			log.Critical("PANIC during RunWithCancel: %v\nStacktrace: %s", err, log.Stack(2))
			g.doShutdown()
		}
	}()
	rc.Run()
}
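
A minimal sketch of a RunCanceler and its registration; queueWorker is an invented example type, not part of Gitea:

	// queueWorker satisfies RunCanceler: Run blocks until Cancel closes
	// the stop channel, which is the behaviour RunWithCancel relies on
	// (it queues Cancel via RunAtShutdown, then calls Run inside the
	// running-server wait group).
	type queueWorker struct {
		stop chan struct{}
	}

	func (w *queueWorker) Run() {
		for {
			select {
			case <-w.stop:
				return // Cancel was called at shutdown
			default:
				// ... process one queue item ...
			}
		}
	}

	func (w *queueWorker) Cancel() {
		close(w.stop)
	}

	// Usage:
	//   graceful.GetManager().RunWithCancel(&queueWorker{stop: make(chan struct{})})
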
// RunWithShutdownContext takes a function that has a context to watch for shutdown.
// After the provided context is Done(), the main function must return once shutdown is complete.
// (Optionally the HammerContext may be obtained and waited for, however this should be avoided if possible.)
func (g *Manager) RunWithShutdownContext(run func(context.Context)) {
	g.runningServerWaitGroup.Add(1)
	defer g.runningServerWaitGroup.Done()
	defer func() {
		if err := recover(); err != nil {
			log.Critical("PANIC during RunWithShutdownContext: %v\nStacktrace: %s", err, log.Stack(2))
			g.doShutdown()
		}
	}()
	ctx := g.ShutdownContext()
	pprof.SetGoroutineLabels(ctx) // We don't have a label to restore back to, but this is fine
	run(ctx)
}
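
The same worker could instead be expressed against RunWithShutdownContext; the ticker loop below is a sketch of the expected pattern:

	graceful.GetManager().RunWithShutdownContext(func(ctx context.Context) {
		t := time.NewTicker(time.Second)
		defer t.Stop()
		for {
			select {
			case <-ctx.Done():
				// The ShutdownContext was cancelled: return promptly
				// so the running-server wait group can drain.
				return
			case <-t.C:
				// ... one unit of periodic work ...
			}
		}
	})
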
// RunAtTerminate adds to the terminate wait group and creates a go-routine to run the provided function at termination
func (g *Manager) RunAtTerminate(terminate func()) {
	g.terminateWaitGroup.Add(1)
	g.lock.Lock()
	defer g.lock.Unlock()
	g.toRunAtTerminate = append(g.toRunAtTerminate,
		func() {
			defer g.terminateWaitGroup.Done()
			defer func() {
				if err := recover(); err != nil {
					log.Critical("PANIC during RunAtTerminate: %v\nStacktrace: %s", err, log.Stack(2))
				}
			}()
			terminate()
		})
}

// RunAtShutdown creates a go-routine to run the provided function at shutdown
func (g *Manager) RunAtShutdown(ctx context.Context, shutdown func()) {
	g.lock.Lock()
	defer g.lock.Unlock()
	g.toRunAtShutdown = append(g.toRunAtShutdown,
		func() {
			defer func() {
				if err := recover(); err != nil {
					log.Critical("PANIC during RunAtShutdown: %v\nStacktrace: %s", err, log.Stack(2))
				}
			}()
			select {
			case <-ctx.Done():
				return
			default:
				shutdown()
			}
		})
}
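
A sketch of registering hooks for the two later lifecycle phases; the log messages are illustrative only:

	g := graceful.GetManager()

	// Queued now, executed in its own goroutine when shutdown begins
	// (skipped if the passed context is already done by then).
	g.RunAtShutdown(context.Background(), func() {
		log.Info("shutdown: stop accepting new work")
	})

	// Executed in its own goroutine at terminate, after the hammer phase,
	// tracked by the terminate wait group.
	g.RunAtTerminate(func() {
		log.Info("terminate: final cleanup")
	})
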
func (g *Manager) doShutdown() {
	if !g.setStateTransition(stateRunning, stateShuttingDown) {
		g.DoImmediateHammer()
		return
	}
	g.lock.Lock()
	g.shutdownCtxCancel()
	atShutdownCtx := pprof.WithLabels(g.hammerCtx, pprof.Labels(gtprof.LabelGracefulLifecycle, "post-shutdown"))
	pprof.SetGoroutineLabels(atShutdownCtx)
	for _, fn := range g.toRunAtShutdown {
		go fn()
	}
	g.lock.Unlock()

	if setting.GracefulHammerTime >= 0 {
		go g.doHammerTime(setting.GracefulHammerTime)
	}
	go func() {
		g.runningServerWaitGroup.Wait()
		// Mop up any remaining unclosed events.
		g.doHammerTime(0)
		<-time.After(1 * time.Second)
		g.doTerminate()
		g.terminateWaitGroup.Wait()
		g.lock.Lock()
		g.managerCtxCancel()
		g.lock.Unlock()
	}()
}

func (g *Manager) doHammerTime(d time.Duration) {
	time.Sleep(d)
	g.lock.Lock()
	select {
	case <-g.hammerCtx.Done():
	default:
		log.Warn("Setting Hammer condition")
		g.hammerCtxCancel()
		atHammerCtx := pprof.WithLabels(g.terminateCtx, pprof.Labels(gtprof.LabelGracefulLifecycle, "post-hammer"))
		pprof.SetGoroutineLabels(atHammerCtx)
	}
	g.lock.Unlock()
}

func (g *Manager) doTerminate() {
	if !g.setStateTransition(stateShuttingDown, stateTerminate) {
		return
	}
	g.lock.Lock()
	select {
	case <-g.terminateCtx.Done():
	default:
		log.Warn("Terminating")
		g.terminateCtxCancel()
		atTerminateCtx := pprof.WithLabels(g.managerCtx, pprof.Labels(gtprof.LabelGracefulLifecycle, "post-terminate"))
		pprof.SetGoroutineLabels(atTerminateCtx)
		for _, fn := range g.toRunAtTerminate {
			go fn()
		}
	}
	g.lock.Unlock()
}
// IsChild returns whether the current process is a child of a previous Gitea process
func (g *Manager) IsChild() bool {
	return g.isChild
}

// IsShutdown returns a channel which will be closed at shutdown.
// The order of closure is shutdown, hammer (potentially), terminate
func (g *Manager) IsShutdown() <-chan struct{} {
	return g.shutdownCtx.Done()
}

// IsHammer returns a channel which will be closed at hammer.
// Servers running within the running server wait group should respond to IsHammer
// if not shutdown already
func (g *Manager) IsHammer() <-chan struct{} {
	return g.hammerCtx.Done()
}
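
As the IsShutdown comment says, the contexts are cancelled in the fixed order shutdown, hammer (potentially), terminate. A sketch of a long-lived goroutine distinguishing the graceful and hammer phases (jobs, handle, and drain are hypothetical names):

	for {
		select {
		case <-graceful.GetManager().IsShutdown():
			drain() // graceful phase: finish in-flight work, then stop
			return
		case <-graceful.GetManager().IsHammer():
			return // hammer phase: abandon in-flight work immediately
		case job := <-jobs:
			handle(job)
		}
	}
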
// ServerDone declares a running server done and subtracts one from the
// running server wait group. Users probably do not want to call this
// and should use one of the RunWithShutdown* functions
func (g *Manager) ServerDone() {
	g.runningServerWaitGroup.Done()
}

func (g *Manager) setStateTransition(oldState, newState state) bool {
	g.lock.Lock()
	if g.state != oldState {
		g.lock.Unlock()
		return false
	}
	g.state = newState
	g.lock.Unlock()
	return true
}

// InformCleanup tells the cleanup wait group that we have either taken a listener or will not be taking a listener.
// At the moment the total number of servers (numberOfServersToCreate) is pre-defined as a const before global init,
// so this function MUST be called if a server is not used.
func (g *Manager) InformCleanup() {
	g.createServerCond.L.Lock()
	defer g.createServerCond.L.Unlock()
	g.createdServer++
	g.createServerCond.Signal()
}
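
Because numberOfServersToCreate is fixed at 4, a listener that is disabled by configuration must still release its slot. A sketch of such a caller, assuming the usual Gitea setting for the builtin SSH server:

	if !setting.SSH.StartBuiltinServer {
		// This slot will never start a listener, so count it as
		// created to unblock the create-server condition variable.
		graceful.GetManager().InformCleanup()
	}
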
// Done allows the manager to be viewed as a context.Context, it returns a channel that is closed when the server is finished terminating
func (g *Manager) Done() <-chan struct{} {
	return g.managerCtx.Done()
}

// Err allows the manager to be viewed as a context.Context done at Terminate
func (g *Manager) Err() error {
	return g.managerCtx.Err()
}

// Value allows the manager to be viewed as a context.Context done at Terminate
func (g *Manager) Value(key any) any {
	return g.managerCtx.Value(key)
}

// Deadline returns the zero time and false, as there is no fixed deadline for the manager; it allows the manager to be viewed as a context.Context
func (g *Manager) Deadline() (deadline time.Time, ok bool) {
	return g.managerCtx.Deadline()
}
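
Since Done, Err, Value, and Deadline are all implemented, the manager itself satisfies context.Context and can be passed anywhere a context is accepted; it is only done after termination fully completes. A sketch (the gzip invocation is arbitrary):

	// The manager is cancelled only at full termination, so the child
	// process inherits the whole manager lifetime.
	var ctx context.Context = graceful.GetManager()
	cmd := exec.CommandContext(ctx, "gzip", "--keep", "large-file")
	if err := cmd.Run(); err != nil {
		log.Error("command failed: %v", err)
	}
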