Gitea source code

logger_manager.go

```go
// Copyright 2021 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package routing

import (
	"context"
	"net/http"
	"sync"
	"time"

	"code.gitea.io/gitea/modules/graceful"
	"code.gitea.io/gitea/modules/process"
)

// Event indicates when the printer is triggered
type Event int

const (
	// StartEvent at the beginning of a request
	StartEvent Event = iota
	// StillExecutingEvent the request is still executing
	StillExecutingEvent
	// EndEvent the request has ended (either completed or failed)
	EndEvent
)

// Printer is used to output the log for a request
type Printer func(trigger Event, record *requestRecord)

type requestRecordsManager struct {
	print Printer

	lock sync.Mutex

	requestRecords map[uint64]*requestRecord
	count          uint64
}
```
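The `Event` constants and the `Printer` callback are the only logging surface in this file: the manager never formats anything itself, it just invokes `print` with one of the three events and the matching `requestRecord`. Gitea wires in its real printer elsewhere in the `routing` package, so the following is only a minimal, self-contained sketch of what such a callback can look like. It reuses the `Event`/`Printer` shape from above, but `record` is a cut-down stand-in for the unexported `requestRecord` (only the `startTime` and `request` fields that appear in this file), and `consolePrinter` is a hypothetical name.

```go
package main

import (
	"log"
	"net/http"
	"time"
)

// Event and Printer mirror the declarations in logger_manager.go; record is a
// cut-down stand-in for Gitea's unexported requestRecord, keeping only the two
// fields this sketch needs.
type Event int

const (
	StartEvent Event = iota
	StillExecutingEvent
	EndEvent
)

type record struct {
	startTime time.Time
	request   *http.Request
}

type Printer func(trigger Event, r *record)

// consolePrinter is a hypothetical printer: one log line per event.
func consolePrinter(trigger Event, r *record) {
	elapsed := time.Since(r.startTime)
	switch trigger {
	case StartEvent:
		log.Printf("started  %s %s", r.request.Method, r.request.URL.Path)
	case StillExecutingEvent:
		log.Printf("slow     %s %s (running for %v)", r.request.Method, r.request.URL.Path, elapsed)
	case EndEvent:
		log.Printf("finished %s %s in %v", r.request.Method, r.request.URL.Path, elapsed)
	}
}

func main() {
	req, _ := http.NewRequest("GET", "/user/events", nil)
	var print Printer = consolePrinter
	print(StillExecutingEvent, &record{startTime: time.Now().Add(-3 * time.Second), request: req})
}
```

The file continues with the slow-request detector that produces the `StillExecutingEvent` calls: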
```go
func (manager *requestRecordsManager) startSlowQueryDetector(threshold time.Duration) {
	go graceful.GetManager().RunWithShutdownContext(func(ctx context.Context) {
		ctx, _, finished := process.GetManager().AddTypedContext(ctx, "Service: SlowQueryDetector", process.SystemProcessType, true)
		defer finished()
		// This goroutine checks all active requests every second.
		// If a request has been running for a long time (eg: /user/events), we also print a log with a "still-executing" message.
		// After the "still-executing" log is printed, the record is removed from the map to prevent duplicate logs later.
		// We do not care about accurate duration here. It just does the check periodically; 0.5s or 1.5s are both OK.
		t := time.NewTicker(time.Second)
		for {
			select {
			case <-ctx.Done():
				return
			case <-t.C:
				now := time.Now()

				var slowRequests []*requestRecord

				// find all slow requests while holding the lock
				manager.lock.Lock()
				for index, record := range manager.requestRecords {
					if now.Sub(record.startTime) < threshold {
						continue
					}
					slowRequests = append(slowRequests, record)
					delete(manager.requestRecords, index)
				}
				manager.lock.Unlock()

				// print logs for slow requests outside the lock
				for _, record := range slowRequests {
					manager.print(StillExecutingEvent, record)
				}
			}
		}
	})
}
```
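The detector holds the mutex only long enough to collect and delete the entries that have exceeded `threshold`, then calls `print` outside the lock, so a slow printer cannot block new requests from registering. Deleting an entry as soon as it is reported is what limits each request to a single "still executing" line; the `EndEvent` log in the handler below still fires, because deleting an already-removed key from the map is a no-op. Below is a stripped-down, runnable sketch of one scan iteration under those same rules, using a plain `map[uint64]time.Time` and a hypothetical `reportSlow` callback instead of `requestRecord` and `Printer`.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// tracker is a stand-in for requestRecordsManager: it only keeps start times.
type tracker struct {
	lock   sync.Mutex
	starts map[uint64]time.Time
}

// scanOnce mirrors one ticker iteration of startSlowQueryDetector: slow entries
// are collected and removed while holding the lock, then reported outside it.
func (t *tracker) scanOnce(threshold time.Duration, reportSlow func(id uint64, age time.Duration)) {
	now := time.Now()

	type slow struct {
		id  uint64
		age time.Duration
	}
	var found []slow

	t.lock.Lock()
	for id, started := range t.starts {
		age := now.Sub(started)
		if age < threshold {
			continue
		}
		found = append(found, slow{id: id, age: age})
		delete(t.starts, id) // report each slow entry at most once
	}
	t.lock.Unlock()

	for _, s := range found {
		reportSlow(s.id, s.age)
	}
}

func main() {
	tr := &tracker{starts: map[uint64]time.Time{
		1: time.Now().Add(-5 * time.Second), // slow
		2: time.Now(),                       // fast, stays tracked
	}}
	tr.scanOnce(3*time.Second, func(id uint64, age time.Duration) {
		fmt.Printf("request %d still executing after %v\n", id, age.Round(time.Second))
	})
}
```

The last function in the file is the middleware that feeds the map: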
```go
func (manager *requestRecordsManager) handler(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		record := &requestRecord{
			startTime:      time.Now(),
			request:        req,
			responseWriter: w,
		}

		// generate a record index and insert it into the map
		manager.lock.Lock()
		record.index = manager.count
		manager.count++
		manager.requestRecords[record.index] = record
		manager.lock.Unlock()

		defer func() {
			// just in case there is a panic; currently the panics are all recovered in middleware.go
			localPanicErr := recover()
			if localPanicErr != nil {
				record.lock.Lock()
				record.panicError = localPanicErr
				record.lock.Unlock()
			}

			// remove the record from the map
			manager.lock.Lock()
			delete(manager.requestRecords, record.index)
			manager.lock.Unlock()

			// log the end of the request
			manager.print(EndEvent, record)

			if localPanicErr != nil {
				// the panic wasn't recovered before us, so pass it up and let the framework handle it
				panic(localPanicErr)
			}
		}()

		req = req.WithContext(context.WithValue(req.Context(), contextKey, record))
		manager.print(StartEvent, record)
		next.ServeHTTP(w, req)
	})
}
```
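`contextKey` and the code that constructs the manager and registers its `Printer` live in other files of the `routing` package, so the sketch below does not use Gitea's API. It is a simplified, self-contained rework of the same wrap-and-defer pattern, with hypothetical names (`withRequestLog`, `ctxKey`), showing how such a middleware is mounted in front of a mux, how it re-raises a panic after logging it, and how a downstream handler reads the value stored in the request context.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/http"
	"time"
)

// ctxKey is a private context key type, analogous to contextKey in the routing package.
type ctxKey struct{}

// withRequestLog is a simplified analogue of requestRecordsManager.handler:
// it stores the start time in the request context, logs start and end, and
// re-raises any panic after logging so an outer recovery middleware can handle it.
func withRequestLog(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		start := time.Now()
		req = req.WithContext(context.WithValue(req.Context(), ctxKey{}, start))

		defer func() {
			if p := recover(); p != nil {
				log.Printf("%s %s panicked after %v: %v", req.Method, req.URL.Path, time.Since(start), p)
				panic(p) // pass the panic up, as the Gitea handler does
			}
			log.Printf("%s %s completed in %v", req.Method, req.URL.Path, time.Since(start))
		}()

		log.Printf("%s %s started", req.Method, req.URL.Path)
		next.ServeHTTP(w, req)
	})
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/hello", func(w http.ResponseWriter, req *http.Request) {
		// downstream handlers can read the value the middleware stored in the context
		start, _ := req.Context().Value(ctxKey{}).(time.Time)
		fmt.Fprintf(w, "request started at %s\n", start.Format(time.RFC3339Nano))
	})

	log.Fatal(http.ListenAndServe("127.0.0.1:8080", withRequestLog(mux)))
}
```

Re-panicking after the end-of-request log, as `handler` does with `localPanicErr`, keeps the `EndEvent` record even when an outer recovery middleware is the one that ultimately turns the panic into a 500 response.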