1// Copyright 2023 The Gitea Authors. All rights reserved.
2// SPDX-License-Identifier: MIT
3
4package queue
5
6import (
7 "context"
8 "fmt"
9 "sync"
10 "sync/atomic"
11 "time"
12
13 "forgejo.org/modules/json"
14 "forgejo.org/modules/log"
15 "forgejo.org/modules/process"
16 "forgejo.org/modules/setting"
17)
18
// WorkerPoolQueue is a queue that uses a pool of workers to process items
// It can use different underlying (base) queue types
type WorkerPoolQueue[T any] struct {
	ctxRun       context.Context    // run context; canceled by Cancel / ShutdownWait to stop the queue
	ctxRunCancel context.CancelFunc // cancels ctxRun

	shutdownDone    chan struct{} // ShutdownWait blocks on this; presumably closed by the run loop when shutdown completes — confirm in doRun
	shutdownTimeout atomic.Int64  // in case some buggy handlers (workers) would hang forever, "shutdown" should finish in predictable time

	origHandler HandlerFuncT[T] // the caller-provided handler
	safeHandler HandlerFuncT[T] // origHandler wrapped with panic recovery (see NewWorkerPoolQueueWithContext)

	baseQueueType string      // resolved base queue type name: "dummy", "immediate", "channel", "redis", or "level"
	baseConfig    *BaseConfig // configuration passed to the base queue constructor
	baseQueue     baseQueue   // the underlying queue implementation

	batchChan chan []T       // batches of items; presumably consumed by worker goroutines in doRun — not visible here
	flushChan chan flushType // flush requests sent by FlushWithContext

	batchLength     int        // max number of items per batch (from queue settings)
	workerNum       int        // current number of workers; guarded by workerNumMu
	workerMaxNum    int        // upper bound on workers; guarded by workerNumMu
	workerActiveNum int        // workers currently processing items; guarded by workerNumMu
	workerNumMu     sync.Mutex // guards the three worker counters above
}
44
// flushType is the signal channel for one flush request: FlushWithContext sends
// it into flushChan and then waits on it until the flush is acknowledged
// (presumably by the run loop — see doRun).
type flushType chan struct{}

// compile-time assertion that WorkerPoolQueue implements ManagedWorkerPoolQueue
var _ ManagedWorkerPoolQueue = (*WorkerPoolQueue[any])(nil)
48
49func (q *WorkerPoolQueue[T]) GetName() string {
50 return q.baseConfig.ManagedName
51}
52
53func (q *WorkerPoolQueue[T]) GetType() string {
54 return q.baseQueueType
55}
56
57func (q *WorkerPoolQueue[T]) GetItemTypeName() string {
58 var t T
59 return fmt.Sprintf("%T", t)
60}
61
62func (q *WorkerPoolQueue[T]) GetWorkerNumber() int {
63 q.workerNumMu.Lock()
64 defer q.workerNumMu.Unlock()
65 return q.workerNum
66}
67
68func (q *WorkerPoolQueue[T]) GetWorkerActiveNumber() int {
69 q.workerNumMu.Lock()
70 defer q.workerNumMu.Unlock()
71 return q.workerActiveNum
72}
73
74func (q *WorkerPoolQueue[T]) GetWorkerMaxNumber() int {
75 q.workerNumMu.Lock()
76 defer q.workerNumMu.Unlock()
77 return q.workerMaxNum
78}
79
80func (q *WorkerPoolQueue[T]) SetWorkerMaxNumber(num int) {
81 q.workerNumMu.Lock()
82 defer q.workerNumMu.Unlock()
83 q.workerMaxNum = num
84}
85
86func (q *WorkerPoolQueue[T]) GetQueueItemNumber() int {
87 cnt, err := q.baseQueue.Len(q.ctxRun)
88 if err != nil {
89 log.Error("Failed to get number of items in queue %q: %v", q.GetName(), err)
90 }
91 return cnt
92}
93
// FlushWithContext requests that the queue process everything currently in it,
// then waits for that flush to complete. It returns early with an error if ctx
// is done, the queue's run context is canceled, or timeout elapses
// (timeout <= 0 means wait indefinitely).
func (q *WorkerPoolQueue[T]) FlushWithContext(ctx context.Context, timeout time.Duration) (err error) {
	if q.isBaseQueueDummy() {
		// a dummy queue never holds items, so there is nothing to flush
		return nil
	}

	log.Debug("Try to flush queue %q with timeout %v", q.GetName(), timeout)
	defer log.Debug("Finish flushing queue %q, err: %v", q.GetName(), err)

	// "after" fires when the timeout expires; infiniteTimerC never fires,
	// which implements the "no timeout" case with a single select shape
	var after <-chan time.Time
	after = infiniteTimerC
	if timeout > 0 {
		after = time.After(timeout)
	}
	c := make(flushType)

	// send flush request
	// if it blocks, it means that there is a flush in progress or the queue hasn't been started yet
	select {
	case q.flushChan <- c:
	case <-ctx.Done():
		return ctx.Err()
	case <-q.ctxRun.Done():
		return q.ctxRun.Err()
	case <-after:
		return context.DeadlineExceeded
	}

	// wait for flush to finish
	select {
	case <-c:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	case <-q.ctxRun.Done():
		return q.ctxRun.Err()
	case <-after:
		return context.DeadlineExceeded
	}
}
133
// RemoveAllItems removes all items in the base queue
func (q *WorkerPoolQueue[T]) RemoveAllItems(ctx context.Context) error {
	return q.baseQueue.RemoveAll(ctx)
}
138
139func (q *WorkerPoolQueue[T]) marshal(data T) []byte {
140 bs, err := json.Marshal(data)
141 if err != nil {
142 log.Error("Failed to marshal item for queue %q: %v", q.GetName(), err)
143 return nil
144 }
145 return bs
146}
147
148func (q *WorkerPoolQueue[T]) unmarshal(data []byte) (t T, ok bool) {
149 if err := json.Unmarshal(data, &t); err != nil {
150 log.Error("Failed to unmarshal item from queue %q: %v", q.GetName(), err)
151 return t, false
152 }
153 return t, true
154}
155
156func (q *WorkerPoolQueue[T]) isBaseQueueDummy() bool {
157 _, isDummy := q.baseQueue.(*baseDummy)
158 return isDummy
159}
160
161// Push adds an item to the queue, it may block for a while and then returns an error if the queue is full
162func (q *WorkerPoolQueue[T]) Push(data T) error {
163 if q.isBaseQueueDummy() && q.safeHandler != nil {
164 // FIXME: the "immediate" queue is only for testing, but it really causes problems because its behavior is different from a real queue.
165 // Even if tests pass, it doesn't mean that there is no bug in code.
166 if data, ok := q.unmarshal(q.marshal(data)); ok {
167 q.safeHandler(data)
168 }
169 }
170 return q.baseQueue.PushItem(q.ctxRun, q.marshal(data))
171}
172
173// Has only works for unique queues. Keep in mind that this check may not be reliable (due to lacking of proper transaction support)
174// There could be a small chance that duplicate items appear in the queue
175func (q *WorkerPoolQueue[T]) Has(data T) (bool, error) {
176 return q.baseQueue.HasItem(q.ctxRun, q.marshal(data))
177}
178
// Run starts processing the queue by delegating to doRun (defined elsewhere
// in this package); presumably it blocks until the queue shuts down — confirm in doRun.
func (q *WorkerPoolQueue[T]) Run() {
	q.doRun()
}
182
// Cancel cancels the queue's run context, signaling it to stop. Unlike
// ShutdownWait it does not wait for the shutdown to complete.
func (q *WorkerPoolQueue[T]) Cancel() {
	q.ctxRunCancel()
}
186
// ShutdownWait shuts down the queue, waits for all workers to finish their jobs, and pushes the unhandled items back to the base queue
// It waits for all workers (handlers) to finish their jobs, in case some buggy handlers would hang forever, a reasonable timeout is needed
func (q *WorkerPoolQueue[T]) ShutdownWait(timeout time.Duration) {
	// store the timeout first so the run loop can read it after the cancel below
	q.shutdownTimeout.Store(int64(timeout))
	q.ctxRunCancel()
	<-q.shutdownDone // presumably closed by the run loop when shutdown completes — see doRun
}
194
195func getNewQueue(t string, cfg *BaseConfig, unique bool) (string, baseQueue, error) {
196 switch t {
197 case "dummy", "immediate":
198 queue, err := newBaseDummy(cfg, unique)
199
200 return t, queue, err
201 case "channel":
202 queue, err := newBaseChannelGeneric(cfg, unique)
203
204 return t, queue, err
205 case "redis":
206 queue, err := newBaseRedisGeneric(cfg, unique, nil)
207
208 return t, queue, err
209 default: // level(leveldb,levelqueue,persistable-channel)
210 queue, err := newBaseLevelQueueGeneric(cfg, unique)
211
212 return "level", queue, err
213 }
214}
215
// newWorkerPoolQueueForTest creates a queue bound to a background context;
// it is a test-only convenience wrapper around NewWorkerPoolQueueWithContext.
func newWorkerPoolQueueForTest[T any](name string, queueSetting setting.QueueSettings, handler HandlerFuncT[T], unique bool) (*WorkerPoolQueue[T], error) {
	return NewWorkerPoolQueueWithContext(context.Background(), name, queueSetting, handler, unique)
}
219
// NewWorkerPoolQueueWithContext creates a WorkerPoolQueue named "name" backed by
// the base queue type from queueSetting. A nil handler forces the "dummy" type
// (items are accepted but never processed). The handler is wrapped so that a
// panic in it is recovered and logged rather than crashing the worker.
// The returned queue is not running yet; callers start it via Run.
func NewWorkerPoolQueueWithContext[T any](ctx context.Context, name string, queueSetting setting.QueueSettings, handler HandlerFuncT[T], unique bool) (*WorkerPoolQueue[T], error) {
	if handler == nil {
		log.Debug("Use dummy queue for %q because handler is nil and caller doesn't want to process the queue items", name)
		queueSetting.Type = "dummy"
	}

	var w WorkerPoolQueue[T]
	var err error

	w.baseConfig = toBaseConfig(name, queueSetting)

	w.baseQueueType, w.baseQueue, err = getNewQueue(queueSetting.Type, w.baseConfig, unique)
	if err != nil {
		return nil, err
	}
	log.Trace("Created queue %q of type %q", name, w.baseQueueType)

	// register the queue's lifetime with the process manager so it shows up as a system process
	w.ctxRun, _, w.ctxRunCancel = process.GetManager().AddTypedContext(ctx, "Queue: "+w.GetName(), process.SystemProcessType, false)
	w.batchChan = make(chan []T)
	w.flushChan = make(chan flushType)
	w.shutdownDone = make(chan struct{})
	w.shutdownTimeout.Store(int64(shutdownDefaultTimeout))
	w.workerMaxNum = queueSetting.MaxWorkers
	w.batchLength = queueSetting.BatchLength

	w.origHandler = handler
	// safeHandler recovers from panics in the caller's handler; note that on
	// panic the recovered batch items are NOT returned as unhandled (they are dropped)
	w.safeHandler = func(t ...T) (unhandled []T) {
		defer func() {
			err := recover()
			if err != nil {
				log.Error("Recovered from panic in queue %q handler: %v\n%s", name, err, log.Stack(2))
			}
		}()
		if w.origHandler != nil {
			return w.origHandler(t...)
		}
		return nil
	}

	return &w, nil
}