
taosdata / TDengine / #3621

22 Feb 2025 11:44AM UTC coverage: 2.037% (-61.5%) from 63.573%
Build #3621 · push · travis-ci · web-flow
Merge pull request #29874 from taosdata/merge/mainto3.0 ("merge: from main to 3.0 branch")

4357 of 287032 branches covered (1.52%); branch coverage is included in the aggregate %.
0 of 174 new or added lines in 18 files covered (0.0%).
213359 existing lines in 469 files are now uncovered.
7260 of 283369 relevant lines covered (2.56%).
23737.72 hits per line

Source file: /source/util/src/tqueue.c (file coverage: 0.0%)
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define _DEFAULT_SOURCE
#include "taoserror.h"
#include "tlog.h"
#include "tqueue.h"
#include "tutil.h"

int64_t tsQueueMemoryAllowed = 0;
int64_t tsQueueMemoryUsed = 0;

int64_t tsApplyMemoryAllowed = 0;
int64_t tsApplyMemoryUsed = 0;
struct STaosQueue {
  STaosQnode   *head;
  STaosQnode   *tail;
  STaosQueue   *next;     // for queue set
  STaosQset    *qset;     // for queue set
  void         *ahandle;  // for queue set
  FItem         itemFp;
  FItems        itemsFp;
  TdThreadMutex mutex;
  int64_t       memOfItems;
  int32_t       numOfItems;
  int64_t       threadId;
  int64_t       memLimit;
  int64_t       itemLimit;
};

struct STaosQset {
  STaosQueue   *head;
  STaosQueue   *current;
  TdThreadMutex mutex;
  tsem_t        sem;
  int32_t       numOfQueues;
  int32_t       numOfItems;
};

struct STaosQall {
  STaosQnode *current;
  STaosQnode *start;
  int32_t     numOfItems;
  int64_t     memOfItems;
  int32_t     unAccessedNumOfItems;
  int64_t     unAccessMemOfItems;
};

void taosSetQueueMemoryCapacity(STaosQueue *queue, int64_t cap) { queue->memLimit = cap; }
void taosSetQueueCapacity(STaosQueue *queue, int64_t size) { queue->itemLimit = size; }

int32_t taosOpenQueue(STaosQueue **queue) {
  *queue = taosMemoryCalloc(1, sizeof(STaosQueue));
  if (*queue == NULL) {
    return terrno;
  }

  int32_t code = taosThreadMutexInit(&(*queue)->mutex, NULL);
  if (code) {
    taosMemoryFreeClear(*queue);
    return (terrno = TAOS_SYSTEM_ERROR(code));
  }

  uDebug("queue:%p is opened", *queue);
  return 0;
}

void taosSetQueueFp(STaosQueue *queue, FItem itemFp, FItems itemsFp) {
  if (queue == NULL) return;
  queue->itemFp = itemFp;
  queue->itemsFp = itemsFp;
}

void taosCloseQueue(STaosQueue *queue) {
  if (queue == NULL) return;
  STaosQnode *pTemp;
  STaosQset  *qset;

  (void)taosThreadMutexLock(&queue->mutex);
  STaosQnode *pNode = queue->head;
  queue->head = NULL;
  qset = queue->qset;
  (void)taosThreadMutexUnlock(&queue->mutex);

  if (queue->qset) {
    taosRemoveFromQset(qset, queue);
  }

  while (pNode) {
    pTemp = pNode;
    pNode = pNode->next;
    taosMemoryFree(pTemp);
  }

  (void)taosThreadMutexDestroy(&queue->mutex);
  taosMemoryFree(queue);

  uDebug("queue:%p is closed", queue);
}

bool taosQueueEmpty(STaosQueue *queue) {
  if (queue == NULL) return true;

  bool empty = false;
  (void)taosThreadMutexLock(&queue->mutex);
  if (queue->head == NULL && queue->tail == NULL && queue->numOfItems == 0 /*&& queue->memOfItems == 0*/) {
    empty = true;
  }
  (void)taosThreadMutexUnlock(&queue->mutex);

  return empty;
}

void taosUpdateItemSize(STaosQueue *queue, int32_t items) {
  if (queue == NULL) return;

  (void)taosThreadMutexLock(&queue->mutex);
  queue->numOfItems -= items;
  (void)taosThreadMutexUnlock(&queue->mutex);
}

int32_t taosQueueItemSize(STaosQueue *queue) {
  if (queue == NULL) return 0;

  (void)taosThreadMutexLock(&queue->mutex);
  int32_t numOfItems = queue->numOfItems;
  (void)taosThreadMutexUnlock(&queue->mutex);

  uTrace("queue:%p, numOfItems:%d memOfItems:%" PRId64, queue, queue->numOfItems, queue->memOfItems);
  return numOfItems;
}

int64_t taosQueueMemorySize(STaosQueue *queue) {
  (void)taosThreadMutexLock(&queue->mutex);
  int64_t memOfItems = queue->memOfItems;
  (void)taosThreadMutexUnlock(&queue->mutex);
  return memOfItems;
}

int32_t taosAllocateQitem(int32_t size, EQItype itype, int64_t dataSize, void **item) {
  int64_t alloced = -1;

  if (itype == RPC_QITEM) {
    alloced = atomic_add_fetch_64(&tsQueueMemoryUsed, size + dataSize);
    if (alloced > tsQueueMemoryAllowed) {
      uError("failed to alloc qitem, size:%" PRId64 " alloc:%" PRId64 " allowed:%" PRId64, size + dataSize, alloced,
             tsQueueMemoryAllowed);
      (void)atomic_sub_fetch_64(&tsQueueMemoryUsed, size + dataSize);
      return (terrno = TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE);
    }
  } else if (itype == APPLY_QITEM) {
    alloced = atomic_add_fetch_64(&tsApplyMemoryUsed, size + dataSize);
    if (alloced > tsApplyMemoryAllowed) {
      uDebug("failed to alloc qitem, size:%" PRId64 " alloc:%" PRId64 " allowed:%" PRId64, size + dataSize, alloced,
             tsApplyMemoryAllowed);
      (void)atomic_sub_fetch_64(&tsApplyMemoryUsed, size + dataSize);
      return (terrno = TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE);
    }
  }

  *item = NULL;
  STaosQnode *pNode = taosMemoryCalloc(1, sizeof(STaosQnode) + size);
  if (pNode == NULL) {
    if (itype == RPC_QITEM) {
      (void)atomic_sub_fetch_64(&tsQueueMemoryUsed, size + dataSize);
    } else if (itype == APPLY_QITEM) {
      (void)atomic_sub_fetch_64(&tsApplyMemoryUsed, size + dataSize);
    }
    return terrno;
  }

  pNode->dataSize = dataSize;
  pNode->size = size;
  pNode->itype = itype;
  pNode->timestamp = taosGetTimestampUs();
  uTrace("item:%p, node:%p is allocated, alloc:%" PRId64, pNode->item, pNode, alloced);
  *item = pNode->item;
  return 0;
}

void taosFreeQitem(void *pItem) {
  if (pItem == NULL) return;

  STaosQnode *pNode = (STaosQnode *)((char *)pItem - sizeof(STaosQnode));
  int64_t     alloced = -1;
  if (pNode->itype == RPC_QITEM) {
    alloced = atomic_sub_fetch_64(&tsQueueMemoryUsed, pNode->size + pNode->dataSize);
  } else if (pNode->itype == APPLY_QITEM) {
    alloced = atomic_sub_fetch_64(&tsApplyMemoryUsed, pNode->size + pNode->dataSize);
  }
  uTrace("item:%p, node:%p is freed, alloc:%" PRId64, pItem, pNode, alloced);

  taosMemoryFree(pNode);
}

int32_t taosWriteQitem(STaosQueue *queue, void *pItem) {
  int32_t     code = 0;
  STaosQnode *pNode = (STaosQnode *)(((char *)pItem) - sizeof(STaosQnode));
  pNode->timestamp = taosGetTimestampUs();
  pNode->next = NULL;

  (void)taosThreadMutexLock(&queue->mutex);
  if (queue->memLimit > 0 && (queue->memOfItems + pNode->size + pNode->dataSize) > queue->memLimit) {
    code = TSDB_CODE_UTIL_QUEUE_OUT_OF_MEMORY;
    uError("item:%p failed to put into queue:%p, queue mem limit: %" PRId64 ", reason: %s", pItem, queue,
           queue->memLimit, tstrerror(code));

    (void)taosThreadMutexUnlock(&queue->mutex);
    return code;
  } else if (queue->itemLimit > 0 && queue->numOfItems + 1 > queue->itemLimit) {
    code = TSDB_CODE_UTIL_QUEUE_OUT_OF_MEMORY;
    uError("item:%p failed to put into queue:%p, queue size limit: %" PRId64 ", reason: %s", pItem, queue,
           queue->itemLimit, tstrerror(code));
    (void)taosThreadMutexUnlock(&queue->mutex);
    return code;
  }

  if (queue->tail) {
    queue->tail->next = pNode;
    queue->tail = pNode;
  } else {
    queue->head = pNode;
    queue->tail = pNode;
  }
  queue->numOfItems++;
  queue->memOfItems += (pNode->size + pNode->dataSize);
  if (queue->qset) {
    (void)atomic_add_fetch_32(&queue->qset->numOfItems, 1);
  }

  uTrace("item:%p is put into queue:%p, items:%d mem:%" PRId64, pItem, queue, queue->numOfItems, queue->memOfItems);

  (void)taosThreadMutexUnlock(&queue->mutex);

  if (queue->qset) {
    if (tsem_post(&queue->qset->sem) != 0) {
      uError("failed to post semaphore for queue set:%p", queue->qset);
    }
  }
  return code;
}

void taosReadQitem(STaosQueue *queue, void **ppItem) {
  STaosQnode *pNode = NULL;

  (void)taosThreadMutexLock(&queue->mutex);

  if (queue->head) {
    pNode = queue->head;
    *ppItem = pNode->item;
    queue->head = pNode->next;
    if (queue->head == NULL) {
      queue->tail = NULL;
    }
    queue->numOfItems--;
    queue->memOfItems -= (pNode->size + pNode->dataSize);
    if (queue->qset) {
      (void)atomic_sub_fetch_32(&queue->qset->numOfItems, 1);
    }
    uTrace("item:%p is read out from queue:%p, items:%d mem:%" PRId64, *ppItem, queue, queue->numOfItems,
           queue->memOfItems);
  }

  (void)taosThreadMutexUnlock(&queue->mutex);
}
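/*
 * Illustrative usage sketch, not part of tqueue.c: one producer/consumer round
 * trip on a single queue (no queue set). It assumes the public declarations in
 * "tqueue.h" and that the caller may raise tsApplyMemoryAllowed directly for
 * demonstration; SDemoMsg and demoQueueRoundTrip are made-up names.
 */
typedef struct {
  int32_t seq;
} SDemoMsg;

static int32_t demoQueueRoundTrip(void) {
  STaosQueue *queue = NULL;
  int32_t     code = taosOpenQueue(&queue);
  if (code != 0) return code;

  // Optional per-queue caps; leaving a limit at 0 disables that check.
  taosSetQueueCapacity(queue, 1024);
  taosSetQueueMemoryCapacity(queue, 16 * 1024 * 1024);

  // APPLY_QITEM allocations are charged against tsApplyMemoryAllowed.
  tsApplyMemoryAllowed = 64 * 1024 * 1024;

  SDemoMsg *pMsg = NULL;
  code = taosAllocateQitem(sizeof(SDemoMsg), APPLY_QITEM, 0, (void **)&pMsg);
  if (code != 0) goto _exit;
  pMsg->seq = 1;

  code = taosWriteQitem(queue, pMsg);
  if (code != 0) {
    taosFreeQitem(pMsg);  // rejected by a limit check, caller still owns the item
    goto _exit;
  }

  void *pItem = NULL;
  taosReadQitem(queue, &pItem);  // pItem == pMsg when the queue is non-empty
  if (pItem != NULL) taosFreeQitem(pItem);

_exit:
  taosCloseQueue(queue);
  return code;
}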

int32_t taosAllocateQall(STaosQall **qall) {
  *qall = taosMemoryCalloc(1, sizeof(STaosQall));
  if (*qall == NULL) {
    return terrno;
  }
  return 0;
}

void taosFreeQall(STaosQall *qall) { taosMemoryFree(qall); }

int32_t taosReadAllQitems(STaosQueue *queue, STaosQall *qall) {
  int32_t numOfItems = 0;
  bool    empty;

  (void)taosThreadMutexLock(&queue->mutex);

  empty = queue->head == NULL;
  if (!empty) {
    memset(qall, 0, sizeof(STaosQall));
    qall->current = queue->head;
    qall->start = queue->head;
    qall->numOfItems = queue->numOfItems;
    qall->memOfItems = queue->memOfItems;

    qall->unAccessedNumOfItems = queue->numOfItems;
    qall->unAccessMemOfItems = queue->memOfItems;

    numOfItems = qall->numOfItems;

    queue->head = NULL;
    queue->tail = NULL;
    queue->numOfItems = 0;
    queue->memOfItems = 0;
    uTrace("read %d items from queue:%p, items:%d mem:%" PRId64, numOfItems, queue, queue->numOfItems,
           queue->memOfItems);
    if (queue->qset) {
      (void)atomic_sub_fetch_32(&queue->qset->numOfItems, qall->numOfItems);
    }
  }

  (void)taosThreadMutexUnlock(&queue->mutex);

  // if source queue is empty, we set destination qall to empty too.
  if (empty) {
    qall->current = NULL;
    qall->start = NULL;
    qall->numOfItems = 0;
  }
  return numOfItems;
}

int32_t taosGetQitem(STaosQall *qall, void **ppItem) {
  STaosQnode *pNode;
  int32_t     num = 0;

  pNode = qall->current;
  if (pNode) qall->current = pNode->next;

  if (pNode) {
    *ppItem = pNode->item;
    num = 1;

    qall->unAccessedNumOfItems -= 1;
    qall->unAccessMemOfItems -= pNode->dataSize;

    uTrace("item:%p is fetched", *ppItem);
  } else {
    *ppItem = NULL;
  }

  return num;
}
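/*
 * Illustrative sketch, not part of tqueue.c: draining a queue in one shot with
 * STaosQall and then walking the detached batch item by item. It assumes the
 * queue already holds items written with taosWriteQitem; demoDrainQueue is a
 * made-up name.
 */
static void demoDrainQueue(STaosQueue *queue) {
  STaosQall *qall = NULL;
  if (taosAllocateQall(&qall) != 0) return;

  // Detaches every queued node at once and leaves the queue empty.
  int32_t numOfItems = taosReadAllQitems(queue, qall);

  void *pItem = NULL;
  while (taosGetQitem(qall, &pItem) != 0) {
    // ... process pItem here ...
    taosFreeQitem(pItem);
  }

  // taosResetQitems(qall) would rewind the cursor over the same batch, but the
  // items above have already been freed, so the batch is simply discarded.
  (void)numOfItems;
  taosFreeQall(qall);
}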

int32_t taosOpenQset(STaosQset **qset) {
  *qset = taosMemoryCalloc(sizeof(STaosQset), 1);
  if (*qset == NULL) {
    return terrno;
  }

  (void)taosThreadMutexInit(&(*qset)->mutex, NULL);
  if (tsem_init(&(*qset)->sem, 0, 0) != 0) {
    taosMemoryFree(*qset);
    return terrno;
  }

  uDebug("qset:%p is opened", *qset);
  return 0;
}

void taosCloseQset(STaosQset *qset) {
  if (qset == NULL) return;

  // remove all the queues from qset
  (void)taosThreadMutexLock(&qset->mutex);
  while (qset->head) {
    STaosQueue *queue = qset->head;
    qset->head = qset->head->next;

    queue->qset = NULL;
    queue->next = NULL;
  }
  (void)taosThreadMutexUnlock(&qset->mutex);

  (void)taosThreadMutexDestroy(&qset->mutex);
  if (tsem_destroy(&qset->sem) != 0) {
    uError("failed to destroy semaphore for qset:%p", qset);
  }
  taosMemoryFree(qset);
  uDebug("qset:%p is closed", qset);
}

// tsem_post 'qset->sem', so that reader threads waiting for it
// resumes execution and return, should only be used to signal the
// thread to exit.
void taosQsetThreadResume(STaosQset *qset) {
  uDebug("qset:%p, it will exit", qset);
  if (tsem_post(&qset->sem) != 0) {
    uError("failed to post semaphore for qset:%p", qset);
  }
}

int32_t taosAddIntoQset(STaosQset *qset, STaosQueue *queue, void *ahandle) {
  if (queue->qset) return TSDB_CODE_INVALID_PARA;

  (void)taosThreadMutexLock(&qset->mutex);

  queue->next = qset->head;
  queue->ahandle = ahandle;
  qset->head = queue;
  qset->numOfQueues++;

  (void)taosThreadMutexLock(&queue->mutex);
  (void)atomic_add_fetch_32(&qset->numOfItems, queue->numOfItems);
  queue->qset = qset;
  (void)taosThreadMutexUnlock(&queue->mutex);

  (void)taosThreadMutexUnlock(&qset->mutex);

  uTrace("queue:%p is added into qset:%p", queue, qset);
  return 0;
}

void taosRemoveFromQset(STaosQset *qset, STaosQueue *queue) {
  STaosQueue *tqueue = NULL;

  (void)taosThreadMutexLock(&qset->mutex);

  if (qset->head) {
    if (qset->head == queue) {
      qset->head = qset->head->next;
      tqueue = queue;
    } else {
      STaosQueue *prev = qset->head;
      tqueue = qset->head->next;
      while (tqueue) {
        if (tqueue == queue) {
          prev->next = tqueue->next;
          break;
        } else {
          prev = tqueue;
          tqueue = tqueue->next;
        }
      }
    }

    if (tqueue) {
      if (qset->current == queue) qset->current = tqueue->next;
      qset->numOfQueues--;

      (void)taosThreadMutexLock(&queue->mutex);
      (void)atomic_sub_fetch_32(&qset->numOfItems, queue->numOfItems);
      queue->qset = NULL;
      queue->next = NULL;
      (void)taosThreadMutexUnlock(&queue->mutex);
    }
  }

  (void)taosThreadMutexUnlock(&qset->mutex);

  uDebug("queue:%p is removed from qset:%p", queue, qset);
}

int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, SQueueInfo *qinfo) {
  STaosQnode *pNode = NULL;
  int32_t     code = 0;

  if (tsem_wait(&qset->sem) != 0) {
    uError("failed to wait semaphore for qset:%p", qset);
  }

  (void)taosThreadMutexLock(&qset->mutex);

  for (int32_t i = 0; i < qset->numOfQueues; ++i) {
    if (qset->current == NULL) qset->current = qset->head;
    STaosQueue *queue = qset->current;
    if (queue) qset->current = queue->next;
    if (queue == NULL) break;
    if (queue->head == NULL) continue;

    (void)taosThreadMutexLock(&queue->mutex);

    if (queue->head) {
      pNode = queue->head;
      *ppItem = pNode->item;
      qinfo->ahandle = queue->ahandle;
      qinfo->fp = queue->itemFp;
      qinfo->queue = queue;
      qinfo->timestamp = pNode->timestamp;

      queue->head = pNode->next;
      if (queue->head == NULL) queue->tail = NULL;
      // queue->numOfItems--;
      queue->memOfItems -= (pNode->size + pNode->dataSize);
      (void)atomic_sub_fetch_32(&qset->numOfItems, 1);
      code = 1;
      uTrace("item:%p is read out from queue:%p, items:%d mem:%" PRId64, *ppItem, queue, queue->numOfItems - 1,
             queue->memOfItems);
    }

    (void)taosThreadMutexUnlock(&queue->mutex);
    if (pNode) break;
  }

  (void)taosThreadMutexUnlock(&qset->mutex);

  return code;
}

int32_t taosReadAllQitemsFromQset(STaosQset *qset, STaosQall *qall, SQueueInfo *qinfo) {
  STaosQueue *queue;
  int32_t     code = 0;

  if (tsem_wait(&qset->sem) != 0) {
    uError("failed to wait semaphore for qset:%p", qset);
  }
  (void)taosThreadMutexLock(&qset->mutex);

  for (int32_t i = 0; i < qset->numOfQueues; ++i) {
    if (qset->current == NULL) qset->current = qset->head;
    queue = qset->current;
    if (queue) qset->current = queue->next;
    if (queue == NULL) break;
    if (queue->head == NULL) continue;

    (void)taosThreadMutexLock(&queue->mutex);

    if (queue->head) {
      qall->current = queue->head;
      qall->start = queue->head;
      qall->numOfItems = queue->numOfItems;
      qall->memOfItems = queue->memOfItems;
      qall->unAccessedNumOfItems = queue->numOfItems;
      qall->unAccessMemOfItems = queue->memOfItems;

      code = qall->numOfItems;
      qinfo->ahandle = queue->ahandle;
      qinfo->fp = queue->itemsFp;
      qinfo->queue = queue;
      qinfo->timestamp = queue->head->timestamp;

      queue->head = NULL;
      queue->tail = NULL;
      // queue->numOfItems = 0;
      queue->memOfItems = 0;
      uTrace("read %d items from queue:%p, items:0 mem:%" PRId64, code, queue, queue->memOfItems);

      (void)atomic_sub_fetch_32(&qset->numOfItems, qall->numOfItems);
      for (int32_t j = 1; j < qall->numOfItems; ++j) {
        if (tsem_wait(&qset->sem) != 0) {
          uError("failed to wait semaphore for qset:%p", qset);
        }
      }
    }

    (void)taosThreadMutexUnlock(&queue->mutex);

    if (code != 0) break;
  }

  (void)taosThreadMutexUnlock(&qset->mutex);
  return code;
}
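/*
 * Illustrative sketch, not part of tqueue.c: a worker thread blocking on a
 * queue set and handling one item per wake-up. SQueueInfo and the qset API are
 * assumed to come from "tqueue.h"; handleItem() and demoQsetWorker are made-up
 * stand-ins for dispatching through the handler registered with taosSetQueueFp
 * (exposed via qinfo.fp/qinfo.ahandle).
 */
static void handleItem(void *ahandle, void *pItem) {
  // Application-specific processing would go here.
  (void)ahandle;
  (void)pItem;
}

static void demoQsetWorker(STaosQset *qset) {
  while (1) {
    void      *pItem = NULL;
    SQueueInfo qinfo = {0};

    // Blocks on the qset semaphore until an item is queued, or until
    // taosQsetThreadResume() posts the semaphore to ask the thread to exit.
    if (taosReadQitemFromQset(qset, &pItem, &qinfo) <= 0) {
      break;  // woken with no item: treat as a shutdown signal
    }

    handleItem(qinfo.ahandle, pItem);
    taosFreeQitem(pItem);
  }
}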

int32_t taosQallItemSize(STaosQall *qall) { return qall->numOfItems; }
int64_t taosQallMemSize(STaosQall *qall) { return qall->memOfItems; }

int64_t taosQallUnAccessedItemSize(STaosQall *qall) { return qall->unAccessedNumOfItems; }
int64_t taosQallUnAccessedMemSize(STaosQall *qall) { return qall->unAccessMemOfItems; }

void    taosResetQitems(STaosQall *qall) { qall->current = qall->start; }
int32_t taosGetQueueNumber(STaosQset *qset) { return qset->numOfQueues; }

void taosQueueSetThreadId(STaosQueue *pQueue, int64_t threadId) { pQueue->threadId = threadId; }

int64_t taosQueueGetThreadId(STaosQueue *pQueue) { return pQueue->threadId; }

#if 0

void taosResetQsetThread(STaosQset *qset, void *pItem) {
  if (pItem == NULL) return;
  STaosQnode *pNode = (STaosQnode *)((char *)pItem - sizeof(STaosQnode));

  (void)taosThreadMutexLock(&qset->mutex);
  for (int32_t i = 0; i < pNode->queue->numOfItems; ++i) {
    tsem_post(&qset->sem);
  }
  (void)taosThreadMutexUnlock(&qset->mutex);
}

#endif
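/*
 * Illustrative sketch, not part of tqueue.c: wiring a queue into a queue set
 * and tearing everything down. It assumes a worker like the demoQsetWorker
 * sketch above is blocked in taosReadQitemFromQset; demoQsetLifecycle is a
 * made-up name.
 */
static int32_t demoQsetLifecycle(void) {
  STaosQset  *qset = NULL;
  STaosQueue *queue = NULL;

  int32_t code = taosOpenQset(&qset);
  if (code != 0) return code;

  code = taosOpenQueue(&queue);
  if (code != 0) {
    taosCloseQset(qset);
    return code;
  }

  // ahandle is an opaque per-queue context handed back through SQueueInfo.
  code = taosAddIntoQset(qset, queue, NULL);
  if (code != 0) goto _exit;

  // ... items written with taosWriteQitem(queue, ...) now wake qset readers ...

  taosQsetThreadResume(qset);       // wake a reader blocked in taosReadQitemFromQset so it can exit
  taosRemoveFromQset(qset, queue);  // optional; taosCloseQueue also detaches the queue from its qset

_exit:
  taosCloseQueue(queue);
  taosCloseQset(qset);
  return code;
}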