• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

nickg / nvc / 16099227452

06 Jul 2025 12:45PM UTC coverage: 92.335% (+0.05%) from 92.284%
16099227452

push

github

nickg
Handle underscores in Verilog decimal literals

Fixes #1230

18 of 20 new or added lines in 1 file covered. (90.0%)

598 existing lines in 16 files now uncovered.

71069 of 76969 relevant lines covered (92.33%)

564781.41 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

94.07
/src/rt/model.c
1
//
2
//  Copyright (C) 2011-2024  Nick Gasson
3
//
4
//  This program is free software: you can redistribute it and/or modify
5
//  it under the terms of the GNU General Public License as published by
6
//  the Free Software Foundation, either version 3 of the License, or
7
//  (at your option) any later version.
8
//
9
//  This program is distributed in the hope that it will be useful,
10
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
11
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12
//  GNU General Public License for more details.
13
//
14
//  You should have received a copy of the GNU General Public License
15
//  along with this program.  If not, see <http://www.gnu.org/licenses/>.
16
//
17

18
#include "util.h"
19
#include "array.h"
20
#include "common.h"
21
#include "debug.h"
22
#include "hash.h"
23
#include "jit/jit-exits.h"
24
#include "jit/jit.h"
25
#include "lib.h"
26
#include "option.h"
27
#include "psl/psl-node.h"
28
#include "rt/assert.h"
29
#include "rt/copy.h"
30
#include "rt/heap.h"
31
#include "rt/model.h"
32
#include "rt/random.h"
33
#include "rt/structs.h"
34
#include "thread.h"
35
#include "tree.h"
36
#include "type.h"
37
#include "vlog/vlog-node.h"
38
#include "vlog/vlog-util.h"
39

40
#include <assert.h>
41
#include <inttypes.h>
42
#include <stdlib.h>
43
#include <string.h>
44

45
typedef struct _rt_callback rt_callback_t;
typedef struct _memblock memblock_t;

// Node in a singly linked list of user-registered global event callbacks
typedef struct _rt_callback {
   rt_event_fn_t  fn;     // Function invoked when the event fires
   void          *user;   // Opaque context passed back to fn
   rt_callback_t *next;   // Next callback in the list
} rt_callback_t;

// Discriminates the tagged pointers stored in the event queue heap
typedef enum {
   EVENT_TIMEOUT,
   EVENT_DRIVER,
   EVENT_PROCESS,
} event_kind_t;

#define MEMBLOCK_ALIGN   64
#define MEMBLOCK_PAGE_SZ 0x800000
#define TRIGGER_TAB_SIZE 64

// Pad static allocations under AddressSanitizer so small overruns land
// in poisoned memory instead of adjacent objects
#if ASAN_ENABLED
#define MEMBLOCK_REDZONE 16
#else
#define MEMBLOCK_REDZONE 0
#endif

// One chunk of the bump allocator backing static_alloc()
typedef struct _memblock {
   memblock_t *chain;   // Previously mapped block, or NULL
   size_t      alloc;   // Offset of the next free byte
   size_t      limit;   // Usable bytes in this block
   uint8_t     data[];  // Payload follows the header
} memblock_t;

STATIC_ASSERT(sizeof(memblock_t) <= MEMBLOCK_ALIGN);

// Per-thread model state; cache-line aligned to avoid false sharing
// between simulation threads
typedef struct {
   waveform_t    *free_waveforms;   // Free list of recycled waveforms
   tlab_t        *tlab;             // Thread-local allocation buffer
   rt_wakeable_t *active_obj;       // Object currently executing, if any
   rt_scope_t    *active_scope;     // Scope currently executing, if any
} __attribute__((aligned(64))) model_thread_t;

typedef void (*defer_fn_t)(rt_model_t *, void *);

// A single unit of deferred work
typedef struct {
   defer_fn_t  fn;
   void       *arg;
} defer_task_t;

// Growable FIFO of deferred work items
typedef struct {
   defer_task_t *tasks;
   unsigned      count;   // Number of queued tasks
   unsigned      max;     // Allocated capacity of tasks[]
} deferq_t;

// Top-level state for one elaborated simulation model
typedef struct _rt_model {
   tree_t             top;                // Root of the elaborated design
   hash_t            *scopes;             // Maps block tree_t -> rt_scope_t
   rt_scope_t        *root;               // Root scope of the hierarchy
   mspace_t          *mspace;             // Managed memory space from the JIT
   jit_t             *jit;                // JIT compiler instance
   rt_nexus_t        *nexuses;            // Head of global nexus list
   rt_nexus_t       **nexus_tail;         // Tail pointer for appending nexuses
   delta_cycle_t      stop_delta;         // Iteration limit before divergence error
   int                iteration;          // Current delta cycle, -1 during init
   uint64_t           now;                // Current simulation time
   bool               can_create_delta;
   bool               next_is_delta;      // Next wakeup is a delta cycle
   bool               force_stop;         // Set on fatal error to halt the run
   unsigned           n_signals;
   heap_t            *eventq_heap;        // Future events keyed by time
   ihash_t           *res_memo;           // Memoised resolution functions
   rt_watch_t        *watches;            // All registered signal watches
   deferq_t           procq;              // Processes to run this cycle
   deferq_t           delta_procq;        // Processes to run next delta
   deferq_t           driverq;            // Driver updates this cycle
   deferq_t           delta_driverq;      // Driver updates next delta
   deferq_t           postponedq;         // Postponed processes
   deferq_t           implicitq;          // Implicit signal updates
   heap_t            *driving_heap;
   heap_t            *effective_heap;
   rt_callback_t     *global_cbs[RT_LAST_EVENT];   // Per-event callback lists
   cover_data_t      *cover;              // Coverage database, may be NULL
   nvc_rusage_t       ready_rusage;       // Resource usage at end of setup
   nvc_lock_t         memlock;            // Guards the memblocks list
   memblock_t        *memblocks;          // Bump allocator blocks
   model_thread_t    *threads[MAX_THREADS];   // Lazily created per-thread state
   signal_list_t      eventsigs;
   bool               shuffle;            // Randomise process execution order
   bool               liveness;
   rt_trigger_t      *triggertab[TRIGGER_TAB_SIZE];   // Trigger hash table
} rt_model_t;
136

137
#define FMT_VALUES_SZ   128
138
#define NEXUS_INDEX_MIN 8
139
#define TRACE_SIGNALS   1
140
#define WAVEFORM_CHUNK  256
141
#define PENDING_MIN     4
142
#define MAX_RANK        UINT8_MAX
143

144
#define TRACE(...) do {                                 \
145
      if (unlikely(__trace_on))                         \
146
         __model_trace(get_model(), __VA_ARGS__);       \
147
   } while (0)
148

149
#define MODEL_ENTRY(m)                                                  \
150
   rt_model_t *__save __attribute__((unused, cleanup(__model_exit)));   \
151
   __model_entry(m, &__save);                                           \
152

153
#if USE_EMUTLS
154
static rt_model_t *__model = NULL;
155
#else
156
static __thread rt_model_t *__model = NULL;
157
#endif
158

159
static bool __trace_on = false;
160

161
static void *source_value(rt_nexus_t *nexus, rt_source_t *src);
162
static void free_value(rt_nexus_t *n, rt_value_t v);
163
static rt_nexus_t *clone_nexus(rt_model_t *m, rt_nexus_t *old, int offset);
164
static void put_driving(rt_model_t *m, rt_nexus_t *n, const void *value);
165
static void put_effective(rt_model_t *m, rt_nexus_t *n, const void *value);
166
static void update_implicit_signal(rt_model_t *m, rt_implicit_t *imp);
167
static bool run_trigger(rt_model_t *m, rt_trigger_t *t);
168
static void reset_scope(rt_model_t *m, rt_scope_t *s);
169
static void async_run_process(rt_model_t *m, void *arg);
170
static void async_update_property(rt_model_t *m, void *arg);
171
static void async_update_driver(rt_model_t *m, void *arg);
172
static void async_fast_driver(rt_model_t *m, void *arg);
173
static void async_fast_all_drivers(rt_model_t *m, void *arg);
174
static void async_pseudo_source(rt_model_t *m, void *arg);
175
static void async_transfer_signal(rt_model_t *m, void *arg);
176
static void async_update_implicit_signal(rt_model_t *m, void *arg);
177

178
// Format simulation time t (in femtoseconds) into buf using the largest
// unit that divides it exactly, e.g. 5000000 => "5ns".  Returns the
// number of characters written.
static int fmt_time_r(char *buf, size_t len, int64_t t, const char *sep)
{
   // Scale factors from smallest to largest, terminated by a NULL unit
   static const struct {
      int64_t time;
      const char *unit;
   } units[] = {
      { INT64_C(1), "fs" },
      { INT64_C(1000), "ps" },
      { INT64_C(1000000), "ns" },
      { INT64_C(1000000000), "us" },
      { INT64_C(1000000000000), "ms" },
      { 0, NULL }
   };

   // Advance to the largest unit that divides the time exactly
   int best = 0;
   for (; units[best + 1].unit != NULL; best++) {
      if (t % units[best + 1].time != 0)
         break;
   }

   return checked_sprintf(buf, len, "%"PRIi64"%s%s",
                          t / units[best].time, sep, units[best].unit);
}
199

200
__attribute__((format(printf, 2, 3)))
201
static void __model_trace(rt_model_t *m, const char *fmt, ...)
×
202
{
203
   va_list ap;
×
204
   va_start(ap, fmt);
×
205

206
   static nvc_lock_t lock = 0;
×
207
   {
208
      SCOPED_LOCK(lock);
×
209

210
      if (m->iteration < 0)
×
211
         fprintf(stderr, "TRACE (init): ");
×
212
      else {
213
         char buf[64];
×
214
         fmt_time_r(buf, sizeof(buf), m->now, "");
×
215
         fprintf(stderr, "TRACE %s+%d: ", buf, m->iteration);
×
216
      }
217
      vfprintf(stderr, fmt, ap);
×
218
      fprintf(stderr, "\n");
×
219
      fflush(stderr);
×
220
   }
221

222
   va_end(ap);
×
223
}
×
224

225
// Format a time value for TRACE output.  Double-buffered per thread so
// two results can appear in the same format string.
static const char *trace_time(uint64_t value)
{
   static __thread char buf[2][32];
   static __thread int which = 0;

   which = !which;
   fmt_time_r(buf[which], 32, value, "");
   return buf[which];
}
234

235
// Render the set bits of a property state mask as "{0,3,7}" for TRACE
// output.  Returns a per-thread buffer valid until the next call.
static const char *trace_states(bit_mask_t *mask)
{
   static __thread text_buf_t *tb = NULL;

   if (tb == NULL)
      tb = tb_new();   // Allocated once per thread, reused thereafter

   tb_rewind(tb);
   tb_append(tb, '{');

   // List each set bit, comma-separated after the first
   for (size_t bit = -1; mask_iter(mask, &bit); )
      tb_printf(tb, "%s%zd", tb_len(tb) > 1 ? "," : "", bit);

   tb_append(tb, '}');

   return tb_get(tb);
}
253

254
// Describe a nexus for TRACE output, e.g. "rec.sig[0:7]".  Returns a
// per-thread buffer valid until the next call.
static const char *trace_nexus(rt_nexus_t *n)
{
   static __thread text_buf_t *tb = NULL;

   if (tb == NULL)
      tb = tb_new();

   tb_rewind(tb);

   // Qualify with the parent name when the signal is an element of a
   // composite record or array scope
   rt_scope_t *parent = n->signal->parent;
   if (is_signal_scope(parent))
      tb_printf(tb, "%s.", istr(parent->name));

   tb_istr(tb, tree_ident(n->signal->where));

   // Append the element range when this nexus covers only a slice of
   // the whole signal
   if (n->width * n->size < n->signal->shared.size)
      tb_printf(tb, "[%d:%d]", n->offset, n->offset + n->width - 1);

   return tb_get(tb);
}
273

274
// Diagnostic hint callback: prefix diagnostics with the current
// simulation time, mirroring the TRACE prefix format
static void model_diag_cb(diag_t *d, void *arg)
{
   rt_model_t *m = arg;

   if (m->iteration >= 0) {
      char tmbuf[64];
      fmt_time_r(tmbuf, sizeof(tmbuf), m->now, "");
      diag_printf(d, "%s+%d: ", tmbuf, m->iteration);
   }
   else
      diag_printf(d, "(init): ");   // Still elaborating
}
13,157✔
287

288
// Make m the active model for this thread, saving the previous value in
// *save for __model_exit to restore.  Used via the MODEL_ENTRY macro.
static void __model_entry(rt_model_t *m, rt_model_t **save)
{
   // Install the diagnostic time-stamp hint only on the outermost entry
   if (__model == NULL)
      diag_add_hint_fn(model_diag_cb, m);

   *save = __model;
   __model = m;
}
31,054✔
296

297
// Restore the previously active model saved by __model_entry.  Runs
// automatically via the cleanup attribute in MODEL_ENTRY.
static void __model_exit(rt_model_t **save)
{
   __model = *save;
   *save = NULL;

   // Remove the diagnostic hint when leaving the outermost model
   if (__model == NULL)
      diag_remove_hint_fn(model_diag_cb);
}
31,049✔
305

306
// Render len bytes of signal data as hex into buf (capacity max),
// truncating with "..." when space runs out.  Returns buf.
static char *fmt_values_r(const void *values, size_t len, char *buf, size_t max)
{
   const uint8_t *src = values;
   char *out = buf;

   for (size_t i = 0; i < len; i++) {
      const ptrdiff_t remain = buf + max - out;
      if (remain <= 5) {
         // Not enough room for another byte plus terminator: mark the
         // truncation and stop
         checked_sprintf(out, remain, "...");
         break;
      }
      out += checked_sprintf(out, remain, "%02x", *src++);
   }

   return buf;
}

323
// Format the value bytes of a nexus as hex for TRACE output.  Returns a
// shared static buffer only valid until the next call.
static const char *fmt_nexus(rt_nexus_t *n, const void *values)
{
   static char buf[FMT_VALUES_SZ*2 + 2];
   return fmt_values_r(values, n->size * n->width, buf, sizeof(buf));
}
328

329
// Format an arbitrary byte buffer as hex for TRACE output.  Returns a
// shared static buffer only valid until the next call.
static const char *fmt_values(const void *values, uint32_t len)
{
   static char buf[FMT_VALUES_SZ*2 + 2];
   return fmt_values_r(values, len, buf, sizeof(buf));
}
334

335
// Format a JIT scalar for TRACE output: scalars print as hex integers,
// pointers print len bytes of the pointed-to data.  Returns a shared
// static buffer only valid until the next call.
static const char *fmt_jit_value(jit_scalar_t value, bool scalar, uint32_t len)
{
   static char buf[FMT_VALUES_SZ*2 + 2];

   if (!scalar)
      return fmt_values_r(value.pointer, len, buf, sizeof(buf));

   checked_sprintf(buf, sizeof(buf), "%"PRIx64, value.integer);
   return buf;
}
345

346
// Return this thread's per-model state, creating it on first use when
// running multi-threaded
static model_thread_t *model_thread(rt_model_t *m)
{
#if RT_MULTITHREADED
   const int my_id = thread_id();
   model_thread_t *t = m->threads[my_id];

   // Lazily allocate on the first call from this thread
   if (unlikely(t == NULL))
      t = m->threads[my_id] = xcalloc(sizeof(model_thread_t));

   return t;
#else
   assert(thread_id() == 0);
   return m->threads[0];
#endif
}
360

361
__attribute__((cold, noinline))
362
static void deferq_grow(deferq_t *dq)
10,385✔
363
{
364
   dq->max = MAX(dq->max * 2, 64);
10,385✔
365
   dq->tasks = xrealloc_array(dq->tasks, dq->max, sizeof(defer_task_t));
10,385✔
366
}
10,385✔
367

368
// Append a deferred task to the queue, growing the backing array when
// it is full
static inline void deferq_do(deferq_t *dq, defer_fn_t fn, void *arg)
{
   if (unlikely(dq->count == dq->max))
      deferq_grow(dq);   // Cold out-of-line path

   const defer_task_t task = { fn, arg };
   dq->tasks[dq->count++] = task;
}
1,569,283✔
375

376
// Invoke fn once for each queued task without consuming the queue
static void deferq_scan(deferq_t *dq, scan_fn_t fn, void *arg)
{
   const defer_task_t *t = dq->tasks;
   for (unsigned i = 0; i < dq->count; i++, t++)
      (*fn)(t->fn, t->arg, arg);
}
16✔
381

382
// Randomly permute the queued tasks (Fisher-Yates), used by the
// --shuffle option to expose order-dependent process bugs
static void deferq_shuffle(deferq_t *dq)
{
   for (int i = dq->count - 1; i >= 0; i--) {
      // Pick a random element from the unshuffled prefix [0, i] and
      // swap it into position i
      const int j = get_random() % (i + 1);
      const defer_task_t tmp = dq->tasks[i];
      dq->tasks[i] = dq->tasks[j];
      dq->tasks[j] = tmp;
   }
}
404✔
392

393
// Execute and drain every task in the queue.  Tasks must not append to
// the same queue while it runs: the asserts below check the task array
// and count were not disturbed.
static void deferq_run(rt_model_t *m, deferq_t *dq)
{
   const defer_task_t *tasks = dq->tasks;
   const int count = dq->count;

   int i = 0;
   for (; i < count - 1; i++) {
      // Prefetch ahead the next task argument to avoid cache misses
      // when we execute it
      prefetch_read(tasks[i + 1].arg);
      (*tasks[i].fn)(m, tasks[i].arg);
   }
   // Final task has nothing following it to prefetch
   for (; i < count; i++)
      (*tasks[i].fn)(m, tasks[i].arg);

   assert(dq->tasks == tasks);
   assert(dq->count == count);

   dq->count = 0;
}
3,244,903✔
413

414
// Bump-allocate size bytes with lifetime equal to the model's: nothing
// allocated here is freed individually, the whole block chain is
// unmapped in model_free()
static void *static_alloc(rt_model_t *m, size_t size)
{
   // Round up so every allocation stays MEMBLOCK_ALIGN-aligned, plus an
   // optional ASan red zone between objects
   const int total_bytes = ALIGN_UP(size + MEMBLOCK_REDZONE, MEMBLOCK_ALIGN);

   RT_LOCK(m->memlock);

   memblock_t *mb = m->memblocks;

   // Map a fresh block when there is no current block or it cannot
   // satisfy this request
   if (mb == NULL || mb->alloc + total_bytes > mb->limit) {
      const size_t pagesz =
         MAX(MEMBLOCK_PAGE_SZ, total_bytes + 2 * MEMBLOCK_ALIGN);

      mb = map_huge_pages(MEMBLOCK_ALIGN, pagesz);
      mb->alloc = MEMBLOCK_ALIGN;   // First slot is the header itself
      mb->limit = pagesz - MEMBLOCK_ALIGN;   // Allow overreading in intrinsics

      ASAN_POISON(mb->data, pagesz - sizeof(memblock_t));

      m->memblocks = mb;
   }

   assert((mb->alloc & (MEMBLOCK_ALIGN - 1)) == 0);

   void *ptr = (void *)mb + mb->alloc;
   mb->alloc += total_bytes;

   ASAN_UNPOISON(ptr, size);
   return ptr;
}
443

444
// Fire all one-shot callbacks registered for this global event.  The
// list is detached first so a callback may safely re-register itself.
static void global_event(rt_model_t *m, rt_event_t kind)
{
   rt_callback_t *it = m->global_cbs[kind];
   m->global_cbs[kind] = NULL;

   while (it != NULL) {
      rt_callback_t *next = it->next;   // Saved before free
      (*it->fn)(m, it->user);
      free(it);
      it = next;
   }
}
3,241,764✔
455

456
// Recreate the runtime scope hierarchy by walking nested block
// statements of the elaborated design
static void restore_scopes(rt_model_t *m, tree_t block, rt_scope_t *parent)
{
   rt_scope_t *s = create_scope(m, block, parent);

   // Recurse into every nested block statement
   const int nstmts = tree_stmts(block);
   for (int i = 0; i < nstmts; i++) {
      tree_t stmt = tree_stmt(block, i);
      if (tree_kind(stmt) == T_BLOCK)
         restore_scopes(m, stmt, s);
   }
}
4,097✔
467

468
// Allocate and initialise a new simulation model using the given JIT
// instance and optional coverage database.  Free with model_free().
rt_model_t *model_new(jit_t *jit, cover_data_t *cover)
{
   rt_model_t *m = xcalloc(sizeof(rt_model_t));
   m->scopes      = hash_new(256);
   m->mspace      = jit_get_mspace(jit);
   m->jit         = jit;
   m->nexus_tail  = &(m->nexuses);   // Empty list: tail points at head
   m->iteration   = -1;              // Negative iteration means "initialising"
   m->eventq_heap = heap_new(512);
   m->res_memo    = ihash_new(128);
   m->cover       = cover;

   m->driving_heap   = heap_new(64);
   m->effective_heap = heap_new(64);

   m->can_create_delta = true;

   // Per-thread state for the creating thread comes from the model's
   // own static allocator
   m->threads[thread_id()] = static_alloc(m, sizeof(model_thread_t));

   __trace_on = opt_get_int(OPT_RT_TRACE);

   return m;
}
491

492
rt_model_t *get_model(void)
5,641,700✔
493
{
494
   assert(__model != NULL);
5,641,700✔
495
#ifdef USE_EMUTLS
496
   assert(thread_id() == 0);
497
#endif
498
   return __model;
5,641,700✔
499
}
500

501
// Return the model active on this thread, or NULL when none is
// installed (unlike get_model which asserts)
rt_model_t *get_model_or_null(void)
{
   return __model;
}
505

506
static rt_wakeable_t *get_active_wakeable(void)
5,537,211✔
507
{
508
   return __model ? model_thread(__model)->active_obj : NULL;
5,537,211✔
509
}
510

511
rt_proc_t *get_active_proc(void)
5,463,755✔
512
{
513
   rt_wakeable_t *obj = get_active_wakeable();
5,463,755✔
514
   if (obj == NULL)
5,463,755✔
515
      return NULL;
516

517
   assert(obj->kind == W_PROC);
5,462,966✔
518
   return container_of(obj, rt_proc_t, wakeable);
519
}
520

521
// Return the scope currently being initialised or executed on this
// thread, or NULL
rt_scope_t *get_active_scope(rt_model_t *m)
{
   return model_thread(m)->active_scope;
}
525

526
// Return a waveform to this thread's free list for later reuse rather
// than freeing it
static void free_waveform(rt_model_t *m, waveform_t *w)
{
   model_thread_t *t = model_thread(m);
   w->next = t->free_waveforms;
   t->free_waveforms = w;
}
249,064✔
532

533
// Free heap data owned by a nexus: conversion function input arrays on
// port sources and any out-of-line pending list.  The nexus itself is
// statically allocated and not freed here.
static void cleanup_nexus(rt_model_t *m, rt_nexus_t *n)
{
   for (rt_source_t *s = &(n->sources); s; s = s->chain_input) {
      if (s->tag != SOURCE_PORT)
         continue;

      rt_conv_func_t *cf = s->u.port.conv_func;
      if (cf == NULL)
         continue;
      // Only free when inputs points at a separately allocated array;
      // inputs == tail appears to mean the inline storage is in use
      // (NOTE(review): confirm against rt_conv_func_t's definition)
      else if (cf->inputs != NULL && cf->inputs != cf->tail) {
         free(cf->inputs);
         s->u.port.conv_func->inputs = NULL;
      }
   }

   // Tag bit 0 distinguishes an allocated rt_pending_t (tag 0) from a
   // single tagged wakeable pointer which must not be freed
   if (n->pending != NULL && pointer_tag(n->pending) == 0)
      free(n->pending);
}
53,955✔
551

552
// Clean up every nexus belonging to a signal, then its index structure.
// The chain pointer is read before cleanup of each element.
static void cleanup_signal(rt_model_t *m, rt_signal_t *s)
{
   rt_nexus_t *it = &(s->nexus);
   for (int i = 0; i < s->n_nexus; i++) {
      rt_nexus_t *next = it->chain;
      cleanup_nexus(m, it);
      it = next;
   }

   free(s->index);
}
44,865✔
562

563
// Recursively free a scope: its processes, signals, aliases, properties
// and child scopes, then the scope structure itself
static void cleanup_scope(rt_model_t *m, rt_scope_t *scope)
{
   // Processes: release private data and any claimed TLAB
   for (int i = 0; i < scope->procs.count; i++) {
      rt_proc_t *proc = scope->procs.items[i];
      mptr_free(m->mspace, &(proc->privdata));
      tlab_release(proc->tlab);
      free(proc);
   }
   ACLEAR(scope->procs);

   for (int i = 0; i < scope->signals.count; i++)
      cleanup_signal(m, scope->signals.items[i]);
   ACLEAR(scope->signals);

   for (int i = 0; i < scope->aliases.count; i++)
      free(scope->aliases.items[i]);
   ACLEAR(scope->aliases);

   // Properties: free both state masks and private data
   for (int i = 0; i < scope->properties.count; i++) {
      rt_prop_t *prop = scope->properties.items[i];
      mask_free(&prop->state);
      mask_free(&prop->newstate);
      mptr_free(m->mspace, &(prop->privdata));
      free(prop);
   }
   ACLEAR(scope->properties);

   // Children must be freed before this scope itself
   for (int i = 0; i < scope->children.count; i++)
      cleanup_scope(m, scope->children.items[i]);
   ACLEAR(scope->children);

   mptr_free(m->mspace, &(scope->privdata));
   free(scope);
}
32,107✔
597

598
// Tear down a model created by model_new, releasing every resource it
// owns.  Order matters: scopes are cleaned before the static memory
// blocks backing them are unmapped.
void model_free(rt_model_t *m)
{
   if (opt_get_int(OPT_RT_STATS)) {
      nvc_rusage_t ru;
      nvc_rusage(&ru);

      // Total bytes handed out by the static bump allocator
      unsigned mem = 0;
      for (memblock_t *mb = m->memblocks; mb; mb = mb->chain)
         mem += mb->alloc;

      notef("setup:%ums run:%ums user:%ums sys:%ums maxrss:%ukB static:%ukB",
            m->ready_rusage.ms, ru.ms, ru.user, ru.sys, ru.rss, mem / 1024);
   }

   // Drain pending events; only timeout callbacks were heap-allocated
   while (heap_size(m->eventq_heap) > 0) {
      void *e = heap_extract_min(m->eventq_heap);
      if (pointer_tag(e) == EVENT_TIMEOUT)
         free(untag_pointer(e, rt_callback_t));
   }

   if (m->root != NULL)
      cleanup_scope(m, m->root);

   // Release each thread's TLAB (tlab_release presumably tolerates NULL)
   for (int i = 0; i < MAX_THREADS; i++) {
      model_thread_t *thread = m->threads[i];
      if (thread != NULL)
         tlab_release(thread->tlab);
   }

   free(m->procq.tasks);
   free(m->delta_procq.tasks);
   free(m->postponedq.tasks);
   free(m->implicitq.tasks);
   free(m->driverq.tasks);
   free(m->delta_driverq.tasks);

   for (rt_watch_t *it = m->watches, *tmp; it; it = tmp) {
      tmp = it->chain_all;
      free(it);
   }

   // Callbacks that never fired are still owned by the model
   for (int i = 0; i < RT_LAST_EVENT; i++) {
      for (rt_callback_t *it = m->global_cbs[i], *tmp; it; it = tmp) {
         tmp = it->next;
         free(it);
      }
   }

   // Unmap the static allocator's blocks last: objects freed above may
   // have lived inside them
   for (memblock_t *mb = m->memblocks, *tmp; mb; mb = tmp) {
      tmp = mb->chain;
      nvc_munmap(mb, mb->limit + MEMBLOCK_ALIGN);
   }

   heap_free(m->effective_heap);
   heap_free(m->driving_heap);
   heap_free(m->eventq_heap);
   hash_free(m->scopes);
   ihash_free(m->res_memo);
   ACLEAR(m->eventsigs);
   free(m);
}
6,486✔
659

660
// True when the scope represents the interior of a composite signal
// (record or array) rather than a design hierarchy level
bool is_signal_scope(rt_scope_t *s)
{
   return s->kind == SCOPE_RECORD || s->kind == SCOPE_ARRAY;
}
664

665
// Look up the runtime signal for a declaration within a scope, checking
// direct signals first and then aliases (which resolve to their target
// signal).  Returns NULL when not found.
rt_signal_t *find_signal(rt_scope_t *scope, tree_t decl)
{
   for (int i = 0; i < scope->signals.count; i++) {
      rt_signal_t *s = scope->signals.items[i];
      if (s->where == decl)
         return s;
   }

   for (int i = 0; i < scope->aliases.count; i++) {
      if (scope->aliases.items[i]->where != decl)
         continue;
      return scope->aliases.items[i]->signal;
   }

   return NULL;
}
679

680
// Look up the runtime process for a process statement within a scope,
// or NULL when not found
rt_proc_t *find_proc(rt_scope_t *scope, tree_t proc)
{
   for (int i = 0; i < scope->procs.count; i++) {
      rt_proc_t *p = scope->procs.items[i];
      if (p->where == proc)
         return p;
   }

   return NULL;
}
689

690
// Find a watch with callback fn observing this nexus, or NULL.  The
// pending field is a tagged pointer: NULL for none, tag 1 for a single
// wakeable, tag 0 for an allocated rt_pending_t holding several.
rt_watch_t *find_watch(rt_nexus_t *n, sig_event_fn_t fn)
{
   if (n->pending == NULL)
      return NULL;
   else if (pointer_tag(n->pending) == 1) {
      // Single waiter stored inline in the tagged pointer
      rt_wakeable_t *obj = untag_pointer(n->pending, rt_wakeable_t);
      if (obj->kind == W_WATCH) {
         rt_watch_t *w = container_of(obj, rt_watch_t, wakeable);
         if (w->fn == fn)
            return w;
      }

      return NULL;
   }
   else {
      // Multiple waiters in an out-of-line array
      rt_pending_t *p = untag_pointer(n->pending, rt_pending_t);

      for (int i = 0; i < p->count; i++) {
         rt_wakeable_t *obj = untag_pointer(p->wake[i], rt_wakeable_t);
         if (obj->kind == W_WATCH) {
            rt_watch_t *w = container_of(obj, rt_watch_t, wakeable);
            if (w->fn == fn)
               return w;
         }
      }

      return NULL;
   }
}
719

720
// Create the runtime scope for a design block and run its JIT-compiled
// initialiser.  A NULL parent creates the root scope for the whole
// elaborated design and recursively restores child scopes instead.
rt_scope_t *create_scope(rt_model_t *m, tree_t block, rt_scope_t *parent)
{
   if (parent == NULL) {
      // Root scope: may only be created once per model
      assert(m->top == NULL);
      assert(tree_kind(block) == T_ELAB);

      m->top = block;

      m->root = xcalloc(sizeof(rt_scope_t));
      m->root->kind     = SCOPE_ROOT;
      m->root->where    = block;
      m->root->privdata = MPTR_INVALID;   // Root has no JIT private data
      m->root->name     = lib_name(lib_work());

      if (tree_stmts(block) > 0)
         restore_scopes(m, tree_stmt(block, 0), m->root);

      return m->root;
   }
   else {
      rt_scope_t *s = xcalloc(sizeof(rt_scope_t));
      s->where    = block;
      s->kind     = SCOPE_INSTANCE;
      s->privdata = mptr_new(m->mspace, "block privdata");
      s->parent   = parent;
      s->name     = ident_prefix(parent->name, tree_ident(block), '.');

      APUSH(parent->children, s);

      hash_put(m->scopes, block, s);

      // Make m the active model until this function returns (cleanup
      // attribute restores the previous model automatically)
      MODEL_ENTRY(m);

      TRACE("initialise scope %s", istr(s->name));

      model_thread_t *thread = model_thread(m);
      thread->active_scope = s;

      jit_handle_t handle = jit_lazy_compile(m->jit, s->name);
      if (handle == JIT_HANDLE_INVALID)
         fatal_trace("failed to compile %s", istr(s->name));

      jit_scalar_t result, context = { .pointer = NULL };
      jit_scalar_t p2 = { .integer = 0 };

      // Non-root parents pass their private data as the outer context
      if (s->parent->kind != SCOPE_ROOT)
         context.pointer = *mptr_get(s->parent->privdata);

      tlab_t tlab = jit_null_tlab(m->jit);

      // Run the block initialiser; on failure stop the simulation
      if (jit_fastcall(m->jit, handle, &result, context, p2, &tlab))
         *mptr_get(s->privdata) = result.pointer;
      else
         m->force_stop = true;

      assert(thread->active_scope == s);
      thread->active_scope = NULL;
      return s;
   }
}
780

781
// Look up the runtime scope previously created for a design block, or
// NULL when none exists
rt_scope_t *find_scope(rt_model_t *m, tree_t container)
{
   return hash_get(m->scopes, container);
}
785

786
// Return the root scope of the elaborated design
rt_scope_t *root_scope(rt_model_t *m)
{
   return m->root;
}
790

791
// Find the direct child of a scope created for the given declaration,
// or NULL when no such child exists
rt_scope_t *child_scope(rt_scope_t *scope, tree_t decl)
{
   for (int i = 0; i < scope->children.count; i++) {
      rt_scope_t *child = scope->children.items[i];
      if (child->where == decl)
         return child;
   }

   return NULL;
}
801

802
// Return the index'th child scope; AGET bounds-checks the index
rt_scope_t *child_scope_at(rt_scope_t *scope, int index)
{
   return AGET(scope->children, index);
}
806

807
// Return a pointer to the signal's current value bytes
const void *signal_value(rt_signal_t *s)
{
   return s->shared.data;
}
811

812
// Return a pointer to the signal's previous value, which is stored
// immediately after the current value bytes
const void *signal_last_value(rt_signal_t *s)
{
   return s->shared.data + s->shared.size;
}
816

817
// Return the size in bytes of one element of the signal
uint8_t signal_size(rt_signal_t *s)
{
   return s->nexus.size;
}
821

822
// Return the number of elements in the signal (total bytes divided by
// the element size)
uint32_t signal_width(rt_signal_t *s)
{
   return s->shared.size / s->nexus.size;
}
826

827
// Widen up to max elements of the signal's value into an array of
// uint64_t, regardless of the underlying element size.  Returns the
// total element count of the signal, which may exceed max.
size_t signal_expand(rt_signal_t *s, uint64_t *buf, size_t max)
{
   const size_t total = s->shared.size / s->nexus.size;

   // Instantiated once per possible element width by FOR_ALL_SIZES
#define SIGNAL_READ_EXPAND_U64(type) do {                               \
      const type *sp = (type *)s->shared.data;                          \
      for (int i = 0; i < max && i < total; i++)                        \
         buf[i] = sp[i];                                                \
   } while (0)

   FOR_ALL_SIZES(s->nexus.size, SIGNAL_READ_EXPAND_U64);

   return total;
}
841

842
// Mark a wakeable as queued to run; it must not already be pending or
// waiting on a delayed event
static inline void set_pending(rt_wakeable_t *wake)
{
   assert(!wake->pending);
   assert(!wake->delayed);
   wake->pending = true;
}
812,438✔
848

849
// Schedule a process to run after the given delay: zero delay queues it
// for the next delta cycle, otherwise a timed event is inserted into
// the event heap
static void deltaq_insert_proc(rt_model_t *m, uint64_t delta, rt_proc_t *proc)
{
   if (delta == 0) {
      set_pending(&proc->wakeable);
      deferq_do(&m->delta_procq, async_run_process, proc);
      m->next_is_delta = true;
   }
   else {
      assert(!proc->wakeable.delayed);
      proc->wakeable.delayed = true;

      // The event kind is encoded in the pointer's low tag bits
      void *e = tag_pointer(proc, EVENT_PROCESS);
      heap_insert(m->eventq_heap, m->now + delta, e);
   }
}
442,603✔
864

865
// Schedule a driver update after the given delay: zero delay queues it
// for the next delta cycle, otherwise a timed event is inserted into
// the event heap
static void deltaq_insert_driver(rt_model_t *m, uint64_t delta,
                                 rt_source_t *source)
{
   if (delta == 0) {
      deferq_do(&m->delta_driverq, async_update_driver, source);
      m->next_is_delta = true;
   }
   else {
      // The event kind is encoded in the pointer's low tag bits
      void *e = tag_pointer(source, EVENT_DRIVER);
      heap_insert(m->eventq_heap, m->now + delta, e);
   }
}
72,817✔
877

878
// Queue a pseudo source (e.g. force/release) for the next delta cycle
static void deltaq_insert_pseudo_source(rt_model_t *m, rt_source_t *src)
{
   deferq_do(&m->delta_driverq, async_pseudo_source, src);
   m->next_is_delta = true;
}
2,714✔
883

884
// Run a process's reset call: execute its JIT code with a NULL state
// pointer so it performs initialisation, store the returned private
// state, then schedule it to run in the first delta cycle
static void reset_process(rt_model_t *m, rt_proc_t *proc)
{
   TRACE("reset process %s", istr(proc->name));

   assert(proc->tlab == NULL);
   assert(model_thread(m)->tlab == NULL);   // Not used during reset

   // Record which object/scope is executing for diagnostics and
   // callbacks triggered during reset
   model_thread_t *thread = model_thread(m);
   thread->active_obj = &(proc->wakeable);
   thread->active_scope = proc->scope;

   jit_scalar_t context = {
      .pointer = *mptr_get(proc->scope->privdata)
   };
   jit_scalar_t state = { .pointer = NULL };   // NULL state means "reset"
   jit_scalar_t result;

   tlab_t tlab = jit_null_tlab(m->jit);

   if (jit_fastcall(m->jit, proc->handle, &result, state, context, &tlab))
      *mptr_get(proc->privdata) = result.pointer;
   else
      m->force_stop = true;   // JIT reported a fatal error

   thread->active_obj = NULL;
   thread->active_scope = NULL;

   // Schedule the process to run immediately
   deltaq_insert_proc(m, 0, proc);
}
12,218✔
914

915
// Run a PSL property's reset call and size its state masks.  The JIT
// call returns both the private data pointer and the number of state
// bits the property automaton requires.
static void reset_property(rt_model_t *m, rt_prop_t *prop)
{
   TRACE("reset property %s", istr(prop->name));

   assert(model_thread(m)->tlab == NULL);   // Not used during reset

   model_thread_t *thread = model_thread(m);
   thread->active_obj = &(prop->wakeable);
   thread->active_scope = prop->scope;

   tlab_t tlab = jit_null_tlab(m->jit);

   // args: NULL state (reset), enclosing scope context, -1 state index
   jit_scalar_t args[] = {
      { .pointer = NULL },
      { .pointer = *mptr_get(prop->scope->privdata) },
      { .integer = -1 },
   }, results[2];

   if (jit_vfastcall(m->jit, prop->handle, args, ARRAY_LEN(args),
                     results, ARRAY_LEN(results), &tlab))
      *mptr_get(prop->privdata) = results[0].pointer;
   else
      m->force_stop = true;

   TRACE("needs %"PRIi64" state bits", results[1].integer);

   mask_init(&prop->state, results[1].integer);
   mask_init(&prop->newstate, results[1].integer);

   // Start in the initial state
   mask_set(&prop->state, 0);
   mask_set(&prop->state, results[1].integer - 1);   // Update prev() variables

   thread->active_obj = NULL;
   thread->active_scope = NULL;
}
296✔
950

951
// Execute one process via the JIT.  Handles trigger filtering, passing
// the process state/context pointers, and transfer of TLAB ownership
// between the thread and the process.
static void run_process(rt_model_t *m, rt_proc_t *proc)
{
   TRACE("run %sprocess %s", *mptr_get(proc->privdata) ? "" :  "stateless ",
         istr(proc->name));

   rt_wakeable_t *obj = &(proc->wakeable);

   if (obj->trigger != NULL && !run_trigger(m, obj->trigger))
      return;   // Filtered

   model_thread_t *thread = model_thread(m);
   assert(thread->tlab != NULL);
   assert(thread->tlab->alloc == 0);   // Must start with an empty TLAB

   thread->active_obj = obj;
   thread->active_scope = proc->scope;

   // Stateless processes have NULL privdata so pass a dummy pointer
   // value in so it can be distinguished from a reset
   jit_scalar_t state = {
      .pointer = *mptr_get(proc->privdata) ?: (void *)-1
   };

   jit_scalar_t result;
   jit_scalar_t context = {
      .pointer = *mptr_get(proc->scope->privdata)
   };

   // A process that claimed a private TLAB keeps using it; otherwise it
   // allocates from the thread's shared TLAB
   if (!jit_fastcall(m->jit, proc->handle, &result, state, context,
                     proc->tlab ?: thread->tlab))
      m->force_stop = true;

   // result.pointer is non-NULL when the process has live allocations
   // that must survive until it next runs
   if (proc->tlab != NULL && result.pointer == NULL) {
      tlab_release(proc->tlab);
      proc->tlab = NULL;
   }
   else if (proc->tlab == NULL && result.pointer != NULL) {
      TRACE("claiming TLAB for private use (used %u/%u)",
            thread->tlab->alloc, thread->tlab->limit);
      proc->tlab = thread->tlab;
      thread->tlab = tlab_acquire(m->mspace);
   }
   else if (proc->tlab == NULL)
      tlab_reset(thread->tlab);   // All data inside the TLAB is dead

   thread->active_obj = NULL;
   thread->active_scope = NULL;
}
999

1000
// Recursively reset a scope at elaboration: descend into child scopes
// first, then reset this scope's processes and PSL properties.
static void reset_scope(rt_model_t *m, rt_scope_t *s)
{
   for (int idx = 0; idx < s->children.count; idx++)
      reset_scope(m, s->children.items[idx]);

   for (int idx = 0; idx < s->procs.count; idx++)
      reset_process(m, s->procs.items[idx]);

   for (int idx = 0; idx < s->properties.count; idx++)
      reset_property(m, s->properties.items[idx]);
}
1011

1012
// Build (or fetch from the cache) a memoisation table for a resolution
// function.  For small enumerated types the function is evaluated for
// every one- and two-input combination so later resolution becomes a
// table lookup; if it behaves as the identity it is flagged R_IDENT.
static res_memo_t *memo_resolution_fn(rt_model_t *m, rt_signal_t *signal,
                                      ffi_closure_t closure, int32_t nlits,
                                      res_flags_t flags)
{
   // Optimise some common resolution functions by memoising them

   res_memo_t *memo = ihash_get(m->res_memo, closure.handle);
   if (memo != NULL)
      return memo;

   memo = static_alloc(m, sizeof(res_memo_t));
   memo->closure = closure;
   memo->flags   = flags;

   ihash_put(m->res_memo, memo->closure.handle, memo);

   // Only memoise small enumerated element types (tables are 16x16 max)
   if (nlits == 0 || nlits > 16)
      return memo;

   // Probing calls below may legitimately fail; suppress errors and
   // restore the previous severity/exit state afterwards
   const vhdl_severity_t old_severity = set_exit_severity(SEVERITY_NOTE);

   jit_set_silent(m->jit, true);

   // Memoise the function for all two value cases

   for (int i = 0; i < nlits; i++) {
      for (int j = 0; j < nlits; j++) {
         int8_t args[2] = { i, j };
         jit_scalar_t result;
         if (jit_try_call(m->jit, memo->closure.handle, &result,
                          memo->closure.context, args, 2)) {
            assert(result.integer < nlits && result.integer >= 0);
            memo->tab2[i][j] = result.integer;
         }
      }
   }

   // Memoise the function for all single value cases and determine if the
   // function behaves like the identity function

   bool identity = true;
   for (int i = 0; i < nlits; i++) {
      int8_t args[1] = { i };
      jit_scalar_t result;
      if (jit_try_call(m->jit, memo->closure.handle, &result,
                       memo->closure.context, args, 1)) {
         memo->tab1[i] = result.integer;
         identity = identity && (memo->tab1[i] == i);
      }
   }

   // Only trust the tables if no probing call raised an error
   if (model_exit_status(m) == 0) {
      memo->flags |= R_MEMO;
      if (identity)
         memo->flags |= R_IDENT;
   }

   TRACE("memoised resolution function %s for type %s",
         istr(jit_get_name(m->jit, closure.handle)),
         type_pp(tree_type(signal->where)));

   jit_set_silent(m->jit, false);
   jit_reset_exit_status(m->jit);

   set_exit_severity(old_severity);

   return memo;
}
1080

1081
// Pointer to this nexus's effective value within the signal's shared
// data area (first of the per-signal value copies).
static inline void *nexus_effective(rt_nexus_t *n)
{
   rt_signal_t *s = n->signal;
   return s->shared.data + n->offset;
}
1085

1086
// Pointer to this nexus's last value: a second copy of the signal data
// stored immediately after the effective value.
static inline void *nexus_last_value(rt_nexus_t *n)
{
   rt_signal_t *s = n->signal;
   return s->shared.data + s->shared.size + n->offset;
}
1090

1091
// Pointer to this nexus's driving value, which only exists for nets
// with a separate effective value (third per-signal value copy).
static inline void *nexus_driving(rt_nexus_t *n)
{
   assert(n->flags & NET_F_EFFECTIVE);
   rt_signal_t *s = n->signal;
   return s->shared.data + 2*s->shared.size + n->offset;
}
1096

1097
// Pointer to this nexus's initial value.  NOTE(review): occupies the
// same third slot as the driving value — presumably NET_F_HAS_INITIAL
// and NET_F_EFFECTIVE are mutually exclusive; confirm in structs.h.
static inline void *nexus_initial(rt_nexus_t *n)
{
   assert(n->flags & NET_F_HAS_INITIAL);
   rt_signal_t *s = n->signal;
   return s->shared.data + 2*s->shared.size + n->offset;
}
1102

1103
// Allocate storage for one value of nexus n.  Values up to
// sizeof(rt_value_t) are stored inline in the returned struct; larger
// ones get external memory, reused from the nexus's intrusive free
// list when possible (each free entry stores the next pointer in its
// first bytes).
static rt_value_t alloc_value(rt_model_t *m, rt_nexus_t *n)
{
   rt_value_t result = {};

   const size_t valuesz = n->size * n->width;
   if (valuesz > sizeof(rt_value_t)) {
      if (n->free_value != NULL) {
         // Pop the head of the free list
         result.ext = n->free_value;
         n->free_value = *(void **)result.ext;
      }
      else
         result.ext = static_alloc(m, valuesz);
   }

   return result;
}
1119

1120
// Return a value's external storage to the nexus's free list.  Inline
// values need no action.
static void free_value(rt_nexus_t *n, rt_value_t v)
{
   const size_t nbytes = n->width * n->size;
   if (nbytes > sizeof(rt_value_t)) {
      // Push onto the intrusive free list: the first bytes of the
      // freed block hold the next pointer
      *(void **)v.ext = n->free_value;
      n->free_value = v.ext;
   }
}
1128

1129
// Pointer to the bytes of a value: inline storage for small values,
// external storage otherwise.
static inline uint8_t *value_ptr(rt_nexus_t *n, rt_value_t *v)
{
   const size_t nbytes = n->width * n->size;
   return nbytes <= sizeof(rt_value_t) ? v->bytes : v->ext;
}
1137

1138
// Copy raw bytes at p into value v.  Small values use a single 64-bit
// load for speed (which may read past the logical size — suppressed
// under ASan by falling back to memcpy); large values copy into the
// external buffer.
static void copy_value_ptr(rt_nexus_t *n, rt_value_t *v, const void *p)
{
   const size_t valuesz = n->width * n->size;
   if (valuesz <= sizeof(rt_value_t)) {
#if ASAN_ENABLED
      memcpy(v->bytes, p, valuesz);
#else
      v->qword = unaligned_load(p, uint64_t);
#endif
   }
   else
      memcpy(v->ext, p, valuesz);
}
1151

1152
// Compare two values of nexus n for byte equality.  Inline values
// compare as a single 64-bit word; external values compare byte-wise.
static inline bool cmp_values(rt_nexus_t *n, rt_value_t a, rt_value_t b)
{
   const size_t nbytes = n->width * n->size;
   return nbytes <= sizeof(rt_value_t)
      ? a.qword == b.qword
      : cmp_bytes(a.ext, b.ext, nbytes);
}
1160

1161
// Pseudo sources (force/deposit/implicit) are runtime artefacts, not
// real drivers or port connections from the design.
static inline bool is_pseudo_source(source_kind_t kind)
{
   switch (kind) {
   case SOURCE_FORCING:
   case SOURCE_DEPOSIT:
   case SOURCE_IMPLICIT:
      return true;
   default:
      return false;
   }
}
1166

1167
// Adding a second source to an unresolved signal is an error (LRM: a
// resolution function is required for multiple sources).  Emits a
// fatal diagnostic pointing at the existing source and the one being
// added, then aborts the simulation.  Pseudo sources are exempt.
static void check_multiple_sources(rt_nexus_t *n, source_kind_t kind)
{
   if (n->signal->resolution != NULL || is_pseudo_source(kind))
      return;

   diag_t *d;
   if (is_signal_scope(n->signal->parent)) {
      // The signal is an element of a composite: report against the
      // outermost composite signal
      rt_scope_t *root = n->signal->parent;
      for (; is_signal_scope(root->parent); root = root->parent);

      d = diag_new(DIAG_FATAL, tree_loc(root->where));
      diag_printf(d, "element %s of signal %s has multiple sources",
                  istr(tree_ident(n->signal->where)),
                  istr(tree_ident(root->where)));
      diag_hint(d, tree_loc(n->signal->where), "element %s declared here",
                istr(tree_ident(n->signal->where)));
      diag_hint(d, tree_loc(root->where), "composite signal %s declared with "
                "unresolved type %s", istr(tree_ident(root->where)),
                type_pp(tree_type(root->where)));
   }
   else {
      d = diag_new(DIAG_FATAL, tree_loc(n->signal->where));
      diag_printf(d, "unresolved signal %s has multiple sources",
                  istr(tree_ident(n->signal->where)));
      diag_hint(d, tree_loc(n->signal->where), "signal %s declared with "
                "unresolved type %s", istr(tree_ident(n->signal->where)),
                type_pp(tree_type(n->signal->where)));
   }

   // Describe the already-existing source
   if (n->sources.tag == SOURCE_DRIVER) {
      const rt_proc_t *p = n->sources.u.driver.proc;
      diag_hint(d, tree_loc(p->where), "driven by process %s", istr(p->name));
   }
   else if (n->sources.tag == SOURCE_PORT) {
      const rt_signal_t *s = n->sources.u.port.input->signal;
      tree_t where = s->where;
      if (is_signal_scope(s->parent)) {
         // Walk up to the outermost composite declaration
         for (rt_scope_t *it = s->parent; is_signal_scope(it); it = it->parent)
            where = it->where;
      }

      if (tree_kind(where) == T_PORT_DECL)
         diag_hint(d, tree_loc(where), "connected to %s port %s",
                   port_mode_str(tree_subkind(where)), istr(tree_ident(where)));
      else
         diag_hint(d, tree_loc(where), "connected to signal %s",
                   istr(tree_ident(where)));
   }

   // Describe the source being added now
   if (kind == SOURCE_DRIVER) {
      const rt_proc_t *p = get_active_proc();
      diag_hint(d, tree_loc(p->where), "driven by process %s", istr(p->name));
   }

   diag_emit(d);
   jit_abort_with_status(EXIT_FAILURE);
}
1224

1225
// Add a new source (driver, port connection, or pseudo source) to a
// nexus.  The first source is stored inline in the nexus; additional
// sources are chained and trigger the multiple-source legality check.
static rt_source_t *add_source(rt_model_t *m, rt_nexus_t *n, source_kind_t kind)
{
   rt_source_t *src = NULL;
   if (n->n_sources == 0)
      src = &(n->sources);
   else {
      check_multiple_sources(n, kind);   // May abort the simulation

      // Append to the end of the input chain
      rt_source_t **p;
      for (p = &(n->sources.chain_input); *p; p = &((*p)->chain_input))
         ;
      *p = src = static_alloc(m, sizeof(rt_source_t));
   }

   // The only interesting values of n_sources are 0, 1, and 2
   if (n->n_sources < UINT8_MAX)
      n->n_sources++;

   // The fast-driver optimisation only applies with a single source
   if (n->n_sources > 1)
      n->flags &= ~NET_F_FAST_DRIVER;

   src->chain_input  = NULL;
   src->chain_output = NULL;
   src->tag          = kind;
   src->disconnected = 0;
   src->fastqueued   = 0;
   src->sigqueued    = 0;
   src->pseudoqueued = 0;

   switch (kind) {
   case SOURCE_DRIVER:
      {
         src->u.driver.proc  = NULL;
         src->u.driver.nexus = n;

         // Initialise the waveform list with a sentinel transaction
         // that never fires
         waveform_t *w0 = &(src->u.driver.waveforms);
         w0->when  = TIME_HIGH;
         w0->next  = NULL;
      }
      break;

   case SOURCE_PORT:
      src->u.port.conv_func = NULL;
      src->u.port.input     = NULL;
      src->u.port.output    = n;
      break;

   case SOURCE_DEPOSIT:
   case SOURCE_FORCING:
   case SOURCE_IMPLICIT:
      src->u.pseudo.nexus = n;
      src->u.pseudo.value = alloc_value(m, n);
      break;
   }

   return src;
}
1282

1283
// Map a signal element offset to an index table slot.  A non-negative
// `how` means offsets are strided by a power of two (shift); negative
// means a division by the common stride.
static inline int map_index(rt_index_t *index, unsigned offset)
{
   const int how = index->how;
   if (likely(how >= 0))
      return offset >> how;
   else
      return offset / -how;
}
1290

1291
// Inverse of map_index: convert an index table slot back to a signal
// element offset.
static inline int unmap_index(rt_index_t *index, unsigned key)
{
   const int how = index->how;
   if (likely(how >= 0))
      return key << how;
   else
      return key * -how;
}
1298

1299
// True if this offset lands exactly on an index table slot, i.e. it is
// a multiple of the index stride.
static inline bool index_valid(rt_index_t *index, unsigned offset)
{
   const int how = index->how;
   if (likely(how >= 0))
      return (offset >> how) << how == offset;
   else
      return offset % -how == 0;
}
1306

1307
// Build a lookup table mapping element offsets to nexuses for signals
// with many nexuses.  Chooses between a power-of-two shift and a GCD
// stride for the offset->slot mapping.  (Also removes a duplicated
// `gcd > 1` conjunct in the stride selection.)
static void build_index(rt_signal_t *signal)
{
   const unsigned signal_w = signal->shared.size / signal->nexus.size;

   int shift = INT_MAX, gcd = 0;
   rt_nexus_t *n = &(signal->nexus);
   for (int i = 0, offset = 0; i < signal->n_nexus;
        i++, offset += n->width, n = n->chain) {
      if (offset > 0) {
         // Track the largest power-of-two dividing every offset
         const int tzc = __builtin_ctz(offset);
         shift = MIN(shift, tzc);
      }

      // Compute greatest common divisor (Euclid, folded over offsets)
      for (int b = offset; b > 0;) {
         int temp = b;
         b = gcd % b;
         gcd = temp;
      }
   }

   // Prefer the GCD stride only when it beats the power-of-two shift
   const int how = gcd > 1 && gcd > (1 << shift) ? -gcd : shift;
   const int count =
      how < 0 ? (signal_w - how - 1) / -how : (signal_w >> shift) + 1;

   TRACE("create index for signal %s how=%d count=%d",
         istr(tree_ident(signal->where)), how, count);

   rt_index_t *index = xcalloc_flex(sizeof(rt_index_t), count,
                                    sizeof(rt_nexus_t *));
   index->how = how;

   n = &(signal->nexus);
   for (int i = 0, offset = 0; i < signal->n_nexus;
        i++, offset += n->width, n = n->chain) {
      index->nexus[map_index(index, offset)] = n;
   }

   free(signal->index);
   signal->index = index;
}
1348

1349
// Insert a newly created nexus into the signal's offset index.  If the
// new nexus's offset is not a multiple of the index stride the whole
// index must be rebuilt with a finer stride.
static void update_index(rt_signal_t *s, rt_nexus_t *n)
{
   const unsigned offset = n->offset / n->size;   // Offset in elements

   if (!index_valid(s->index, offset)) {
      TRACE("rebuild index for %s offset=%d how=%d",
            istr(tree_ident(s->where)), offset, s->index->how);
      build_index(s);
      assert(s->index->nexus[map_index(s->index, offset)] == n);
   }
   else {
      const int elt = map_index(s->index, offset);
      assert(s->index->nexus[elt] == NULL);
      s->index->nexus[elt] = n;
   }
}
1365

1366
// Find the nexus covering *offset (in elements), adjusting *offset to
// be relative to the returned nexus.  Fix: the fast-path test compared
// the `offset` POINTER against 0 — callers always pass a valid
// pointer, making that conjunct dead; the intended test is on the
// dereferenced value, which is safe because the first nexus always
// starts at element offset zero.
static rt_nexus_t *lookup_index(rt_signal_t *s, int *offset)
{
   if (likely(*offset == 0 || s->index == NULL))
      return &(s->nexus);
   else if (!index_valid(s->index, *offset)) {
      // The index stride no longer divides this offset: drop the index
      // and fall back to a linear scan from the first nexus
      TRACE("invalid index for %s offset=%d how=%d", istr(tree_ident(s->where)),
            *offset, s->index->how);
      free(s->index);
      s->index = NULL;
      return &(s->nexus);
   }
   else {
      // Scan backwards from the mapped slot to the nearest nexus at or
      // before this offset
      const int key = map_index(s->index, *offset);
      for (int k = key; k >= 0; k--) {
         rt_nexus_t *n = s->index->nexus[k];
         if (n != NULL) {
            *offset = unmap_index(s->index, key - k);
            return n;
         }
      }
      return &(s->nexus);
   }
}
1389

1390
// Allocate a waveform from the per-thread free list, refilling it with
// a fresh chunk of 32-byte slots when empty.
static waveform_t *alloc_waveform(rt_model_t *m)
{
   model_thread_t *thread = model_thread(m);

   if (thread->free_waveforms == NULL) {
      // Ensure waveforms are always within one cache line
      STATIC_ASSERT(sizeof(waveform_t) <= 32);
      char *mem = static_alloc(m, WAVEFORM_CHUNK * 32);
      // Entry 0 is returned directly; the rest seed the free list
      for (int i = 1; i < WAVEFORM_CHUNK; i++)
         free_waveform(m, (waveform_t *)(mem + i*32));

      return (waveform_t *)mem;
   }
   else {
      waveform_t *w = thread->free_waveforms;
      thread->free_waveforms = w->next;
      prefetch_write(w->next);   // Warm the next allocation's cache line
      w->next = NULL;
      return w;
   }
}
1411

1412
// Register nexus `in` as an input of conversion function `cf`, growing
// the inputs array as needed.  The first growth out of the inline
// `tail` storage copies into fresh heap memory; later growths realloc.
static void add_conversion_input(rt_model_t *m, rt_conv_func_t *cf,
                                 rt_nexus_t *in)
{
   if (cf->ninputs == cf->maxinputs) {
      // Round the new capacity up to a whole memory block
      const size_t per_block = MEMBLOCK_ALIGN / sizeof(conv_input_t);
      cf->maxinputs = ALIGN_UP(MAX(4, cf->maxinputs * 2), per_block);

      if (cf->inputs == cf->tail) {
         // Still using the inline storage: cannot realloc, copy out
         void *new = xmalloc_array(cf->maxinputs, sizeof(conv_input_t));
         memcpy(new, cf->inputs, cf->ninputs * sizeof(conv_input_t));
         cf->inputs = new;
      }
      else
         cf->inputs = xrealloc_array(cf->inputs, cf->maxinputs,
                                     sizeof(conv_input_t));
   }

   cf->inputs[cf->ninputs++] = (conv_input_t){
      .nexus  = in,
      .result = alloc_value(m, in),   // Holds the converted value
   };
}
1434

1435
// Locate the converted-result slot for nexus n among a conversion
// function's inputs, or NULL if n is not an input.  Linear scan: the
// input count is expected to be small.
static rt_value_t *find_conversion_input(rt_conv_func_t *cf, rt_nexus_t *n)
{
   conv_input_t *it = cf->inputs, *end = it + cf->ninputs;
   for (; it != end; it++) {
      if (it->nexus == n)
         return &(it->result);
   }

   return NULL;
}
1444

1445
// Split an existing value when its nexus is cloned: v_new receives the
// tail of v_old starting at element `offset`, where `nexus` is the new
// (tail) nexus.  Handles all four combinations of inline vs external
// storage for the head and tail parts.
static void split_value(rt_nexus_t *nexus, rt_value_t *v_new,
                        rt_value_t *v_old, int offset)
{
   const int split = offset * nexus->size;                  // Head bytes
   const int oldsz = (offset + nexus->width) * nexus->size; // Total bytes
   const int newsz = nexus->width * nexus->size;            // Tail bytes

   if (split > sizeof(rt_value_t) && newsz > sizeof(rt_value_t)) {
      // Split the external memory with no copying
      v_new->ext = (char *)v_old->ext + split;
   }
   else if (newsz > sizeof(rt_value_t)) {
      // Wasting up to eight bytes at the start of the the old waveform
      char *ext = v_old->ext;
      v_old->qword = *(uint64_t *)ext;   // Head becomes inline
      v_new->ext = ext + split;
   }
   else if (split > sizeof(rt_value_t)) {
      // Wasting up to eight bytes at the end of the the old waveform
      memcpy(v_new->bytes, v_old->ext + split, newsz);
   }
   else if (oldsz > sizeof(rt_value_t)) {
      // The memory backing this waveform is lost now but this can only
      // happen a bounded number of times as nexuses only ever shrink
      char *ext = v_old->ext;
      memcpy(v_new->bytes, ext + split, newsz);
      v_old->qword = *(uint64_t *)ext;
   }
   else {
      // This trick with shifting probably only works on little-endian
      // systems
      v_new->qword = v_old->qword >> (split * 8);
   }
}
1479

1480
// Duplicate source `old` onto the newly split tail nexus, splitting
// driver waveforms and recursively splitting connected input nexuses
// for port sources.
static void clone_source(rt_model_t *m, rt_nexus_t *nexus,
                         rt_source_t *old, int offset)
{
   rt_source_t *new = add_source(m, nexus, old->tag);

   switch (old->tag) {
   case SOURCE_PORT:
      {
         new->u.port.input = old->u.port.input;

         if (old->u.port.conv_func != NULL)
            new->u.port.conv_func = old->u.port.conv_func;
         else {
            if (old->u.port.input->width == offset)
               new->u.port.input = old->u.port.input->chain;  // Cycle breaking
            else {
               // Split the input nexus at the same element offset
               RT_LOCK(old->u.port.input->signal->lock);
               rt_nexus_t *n = clone_nexus(m, old->u.port.input, offset);
               new->u.port.input = n;
            }
            assert(new->u.port.input->width == nexus->width);
         }
      }
      break;

   case SOURCE_DRIVER:
      {
         new->u.driver.proc = old->u.driver.proc;

         // Current transaction
         waveform_t *w_new = &(new->u.driver.waveforms);
         waveform_t *w_old = &(old->u.driver.waveforms);
         w_new->when = w_old->when;
         w_new->next = NULL;

         split_value(nexus, &w_new->value, &w_old->value, offset);

         // Pending fast driver update
         if ((nexus->flags & NET_F_FAST_DRIVER) && old->fastqueued) {
            rt_nexus_t *n0 = &(nexus->signal->nexus);
            if (!n0->sources.sigqueued)
               deferq_do(&m->delta_driverq, async_fast_driver, new);
            new->fastqueued = 1;
         }

         new->was_active = old->was_active;

         // Future transactions
         for (w_old = w_old->next; w_old; w_old = w_old->next) {
            w_new = (w_new->next = alloc_waveform(m));
            w_new->when = w_old->when;
            w_new->next = NULL;

            split_value(nexus, &w_new->value, &w_old->value, offset);

            // Re-schedule the split transaction on the new source
            assert(w_old->when >= m->now);
            deltaq_insert_driver(m, w_new->when - m->now, new);
         }
      }
      break;

   case SOURCE_FORCING:
   case SOURCE_DEPOSIT:
      split_value(nexus, &(new->u.pseudo.value), &(old->u.pseudo.value),
                  offset);
      assert(!old->pseudoqueued);   // TODO
      break;

   case SOURCE_IMPLICIT:
      break;
   }
}
1552

1553
// Split nexus `old` at element `offset`: the old nexus keeps the first
// `offset` elements and a new nexus is created for the rest, with all
// sources, outputs, pending wakeups and index entries fixed up.
// Returns the new (tail) nexus.
static rt_nexus_t *clone_nexus(rt_model_t *m, rt_nexus_t *old, int offset)
{
   assert(offset < old->width);

   rt_signal_t *signal = old->signal;
   MULTITHREADED_ONLY(assert_lock_held(&signal->lock));
   signal->n_nexus++;

   if (signal->n_nexus == 2 && (old->flags & NET_F_FAST_DRIVER))
      signal->shared.flags |= NET_F_FAST_DRIVER;

   rt_nexus_t *new = static_alloc(m, sizeof(rt_nexus_t));
   new->width        = old->width - offset;
   new->size         = old->size;
   new->signal       = signal;
   new->offset       = old->offset + offset * old->size;
   new->chain        = old->chain;
   new->flags        = old->flags;
   new->active_delta = old->active_delta;
   new->event_delta  = old->event_delta;
   new->last_event   = old->last_event;
   new->rank         = old->rank;

   old->chain = new;
   old->width = offset;

   // Share or copy the pending-wakeup set: tag 1 is a single inline
   // wakeable, otherwise a heap-allocated list that must be duplicated
   if (old->pending == NULL)
      new->pending = NULL;
   else if (pointer_tag(old->pending) == 1)
      new->pending = old->pending;
   else {
      rt_pending_t *old_p = untag_pointer(old->pending, rt_pending_t);
      rt_pending_t *new_p = xmalloc_flex(sizeof(rt_pending_t), old_p->count,
                                         sizeof(rt_wakeable_t *));

      new_p->count = new_p->max = old_p->count;

      for (int i = 0; i < old_p->count; i++)
         new_p->wake[i] = old_p->wake[i];

      new->pending = tag_pointer(new_p, 0);
   }

   if (new->chain == NULL)
      m->nexus_tail = &(new->chain);

   // Duplicate every source onto the tail nexus
   if (old->n_sources > 0) {
      for (rt_source_t *it = &(old->sources); it; it = it->chain_input)
         clone_source(m, new, it, offset);
   }

   // Fix up fanout: each output of the old nexus must also observe the
   // new tail nexus
   for (rt_source_t *old_o = old->outputs; old_o; old_o = old_o->chain_output) {
      assert(old_o->tag == SOURCE_PORT || old_o->tag == SOURCE_IMPLICIT);

      if (old_o->tag == SOURCE_PORT && old_o->u.port.conv_func != NULL) {
         new->outputs = old_o;
         add_conversion_input(m, old_o->u.port.conv_func, new);
      }
      else {
         rt_nexus_t *out_n;
         if (old_o->tag == SOURCE_IMPLICIT)
            out_n = old_o->u.port.output;
         else if (old_o->u.port.output->width == offset)
            out_n = old_o->u.port.output->chain;   // Cycle breaking
         else {
            RT_LOCK(old_o->u.port.output->signal->lock);
            out_n = clone_nexus(m, old_o->u.port.output, offset);
         }

         // Re-point the matching source of the downstream nexus at the
         // new tail nexus
         for (rt_source_t *s = &(out_n->sources); s; s = s->chain_input) {
            if (s->tag != old_o->tag)
               continue;
            else if (s->u.port.input == new || s->u.port.input == old) {
               s->u.port.input = new;
               s->chain_output = new->outputs;
               new->outputs = s;
               break;
            }
         }
      }
   }

   if (signal->index == NULL && signal->n_nexus >= NEXUS_INDEX_MIN)
      build_index(signal);
   else if (signal->index != NULL)
      update_index(signal, new);

   return new;
}
1642

1643
// Walk the nexus chain splitting nexuses so that [offset, offset+count)
// is covered by whole nexuses, returning the first nexus of the range.
static rt_nexus_t *split_nexus_slow(rt_model_t *m, rt_signal_t *s,
                                    int offset, int count)
{
   rt_nexus_t *result = NULL;
   for (rt_nexus_t *it = lookup_index(s, &offset); count > 0; it = it->chain) {
      if (offset >= it->width) {
         // Entirely before the range: skip
         offset -= it->width;
         continue;
      }
      else if (offset > 0) {
         // Range starts inside this nexus: split off the head and
         // retry with the same (now narrower) nexus
         clone_nexus(m, it, offset);
         offset = 0;
         continue;
      }
      else {
         // Range starts here: split off the tail if it extends beyond
         if (it->width > count)
            clone_nexus(m, it, count);

         count -= it->width;

         if (result == NULL)
            result = it;
      }
   }

   return result;
}
1670

1671
// Return the nexus covering elements [offset, offset+count) of signal
// s, splitting the chain if necessary.  Fast path: the request matches
// the first nexus exactly, or spans the entire signal.
static inline rt_nexus_t *split_nexus(rt_model_t *m, rt_signal_t *s,
                                      int offset, int count)
{
   MULTITHREADED_ONLY(assert_lock_held(&s->lock));

   rt_nexus_t *n0 = &(s->nexus);
   if (offset == 0) {
      if (likely(n0->width == count))
         return n0;
      else if (count == s->shared.size / n0->size)
         return n0;
   }

   return split_nexus_slow(m, s, offset, count);
}
1684

1685
// Initialise a freshly created signal: register it with the currently
// active scope, set up its single initial nexus covering the whole
// signal, and append the nexus to the model's global chain.
static void setup_signal(rt_model_t *m, rt_signal_t *s, tree_t where,
                         unsigned count, unsigned size, sig_flags_t flags,
                         unsigned offset)
{
   rt_scope_t *parent = model_thread(m)->active_scope;

   s->where   = where;
   s->n_nexus = 1;
   s->offset  = offset;
   s->parent  = parent;

   s->shared.flags = flags;
   s->shared.size  = count * size;

   APUSH(parent->signals, s);

   // A new signal starts as one nexus spanning all elements; it may be
   // split later as drivers and connections are added
   s->nexus.width        = count;
   s->nexus.size         = size;
   s->nexus.n_sources    = 0;
   s->nexus.offset       = 0;
   s->nexus.flags        = flags | NET_F_FAST_DRIVER | NET_F_HAS_INITIAL;
   s->nexus.signal       = s;
   s->nexus.pending      = NULL;
   s->nexus.active_delta = DELTA_CYCLE_MAX;
   s->nexus.event_delta  = DELTA_CYCLE_MAX;
   s->nexus.last_event   = TIME_HIGH;

   // Append to the model-wide nexus chain
   *m->nexus_tail = &(s->nexus);
   m->nexus_tail = &(s->nexus.chain);

   m->n_signals++;
}
1717

1718
// Gather the source values of every signal in a composite signal scope
// into buf, with each of a nexus's sources written `stride` bytes
// apart (input layout for a record resolution function).
//
// NOTE(review): the inner `unsigned i` shadows the outer loop's `i`,
// and `n` is never advanced along the nexus chain even though the loop
// runs n_nexus times — presumably `n = n->chain` belongs in the loop
// header; confirm against upstream before relying on multi-nexus
// behaviour here.
static void copy_sub_signal_sources(rt_scope_t *scope, void *buf, int stride)
{
   assert(is_signal_scope(scope));

   for (int i = 0; i < scope->signals.count; i++) {
      rt_signal_t *s = scope->signals.items[i];
      rt_nexus_t *n = &(s->nexus);
      for (unsigned i = 0; i < s->n_nexus; i++) {
         unsigned o = 0;   // Source slot within this element
         for (rt_source_t *src = &(n->sources); src; src = src->chain_input) {
            const void *data = source_value(n, src);
            if (data == NULL)
               continue;   // Disconnected or pseudo source

            memcpy(buf + s->offset + (o++ * stride), data, n->size * n->width);
         }
      }
   }

   // Recurse into nested record/array element scopes
   for (int i = 0; i < scope->children.count; i++)
      copy_sub_signal_sources(scope->children.items[i], buf, stride);
}
1740

1741
// Invoke a port conversion function to refresh the driving-direction
// converted values.  When the conversion has no separate effective
// function this result doubles as the effective value, so it is rate
// limited to once per delta cycle.
static void convert_driving(rt_conv_func_t *cf)
{
   rt_model_t *m = get_model();

   if (cf->effective.handle == JIT_HANDLE_INVALID) {
      // Ensure effective value is only updated once per cycle
      if (cf->when == m->now && cf->iteration == m->iteration)
         return;

      cf->when = m->now;
      cf->iteration = m->iteration;
   }

   TRACE("call driving conversion function %s",
         istr(jit_get_name(m->jit, cf->driving.handle)));

   model_thread_t *thread = model_thread(m);

   jit_scalar_t context = { .pointer = cf->driving.context };
   jit_scalar_t arg = { .pointer = cf }, result;
   if (!jit_fastcall(m->jit, cf->driving.handle, &result, context, arg,
                     thread->tlab))
      m->force_stop = true;

   tlab_reset(thread->tlab);   // No allocations can be live past here
}
1767

1768
// Invoke a port conversion function in the effective-value direction,
// at most once per delta cycle.
static void convert_effective(rt_conv_func_t *cf)
{
   rt_model_t *m = get_model();

   // Ensure effective value is only updated once per cycle
   if (cf->when == m->now && cf->iteration == m->iteration)
      return;

   cf->when = m->now;
   cf->iteration = m->iteration;

   TRACE("call effective conversion function %s",
         istr(jit_get_name(m->jit, cf->effective.handle)));

   model_thread_t *thread = model_thread(m);

   jit_scalar_t context = { .pointer = cf->effective.context };
   jit_scalar_t arg = { .pointer = cf }, result;
   if (!jit_fastcall(m->jit, cf->effective.handle, &result, context, arg,
                     thread->tlab))
      m->force_stop = true;

   tlab_reset(thread->tlab);   // No allocations can be live past here
}
1792

1793
// Return a pointer to the current contribution of one source of a
// nexus, or NULL if the source is currently contributing nothing
// (disconnected driver, forcing/deposit pseudo-source, or implicit).
static void *source_value(rt_nexus_t *nexus, rt_source_t *src)
{
   switch (src->tag) {
   case SOURCE_DRIVER:
      // A disconnected driver contributes the null transaction
      if (unlikely(src->disconnected))
         return NULL;
      else
         return value_ptr(nexus, &(src->u.driver.waveforms.value));

   case SOURCE_PORT:
      if (likely(src->u.port.conv_func == NULL)) {
         // Ports with separately-tracked effective values contribute
         // their driving value; otherwise the effective value is used
         if (src->u.port.input->flags & NET_F_EFFECTIVE)
            return nexus_driving(src->u.port.input);
         else
            return nexus_effective(src->u.port.input);
      }
      else {
         // Conversion function: run it and read the converted result
         convert_driving(src->u.port.conv_func);
         return value_ptr(nexus, &src->u.port.conv_result);
      }

   case SOURCE_FORCING:
   case SOURCE_DEPOSIT:
      assert(src->disconnected);
      return NULL;

   case SOURCE_IMPLICIT:
      return NULL;
   }

   return NULL;
}
1825

1826
// Compute the driving value of a resolved nexus by combining the values
// of its `nonnull` connected sources (chain starting at s0) through the
// resolution function memo `r`, then publish it with put_driving().
static void call_resolution(rt_model_t *m, rt_nexus_t *n, res_memo_t *r,
                            int nonnull, rt_source_t *s0)
{
   if ((n->flags & NET_F_R_IDENT) && nonnull == 1) {
      // Resolution function behaves like identity for a single driver
      put_driving(m, n, source_value(n, s0));
   }
   else if ((r->flags & R_MEMO) && nonnull == 1) {
      // Resolution function has been memoised so do a table lookup

      model_thread_t *thread = model_thread(m);
      assert(thread->tlab != NULL);

      void *resolved = tlab_alloc(thread->tlab, n->width * n->size);
      char *p0 = source_value(n, s0);

      // One-input memo table: tab1 maps each scalar element directly
      for (int j = 0; j < n->width; j++) {
         const int index = ((uint8_t *)p0)[j];
         ((int8_t *)resolved)[j] = r->tab1[index];
      }

      put_driving(m, n, resolved);
      tlab_reset(thread->tlab);   // No allocations can be live past here
   }
   else if ((r->flags & R_MEMO) && nonnull == 2) {
      // Resolution function has been memoised so do a table lookup

      model_thread_t *thread = model_thread(m);
      assert(thread->tlab != NULL);

      void *resolved = tlab_alloc(thread->tlab, n->width * n->size);

      // Find the second non-null contribution after s0 in the chain
      char *p0 = source_value(n, s0), *p1 = NULL;
      for (rt_source_t *s1 = s0->chain_input;
           s1 && (p1 = source_value(n, s1)) == NULL;
           s1 = s1->chain_input)
         ;

      // Two-input memo table: tab2 resolves each element pairwise
      for (int j = 0; j < n->width; j++)
         ((int8_t *)resolved)[j] = r->tab2[(int)p0[j]][(int)p1[j]];

      put_driving(m, n, resolved);
      tlab_reset(thread->tlab);   // No allocations can be live past here
   }
   else if (r->flags & R_COMPOSITE) {
      // Call resolution function of composite type

      // Find the outermost signal scope, remembering the nearest
      // enclosing scope that actually carries the resolution (rscope)
      rt_scope_t *scope = n->signal->parent, *rscope = scope;
      while (is_signal_scope(scope->parent)) {
         scope = scope->parent;
         if (scope->flags & SCOPE_F_RESOLVED)
            rscope = scope;
      }

      TRACE("resolved composite signal needs %d bytes", scope->size);

      model_thread_t *thread = model_thread(m);
      assert(thread->tlab != NULL);

      // Gather every source's value for the whole composite, then call
      // the resolution function once over all of them
      uint8_t *inputs = tlab_alloc(thread->tlab, nonnull * scope->size);
      copy_sub_signal_sources(scope, inputs, scope->size);

      jit_scalar_t result;
      if (jit_try_call(m->jit, r->closure.handle, &result,
                       r->closure.context, inputs, nonnull))
         // Result covers the whole composite: index back to this nexus
         put_driving(m, n, result.pointer + n->signal->offset
                     + n->offset - rscope->offset);
      else
         m->force_stop = true;

      tlab_reset(thread->tlab);   // No allocations can be live past here
   }
   else {
      // General case: call the resolution function element-by-element
      model_thread_t *thread = model_thread(m);
      assert(thread->tlab != NULL);

      void *resolved = tlab_alloc(thread->tlab, n->width * n->size);

      for (int j = 0; j < n->width; j++) {
         // Collect the j'th element of each non-null source into a VLA
         // and resolve; instantiated per element size via FOR_ALL_SIZES
#define CALL_RESOLUTION_FN(type) do {                                   \
            type vals[nonnull];                                         \
            unsigned o = 0;                                             \
            for (rt_source_t *s = s0; s; s = s->chain_input) {          \
               const void *data = source_value(n, s);                   \
               if (data != NULL)                                        \
                  vals[o++] = ((const type *)data)[j];                  \
            }                                                           \
            assert(o == nonnull);                                       \
            type *p = (type *)resolved;                                 \
            jit_scalar_t result;                                        \
            if (!jit_try_call(m->jit, r->closure.handle, &result,       \
                              r->closure.context, vals, nonnull))       \
               m->force_stop = true;                                    \
            p[j] = result.integer;                                      \
         } while (0)

         FOR_ALL_SIZES(n->size, CALL_RESOLUTION_FN);
      }

      put_driving(m, n, resolved);
      tlab_reset(thread->tlab);   // No allocations can be live past here
   }
}
431,665✔
1929

1930
static rt_source_t *get_pseudo_source(rt_model_t *m, rt_nexus_t *n,
2,726✔
1931
                                      source_kind_t kind)
1932
{
1933
   assert(is_pseudo_source(kind));
2,726✔
1934

1935
   if (n->n_sources > 0) {
2,726✔
1936
      for (rt_source_t *s = &(n->sources); s; s = s->chain_input) {
832✔
1937
         if (s->tag == kind)
777✔
1938
            return s;
683✔
1939
      }
1940
   }
1941

1942
   return add_source(m, n, kind);
2,043✔
1943
}
1944

1945
// Queue an update of an implicit signal ('TRANSACTION or 'QUIET) whose
// input nexus became active.  Cold path: kept out of line so the common
// driving-value calculation stays compact.
__attribute__((cold, noinline))
static void schedule_implicit_update(rt_model_t *m, rt_nexus_t *n)
{
   rt_implicit_t *imp = container_of(n->signal, rt_implicit_t, signal);

   // Only enqueue once per cycle; set_pending marks it queued
   if (!imp->wakeable.pending) {
      deferq_do(&m->implicitq, async_update_implicit_signal, imp);
      set_pending(&imp->wakeable);
   }
}
360✔
1955

1956
// Compute and publish the driving value of nexus `n`, following the
// source-classification rules of the LRM (see section reference below).
static void calculate_driving_value(rt_model_t *m, rt_nexus_t *n)
{
   // Algorithm for driving values is in LRM 08 section 14.7.3.2

   // If S has no source, then the driving value of S is given by the
   // default value associated with S
   if (n->n_sources == 0) {
      put_driving(m, n, nexus_initial(n));
      return;
   }

   res_memo_t *r = n->signal->resolution;

   // Classify the sources: count connected ones and remember the first,
   // handling pseudo-sources (force/deposit/implicit) which short-circuit
   int nonnull = 0;
   rt_source_t *s0 = NULL;
   for (rt_source_t *s = &(n->sources); s; s = s->chain_input) {
      if (s->disconnected)
         continue;
      else if (s->tag == SOURCE_FORCING) {
         // If S is driving-value forced, the driving value of S is
         // unchanged from its previous value; no further steps are
         // required.
         put_driving(m, n, value_ptr(n, &(s->u.pseudo.value)));
         return;
      }
      else if (s->tag == SOURCE_DEPOSIT) {
         // If a driving-value deposit is scheduled for S or for a
         // signal of which S is a subelement, the driving value of S is
         // the driving deposit value for S or the element of the
         // driving deposit value for the signal of which S is a
         // subelement, as appropriate.
         s->disconnected = 1;   // Deposit applies exactly once
         put_driving(m, n, value_ptr(n, &(s->u.pseudo.value)));
         return;
      }
      else if (unlikely(s->tag == SOURCE_IMPLICIT)) {
         // At least one of the inputs is active so schedule an update
         // to the value of an implicit 'TRANSACTION or 'QUIET signal
         schedule_implicit_update(m, n);
         return;
      }
      else if (s0 == NULL)
         s0 = s;
      nonnull++;
   }

   if (unlikely(s0 == NULL)) {
      // If S is of signal kind register and all the sources of S have
      // values determined by the null transaction, then the driving
      // value of S is unchanged from its previous value.
      if (n->signal->shared.flags & SIG_F_REGISTER)
         put_driving(m, n, nexus_effective(n));
      else if (r == NULL || is_pseudo_source(n->sources.tag))
         put_driving(m, n, nexus_initial(n));
      else
         call_resolution(m, n, r, nonnull, s0);
   }
   else if (r == NULL) {
      switch (s0->tag) {
      case SOURCE_DRIVER:
         // If S has one source that is a driver and S is not a resolved
         // signal, then the driving value of S is the current value of
         // that driver.
         assert(!s0->disconnected);
         put_driving(m, n, value_ptr(n, &(s0->u.driver.waveforms.value)));
         break;

      case SOURCE_PORT:
         // If S has one source that is a port and S is not a resolved
         // signal, then the driving value of S is the driving value of
         // the formal part of the association element that associates S
         // with that port
         if (likely(s0->u.port.conv_func == NULL)) {
            if (s0->u.port.input->flags & NET_F_EFFECTIVE)
               put_driving(m, n, nexus_driving(s0->u.port.input));
            else
               put_driving(m, n, nexus_effective(s0->u.port.input));
         }
         else {
            convert_driving(s0->u.port.conv_func);
            put_driving(m, n, value_ptr(n, &(s0->u.port.conv_result)));
         }
         break;

      default:
         break;
      }
   }
   else {
      // Otherwise, the driving value of S is obtained by executing the
      // resolution function associated with S
      call_resolution(m, n, r, nonnull, s0);
   }
}
2050

2051
// Compute and publish the effective value of nexus `n` according to the
// LRM rules (see section reference below).
static void calculate_effective_value(rt_model_t *m, rt_nexus_t *n)
{
   // Algorithm for effective values is in LRM 08 section 14.7.7.3

   // If S is a connected port of mode in or inout, then the effective
   // value of S is the same as the effective value of the actual part
   // of the association element that associates an actual with S
   if (n->flags & NET_F_INOUT) {
      for (rt_source_t *s = n->outputs; s; s = s->chain_output) {
         if (s->tag == SOURCE_PORT) {
            if (likely(s->u.port.conv_func == NULL))
               put_effective(m, n, nexus_effective(s->u.port.output));
            else {
               // Converted association: the effective value is the
               // conversion function's stored input slot for this nexus
               rt_value_t *v = find_conversion_input(s->u.port.conv_func, n);
               assert(v != NULL);

               convert_effective(s->u.port.conv_func);
               put_effective(m, n, value_ptr(n, v));
            }
            return;
         }
      }
   }

   // If S is a signal declared by a signal declaration, a port of mode
   // out or buffer, or an unconnected port of mode inout, then the
   // effective value of S is the same as the driving value of S.
   if (n->flags & NET_F_EFFECTIVE)
      put_effective(m, n, nexus_driving(n));

   // If S is an unconnected port of mode in, the effective value of S
   // is given by the default value associated with S.
}
2084

2085
// Establish a nexus's value at time zero: compute the driving value and,
// unless a separate effective value is pending, mirror it into the
// last-value buffer so 'LAST_VALUE starts consistent.
static void calculate_initial_value(rt_model_t *m, rt_nexus_t *n)
{
   calculate_driving_value(m, n);

   if (n->flags & NET_F_EFFECTIVE) {
      // Driving and effective values must be calculated separately
      assert(n->flags & NET_F_PENDING);
   }
   else {
      // Effective value is always the same as the driving value
      memcpy(nexus_last_value(n), nexus_effective(n), n->size * n->width);
   }
}
39,033✔
2098

2099
// Return the memoised "rank" of a nexus: zero for undriven or
// driver-only nexuses, otherwise one more than the highest rank among
// the port inputs (including conversion-function inputs) feeding it.
// Used to order initial value calculation so inputs settle first.
static int nexus_rank(rt_nexus_t *n)
{
   if (n->rank > 0)
      return n->rank;   // Already calculated
   else if (n->n_sources > 0) {
      int rank = 0;
      for (rt_source_t *s = &(n->sources); s; s = s->chain_input) {
         if (s->tag != SOURCE_PORT)
            continue;   // Only port connections contribute to rank
         else if (s->u.port.conv_func != NULL) {
            // Conversion function: depend on every one of its inputs
            rt_conv_func_t *cf = s->u.port.conv_func;
            for (int i = 0; i < cf->ninputs; i++)
               rank = MAX(rank, nexus_rank(cf->inputs[i].nexus) + 1);
         }
         else
            rank = MAX(rank, nexus_rank(s->u.port.input) + 1);
      }
      return (n->rank = rank);   // Memoise for subsequent calls
   }
   else
      return 0;
}
2121

2122
// Accessor for the coverage data collected by this model, if any.
cover_data_t *get_coverage(rt_model_t *m)
{
   return m->cover;
}
2126

2127
#if TRACE_SIGNALS
2128
static void dump_one_signal(rt_model_t *m, rt_scope_t *scope, rt_signal_t *s,
×
2129
                            tree_t alias)
2130
{
2131
   rt_nexus_t *n = &(s->nexus);
×
2132

2133
   LOCAL_TEXT_BUF tb = tb_new();
×
2134
   if (is_signal_scope(scope))
×
2135
      tb_printf(tb, "%s.", istr(scope->name));
×
2136
   tb_cat(tb, istr(tree_ident(alias ?: s->where)));
×
2137
   if (alias != NULL)
×
2138
      tb_append(tb, '*');
×
2139

2140
   for (int nth = 0; nth < s->n_nexus; nth++, n = n->chain) {
×
2141
      int n_outputs = 0;
×
2142
      for (rt_source_t *s = n->outputs; s != NULL; s = s->chain_output)
×
2143
         n_outputs++;
×
2144

2145
      const void *driving = NULL;
×
2146
      if (n->flags & NET_F_EFFECTIVE)
×
2147
         driving = nexus_driving(n);
×
2148

2149
      fprintf(stderr, "%-20s %-5d %-4d %-7d %-7d %-4d ",
×
2150
              nth == 0 ? tb_get(tb) : "+",
×
2151
              n->width, n->size, n->n_sources, n_outputs, n->rank);
×
2152

2153
      if (n->event_delta == m->iteration && n->last_event == m->now)
×
2154
         fprintf(stderr, "%s -> ", fmt_nexus(n, nexus_last_value(n)));
×
2155

2156
      fputs(fmt_nexus(n, nexus_effective(n)), stderr);
×
2157

2158
      if (driving != NULL)
×
2159
         fprintf(stderr, " (%s)", fmt_nexus(n, driving));
×
2160

2161
      fputs("\n", stderr);
×
2162
   }
2163
}
×
2164

2165
// Debug helper (TRACE_SIGNALS builds only): recursively print all
// signals in a scope.  Signal scopes (record elements) are flattened
// into their parent's table; instance scopes get their own header.
static void dump_signals(rt_model_t *m, rt_scope_t *scope)
{
   if (scope->signals.count == 0 && scope->children.count == 0)
      return;

   if (!is_signal_scope(scope)) {
      // Banner with the scope name padded to a fixed width
      const char *sname = istr(scope->name);
      fprintf(stderr, "== %s ", sname);
      for (int pad = 74 - strlen(sname); pad > 0; pad--)
         fputc('=', stderr);
      fputc('\n', stderr);

      fprintf(stderr, "%-20s %5s %4s %7s %7s %4s %s\n",
              "Signal", "Width", "Size", "Sources", "Outputs", "Rank", "Value");
   }

   for (int i = 0; i < scope->signals.count; i++)
      dump_one_signal(m, scope, scope->signals.items[i], NULL);

   for (int i = 0; i < scope->aliases.count; i++) {
      rt_alias_t *a = scope->aliases.items[i];
      dump_one_signal(m, scope, a->signal, a->where);
   }

   // Visit signal-scope children first so they appear under this header
   for (int i = 0; i < scope->children.count; i++) {
      rt_scope_t *c = scope->children.items[i];
      if (is_signal_scope(c))
         dump_signals(m, c);
   }

   for (int i = 0; i < scope->children.count; i++) {
      rt_scope_t *c = scope->children.items[i];
      if (!is_signal_scope(c))
         dump_signals(m, c);
   }
}
2201
#endif   // TRACE_SIGNALS
2202

2203
// Build a human-readable name for a signal, prefixed with its parent
// scope name when nested inside a signal (record) scope.  The caller
// owns the returned text buffer.
static text_buf_t *signal_full_name(rt_signal_t *s)
{
   text_buf_t *tb = tb_new();
   rt_scope_t *parent = s->parent;
   if (is_signal_scope(parent))
      tb_printf(tb, "%s.", istr(parent->name));
   tb_cat(tb, istr(tree_ident(s->where)));
   return tb;
}
2211

2212
static void check_undriven_std_logic(rt_nexus_t *n)
{
   // Print a warning if any STD_LOGIC signal has multiple sources one
   // of which is an undriven port with initial value 'U'. The resolved
   // value will then always be 'U' which often confuses users.

   if (n->n_sources < 2 || !(n->signal->shared.flags & SIG_F_STD_LOGIC))
      return;

   rt_signal_t *undriven = NULL;
   for (rt_source_t *s = &(n->sources); s; s = s->chain_input) {
      if (s->tag == SOURCE_PORT && s->u.port.conv_func == NULL) {
         rt_nexus_t *input = s->u.port.input;
         if (input->n_sources == 0) {
            // Scan for any element that is not 'U' (encoded as zero).
            // Bounds test must come before the dereference: the old
            // order read one byte past the buffer when all elements
            // were zero (and immediately out of bounds for width 0).
            const unsigned char *init = nexus_effective(input), *p = init;
            for (; p < init + input->width && *p == 0; p++);

            if (p == init + input->width)
               undriven = s->u.port.input->signal;
         }
      }
   }

   if (undriven == NULL)
      return;

   LOCAL_TEXT_BUF sig_name = signal_full_name(n->signal);
   LOCAL_TEXT_BUF port_name = signal_full_name(undriven);

   // Report the location of the enclosing declaration, not the record
   // element, for signals nested in signal scopes
   const loc_t *sig_loc = tree_loc(n->signal->where);
   rt_scope_t *sig_scope = n->signal->parent;
   for (; is_signal_scope(sig_scope); sig_scope = sig_scope->parent)
      sig_loc = tree_loc(sig_scope->where);

   const loc_t *port_loc = tree_loc(undriven->where);
   rt_scope_t *port_scope = undriven->parent;
   for (; is_signal_scope(port_scope); port_scope = port_scope->parent)
      port_loc = tree_loc(port_scope->where);

   diag_t *d = diag_new(DIAG_WARN, sig_loc);
   diag_printf(d, "%ssignal %s has %d sources including port %s which has "
               "initial value 'U' and no driver in instance %s",
               n->signal->n_nexus > 1 ? "sub-element of " : "",
               tb_get(sig_name), n->n_sources,
               tb_get(port_name), istr(tree_ident(port_scope->where)));
   diag_hint(d, sig_loc, "signal %s declared here", tb_get(sig_name));
   diag_hint(d, port_loc, "sourced by port %s which always contributes 'U'",
             tb_get(port_name));
   diag_hint(d, NULL, "the resolved value will always be 'U' which was almost "
             "certainly not intended");
   diag_emit(d);

   // Prevent multiple warnings for the same signal
   n->signal->shared.flags &= ~SIG_F_STD_LOGIC;
}
2267

2268
// Recursively walk the scope tree and create runtime objects for every
// process, Verilog module body and PSL directive found in each
// instance, registering them with the JIT for lazy compilation.
static void create_processes(rt_model_t *m, rt_scope_t *s)
{
   // Depth-first: elaborate child instances before this scope's own
   // statements
   for (int i = 0; i < s->children.count; i++) {
      if (s->children.items[i]->kind == SCOPE_INSTANCE)
         create_processes(m, s->children.items[i]);
   }

   if (s->kind != SCOPE_INSTANCE)
      return;

   // The first declaration of an instance scope is its hierarchy marker
   tree_t hier = tree_decl(s->where, 0);
   assert(tree_kind(hier) == T_HIER);

   LOCAL_TEXT_BUF tb = tb_new();
   get_path_name(s, tb);

   ident_t path = ident_new(tb_get(tb));
   ident_t sym_prefix = tree_ident2(hier);

   const int nstmts = tree_stmts(s->where);
   for (int i = 0; i < nstmts; i++) {
      tree_t t = tree_stmt(s->where, i);
      switch (tree_kind(t)) {
      case T_VERILOG:
         {
            ident_t name = tree_ident(t);
            ident_t sym = ident_prefix(sym_prefix, name, '.');

            rt_proc_t *p = xcalloc(sizeof(rt_proc_t));
            p->where     = t;
            p->name      = ident_prefix(path, ident_downcase(name), ':');
            p->handle    = jit_lazy_compile(m->jit, sym);
            p->scope     = s;
            p->privdata  = mptr_new(m->mspace, "process privdata");

            p->wakeable.kind      = W_PROC;
            p->wakeable.pending   = false;
            p->wakeable.postponed = false;
            p->wakeable.delayed   = false;

            APUSH(s->procs, p);
         }
         break;

      case T_PROCESS:
         {
            ident_t name = tree_ident(t);
            ident_t sym = ident_prefix(sym_prefix, name, '.');

            rt_proc_t *p = xcalloc(sizeof(rt_proc_t));
            p->where     = t;
            p->name      = ident_prefix(path, ident_downcase(name), ':');
            p->handle    = jit_lazy_compile(m->jit, sym);
            p->scope     = s;
            p->privdata  = mptr_new(m->mspace, "process privdata");

            p->wakeable.kind      = W_PROC;
            p->wakeable.pending   = false;
            // VHDL postponed processes run only at the end of a cycle
            p->wakeable.postponed = !!(tree_flags(t) & TREE_F_POSTPONED);
            p->wakeable.delayed   = false;

            APUSH(s->procs, p);
         }
         break;

      case T_PSL_DIRECT:
         {
            psl_node_t psl = tree_psl(t);

            // Only assert and cover directives become runtime properties
            const psl_kind_t kind = psl_kind(psl);
            if (kind != P_ASSERT && kind != P_COVER)
               continue;

            ident_t name = tree_ident(t);
            ident_t sym = ident_prefix(s->name, name, '.');

            rt_prop_t *p = xcalloc(sizeof(rt_prop_t));
            p->where    = tree_psl(t);
            p->handle   = jit_lazy_compile(m->jit, sym);
            p->scope    = s;
            p->name     = sym;
            p->privdata = mptr_new(m->mspace, "property privdata");

            p->wakeable.kind      = W_PROPERTY;
            p->wakeable.pending   = false;
            p->wakeable.postponed = false;
            p->wakeable.delayed   = false;

            APUSH(s->properties, p);
         }
         break;

      default:
         break;
      }
   }
}
2365

2366
// Reset the model to its initial state and compute time-zero signal
// values.  The initialisation order follows the LRM (see note below):
// drivers get default values, then driving values are calculated in
// dependency (rank) order, then effective values.
void model_reset(rt_model_t *m)
{
   MODEL_ENTRY(m);

   // Re-read options as these may have changed
   m->stop_delta = opt_get_int(OPT_STOP_DELTA);
   m->shuffle    = opt_get_int(OPT_SHUFFLE_PROCS);

   __trace_on = opt_get_int(OPT_RT_TRACE);

   create_processes(m, m->root);

   nvc_rusage(&m->ready_rusage);

   // Initialisation is described in LRM 93 section 12.6.4

   reset_scope(m, m->root);

   if (m->force_stop)
      return;   // Error in intialisation

#if TRACE_SIGNALS > 0
   if (__trace_on)
      dump_signals(m, m->root);
#endif

   TRACE("calculate initial signal values");

   model_thread_t *thread = model_thread(m);
   thread->tlab = tlab_acquire(m->mspace);

   // The signals in the model are updated as follows in an order such
   // that if a given signal R depends upon the current value of another
   // signal S, then the current value of S is updated prior to the
   // updating of the current value of R.

   for (rt_nexus_t *n = m->nexuses; n != NULL; n = n->chain) {
      // The initial value of each driver is the default value of the signal
      if (n->n_sources > 0) {
         for (rt_source_t *s = &(n->sources); s; s = s->chain_input) {
            if (s->tag == SOURCE_DRIVER)
               copy_value_ptr(n, &(s->u.driver.waveforms.value),
                              nexus_effective(n));
         }
      }

      const int rank = nexus_rank(n);
      if (rank > MAX_RANK)
         fatal_at(tree_loc(n->signal->where), "signal rank %d is greater "
                  "than the maximum supported %d", rank, MAX_RANK);
      else if (rank > 0 || n->n_sources > 1)
         // Defer: value depends on other signals or needs resolution
         heap_insert(m->driving_heap, rank, n);
      else {
         calculate_initial_value(m, n);
         check_undriven_std_logic(n);
      }
   }

   // Process deferred nexuses in ascending rank so inputs settle first
   while (heap_size(m->driving_heap) > 0) {
      rt_nexus_t *n = heap_extract_min(m->driving_heap);
      calculate_initial_value(m, n);
      check_undriven_std_logic(n);
   }

   // Update effective values after all initial driving values calculated
   while (heap_size(m->effective_heap) > 0) {
      rt_nexus_t *n = heap_extract_min(m->effective_heap);
      n->flags &= ~NET_F_PENDING;

      calculate_effective_value(m, n);
   }

   tlab_reset(thread->tlab);   // No allocations can be live past here

   global_event(m, RT_END_OF_INITIALISATION);
}
2442

2443
// Advance a PSL property's state machine: run the compiled property
// function once for each active state bit, accumulating the next state
// set in prop->newstate.
static void update_property(rt_model_t *m, rt_prop_t *prop)
{
   TRACE("update property %s state %s", istr(prop->name),
         trace_states(&prop->state));

   rt_wakeable_t *obj = &(prop->wakeable);

   // An attached trigger can filter out spurious wakeups
   if (obj->trigger != NULL && !run_trigger(m, obj->trigger))
      return;   // Filtered

   model_thread_t *thread = model_thread(m);
   assert(thread->tlab != NULL);

   thread->active_obj = obj;
   thread->active_scope = prop->scope;

   // Args: property privdata (or -1 sentinel on first run), enclosing
   // scope privdata, and the state number filled in per iteration
   jit_scalar_t args[] = {
      { .pointer = *mptr_get(prop->privdata) ?: (void *)-1 },
      { .pointer = *mptr_get(prop->scope->privdata) },
      { .integer = -1 },
   };

   mask_clearall(&prop->newstate);
   prop->strong = false;

   // Run the property function once for every currently-active state
   size_t bit = -1;
   while (mask_iter(&prop->state, &bit)) {
      args[2].integer = bit;

      if (!jit_vfastcall(m->jit, prop->handle, args, ARRAY_LEN(args),
                         NULL, 0, thread->tlab))
         m->force_stop = true;
   }

   tlab_reset(thread->tlab);   // No allocations can be live past here

   thread->active_obj = NULL;
   thread->active_scope = NULL;

   TRACE("new state %s%s", trace_states(&prop->newstate),
         prop->strong ? " strong" : "");

   mask_copy(&prop->state, &prop->newstate);

   // A strong obligation keeps the simulation "live" for liveness checks
   m->liveness |= prop->strong;
}
2489

2490
// Register `obj` to be woken when nexus `n` has an event.  The pending
// set uses a tagged-pointer encoding: NULL for empty, tag 1 for a
// single wakeable stored inline, tag 0 for a heap-allocated rt_pending_t
// array that grows geometrically.
static void sched_event(rt_model_t *m, rt_nexus_t *n, rt_wakeable_t *obj)
{
   if (n->pending == NULL)
      n->pending = tag_pointer(obj, 1);   // First waiter: store inline
   else if (pointer_tag(n->pending) == 1) {
      rt_wakeable_t *cur = untag_pointer(n->pending, rt_wakeable_t);
      if (cur == obj)
         return;   // Already registered

      // Second distinct waiter: upgrade to the array representation
      rt_pending_t *p = xmalloc_flex(sizeof(rt_pending_t), PENDING_MIN,
                                     sizeof(rt_wakeable_t *));
      p->max = PENDING_MIN;
      p->count = 2;
      p->wake[0] = cur;
      p->wake[1] = obj;

      n->pending = tag_pointer(p, 0);
   }
   else {
      rt_pending_t *p = untag_pointer(n->pending, rt_pending_t);

      // Reuse a cleared slot, or bail out if already registered
      for (int i = 0; i < p->count; i++) {
         if (p->wake[i] == NULL || p->wake[i] == obj) {
            p->wake[i] = obj;
            return;
         }
      }

      if (p->count == p->max) {
         // Grow geometrically; reallocation may move the block
         p->max = MAX(PENDING_MIN, p->max * 2);
         p = xrealloc_flex(p, sizeof(rt_pending_t), p->max,
                           sizeof(rt_wakeable_t *));
         n->pending = tag_pointer(p, 0);
      }

      p->wake[p->count++] = obj;
   }
}
2528

2529
// Remove `obj` from the pending wakeup set of nexus `n`, if present.
// Mirrors the tagged-pointer encoding used by sched_event: tag 1 is a
// single inline wakeable, tag 0 a heap-allocated array whose matching
// slot is blanked for later reuse.
static void clear_event(rt_model_t *m, rt_nexus_t *n, rt_wakeable_t *obj)
{
   void *pending = n->pending;
   if (pointer_tag(pending) == 1) {
      if (untag_pointer(pending, rt_wakeable_t) == obj)
         n->pending = NULL;
   }
   else if (pending != NULL) {
      rt_pending_t *p = untag_pointer(pending, rt_pending_t);
      for (int i = 0; i < p->count; i++) {
         if (p->wake[i] == obj) {
            p->wake[i] = NULL;
            return;
         }
      }
   }
}
2546

2547
// Locate the driver source on this nexus owned by the given process,
// or NULL if the process has never driven it.
static rt_source_t *find_driver(rt_nexus_t *nexus, rt_proc_t *proc)
{
   rt_source_t *d = &(nexus->sources);
   do {
      if (d->tag == SOURCE_DRIVER && d->u.driver.proc == proc)
         return d;
   } while ((d = d->chain_input) != NULL);

   return NULL;
}
2557

2558
// Insert waveform `w` into a driver's transaction list at time `when`,
// applying pulse rejection over the `reject` interval and marker/inertial
// deletion of later transactions.  Returns true if a transaction at
// exactly `when` was already scheduled (so no new event need be queued).
static inline bool insert_transaction(rt_model_t *m, rt_nexus_t *nexus,
                                      rt_source_t *source, waveform_t *w,
                                      uint64_t when, uint64_t reject)
{
   waveform_t *last = &(source->u.driver.waveforms);
   waveform_t *it   = last->next;
   while (it != NULL && it->when < when) {
      // If the current transaction is within the pulse rejection interval
      // and the value is different to that of the new transaction then
      // delete the current transaction
      assert(it->when >= m->now);
      if (it->when >= when - reject
          && !cmp_values(nexus, it->value, w->value)) {
         waveform_t *next = it->next;
         last->next = next;
         free_value(nexus, it->value);
         free_waveform(m, it);
         it = next;
      }
      else {
         last = it;
         it = it->next;
      }
   }
   last->next = w;

   // Delete all transactions later than this
   // We could remove this transaction from the deltaq as well but the
   // overhead of doing so is probably higher than the cost of waking
   // up for the empty event
   bool already_scheduled = false;
   for (waveform_t *next; it != NULL; it = next) {
      next = it->next;
      already_scheduled |= (it->when == when);
      free_value(nexus, it->value);
      free_waveform(m, it);
   }

   return already_scheduled;
}
2598

2599
// Schedule a new VALUE on PROC's driver for nexus N, AFTER ticks from
// now with pulse rejection limit REJECT.  Zero-delay updates on a
// single-driver nexus take the "fast driver" path which writes the
// waveform in place instead of allocating a transaction.
static void sched_driver(rt_model_t *m, rt_nexus_t *n, uint64_t after,
                         uint64_t reject, const void *value, rt_proc_t *proc)
{
   if (after == 0 && (n->flags & NET_F_FAST_DRIVER)) {
      rt_source_t *d = &(n->sources);
      assert(n->n_sources == 1);

      waveform_t *w = &d->u.driver.waveforms;
      w->when = m->now;
      assert(w->next == NULL);

      rt_signal_t *signal = n->signal;
      rt_source_t *d0 = &(signal->nexus.sources);

      if (d->fastqueued)
         assert(m->next_is_delta);   // Update already queued this cycle
      else if ((signal->shared.flags & NET_F_FAST_DRIVER) && d0->sigqueued) {
         // Whole-signal update already queued; just mark this nexus
         assert(m->next_is_delta);
         d->fastqueued = 1;
      }
      else if (cmp_bytes(value, value_ptr(n, &w->value), n->width * n->size)) {
         // Value is unchanged: the nexus becomes active next delta but
         // no update needs to be queued
         m->next_is_delta = true;
         d->was_active = (n->active_delta == m->iteration);
         n->active_delta = m->iteration + 1;
         return;
      }
      else if (signal->shared.flags & NET_F_FAST_DRIVER) {
         // Queue a single update covering every nexus of the signal
         deferq_do(&m->delta_driverq, async_fast_all_drivers, signal);
         m->next_is_delta = true;
         d0->sigqueued = 1;
         d->fastqueued = 1;
      }
      else {
         deferq_do(&m->delta_driverq, async_fast_driver, d);
         m->next_is_delta = true;
         d->fastqueued = 1;
      }

      copy_value_ptr(n, &w->value, value);
   }
   else {
      // Slow path: find this process's driver and insert a transaction
      rt_source_t *d = find_driver(n, proc);
      assert(d != NULL);

      if ((n->flags & NET_F_FAST_DRIVER) && d->fastqueued) {
         // A fast update to this driver is already scheduled
         waveform_t *w0 = alloc_waveform(m);
         w0->when  = m->now;
         w0->next  = NULL;
         w0->value = alloc_value(m, n);

         const uint8_t *prev = value_ptr(n, &(d->u.driver.waveforms.value));
         copy_value_ptr(n, &w0->value, prev);

         assert(d->u.driver.waveforms.next == NULL);
         d->u.driver.waveforms.next = w0;
      }

      // Once a delayed transaction exists the fast path is no longer valid
      n->flags &= ~NET_F_FAST_DRIVER;

      waveform_t *w = alloc_waveform(m);
      w->when  = m->now + after;
      w->next  = NULL;
      w->value = alloc_value(m, n);

      copy_value_ptr(n, &w->value, value);

      if (!insert_transaction(m, n, d, w, w->when, reject))
         deltaq_insert_driver(m, after, d);
   }
}
2670

2671
// Schedule a null transaction (VHDL disconnect) on PROC's driver for
// NEXUS, AFTER ticks from now with pulse rejection limit REJECT
static void sched_disconnect(rt_model_t *m, rt_nexus_t *nexus, uint64_t after,
                             uint64_t reject, rt_proc_t *proc)
{
   rt_source_t *d = find_driver(nexus, proc);
   assert(d != NULL);

   const uint64_t when = m->now + after;

   // Need update_driver to clear disconnected flag
   nexus->flags &= ~NET_F_FAST_DRIVER;

   waveform_t *w = alloc_waveform(m);
   w->when = -when;   // Use sign bit to represent null
   w->next = NULL;
   w->value.qword = 0;

   // Still sort/reject against the positive time value
   if (!insert_transaction(m, nexus, d, w, when, reject))
      deltaq_insert_driver(m, after, d);
}
2690

2691
// Deferred-queue thunk for a value-change callback registered with the
// model.  Frees the watch instead if it was removed while queued.
static void async_watch_callback(rt_model_t *m, void *arg)
{
   rt_watch_t *const watch = arg;

   assert(watch->wakeable.pending);
   watch->wakeable.pending = false;

   if (watch->wakeable.zombie) {
      free(watch);   // Watch was deleted after being scheduled
      return;
   }

   (*watch->fn)(m->now, watch->signals[0], watch, watch->user_data);
}
2703

2704
// Deferred-queue thunk for a one-shot timeout: invoke the user callback
// then release the callback record allocated when it was scheduled
static void async_timeout_callback(rt_model_t *m, void *arg)
{
   rt_callback_t *const cb = arg;
   (*cb->fn)(m, cb->user);
   free(cb);
}
2710

2711
// Deferred-queue thunk: clear the pending flag on an implicit signal
// ('STABLE, 'QUIET, etc.) and recompute its value
static void async_update_implicit_signal(rt_model_t *m, void *arg)
{
   rt_implicit_t *const imp = arg;

   assert(imp->wakeable.pending);
   imp->wakeable.pending = false;

   update_implicit_signal(m, imp);
}
2720

2721
// Deferred-queue thunk: clear the pending flag set by wakeup_one() and
// resume the process
static void async_run_process(rt_model_t *m, void *arg)
{
   rt_proc_t *const proc = arg;

   assert(proc->wakeable.pending);
   proc->wakeable.pending = false;

   run_process(m, proc);
}
2730

2731
// Deferred-queue thunk: clear the pending flag on a PSL property and
// advance its state machine
static void async_update_property(rt_model_t *m, void *arg)
{
   rt_prop_t *const prop = arg;

   assert(prop->wakeable.pending);
   prop->wakeable.pending = false;

   update_property(m, prop);
}
2740

2741
// Predicate for heap_delete: matches the event-queue entry that wakes
// the process passed in SEARCH
static bool heap_delete_proc_cb(uint64_t key, void *value, void *search)
{
   return pointer_tag(value) == EVENT_PROCESS
      && untag_pointer(value, rt_proc_t) == search;
}
2748

2749
// Evaluate trigger T and return whether it fired.  The result is cached
// per (time, delta) pair so a trigger shared by several nexuses is only
// evaluated once each cycle.
static bool run_trigger(rt_model_t *m, rt_trigger_t *t)
{
   if (t->when == m->now && t->iteration == m->iteration)
      return t->result.integer != 0;   // Cached

   switch (t->kind) {
   case FUNC_TRIGGER:
      // User-defined trigger function compiled by the JIT
      {
         tlab_t tlab = jit_null_tlab(m->jit);
         if (!jit_vfastcall(m->jit, t->handle, t->args, t->nargs,
                            &t->result, 1, &tlab))
            m->force_stop = true;

         TRACE("run trigger %p %s ==> %"PRIi64, t,
               istr(jit_get_name(m->jit, t->handle)), t->result.integer);
      }
      break;

   case OR_TRIGGER:
      // Fires when either sub-trigger fires
      {
         rt_trigger_t *left = t->args[0].pointer;
         rt_trigger_t *right = t->args[1].pointer;
         t->result.integer = run_trigger(m, left) || run_trigger(m, right);

         TRACE("or trigger %p ==> %"PRIi64, t, t->result.integer);
      }
      break;

   case CMP_TRIGGER:
      // Fires when a scalar signal element equals a constant
      {
         rt_signal_t *s = t->args[0].pointer;
         uint32_t offset = t->args[1].integer;
         int64_t right = t->args[2].integer;

#define COMPARE_SCALAR(type) do {                                       \
            const type *data = (type *)s->shared.data;                  \
            t->result.integer = (data[offset] == right);                \
      } while (0)

         FOR_ALL_SIZES(s->nexus.size, COMPARE_SCALAR);

         TRACE("cmp trigger %p ==> %"PRIi64, t, t->result.integer);
      }
      break;
   }

   // Cache the result for the remainder of this delta cycle
   t->when = m->now;
   t->iteration = m->iteration;

   return t->result.integer != 0;
}
2800

2801
// Schedule the given wakeable object (process, property, implicit
// signal, value-change watch or signal transfer) to run later this
// cycle.  Postponed objects go on the postponed queue; everything else
// runs in the normal process queue.  Idempotent within a cycle.
static void wakeup_one(rt_model_t *m, rt_wakeable_t *obj)
{
   if (obj->pending)
      return;   // Already scheduled

   deferq_t *dq = obj->postponed ? &m->postponedq : &m->procq;

   switch (obj->kind) {
   case W_PROC:
      {
         rt_proc_t *proc = container_of(obj, rt_proc_t, wakeable);
         TRACE("wakeup %sprocess %s", obj->postponed ? "postponed " : "",
               istr(proc->name));
         deferq_do(dq, async_run_process, proc);

         if (proc->wakeable.delayed) {
            // This process was already scheduled to run at a later
            // time so we need to delete it from the simulation queue
            heap_delete(m->eventq_heap, heap_delete_proc_cb, proc);
            proc->wakeable.delayed = false;
         }
      }
      break;

   case W_PROPERTY:
      {
         rt_prop_t *prop = container_of(obj, rt_prop_t, wakeable);
         TRACE("wakeup property %s", istr(prop->name));
         deferq_do(dq, async_update_property, prop);
      }
      break;

   case W_IMPLICIT:
      {
         // Implicit signals update on a dedicated queue after drivers
         rt_implicit_t *imp = container_of(obj, rt_implicit_t, wakeable);
         TRACE("wakeup implicit signal %s closure %s",
               istr(tree_ident(imp->signal.where)),
               istr(jit_get_name(m->jit, imp->closure.handle)));
         deferq_do(&m->implicitq, async_update_implicit_signal, imp);
      }
      break;

   case W_WATCH:
      {
         rt_watch_t *w = container_of(obj, rt_watch_t, wakeable);
         TRACE("wakeup %svalue change callback %p %s",
               obj->postponed ? "postponed " : "", w,
               debug_symbol_name(w->fn));

         assert(!w->wakeable.zombie);
         deferq_do(dq, async_watch_callback, w);
      }
      break;

   case W_TRANSFER:
      {
         rt_transfer_t *t = container_of(obj, rt_transfer_t, wakeable);
         TRACE("wakeup signal transfer for %s",
               istr(tree_ident(t->target->signal->where)));
         deferq_do(dq, async_transfer_signal, t);
      }
      break;
   }

   set_pending(obj);
}
2867

2868
// Record an event on nexus N at the current time/delta and wake every
// object sensitive to it.  The pending field is a tagged pointer: tag 1
// means a single rt_wakeable_t, otherwise it points to an rt_pending_t
// array of waiters (NULL slots are waiters that were removed).
static void notify_event(rt_model_t *m, rt_nexus_t *n)
{
   // Must only be called once per cycle
   assert(n->last_event != m->now || n->event_delta != m->iteration);

   n->last_event = m->now;
   n->event_delta = m->iteration;

   if (n->flags & NET_F_CACHE_EVENT)
      n->signal->shared.flags |= SIG_F_EVENT_FLAG;

   if (pointer_tag(n->pending) == 1) {
      rt_wakeable_t *wake = untag_pointer(n->pending, rt_wakeable_t);
      wakeup_one(m, wake);
   }
   else if (n->pending != NULL) {
      rt_pending_t *p = untag_pointer(n->pending, rt_pending_t);
      for (int i = 0; i < p->count; i++) {
         if (p->wake[i] != NULL)
            wakeup_one(m, p->wake[i]);
      }
   }
}
2891

2892
// Commit VALUE as the effective value of nexus N.  On a change the old
// effective value becomes the 'LAST_VALUE and sensitive objects are
// woken via notify_event
static void put_effective(rt_model_t *m, rt_nexus_t *n, const void *value)
{
   TRACE("update %s effective value %s", trace_nexus(n), fmt_nexus(n, value));

   unsigned char *eff = nexus_effective(n);
   unsigned char *last = nexus_last_value(n);

   const size_t valuesz = n->size * n->width;

   if (!cmp_bytes(eff, value, valuesz)) {
      // Save old value and install the new one in a single pass
      copy2(last, eff, value, valuesz);
      notify_event(m, n);
   }
}
2906

2907
// Queue nexus N for an effective-value recalculation this cycle.  The
// NET_F_PENDING flag guarantees at most one queue entry per nexus.
// Effective values propagate source-to-sink so the heap is keyed on
// inverted rank.
static void enqueue_effective(rt_model_t *m, rt_nexus_t *n)
{
   if ((n->flags & NET_F_PENDING) == 0) {
      n->flags |= NET_F_PENDING;
      heap_insert(m->effective_heap, MAX_RANK - n->rank, n);
   }
}
2915

2916
// Recalculate the effective value of nexus N and propagate the update
// back through any INOUT port sources so their effective values are
// recalculated too
static void update_effective(rt_model_t *m, rt_nexus_t *n)
{
   n->active_delta = m->iteration;
   n->flags &= ~NET_F_PENDING;

   calculate_effective_value(m, n);

   if (n->n_sources > 0) {
      for (rt_source_t *s = &(n->sources); s; s = s->chain_input) {
         if (s->tag != SOURCE_PORT)
            continue;
         else if (s->u.port.conv_func != NULL) {
            // Conversion function: queue every INOUT input of the
            // conversion, not just the direct port
            rt_conv_func_t *cf = s->u.port.conv_func;
            for (int i = 0; i < cf->ninputs; i++) {
               if (cf->inputs[i].nexus->flags & NET_F_INOUT)
                  enqueue_effective(m, cf->inputs[i].nexus);
            }
         }
         else if (s->u.port.input->flags & NET_F_INOUT)
            enqueue_effective(m, s->u.port.input);
      }
   }
}
2939

2940
static void put_driving(rt_model_t *m, rt_nexus_t *n, const void *value)
970,450✔
2941
{
2942
   if (n->flags & NET_F_EFFECTIVE) {
970,450✔
2943
      TRACE("update %s driving value %s", trace_nexus(n), fmt_nexus(n, value));
2,430✔
2944

2945
      memcpy(nexus_driving(n), value, n->size * n->width);
2,430✔
2946

2947
      assert(!(n->flags & NET_F_PENDING));
2,430✔
2948
      n->flags |= NET_F_PENDING;
2,430✔
2949
      n->flags &= ~NET_F_HAS_INITIAL;
2,430✔
2950
      heap_insert(m->effective_heap, MAX_RANK - n->rank, n);
2,430✔
2951
   }
2952
   else
2953
      put_effective(m, n, value);
968,020✔
2954
}
970,450✔
2955

2956
// Queue nexus N for a driving-value recalculation once all of its
// sources have been updated this cycle.  NET_F_PENDING ensures the
// nexus is queued at most once; the heap orders updates by rank so
// inputs resolve before outputs.
static void defer_driving_update(rt_model_t *m, rt_nexus_t *n)
{
   if ((n->flags & NET_F_PENDING) == 0) {
      TRACE("defer %s driving value update", trace_nexus(n));
      heap_insert(m->driving_heap, n->rank, n);
      n->flags |= NET_F_PENDING;
   }
}
2965

2966
// Recalculate the driving value of nexus N and recursively push the
// change through its output ports.  With multiple sources and SAFE
// false the update is deferred to the rank-ordered driving heap so it
// only happens after all sources have settled.
static void update_driving(rt_model_t *m, rt_nexus_t *n, bool safe)
{
   if (n->n_sources == 1 || safe) {
      n->active_delta = m->iteration;
      n->flags &= ~NET_F_PENDING;

      calculate_driving_value(m, n);

      // Update outputs if the effective value must be calculated
      // separately or there was an event on this signal
      const bool update_outputs = !!(n->flags & NET_F_EFFECTIVE)
         || (n->event_delta == m->iteration && n->last_event == m->now);

      if (update_outputs) {
         for (rt_source_t *o = n->outputs; o; o = o->chain_output) {
            switch (o->tag) {
            case SOURCE_PORT:
               if (o->u.port.conv_func != NULL)
                  defer_driving_update(m, o->u.port.output);
               else
                  update_driving(m, o->u.port.output, false);
               break;
            case SOURCE_IMPLICIT:
               update_driving(m, o->u.pseudo.nexus , false);
               break;
            default:
               should_not_reach_here();
            }
         }
      }
   }
   else
      defer_driving_update(m, n);
}
3000

3001
// Pop the next pending transaction for SOURCE if it matures at the
// current time and recalculate the driving value of N.  A negative
// "when" field encodes a null transaction (see sched_disconnect) which
// disconnects the source instead of applying a value.
static void update_driver(rt_model_t *m, rt_nexus_t *n, rt_source_t *source)
{
   waveform_t *w_now  = &(source->u.driver.waveforms);
   waveform_t *w_next = w_now->next;

   if (likely(w_next != NULL && w_next->when == m->now)) {
      free_value(n, w_now->value);
      *w_now = *w_next;
      free_waveform(m, w_next);
      source->disconnected = 0;
      update_driving(m, n, false);
   }
   else if (unlikely(w_next != NULL && w_next->when == -m->now)) {
      // Disconnect source due to null transaction
      *w_now = *w_next;
      free_waveform(m, w_next);
      source->disconnected = 1;
      update_driving(m, n, false);
   }
}
3021

3022
// Apply a zero-delay fast-path driver update queued by sched_driver.
// Falls back to the normal transaction path if the fast-driver flag was
// cleared after the update was queued.
static void fast_update_driver(rt_model_t *m, rt_nexus_t *nexus)
{
   rt_source_t *src = &(nexus->sources);

   if (likely(nexus->flags & NET_F_FAST_DRIVER)) {
      // Preconditions for fast driver updates
      assert(nexus->n_sources == 1);
      assert(src->tag == SOURCE_DRIVER);
      assert(src->u.driver.waveforms.next == NULL);

      update_driving(m, nexus, false);
   }
   else
      update_driver(m, nexus, src);

   assert(src->fastqueued);
   src->fastqueued = 0;
}
3040

3041
// Apply queued fast-path updates for every nexus of SIGNAL in one pass.
// If too few nexuses actually had updates queued the whole-signal fast
// path is disabled as the iteration cost outweighs the saving.
static void fast_update_all_drivers(rt_model_t *m, rt_signal_t *signal)
{
   assert(signal->shared.flags & NET_F_FAST_DRIVER);

   rt_nexus_t *n = &(signal->nexus);
   assert(n->sources.sigqueued);
   n->sources.sigqueued = 0;

   int count = 0;
   for (int i = 0; i < signal->n_nexus; i++, n = n->chain) {
      if (n->sources.fastqueued) {
         fast_update_driver(m, n);
         count++;
      }
   }

   if (count < signal->n_nexus >> 1) {
      // Unlikely to be worth the iteration cost
      signal->shared.flags &= ~NET_F_FAST_DRIVER;
   }
}
3062

3063
// Deferred-queue thunk for a normal (transaction-based) driver update
static void async_update_driver(rt_model_t *m, void *arg)
{
   rt_source_t *const source = arg;
   update_driver(m, source->u.driver.nexus, source);
}
3068

3069
// Deferred-queue thunk for a single fast-path driver update
static void async_fast_driver(rt_model_t *m, void *arg)
{
   rt_source_t *const source = arg;
   fast_update_driver(m, source->u.driver.nexus);
}
3074

3075
// Deferred-queue thunk covering every queued fast update on one signal
static void async_fast_all_drivers(rt_model_t *m, void *arg)
{
   fast_update_all_drivers(m, (rt_signal_t *)arg);
}
3080

3081
// Deferred-queue thunk for a pseudo source (force or deposit): refresh
// the driving value of the affected nexus and clear the queued flag
static void async_pseudo_source(rt_model_t *m, void *arg)
{
   rt_source_t *const src = arg;
   assert(src->tag == SOURCE_FORCING || src->tag == SOURCE_DEPOSIT);

   update_driving(m, src->u.pseudo.nexus, false);

   assert(src->pseudoqueued);
   src->pseudoqueued = 0;
}
3091

3092
// Deferred-queue thunk for a concurrent signal assignment implemented
// as a direct source-to-target transfer: copies the source's current
// effective value onto each target nexus via the normal driver path
static void async_transfer_signal(rt_model_t *m, void *arg)
{
   rt_transfer_t *t = arg;

   assert(t->wakeable.pending);
   t->wakeable.pending = false;

   rt_nexus_t *n = t->target;
   char *vptr = nexus_effective(t->source);
   for (int count = t->count; count > 0; n = n->chain) {
      count -= n->width;
      assert(count >= 0);

      sched_driver(m, n, t->after, t->reject, vptr, t->proc);
      vptr += n->width * n->size;
   }
}
3109

3110
// Re-evaluate an implicit signal ('STABLE, 'QUIET, etc.) by calling its
// JIT-compiled closure, then publish the new value.  A FALSE result on
// a driven implicit signal also schedules the transition back to TRUE
// after the attribute's delay.
static void update_implicit_signal(rt_model_t *m, rt_implicit_t *imp)
{
   model_thread_t *thread = model_thread(m);
   assert(thread->active_obj == NULL);
   thread->active_obj = &(imp->wakeable);

   jit_scalar_t result;
   if (!jit_try_call(m->jit, imp->closure.handle, &result,
                     imp->closure.context, imp->signal.shared.data[0]))
      m->force_stop = true;

   thread->active_obj = NULL;

   TRACE("implicit signal %s new value %"PRIi64,
         istr(tree_ident(imp->signal.where)), result.integer);

   assert(imp->signal.n_nexus == 1);
   rt_nexus_t *n0 = &(imp->signal.nexus);

   n0->active_delta = m->iteration;

   if (n0->n_sources > 0 && n0->sources.tag == SOURCE_DRIVER) {
      if (!result.integer) {
         // Update driver for 'STABLE and 'QUIET
         // TODO: this should happen inside the callback
         waveform_t *w = alloc_waveform(m);
         w->when  = m->now + imp->delay;
         w->next  = NULL;
         w->value = alloc_value(m, n0);

         w->value.bytes[0] = 1;   // Boolean TRUE

         if (!insert_transaction(m, n0, &(n0->sources), w, w->when, imp->delay))
            deltaq_insert_driver(m, imp->delay, &(n0->sources));

         put_effective(m, n0, &result.integer);
      }
      else if (n0->sources.u.driver.waveforms.next == NULL)
         put_effective(m, n0, &result.integer);
   }
   else
      put_effective(m, n0, &result.integer);
}
3153

3154
static void iteration_limit_proc_cb(void *fn, void *arg, void *extra)
4✔
3155
{
3156
   diag_t *d = extra;
4✔
3157
   rt_proc_t *proc = NULL;
4✔
3158

3159
   if (fn == async_run_process)
4✔
3160
      proc = arg;
UNCOV
3161
   else if (fn == async_transfer_signal) {
×
UNCOV
3162
      rt_transfer_t *t = arg;
×
3163
      proc = t->proc;
×
3164
   }
3165

3166
   if (proc == NULL)
4✔
3167
      return;
3168

3169
   const loc_t *loc = tree_loc(proc->where);
4✔
3170
   diag_hint(d, loc, "process %s is active", istr(proc->name));
4✔
3171
}
3172

3173
static void iteration_limit_driver_cb(void *fn, void *arg, void *extra)
20✔
3174
{
3175
   diag_t *d = extra;
20✔
3176
   tree_t decl = NULL;
20✔
3177

3178
   if (fn == async_update_driver || fn == async_fast_driver) {
20✔
3179
      rt_source_t *src = arg;
16✔
3180
      if (src->tag == SOURCE_DRIVER)
16✔
3181
         decl = src->u.driver.nexus->signal->where;
16✔
3182
   }
3183
   else if (fn == async_fast_all_drivers) {
4✔
3184
      rt_signal_t *s = arg;
4✔
3185
      decl = s->where;
4✔
3186
   }
3187

3188
   if (decl == NULL)
20✔
3189
      return;
3190

3191
   diag_hint(d, tree_loc(decl), "driver for %s %s is active",
52✔
3192
             tree_kind(decl) == T_PORT_DECL ? "port" : "signal",
20✔
3193
             istr(tree_ident(decl)));
3194
}
3195

3196
// Emit a fatal diagnostic when the --stop-delta limit is reached,
// listing the processes and drivers still scheduled, and stop the model
static void reached_iteration_limit(rt_model_t *m)
{
   diag_t *d = diag_new(DIAG_FATAL, NULL);
   diag_printf(d, "limit of %d delta cycles reached", m->stop_delta);

   // Name whatever is still queued for the next delta cycle
   deferq_scan(&m->delta_procq, iteration_limit_proc_cb, d);
   deferq_scan(&m->delta_driverq, iteration_limit_driver_cb, d);

   diag_hint(d, NULL, "you can increase this limit with $bold$--stop-delta$$");
   diag_emit(d);

   m->force_stop = true;
}
3210

3211
// Reconcile the per-signal SIG_F_EVENT_FLAG cache with the per-nexus
// event timestamps at the end of a cycle.  notify_event sets the flag;
// this clears it for signals that did not see an event this cycle.
static void sync_event_cache(rt_model_t *m)
{
   for (int i = 0; i < m->eventsigs.count; i++) {
      rt_signal_t *s = m->eventsigs.items[i];
      assert(s->shared.flags & SIG_F_CACHE_EVENT);

      const bool event = s->nexus.last_event == m->now
         && s->nexus.event_delta == m->iteration;

      TRACE("sync event flag %d for %s", event, istr(tree_ident(s->where)));

      if (event)
         assert(s->shared.flags & SIG_F_EVENT_FLAG);   // Set by notify_event
      else
         s->shared.flags &= ~SIG_F_EVENT_FLAG;
   }
}
3228

3229
// Exchange the contents of two deferred-work queues in O(1)
static void swap_deferq(deferq_t *a, deferq_t *b)
{
   const deferq_t saved = *a;
   *a = *b;
   *b = saved;
}
3235

3236
// Execute one simulation cycle: advance time or the delta counter, pop
// matured events, run driver updates, resolve driving then effective
// values, update implicit signals, then run processes (postponed ones
// only at the end of a time step)
static void model_cycle(rt_model_t *m)
{
   // Simulation cycle is described in LRM 93 section 12.6.4

   const bool is_delta_cycle = m->next_is_delta;
   m->next_is_delta = false;

   if (is_delta_cycle)
      m->iteration = m->iteration + 1;
   else {
      // Advance time to the earliest scheduled event
      m->now = heap_min_key(m->eventq_heap);
      m->iteration = 0;
   }

   TRACE("begin cycle");

#if TRACE_DELTAQ > 0
   if (__trace_on)
      deltaq_dump(m);
#endif

   // Work queued for "next delta" becomes this cycle's work
   swap_deferq(&m->procq, &m->delta_procq);
   swap_deferq(&m->driverq, &m->delta_driverq);

   if (m->iteration == 0)
      global_event(m, RT_NEXT_TIME_STEP);

   global_event(m, RT_NEXT_CYCLE);

   if (!is_delta_cycle) {
      // Drain every event scheduled for the current time
      for (;;) {
         void *e = heap_extract_min(m->eventq_heap);
         switch (pointer_tag(e)) {
         case EVENT_PROCESS:
            {
               rt_proc_t *proc = untag_pointer(e, rt_proc_t);
               assert(proc->wakeable.delayed);
               proc->wakeable.delayed = false;
               set_pending(&proc->wakeable);
               deferq_do(&m->procq, async_run_process, proc);
            }
            break;
         case EVENT_DRIVER:
            {
               rt_source_t *source = untag_pointer(e, rt_source_t);
               deferq_do(&m->driverq, async_update_driver, source);
            }
            break;
         case EVENT_TIMEOUT:
            {
               rt_callback_t *cb = untag_pointer(e, rt_callback_t);
               deferq_do(&m->driverq, async_timeout_callback, cb);
            }
            break;
         }

         if (heap_size(m->eventq_heap) == 0)
            break;
         else if (heap_min_key(m->eventq_heap) > m->now)
            break;
      }
   }

   deferq_run(m, &m->driverq);

   // Resolve deferred driving values in rank order (inputs first)
   while (heap_size(m->driving_heap) > 0) {
      rt_nexus_t *n = heap_extract_min(m->driving_heap);
      update_driving(m, n, true);
   }

   // Then effective values in reverse rank order (outputs first)
   while (heap_size(m->effective_heap) > 0) {
      rt_nexus_t *n = heap_extract_min(m->effective_heap);
      update_effective(m, n);
   }

   sync_event_cache(m);

   // Update implicit signals
   deferq_run(m, &m->implicitq);

#if TRACE_SIGNALS > 0
   if (__trace_on)
      dump_signals(m, m->root);
#endif

   if (m->shuffle)
      deferq_shuffle(&m->procq);

   // Run all non-postponed processes and event callbacks
   deferq_run(m, &m->procq);

   global_event(m, RT_END_OF_PROCESSES);

   if (!m->next_is_delta)
      global_event(m, RT_LAST_KNOWN_DELTA_CYCLE);

   if (!m->next_is_delta) {
      m->can_create_delta = false;

      // Run all postponed processes and event callbacks
      deferq_run(m, &m->postponedq);

      global_event(m, RT_END_TIME_STEP);

      m->can_create_delta = true;
   }
   else if (m->stop_delta > 0 && m->iteration == m->stop_delta)
      reached_iteration_limit(m);
}
3345

3346
// Decide whether the main simulation loop should exit: a forced stop,
// an exhausted event queue, or the next event lying beyond STOP_TIME
static bool should_stop_now(rt_model_t *m, uint64_t stop_time)
{
   if (m->force_stop) {
      // Make sure we print the interrupted message if this was the
      // result of an interrupt
      jit_check_interrupt(m->jit);
      return true;
   }

   if (m->next_is_delta)
      return false;   // Delta cycles pending at the current time

   if (heap_size(m->eventq_heap) == 0)
      return true;    // Nothing left to simulate

   return heap_min_key(m->eventq_heap) > stop_time;
}
3361

3362
// At the end of simulation, recursively report PSL liveness properties
// left in a strong state: calling the property with an invalid state
// value forces its assertion failure path
static void check_liveness_properties(rt_model_t *m, rt_scope_t *s)
{
   model_thread_t *thread = model_thread(m);

   for (int i = 0; i < s->properties.count; i++) {
      rt_prop_t *p = s->properties.items[i];
      if (p->strong) {
         TRACE("property %s in strong state", istr(p->name));

         // Passing an invalid state triggers the assertion failure
         jit_scalar_t args[] = {
            { .pointer = *mptr_get(p->privdata) ?: (void *)-1 },
            { .pointer = *mptr_get(p->scope->privdata) },
            { .integer = INT_MAX },
         };
         jit_vfastcall(m->jit, p->handle, args, ARRAY_LEN(args),
                       NULL, 0, thread->tlab);
      }
   }

   for (int i = 0; i < s->children.count; i++)
      check_liveness_properties(m, s->children.items[i]);
}
3385

3386
// Run the simulation until STOP_TIME is reached, the event queue is
// exhausted, or a stop is forced.  Fires the start/end-of-simulation
// global callbacks and the final liveness check.
void model_run(rt_model_t *m, uint64_t stop_time)
{
   MODEL_ENTRY(m);

   if (m->force_stop)
      return;   // Was error during initialisation

   global_event(m, RT_START_OF_SIMULATION);

   while (!should_stop_now(m, stop_time))
      model_cycle(m);

   global_event(m, RT_END_OF_SIMULATION);

   if (m->liveness)
      check_liveness_properties(m, m->root);
}
3403

3404
// Execute a single simulation cycle (used for interactive stepping).
// Returns true when the simulation has nothing further to do.
bool model_step(rt_model_t *m)
{
   MODEL_ENTRY(m);

   if (!m->force_stop)
      model_cycle(m);

   return should_stop_now(m, TIME_HIGH);
}
3413

3414
// A postponed process must not schedule zero-delay transactions, as
// that would create a delta cycle after the last known delta (LRM rule)
static inline void check_postponed(int64_t after, rt_proc_t *proc)
{
   if (likely(!(proc->wakeable.postponed && after == 0)))
      return;

   fatal("postponed process %s cannot cause a delta cycle",
         istr(proc->name));
}
3420

3421
// The pulse rejection limit of an inertial assignment must not exceed
// its delay (LRM rule); report a fatal runtime error otherwise
static inline void check_reject_limit(rt_signal_t *s, uint64_t after,
                                      uint64_t reject)
{
   if (unlikely(reject > after))
      jit_msg(NULL, DIAG_FATAL, "signal %s pulse reject limit %s is greater "
              "than delay %s", istr(tree_ident(s->where)),
              trace_time(reject), trace_time(after));
}
3429

3430
// Reject negative delays in wait/assignment statements with a fatal
// runtime error
static inline void check_delay(int64_t delay)
{
   if (likely(delay >= 0))
      return;

   char buf[32];
   fmt_time_r(buf, sizeof(buf), delay, " ");
   jit_msg(NULL, DIAG_FATAL, "illegal negative delay %s", buf);
}
3438

3439
// Force COUNT scalar elements of signal S starting at OFFSET to the
// given VALUES (VHDL-2008 force).  Each affected nexus gets a
// SOURCE_FORCING pseudo source that overrides its resolved value until
// release_signal is called.
void force_signal(rt_model_t *m, rt_signal_t *s, const void *values,
                  int offset, size_t count)
{
   RT_LOCK(s->lock);

   TRACE("force signal %s+%d to %s", istr(tree_ident(s->where)), offset,
         fmt_values(values, count));

   assert(m->can_create_delta);

   rt_nexus_t *n = split_nexus(m, s, offset, count);

   // Track the remaining element count in a signed variable: "count" is
   // size_t so "count -= n->width" would wrap on underflow and the old
   // assert(count >= 0) was vacuously true
   int64_t remain = count;
   const char *vptr = values;
   for (; remain > 0; n = n->chain) {
      remain -= n->width;
      assert(remain >= 0);   // split_nexus aligned the nexus boundaries

      n->flags |= NET_F_FORCED;

      rt_source_t *src = get_pseudo_source(m, n, SOURCE_FORCING);
      copy_value_ptr(n, &(src->u.pseudo.value), vptr);
      src->disconnected = 0;

      if (!src->pseudoqueued) {
         deltaq_insert_pseudo_source(m, src);
         src->pseudoqueued = 1;
      }

      vptr += n->width * n->size;
   }
}
3469

3470
// Release a previously forced signal range: clear the forced flag on each
// nexus and mark its forcing pseudo-source disconnected so the normal
// drivers take effect again on the next delta cycle.
void release_signal(rt_model_t *m, rt_signal_t *s, int offset, size_t count)
{
   RT_LOCK(s->lock);

   TRACE("release signal %s+%d", istr(tree_ident(s->where)), offset);

   assert(m->can_create_delta);

   rt_nexus_t *n = split_nexus(m, s, offset, count);
   for (; count > 0; n = n->chain) {
      count -= n->width;
      assert(count >= 0);

      n->flags &= ~NET_F_FORCED;

      rt_source_t *src = get_pseudo_source(m, n, SOURCE_FORCING);
      src->disconnected = 1;

      // Only queue the pseudo-source once until it is processed
      if (!src->pseudoqueued) {
         deltaq_insert_pseudo_source(m, src);
         src->pseudoqueued = 1;
      }
   }
}
3494

3495
// Deposit a value onto part of a signal via a SOURCE_DEPOSIT pseudo-source.
// Unlike force_signal this does not set NET_F_FORCED, so the deposited
// value can subsequently be overridden by normal drivers.
void deposit_signal(rt_model_t *m, rt_signal_t *s, const void *values,
                    int offset, size_t count)
{
   RT_LOCK(s->lock);

   TRACE("deposit signal %s+%d to %s", istr(tree_ident(s->where)),
         offset, fmt_values(values, count));

   assert(m->can_create_delta);

   rt_nexus_t *n = split_nexus(m, s, offset, count);
   const char *vptr = values;
   for (; count > 0; n = n->chain) {
      count -= n->width;
      assert(count >= 0);

      rt_source_t *src = get_pseudo_source(m, n, SOURCE_DEPOSIT);
      copy_value_ptr(n, &(src->u.pseudo.value), vptr);
      src->disconnected = 0;

      // Only queue the pseudo-source once until it is processed
      if (!src->pseudoqueued) {
         deltaq_insert_pseudo_source(m, src);
         src->pseudoqueued = 1;
      }

      vptr += n->width * n->size;
   }
}
3523

3524
// Returns true while the model still allows new delta-cycle activity to
// be scheduled (checked by force/release/deposit before queueing updates).
bool model_can_create_delta(rt_model_t *m)
{
   return m->can_create_delta;
}
3528

3529
// Return the current simulation time; if deltas is non-NULL also return
// the current delta cycle number (clamped to zero) through it.
int64_t model_now(rt_model_t *m, unsigned *deltas)
{
   if (deltas != NULL)
      *deltas = (m->iteration > 0) ? m->iteration : 0;

   return m->now;
}
3536

3537
// Return the time of the next queued event, or TIME_HIGH when the event
// queue is empty.
int64_t model_next_time(rt_model_t *m)
{
   if (heap_size(m->eventq_heap) > 0)
      return heap_min_key(m->eventq_heap);

   return TIME_HIGH;
}
3544

3545
// Request that the simulation stop.  The relaxed atomic store indicates the
// flag is intended to be read asynchronously from another context
// (presumably polled by the run loop — confirm against the caller).
void model_stop(rt_model_t *m)
{
   relaxed_store(&m->force_stop, true);
}
3549

3550
// Register a callback for a global runtime event.  Callbacks are kept in
// a singly-linked list per event and appended at the tail so they fire in
// registration order.
void model_set_global_cb(rt_model_t *m, rt_event_t event, rt_event_fn_t fn,
                         void *user)
{
   assert(event < RT_LAST_EVENT);

   rt_callback_t *cb = xcalloc(sizeof(rt_callback_t));
   cb->next = NULL;
   cb->fn   = fn;
   cb->user = user;

   // Walk to the tail pointer and link the new callback there
   rt_callback_t **tailp = &(m->global_cbs[event]);
   while (*tailp != NULL)
      tailp = &((*tailp)->next);

   *tailp = cb;
}
3566

3567
// Register a one-shot callback to fire when simulation time reaches `when`.
// The callback is stored in the event queue tagged as EVENT_TIMEOUT.
void model_set_timeout_cb(rt_model_t *m, uint64_t when, rt_event_fn_t fn,
                          void *user)
{
   assert(when > m->now);   // TODO: delta timeouts?

   rt_callback_t *cb = xcalloc(sizeof(rt_callback_t));
   cb->fn   = fn;
   cb->user = user;
   cb->next = NULL;

   heap_insert(m->eventq_heap, when, tag_pointer(cb, EVENT_TIMEOUT));
}
3580

3581
rt_watch_t *watch_new(rt_model_t *m, sig_event_fn_t fn, void *user,
1,433✔
3582
                      watch_kind_t kind, unsigned slots)
3583
{
3584
   rt_watch_t *w = xcalloc_flex(sizeof(rt_watch_t), slots,
1,433✔
3585
                                sizeof(rt_signal_t *));
3586
   w->fn        = fn;
1,433✔
3587
   w->chain_all = m->watches;
1,433✔
3588
   w->user_data = user;
1,433✔
3589
   w->num_slots = slots;
1,433✔
3590

3591
   w->wakeable.kind      = W_WATCH;
1,433✔
3592
   w->wakeable.postponed = (kind == WATCH_POSTPONED);
1,433✔
3593
   w->wakeable.pending   = false;
1,433✔
3594
   w->wakeable.delayed   = false;
1,433✔
3595

3596
   m->watches = w;
1,433✔
3597

3598
   return w;
1,433✔
3599
}
3600

3601
// Destroy a watch: unsubscribe it from every nexus of every watched signal,
// unlink it from the model's watch list, then free it.  If a callback for
// this watch is still pending it is instead marked as a zombie and freed
// later from the callback path.
void watch_free(rt_model_t *m, rt_watch_t *w)
{
   assert(!w->wakeable.zombie);

   // Clear pending event subscriptions for all watched signals
   for (int i = 0; i < w->next_slot; i++) {
      rt_nexus_t *n = &(w->signals[i]->nexus);
      for (int j = 0; j < w->signals[i]->n_nexus; j++, n = n->chain)
         clear_event(m, n, &(w->wakeable));
   }

   // Unlink from the singly-linked list of all watches
   rt_watch_t **last = &m->watches;
   for (rt_watch_t *it = *last; it;
        last = &(it->chain_all), it = it->chain_all) {
      if (it == w) {
         *last = it->chain_all;
         if (w->wakeable.pending)
            w->wakeable.zombie = true;   // Will be freed in callback
         else
            free(w);
         return;
      }
   }

   // The watch must always be on the list
   should_not_reach_here();
}
3626

3627
// Subscribe an existing watch to events on signal s.  The watch has a
// fixed number of slots allocated in watch_new; the caller must not exceed
// it.  Returns the same watch for convenience.
rt_watch_t *model_set_event_cb(rt_model_t *m, rt_signal_t *s, rt_watch_t *w)
{
   assert(!w->wakeable.zombie);
   assert(w->next_slot < w->num_slots);

   w->signals[w->next_slot++] = s;

   // Wake the watch when any nexus of the signal has an event
   rt_nexus_t *n = &(s->nexus);
   for (int i = 0; i < s->n_nexus; i++, n = n->chain)
      sched_event(m, n, &(w->wakeable));

   return w;
}
3640

UNCOV
3641
// JIT interrupt hook: report a fatal "interrupted" diagnostic, naming the
// active process when one exists.
static void handle_interrupt_cb(jit_t *j, void *ctx)
{
   rt_proc_t *proc = get_active_proc();

   if (proc == NULL) {
      diag_t *d = diag_new(DIAG_FATAL, NULL);
      diag_printf(d, "interrupted");
      diag_emit(d);
   }
   else
      jit_msg(NULL, DIAG_FATAL, "interrupted in process %s", istr(proc->name));
}
3653

3654
// Interrupt a running simulation: request a stop and ask the JIT to break
// out of compiled code, reporting via handle_interrupt_cb.
void model_interrupt(rt_model_t *m)
{
   model_stop(m);
   jit_interrupt(m->jit, handle_interrupt_cb, m);
}
3659

3660
// Compute the process exit status for a finished simulation.  An explicit
// exit status from the JIT wins; otherwise hitting the delta-cycle stop
// limit is a failure; otherwise defer to the VHDL assertion severity.
int model_exit_status(rt_model_t *m)
{
   int status;
   if (jit_exit_status(m->jit, &status))
      return status;

   const bool hit_delta_limit =
      m->stop_delta > 0 && m->iteration == m->stop_delta;

   return hit_delta_limit ? EXIT_FAILURE : get_vhdl_assert_exit_status();
}
3670

3671
// Implements the 'ACTIVE attribute for a single nexus: true if any driver
// fired in the current (or, for was_active sources, the previous) delta
// cycle.  Port sources recurse into their input nexus, or into each input
// of a conversion function when one is attached.
static bool nexus_active(rt_model_t *m, rt_nexus_t *nexus)
{
   if (nexus->n_sources > 0) {
      for (rt_source_t *s = &(nexus->sources); s; s = s->chain_input) {
         if (s->tag == SOURCE_PORT) {
            rt_conv_func_t *cf = s->u.port.conv_func;
            if (cf == NULL) {
               RT_LOCK(s->u.port.input->signal->lock);
               if (nexus_active(m, s->u.port.input))
                  return true;
            }
            else {
               // Activity on any conversion function input counts
               for (int i = 0; i < cf->ninputs; i++) {
                  if (nexus_active(m, cf->inputs[i].nexus))
                     return true;
               }
            }
         }
         else if (s->tag == SOURCE_DRIVER
                  && s->u.driver.waveforms.when == m->now) {
            if (nexus->active_delta == m->iteration)
               return true;
            else if (nexus->active_delta == m->iteration + 1 && s->was_active)
               return true;
         }
      }
   }

   return false;
}
3701

3702
// Implements the 'LAST_ACTIVE attribute for a single nexus: the smallest
// time since any source was last active, or TIME_HIGH if never.  Port
// sources recurse into their input nexus, or into each input of an
// attached conversion function.
static uint64_t nexus_last_active(rt_model_t *m, rt_nexus_t *nexus)
{
   int64_t last = TIME_HIGH;

   if (nexus->n_sources > 0) {
      for (rt_source_t *s = &(nexus->sources); s; s = s->chain_input) {
         if (s->tag == SOURCE_PORT) {
            rt_conv_func_t *cf = s->u.port.conv_func;
            if (cf == NULL) {
               RT_LOCK(s->u.port.input->signal->lock);
               last = MIN(last, nexus_last_active(m, s->u.port.input));
            }
            else {
               for (int i = 0; i < cf->ninputs; i++) {
                  // Fixed: lock the nexus actually recursed into below
                  // (inputs[i] is a struct; previously this dereferenced
                  // it as a pointer which did not match the next line)
                  RT_LOCK(cf->inputs[i].nexus->signal->lock);
                  last = MIN(last, nexus_last_active(m, cf->inputs[i].nexus));
               }
            }
         }
         else if (s->tag == SOURCE_DRIVER
                  && s->u.driver.waveforms.when <= m->now)
            last = MIN(last, m->now - s->u.driver.waveforms.when);
      }
   }

   return last;
}
3729

3730
// Copy the current forcing value of every nexus of signal s into the
// caller-supplied buffer, which must be at least s->shared.size bytes.
// Each nexus is expected to have a SOURCE_FORCING pseudo-source.
void get_forcing_value(rt_signal_t *s, uint8_t *value)
{
   uint8_t *p = value;
   rt_nexus_t *n = &(s->nexus);
   // Fixed: advance n along the nexus chain each iteration (previously the
   // first nexus's forcing value was copied repeatedly for multi-nexus
   // signals).  Also renamed the inner source cursor so it no longer
   // shadows the signal parameter.
   for (int i = 0; i < s->n_nexus; i++, n = n->chain) {
      assert(n->n_sources > 0);
      rt_source_t *src = NULL;
      for (src = &(n->sources); src; src = src->chain_input) {
         if (src->tag == SOURCE_FORCING)
            break;
      }
      assert(src != NULL);

      memcpy(p, src->u.pseudo.value.bytes, n->width * n->size);
      p += n->width * n->size;
   }
   assert(p == value + s->shared.size);
}
3748

3749
// Return a pointer to the coverage counter at index `tag` in the JIT
// coverage memory, ensuring at least tag+count counters are allocated.
int32_t *get_cover_counter(rt_model_t *m, int32_t tag, int count)
{
   assert(tag >= 0);
   assert(m->cover != NULL);
   return jit_get_cover_mem(m->jit, tag + count) + tag;
}
3755

3756
// Look up or create a trigger in the model's hash table.  Triggers are
// deduplicated on (kind, handle, args) so repeated requests share one
// object; new triggers are inserted at the head of their bucket chain.
static rt_trigger_t *new_trigger(rt_model_t *m, trigger_kind_t kind,
                                 uint64_t hash, jit_handle_t handle,
                                 unsigned nargs, const jit_scalar_t *args)
{
   rt_trigger_t **bucket = &(m->triggertab[hash % TRIGGER_TAB_SIZE]);

   // Reuse an existing trigger with identical kind, handle and arguments
   for (rt_trigger_t *exist = *bucket; exist; exist = exist->chain) {
      bool hit = exist->handle == handle
         && exist->nargs == nargs
         && exist->kind == kind;

      for (int i = 0; hit && i < nargs; i++)
         hit &= (exist->args[i].integer == args[i].integer);

      if (hit)
         return exist;
   }

   // Not found: allocate with the arguments stored inline after the struct
   const size_t argsz = nargs * sizeof(jit_scalar_t);

   rt_trigger_t *t = static_alloc(m, sizeof(rt_trigger_t) + argsz);
   t->handle = handle;
   t->nargs  = nargs;
   t->when   = TIME_HIGH;
   t->kind   = kind;
   t->chain  = *bucket;
   memcpy(t->args, args, argsz);

   return (*bucket = t);
}
3786

3787
// Invoke cb(arg) with m installed as the active model for the calling
// thread; MODEL_ENTRY presumably restores the previous model on scope
// exit — confirm against its definition.
void call_with_model(rt_model_t *m, void (*cb)(void *), void *arg)
{
   MODEL_ENTRY(m);
   (*cb)(arg);
}
3792

3793
// Recursively build the VHDL 'INSTANCE_NAME style string for a scope into
// tb.  The format depends on what kind of design unit the scope's
// hierarchy declaration refers to; the final result is downcased.
void get_instance_name(rt_scope_t *s, text_buf_t *tb)
{
   if (s->kind == SCOPE_ROOT)
      return;   // Root contributes nothing

   tree_t hier = tree_decl(s->where, 0);
   assert(tree_kind(hier) == T_HIER);

   switch (tree_subkind(hier)) {
   case T_ARCH:
      {
         tree_t unit = tree_ref(hier);

         get_instance_name(s->parent, tb);

         if (s->parent->kind != SCOPE_ROOT) {
            tree_t hier2 = tree_decl(s->parent->where, 0);
            assert(tree_kind(hier2) == T_HIER);

            // Components already contribute the label via T_COMPONENT below
            if (tree_subkind(hier2) != T_COMPONENT) {
               tb_append(tb, ':');
               tb_istr(tb, tree_ident(s->where));
            }

            tb_append(tb, '@');
         }
         else
            tb_append(tb, ':');

         // Entity idents have the form LIB.ENTITY-ARCH; take text after '-'
         const char *arch = strchr(istr(tree_ident(unit)), '-') + 1;
         tb_printf(tb, "%s(%s)", istr(tree_ident2(unit)), arch);
      }
      break;

   case T_BLOCK:
   case T_FOR_GENERATE:
   case T_IF_GENERATE:
   case T_CASE_GENERATE:
      get_instance_name(s->parent, tb);
      tb_append(tb, ':');
      tb_printf(tb, "%s", istr(tree_ident(s->where)));
      break;

   case T_COMPONENT:
      get_instance_name(s->parent, tb);
      tb_printf(tb, ":%s", istr(tree_ident(s->where)));
      break;

   default:
      should_not_reach_here();
   }

   tb_downcase(tb);
}
3847

3848
// Recursively build the VHDL 'PATH_NAME style string for a scope into tb:
// colon-separated downcased scope names, skipping the implicit block that
// wraps a component instantiation.
void get_path_name(rt_scope_t *s, text_buf_t *tb)
{
   if (s->kind == SCOPE_ROOT)
      return;   // Root contributes nothing

   get_path_name(s->parent, tb);

   if (s->parent->kind != SCOPE_ROOT) {
      tree_t hier = tree_decl(s->parent->where, 0);
      assert(tree_kind(hier) == T_HIER);

      if (tree_subkind(hier) == T_COMPONENT)
         return;   // Skip implicit block for components
   }

   tb_append(tb, ':');
   tb_istr(tb, tree_ident(s->where));
   tb_downcase(tb);
}
3867

3868
////////////////////////////////////////////////////////////////////////////////
3869
// Entry points from compiled code
3870

3871
// Entry point from compiled code: create and initialise a signal with
// `count` elements of `size` bytes.  The data area holds three copies of
// the value (current, last and driving/default); scalar initial values are
// replicated across all elements, aggregate values are copied verbatim.
// Returns the shared portion visible to generated code.
sig_shared_t *x_init_signal(int64_t count, uint32_t size, jit_scalar_t value,
                            bool scalar, sig_flags_t flags, tree_t where,
                            int32_t offset)
{
   TRACE("init signal %s count=%"PRIi64" size=%d value=%s flags=%x offset=%d",
         istr(tree_ident(where)), count, size,
         fmt_jit_value(value, scalar, size * count), flags, offset);

   rt_model_t *m = get_model();

   // Nexus widths are stored as 32-bit quantities
   if (count > INT32_MAX)
      jit_msg(tree_loc(where), DIAG_FATAL, "signal %s has %"PRIi64
              " sub-elements which is greater than the maximum supported %d",
              istr(tree_ident(where)), count, INT32_MAX);

   const size_t datasz = MAX(3 * count * size, 8);
   rt_signal_t *s = static_alloc(m, sizeof(rt_signal_t) + datasz);
   setup_signal(m, s, where, count, size, flags, offset);

   // The driving value area is also used to save the default value
   void *driving = s->shared.data + 2*s->shared.size;

   if (scalar) {
#define COPY_SCALAR(type) do {                  \
         type *pi = (type *)s->shared.data;     \
         type *pd = (type *)driving;            \
         for (int i = 0; i < count; i++)        \
            pi[i] = pd[i] = value.integer;      \
      } while (0)

      FOR_ALL_SIZES(size, COPY_SCALAR);
   }
   else {
      memcpy(s->shared.data, value.pointer, s->shared.size);
      memcpy(driving, value.pointer, s->shared.size);
   }

   return &(s->shared);
}
3910

3911
// Entry point from compiled code: ensure the active process has a driver
// (SOURCE_DRIVER) for each nexus in the range [offset, offset+count) of
// the signal, creating one where it does not already exist.
void x_drive_signal(sig_shared_t *ss, uint32_t offset, int32_t count)
{
   rt_signal_t *s = container_of(ss, rt_signal_t, shared);
   RT_LOCK(s->lock);

   TRACE("drive signal %s+%d count=%d", istr(tree_ident(s->where)),
         offset, count);

   rt_model_t *m = get_model();
   rt_proc_t *proc = get_active_proc();
   rt_nexus_t *n = split_nexus(m, s, offset, count);
   for (; count > 0; n = n->chain) {
      // Renamed from `s` which shadowed the signal local above (-Wshadow)
      rt_source_t *src;
      for (src = &(n->sources); src; src = src->chain_input) {
         if (src->tag == SOURCE_DRIVER && src->u.driver.proc == proc)
            break;
      }

      if (src == NULL) {
         src = add_source(m, n, SOURCE_DRIVER);
         src->u.driver.waveforms.value = alloc_value(m, n);
         src->u.driver.proc = proc;
      }

      count -= n->width;
      assert(count >= 0);
   }
}
3939

3940
// Entry point from compiled code: reschedule the active process to resume
// after `delay`.  A NULL active process means we are being evaluated
// outside the simulation (constant folding) and the call is a no-op.
void x_sched_process(int64_t delay)
{
   rt_proc_t *proc = get_active_proc();
   if (proc == NULL)
      return;    // May be called during constant folding

   TRACE("schedule process %s delay=%s", istr(proc->name), trace_time(delay));

   check_delay(delay);
   deltaq_insert_proc(get_model(), delay, proc);
}
3951

3952
// Entry point from compiled code: schedule a waveform on a single scalar
// element of a signal.  Fast path of x_sched_waveform for width-one
// assignments; validates the delay, postponed-process rule and pulse
// rejection limit before queueing the driver update.
void x_sched_waveform_s(sig_shared_t *ss, uint32_t offset, uint64_t scalar,
                        int64_t after, int64_t reject)
{
   rt_signal_t *s = container_of(ss, rt_signal_t, shared);
   RT_LOCK(s->lock);

   TRACE("_sched_waveform_s %s+%d value=%"PRIi64" after=%s reject=%s",
         istr(tree_ident(s->where)), offset, scalar, trace_time(after),
         trace_time(reject));

   rt_proc_t *proc = get_active_proc();

   check_delay(after);
   check_postponed(after, proc);
   check_reject_limit(s, after, reject);

   rt_model_t *m = get_model();
   rt_nexus_t *n = split_nexus(m, s, offset, 1);

   sched_driver(m, n, after, reject, &scalar, proc);
}
3973

3974
// Entry point from compiled code: schedule a waveform covering `count`
// elements of a signal.  The value buffer is consumed nexus-by-nexus,
// advancing by each nexus's width in bytes.
void x_sched_waveform(sig_shared_t *ss, uint32_t offset, void *values,
                      int32_t count, int64_t after, int64_t reject)
{
   rt_signal_t *s = container_of(ss, rt_signal_t, shared);
   RT_LOCK(s->lock);

   TRACE("_sched_waveform %s+%d value=%s count=%d after=%s reject=%s",
         istr(tree_ident(s->where)), offset,
         fmt_values(values, count * s->nexus.size),
         count, trace_time(after), trace_time(reject));

   rt_proc_t *proc = get_active_proc();

   check_delay(after);
   check_postponed(after, proc);
   check_reject_limit(s, after, reject);

   rt_model_t *m = get_model();
   rt_nexus_t *n = split_nexus(m, s, offset, count);
   char *vptr = values;
   for (; count > 0; n = n->chain) {
      count -= n->width;
      assert(count >= 0);

      sched_driver(m, n, after, reject, vptr, proc);
      vptr += n->width * n->size;
   }
}
4002

4003
// Entry point from compiled code: implement a signal-to-signal transfer
// (as for a concurrent `target <= source` where no intermediate process
// body is needed).  A wakeable transfer object is created that re-copies
// source to target whenever the source changes; the initial update is
// scheduled immediately in the current delta cycle.
void x_transfer_signal(sig_shared_t *target_ss, uint32_t toffset,
                       sig_shared_t *source_ss, uint32_t soffset,
                       int32_t count, int64_t after, int64_t reject)
{
   rt_signal_t *target = container_of(target_ss, rt_signal_t, shared);
   rt_signal_t *source = container_of(source_ss, rt_signal_t, shared);

   TRACE("transfer signal %s+%d to %s+%d count=%d",
         istr(tree_ident(source->where)), soffset,
         istr(tree_ident(target->where)), toffset, count);

   rt_proc_t *proc = get_active_proc();

   check_delay(after);
   check_postponed(after, proc);
   check_reject_limit(target, after, reject);

   rt_model_t *m = get_model();

   rt_transfer_t *t = static_alloc(m, sizeof(rt_transfer_t));
   t->proc   = proc;
   t->target = split_nexus(m, target, toffset, count);
   t->source = split_nexus(m, source, soffset, count);
   t->count  = count;
   t->after  = after;
   t->reject = reject;

   t->wakeable.kind      = W_TRANSFER;
   t->wakeable.postponed = false;
   t->wakeable.pending   = false;
   t->wakeable.delayed   = false;

   // Subscribe to every source nexus so the transfer re-runs on events
   for (rt_nexus_t *n = t->source; count > 0; n = n->chain) {
      sched_event(m, n, &(t->wakeable));

      if (!t->wakeable.pending) {
         // Schedule initial update immediately
         deferq_do(&m->delta_procq, async_transfer_signal, t);
         t->wakeable.pending = true;
      }

      count -= n->width;
      assert(count >= 0);
   }
}
4048

4049
// Entry point from compiled code: implements the 'EVENT attribute over a
// signal range.  Returns 1 if any nexus in the range had an event in the
// current delta cycle.  When the query covers the whole signal the result
// is cached in the shared flags so generated code can take a fast path on
// subsequent queries this cycle.
int32_t x_test_net_event(sig_shared_t *ss, uint32_t offset, int32_t count)
{
   rt_signal_t *s = container_of(ss, rt_signal_t, shared);
   RT_LOCK(s->lock);

   TRACE("_test_net_event %s offset=%d count=%d",
         istr(tree_ident(s->where)), offset, count);

   int32_t result = 0;
   rt_model_t *m = get_model();
   rt_nexus_t *n = split_nexus(m, s, offset, count);
   for (; count > 0; n = n->chain) {
      if (n->last_event == m->now && n->event_delta == m->iteration) {
         result = 1;
         break;
      }

      count -= n->width;
      assert(count >= 0);
   }

   // Cache the result when the query spans a single-nexus signal
   if (ss->size == s->nexus.size) {
      assert(!(ss->flags & SIG_F_CACHE_EVENT));   // Should have taken fast-path
      ss->flags |= SIG_F_CACHE_EVENT | (result ? SIG_F_EVENT_FLAG : 0);
      s->nexus.flags |= NET_F_CACHE_EVENT;
      APUSH(m->eventsigs, s);
   }

   return result;
}
4079

4080
// Entry point from compiled code: implements the 'ACTIVE attribute over a
// signal range — returns 1 as soon as any nexus in the range is active.
int32_t x_test_net_active(sig_shared_t *ss, uint32_t offset, int32_t count)
{
   rt_signal_t *sig = container_of(ss, rt_signal_t, shared);
   RT_LOCK(sig->lock);

   TRACE("_test_net_active %s offset=%d count=%d",
         istr(tree_ident(sig->where)), offset, count);

   rt_model_t *m = get_model();
   rt_nexus_t *it = split_nexus(m, sig, offset, count);

   int32_t remain = count;
   while (remain > 0) {
      if (nexus_active(m, it))
         return 1;

      remain -= it->width;
      assert(remain >= 0);
      it = it->chain;
   }

   return 0;
}
4100

4101
// Entry point from compiled code: make the active wakeable (process,
// property, etc.) sensitive to events on the given signal range.
void x_sched_event(sig_shared_t *ss, uint32_t offset, int32_t count)
{
   rt_signal_t *s = container_of(ss, rt_signal_t, shared);
   RT_LOCK(s->lock);

   TRACE("_sched_event %s+%d count=%d", istr(tree_ident(s->where)),
         offset, count);

   rt_wakeable_t *obj = get_active_wakeable();

   rt_model_t *m = get_model();
   rt_nexus_t *n = split_nexus(m, s, offset, count);
   for (; count > 0; n = n->chain) {
      sched_event(m, n, obj);

      count -= n->width;
      assert(count >= 0);
   }
}
4120

4121
// Entry point from compiled code: remove the active process's event
// sensitivity on the given signal range (inverse of x_sched_event).
void x_clear_event(sig_shared_t *ss, uint32_t offset, int32_t count)
{
   rt_signal_t *s = container_of(ss, rt_signal_t, shared);
   RT_LOCK(s->lock);

   TRACE("clear event %s+%d count=%d",
         istr(tree_ident(s->where)), offset, count);

   rt_model_t *m = get_model();
   rt_proc_t *proc = get_active_proc();
   rt_nexus_t *n = split_nexus(m, s, offset, count);
   for (; count > 0; n = n->chain) {
      clear_event(m, n, &(proc->wakeable));

      count -= n->width;
      assert(count >= 0);
   }
}
4139

4140
void x_enter_state(int32_t state, bool strong)
4,388✔
4141
{
4142
   rt_wakeable_t *obj = get_active_wakeable();
4,388✔
4143
   assert(obj->kind == W_PROPERTY);
4,388✔
4144

4145
   rt_prop_t *prop = container_of(obj, rt_prop_t, wakeable);
4,388✔
4146
   mask_set(&prop->newstate, state);
4,388✔
4147
   prop->strong |= strong;
4,388✔
4148
}
4,388✔
4149

4150
// Entry point from compiled code: register an alias declaration for an
// existing signal in the currently active scope.
void x_alias_signal(sig_shared_t *ss, tree_t where)
{
   rt_signal_t *s = container_of(ss, rt_signal_t, shared);
   RT_LOCK(s->lock);

   TRACE("alias signal %s to %s", istr(tree_ident(s->where)),
         istr(tree_ident(where)));

   rt_alias_t *a = xcalloc(sizeof(rt_alias_t));
   a->where  = where;
   a->signal = s;

   model_thread_t *thread = model_thread(get_model());
   APUSH(thread->active_scope->aliases, a);
}
4165

4166
// Entry point from compiled code: implements the 'LAST_EVENT attribute —
// the smallest time since any nexus in the range last had an event, or
// TIME_HIGH if none ever has.
int64_t x_last_event(sig_shared_t *ss, uint32_t offset, int32_t count)
{
   rt_signal_t *s = container_of(ss, rt_signal_t, shared);
   RT_LOCK(s->lock);

   TRACE("_last_event %s offset=%d count=%d",
         istr(tree_ident(s->where)), offset, count);

   int64_t last = TIME_HIGH;

   rt_model_t *m = get_model();
   rt_nexus_t *n = split_nexus(m, s, offset, count);
   for (; count > 0; n = n->chain) {
      // last_event > now means the nexus has never had an event
      if (n->last_event <= m->now)
         last = MIN(last, m->now - n->last_event);

      count -= n->width;
      assert(count >= 0);
   }

   return last;
}
4188

4189
// Entry point from compiled code: implements the 'LAST_ACTIVE attribute —
// the smallest time since any nexus in the range was last active, or
// TIME_HIGH if none ever was.
int64_t x_last_active(sig_shared_t *ss, uint32_t offset, int32_t count)
{
   rt_signal_t *s = container_of(ss, rt_signal_t, shared);
   RT_LOCK(s->lock);

   TRACE("_last_active %s offset=%d count=%d",
         istr(tree_ident(s->where)), offset, count);

   int64_t last = TIME_HIGH;

   rt_model_t *m = get_model();
   rt_nexus_t *n = split_nexus(m, s, offset, count);
   for (; count > 0; n = n->chain) {
      last = MIN(last, nexus_last_active(m, n));

      count -= n->width;
      assert(count >= 0);
   }

   return last;
}
4210

4211
// Entry point from compiled code: connect a source signal range to a
// destination signal range through SOURCE_PORT sources, as for a port map.
// Nexuses on both sides are split until their widths pair up one-to-one.
void x_map_signal(sig_shared_t *src_ss, uint32_t src_offset,
                  sig_shared_t *dst_ss, uint32_t dst_offset, uint32_t count)
{
   rt_signal_t *src_s = container_of(src_ss, rt_signal_t, shared);
   RT_LOCK(src_s->lock);

   rt_signal_t *dst_s = container_of(dst_ss, rt_signal_t, shared);
   RT_LOCK(dst_s->lock);

   TRACE("map signal %s+%d to %s+%d count %d",
         istr(tree_ident(src_s->where)), src_offset,
         istr(tree_ident(dst_s->where)), dst_offset, count);

   assert(src_s != dst_s);

   rt_model_t *m = get_model();

   rt_nexus_t *src_n = split_nexus(m, src_s, src_offset, count);
   rt_nexus_t *dst_n = split_nexus(m, dst_s, dst_offset, count);

   while (count > 0) {
      // Split the wider side until the two nexuses match exactly
      if (src_n->width > dst_n->width)
         clone_nexus(m, src_n, dst_n->width);
      else if (src_n->width < dst_n->width)
         clone_nexus(m, dst_n, src_n->width);

      assert(src_n->width == dst_n->width);
      assert(src_n->size == dst_n->size);

      // Effective value updates must propagate through ports
      src_n->flags |= (dst_n->flags & NET_F_EFFECTIVE);
      dst_n->flags |= (src_n->flags & NET_F_EFFECTIVE);

      rt_source_t *port = add_source(m, dst_n, SOURCE_PORT);
      port->u.port.input = src_n;

      // Link onto the source nexus's output list
      port->chain_output = src_n->outputs;
      src_n->outputs = port;

      count -= src_n->width;
      assert(count >= 0);

      src_n = src_n->chain;
      dst_n = dst_n->chain;
   }
}
4257

4258
// Entry point from compiled code: map a constant value onto a signal
// range, as for a port map with a static expression.  Both the effective
// and initial value areas are written and the nexus marked as having an
// explicit initial value.
void x_map_const(sig_shared_t *ss, uint32_t offset,
                 const uint8_t *values, uint32_t count)
{
   rt_signal_t *s = container_of(ss, rt_signal_t, shared);
   RT_LOCK(s->lock);

   TRACE("map const %s to %s+%d count %d", fmt_values(values, count),
         istr(tree_ident(s->where)), offset, count);

   rt_model_t *m = get_model();
   rt_nexus_t *n = split_nexus(m, s, offset, count);
   for (; count > 0; n = n->chain) {
      const size_t valuesz = n->size * n->width;
      memcpy(nexus_effective(n), values, valuesz);
      memcpy(nexus_initial(n), values, valuesz);

      n->flags |= NET_F_HAS_INITIAL;
      values += valuesz;

      count -= n->width;
      assert(count >= 0);
   }
}
4281

4282
// Entry point from compiled code: connect a source signal range as the
// input of an implicit signal (e.g. a guard or 'attribute-derived signal)
// via SOURCE_IMPLICIT sources on the destination's single nexus.
void x_map_implicit(sig_shared_t *src_ss, uint32_t src_offset,
                    sig_shared_t *dst_ss, uint32_t dst_offset,
                    uint32_t count)
{
   rt_signal_t *src_s = container_of(src_ss, rt_signal_t, shared);
   RT_LOCK(src_s->lock);

   rt_signal_t *dst_s = container_of(dst_ss, rt_signal_t, shared);
   RT_LOCK(dst_s->lock);

   TRACE("map implicit signal %s+%d to %s+%d count %d",
         istr(tree_ident(src_s->where)), src_offset,
         istr(tree_ident(dst_s->where)), dst_offset, count);

   assert(src_s != dst_s);
   assert(dst_offset == 0);   // Implicit signals are mapped from the start

   rt_model_t *m = get_model();
   rt_nexus_t *n = split_nexus(m, src_s, src_offset, count);
   for (; count > 0; n = n->chain) {
      count -= n->width;
      assert(count >= 0);

      rt_source_t *src = add_source(m, &(dst_s->nexus), SOURCE_IMPLICIT);
      src->u.port.input = n;

      src->chain_output = n->outputs;
      n->outputs = src;

      n->flags |= NET_F_EFFECTIVE;   // Update outputs when active
      n->flags &= ~NET_F_FAST_DRIVER;
   }
}
4315

4316
// Entry point from compiled code: enter a new runtime scope (package,
// record, array element, etc.) during elaboration.  The scope name is
// derived from the parent: array scopes get a "(index)" suffix, record
// scopes a dotted prefix.  The new scope becomes the thread's active
// scope until the matching x_pop_scope.
void x_push_scope(tree_t where, int32_t size, rt_scope_kind_t kind)
{
   TRACE("push scope %s size=%d kind=%d", istr(tree_ident(where)), size, kind);

   rt_model_t *m = get_model();
   model_thread_t *thread = model_thread(m);

   ident_t name;
   if (thread->active_scope && thread->active_scope->kind == SCOPE_ARRAY)
      name = ident_sprintf("%s(%d)", istr(thread->active_scope->name),
                           thread->active_scope->children.count);
   else if (thread->active_scope && thread->active_scope->kind == SCOPE_RECORD)
      name = ident_prefix(thread->active_scope->name, tree_ident(where), '.');
   else
      name = tree_ident(where);

   rt_scope_t *s = xcalloc(sizeof(rt_scope_t));
   s->where    = where;
   s->name     = name;
   s->kind     = kind;
   s->parent   = thread->active_scope;
   s->size     = size;
   s->privdata = MPTR_INVALID;

   if (kind != SCOPE_PACKAGE) {
      // Non-package scopes correspond to composite-typed objects; note
      // whether the subtype carries a resolution function
      type_t type = tree_type(where);
      assert(type_is_composite(type));
      if (type_kind(type) == T_SUBTYPE && type_has_resolution(type))
         s->flags |= SCOPE_F_RESOLVED;
   }

   thread->active_scope = s;
}
4349

4350
void x_pop_scope(void)
8,619✔
4351
{
4352
   rt_model_t *m = get_model();
8,619✔
4353
   model_thread_t *thread = model_thread(m);
8,619✔
4354

4355
   rt_scope_t *pop = thread->active_scope, *old = pop->parent;
8,619✔
4356

4357
   TRACE("pop scope %s", istr(tree_ident(pop->where)));
8,619✔
4358

4359
   int offset = INT_MAX;
8,619✔
4360
   for (int i = 0; i < pop->children.count; i++)
14,981✔
4361
      offset = MIN(offset, pop->children.items[i]->offset);
6,362✔
4362
   for (int i = 0; i < pop->signals.count; i++)
35,146✔
4363
      offset = MIN(offset, pop->signals.items[i]->offset);
26,527✔
4364
   pop->offset = offset;
8,619✔
4365

4366
   thread->active_scope = old;
8,619✔
4367

4368
   if (pop->kind == SCOPE_PACKAGE)
8,619✔
4369
      pop->parent = m->root;   // Always attach packages to root scope
72✔
4370

4371
   APUSH(pop->parent->children, pop);
8,619✔
4372
}
8,619✔
4373

4374
// Returns true iff the calling process is driving every nexus in the
// given slice of the signal; fatal error if it drives none of them
bool x_driving(sig_shared_t *ss, uint32_t offset, int32_t count)
{
   rt_signal_t *s = container_of(ss, rt_signal_t, shared);
   RT_LOCK(s->lock);

   TRACE("_driving %s offset=%d count=%d",
         istr(tree_ident(s->where)), offset, count);

   rt_model_t *m = get_model();
   rt_proc_t *proc = get_active_proc();

   int ntotal = 0, ndriving = 0;
   bool found = false;

   rt_nexus_t *n = split_nexus(m, s, offset, count);
   while (count > 0) {
      if (n->n_sources > 0) {
         rt_source_t *src = find_driver(n, proc);
         if (src != NULL) {
            found = true;
            if (!src->disconnected)
               ndriving++;
         }
      }

      ntotal++;
      count -= n->width;
      assert(count >= 0);
      n = n->chain;
   }

   if (!found)
      jit_msg(NULL, DIAG_FATAL, "process %s does not contain a driver for %s",
              istr(proc->name), istr(tree_ident(s->where)));

   return ntotal == ndriving;
}

4408
void *x_driving_value(sig_shared_t *ss, uint32_t offset, int32_t count)
{
   rt_signal_t *s = container_of(ss, rt_signal_t, shared);
   RT_LOCK(s->lock);

   TRACE("driving value %s offset=%d count=%d", istr(tree_ident(s->where)),
         offset, count);

   rt_model_t *m = get_model();
   rt_nexus_t *n = split_nexus(m, s, offset, count);

   rt_proc_t *proc = get_active_proc();
   if (proc == NULL) {   // Called in output conversion
      if (n->flags & NET_F_EFFECTIVE)
         return nexus_driving(n);
      else
         return nexus_effective(n);
   }

   // Result lives in the calling thread's TLAB so is only valid for the
   // current invocation
   void *result = tlab_alloc(model_thread(m)->tlab, s->shared.size);

   uint8_t *p = result;
   for (; count > 0; n = n->chain) {
      rt_source_t *src = find_driver(n, proc);
      if (src == NULL)
         jit_msg(NULL, DIAG_FATAL, "process %s does not contain a driver "
                 "for %s", istr(proc->name), istr(tree_ident(s->where)));

      // With a fast driver the driven value is also the effective value;
      // otherwise read it from the head of the driver's waveform list
      const uint8_t *driving;
      if (n->flags & NET_F_FAST_DRIVER)
         driving = nexus_effective(n);
      else
         driving = value_ptr(n, &(src->u.driver.waveforms.value));

      memcpy(p, driving, n->width * n->size);
      p += n->width * n->size;

      count -= n->width;
      assert(count >= 0);
   }

   return result;
}
4452
// Creates an implicit signal whose value is derived by evaluating a
// closure, scheduled for initial update on the implicit queue
sig_shared_t *x_implicit_signal(uint32_t count, uint32_t size, tree_t where,
                                implicit_kind_t kind, ffi_closure_t *closure,
                                int64_t delay)
{
   TRACE("implicit signal %s count=%d size=%d kind=%d",
         istr(tree_ident(where)), count, size, kind);

   rt_model_t *m = get_model();

   // NOTE(review): data area is twice count*size with an eight byte
   // minimum — presumably room for two copies of the value; confirm
   // against setup_signal
   const size_t datasz = MAX(2 * count * size, 8);
   rt_implicit_t *imp = static_alloc(m, sizeof(rt_implicit_t) + datasz);
   setup_signal(m, &(imp->signal), where, count, size, SIG_F_IMPLICIT, 0);

   imp->closure = *closure;
   imp->delay = delay;
   imp->wakeable.kind = W_IMPLICIT;

   // Defer the first evaluation of the closure and mark the wakeable
   // pending so it is not scheduled twice
   deferq_do(&m->implicitq, async_update_implicit_signal, imp);
   set_pending(&(imp->wakeable));

   if (kind == IMPLICIT_STABLE || kind == IMPLICIT_QUIET) {
      add_source(m, &(imp->signal.nexus), SOURCE_DRIVER);
      imp->signal.shared.data[0] = 1;    // X'STABLE initially true
   }

   return &(imp->signal.shared);
}

4480
// Schedules a disconnect of the calling process's driver on each nexus
// covered by the given slice of the signal
void x_disconnect(sig_shared_t *ss, uint32_t offset, int32_t count,
                  int64_t after, int64_t reject)
{
   rt_signal_t *s = container_of(ss, rt_signal_t, shared);

   TRACE("_disconnect %s+%d len=%d after=%s reject=%s",
         istr(tree_ident(s->where)), offset, count, trace_time(after),
         trace_time(reject));

   rt_proc_t *proc = get_active_proc();

   check_postponed(after, proc);
   check_reject_limit(s, after, reject);

   rt_model_t *m = get_model();
   rt_nexus_t *n = split_nexus(m, s, offset, count);
   while (count > 0) {
      count -= n->width;
      assert(count >= 0);

      sched_disconnect(m, n, after, reject, proc);
      n = n->chain;
   }
}
4504
// Forces a slice of a signal to the given values
void x_force(sig_shared_t *ss, uint32_t offset, int32_t count, void *values)
{
   rt_signal_t *s = container_of(ss, rt_signal_t, shared);

   TRACE("force signal %s+%d value=%s count=%d", istr(tree_ident(s->where)),
         offset, fmt_values(values, count), count);

   rt_model_t *m = get_model();
   rt_proc_t *proc = get_active_proc();

   check_postponed(0, proc);

   force_signal(m, s, values, offset, count);
}
4519
// Releases a previously forced slice of a signal
void x_release(sig_shared_t *ss, uint32_t offset, int32_t count)
{
   rt_signal_t *s = container_of(ss, rt_signal_t, shared);

   TRACE("release signal %s+%d count=%d", istr(tree_ident(s->where)),
         offset, count);

   rt_model_t *m = get_model();
   rt_proc_t *proc = get_active_proc();

   check_postponed(0, proc);

   release_signal(m, s, offset, count);
}
4534
// Deposits values directly onto a slice of a signal
void x_deposit_signal(sig_shared_t *ss, uint32_t offset, int32_t count,
                      void *values)
{
   rt_signal_t *s = container_of(ss, rt_signal_t, shared);

   TRACE("deposit signal %s+%d value=%s count=%d", istr(tree_ident(s->where)),
         offset, fmt_values(values, count * s->nexus.size), count);

   rt_model_t *m = get_model();
   rt_proc_t *proc = get_active_proc();

   check_postponed(0, proc);

   deposit_signal(m, s, values, offset, count);
}
4550
// Stores the result of a conversion function into the per-nexus result
// buffers for the given slice of the target signal
void x_put_conversion(rt_conv_func_t *cf, sig_shared_t *ss, uint32_t offset,
                      int32_t count, void *values)
{
   rt_signal_t *s = container_of(ss, rt_signal_t, shared);

   TRACE("put conversion %s+%d value=%s count=%d", istr(tree_ident(s->where)),
         offset, fmt_values(values, count * s->nexus.size), count);

   rt_model_t *m = get_model();
   rt_nexus_t *n = split_nexus(m, s, offset, count);
   for (; count > 0; n = n->chain) {
      count -= n->width;
      assert(count >= 0);

      // Look for the port source driven through this conversion function
      rt_source_t *src = &(n->sources);
      while (src != NULL) {
         if (src->tag == SOURCE_PORT && src->u.port.conv_func == cf)
            break;
         src = src->chain_input;
      }

      rt_value_t *result;
      if (src != NULL)
         result = &(src->u.port.conv_result);
      else {
         // No matching source: this must be an effective value
         // conversion input
         assert(n->flags & NET_F_EFFECTIVE);
         result = find_conversion_input(cf, n);
         assert(result != NULL);
      }

      copy_value_ptr(n, result, values);

      values += n->width * n->size;
   }
}
4585
// Associates a resolution function with a signal
void x_resolve_signal(sig_shared_t *ss, jit_handle_t handle, void *context,
                      int32_t nlits, int32_t flags)
{
   rt_signal_t *s = container_of(ss, rt_signal_t, shared);

   TRACE("resolve signal %s", istr(tree_ident(s->where)));

   const ffi_closure_t closure = {
      .handle = handle,
      .context = context
   };

   rt_model_t *m = get_model();
   s->resolution = memo_resolution_fn(m, s, closure, nlits, flags);

   // Copy R_IDENT into the nexus flags to avoid rt_resolve_nexus_fast
   // having to dereference the resolution pointer in the common case
   if (s->resolution->flags & R_IDENT) {
      s->shared.flags |= NET_F_R_IDENT;

      rt_nexus_t *n = &(s->nexus);
      for (int i = 0; i < s->n_nexus; i++) {
         n->flags |= NET_F_R_IDENT;
         n = n->chain;
      }
   }
}
4611
// Registers a process with the active instance scope
void x_process_init(jit_handle_t handle, tree_t where)
{
   rt_model_t *m = get_model();
   ident_t name = jit_get_name(m->jit, handle);

   TRACE("init process %s", istr(name));

   rt_scope_t *parent = model_thread(m)->active_scope;
   assert(parent != NULL);
   assert(parent->kind == SCOPE_INSTANCE);

   rt_proc_t *p = xcalloc(sizeof(rt_proc_t));
   p->where    = where;
   p->name     = name;
   p->handle   = handle;
   p->scope    = parent;
   p->privdata = mptr_new(m->mspace, "process privdata");

   p->wakeable.kind      = W_PROC;
   p->wakeable.pending   = false;
   p->wakeable.postponed = false;
   p->wakeable.delayed   = false;

   APUSH(parent->procs, p);
}
4637
void *x_function_trigger(jit_handle_t handle, unsigned nargs,
350✔
4638
                         const jit_scalar_t *args)
4639
{
4640
   rt_model_t *m = get_model();
350✔
4641

4642
   uint64_t hash = mix_bits_32(handle);
350✔
4643
   for (int i = 0; i < nargs; i++)
1,064✔
4644
      hash ^= mix_bits_64(args[i].integer);
714✔
4645

4646
   TRACE("function trigger %s nargs=%u hash=%"PRIx64,
350✔
4647
         istr(jit_get_name(m->jit, handle)), nargs, hash);
4648

4649
   return new_trigger(m, FUNC_TRIGGER, hash, handle, nargs, args);
350✔
4650
}
4651

4652
void *x_or_trigger(void *left, void *right)
44✔
4653
{
4654
   rt_model_t *m = get_model();
44✔
4655

4656
   uint64_t hash = mix_bits_64(left) ^ mix_bits_64(right);
44✔
4657

4658
   TRACE("or trigger %p %p hash=%"PRIx64, left, right, hash);
44✔
4659

4660
   const jit_scalar_t args[] = {
44✔
4661
      { .pointer = left < right ? left : right },
44✔
4662
      { .pointer = left < right ? right : left }
44✔
4663
   };
4664

4665
   return new_trigger(m, OR_TRIGGER, hash, JIT_HANDLE_INVALID, 2, args);
44✔
4666
}
4667

4668
// Creates a trigger that compares a signal element against a constant
void *x_cmp_trigger(sig_shared_t *ss, uint32_t offset, int64_t right)
{
   rt_model_t *m = get_model();
   rt_signal_t *s = container_of(ss, rt_signal_t, shared);

   // Combine signal, offset, and comparison value into the dedup hash
   uint64_t hash = mix_bits_64(s);
   hash ^= mix_bits_32(offset);
   hash ^= mix_bits_64(right);

   TRACE("cmp trigger %s+%d right=%"PRIi64" hash=%"PRIx64,
         istr(tree_ident(s->where)), offset, right, hash);

   const jit_scalar_t args[] = {
      { .pointer = s },
      { .integer = offset },
      { .integer = right }
   };
   return new_trigger(m, CMP_TRIGGER, hash, JIT_HANDLE_INVALID, 3, args);
}
4686
void x_add_trigger(void *ptr)
498✔
4687
{
4688
   TRACE("add trigger %p", ptr);
498✔
4689

4690
   rt_wakeable_t *obj = get_active_wakeable();
498✔
4691
   assert(obj->trigger == NULL);
498✔
4692

4693
   obj->trigger = ptr;
498✔
4694
}
498✔
4695

4696
// Allocates a conversion function object for a port association, with
// separate closures for the driving and (optional) effective value
// conversions
void *x_port_conversion(const ffi_closure_t *driving,
                        const ffi_closure_t *effective)
{
   rt_model_t *m = get_model();

   TRACE("port conversion %s context %p",
         istr(jit_get_name(m->jit, driving->handle)), driving->context);

   if (effective->handle != JIT_HANDLE_INVALID)
      TRACE("effective value conversion %s context %p",
            istr(jit_get_name(m->jit, effective->handle)), effective->context);

   // Use the padding up to the next MEMBLOCK_ALIGN boundary to hold the
   // initial inputs array, avoiding a separate allocation
   const size_t tail_bytes = ALIGN_UP(sizeof(rt_conv_func_t), MEMBLOCK_ALIGN)
      - sizeof(rt_conv_func_t);
   const int tail_max_inputs = tail_bytes / sizeof(conv_input_t);
   assert(tail_max_inputs > 0);

   const size_t total_bytes =
      sizeof(rt_conv_func_t) + tail_max_inputs * sizeof(conv_input_t);

   rt_conv_func_t *cf = static_alloc(m, total_bytes);
   cf->driving   = *driving;
   cf->effective = *effective;
   cf->ninputs   = 0;
   cf->maxinputs = tail_max_inputs;
   cf->outputs   = NULL;
   cf->inputs    = cf->tail;    // Points at the tail area sized above
   cf->when      = TIME_HIGH;   // Sentinel: not yet evaluated
   cf->iteration = UINT_MAX;

   return cf;
}
4729
// Registers a slice of a signal as an input of a conversion function:
// each nexus in the range becomes a conversion input and the function's
// output sources are linked into the nexus output chain
void x_convert_in(void *ptr, sig_shared_t *ss, uint32_t offset, int32_t count)
{
   rt_signal_t *s = container_of(ss, rt_signal_t, shared);

   TRACE("convert in %p %s+%d count=%d", ptr, istr(tree_ident(s->where)),
         offset, count);

   rt_conv_func_t *cf = ptr;
   rt_model_t *m = get_model();

   rt_nexus_t *n = split_nexus(m, s, offset, count);
   for (; count > 0; n = n->chain) {
      count -= n->width;
      assert(count >= 0);

      add_conversion_input(m, cf, n);

      // Splice the conversion function's outputs onto the end of this
      // nexus's output list, unless they are already present
      rt_source_t **p = &(n->outputs);
      for (; *p != NULL && *p != cf->outputs; p = &((*p)->chain_output));
      *p = cf->outputs;
   }
}
4752
// Registers a slice of a signal as an output of a conversion function,
// adding a port source to each nexus in the range
void x_convert_out(void *ptr, sig_shared_t *ss, uint32_t offset, int32_t count)
{
   rt_signal_t *s = container_of(ss, rt_signal_t, shared);

   TRACE("convert out %p %s+%d count=%d", ptr, istr(tree_ident(s->where)),
         offset, count);

   rt_conv_func_t *cf = ptr;
   rt_model_t *m = get_model();

   assert(cf->ninputs == 0);    // Add outputs first

   rt_nexus_t *n = split_nexus(m, s, offset, count);
   while (count > 0) {
      count -= n->width;
      assert(count >= 0);

      rt_source_t *src = add_source(m, n, SOURCE_PORT);
      src->u.port.conv_func   = cf;
      src->u.port.conv_result = alloc_value(m, n);

      // Push onto the conversion function's output list
      src->chain_output = cf->outputs;
      cf->outputs = src;

      n = n->chain;
   }
}
4778
// Formats the 'INSTANCE_NAME or 'PATH_NAME attribute for the active
// scope into the given text buffer
void x_instance_name(attr_kind_t kind, text_buf_t *tb)
{
   rt_model_t *m = get_model();
   rt_scope_t *s = get_active_scope(m);

   if (kind == ATTR_INSTANCE_NAME)
      get_instance_name(s, tb);
   else if (kind == ATTR_PATH_NAME)
      get_path_name(s, tb);
   else
      should_not_reach_here();
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc