verilator-harness-hls.cpp
/*
 * Copyright 2021 David Metz <david.c.metz@ntnu.no>
 * Copyright 2024 Håvard Krogstie <krogstie.havard@gmail.com>
 * See COPYING for terms of redistribution.
 */

#include <sstream>

namespace jlm::hls
{

// The number of cycles before a load is ready
static constexpr int MEMORY_RESPONSE_LATENCY = 10;

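// Converts an RVSDG/LLVM type to the corresponding C type used in the generated harness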
std::string
ConvertToCType(const rvsdg::Type * type)
{
  if (auto t = dynamic_cast<const rvsdg::BitType *>(type))
  {
    if (t->nbits() == 1)
      return "bool";
    return "int" + util::strfmt(t->nbits()) + "_t";
  }
  if (jlm::rvsdg::is<llvm::PointerType>(*type))
  {
    return "void*";
  }
  if (auto ft = dynamic_cast<const llvm::FloatingPointType *>(type))
  {
    switch (ft->size())
    {
    case llvm::fpsize::flt:
      return "float";
    case llvm::fpsize::dbl:
      return "double";
    default:
      throw std::logic_error(type->debug_string() + " not implemented!");
    }
  }
  if (auto t = dynamic_cast<const llvm::VectorType *>(type))
  {
    return ConvertToCType(&t->type()) + " __attribute__((vector_size("
         + std::to_string(JlmSize(type) / 8) + ")))";
  }
  if (auto t = dynamic_cast<const llvm::ArrayType *>(type))
  {
    return ConvertToCType(&t->element_type()) + "*";
  }

  JLM_UNREACHABLE("Unimplemented C type");
}

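// Returns the C type of the kernel's return value, or std::nullopt if the kernel has no data result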
std::optional<std::string>
GetReturnTypeAsC(const rvsdg::LambdaNode & kernel)
{
  const auto & results = kernel.GetOperation().type().Results();

  if (results.empty())
    return std::nullopt;

  const auto & type = results.front();

  if (type->Kind() == rvsdg::TypeKind::State)
    return std::nullopt;

  return ConvertToCType(type.get());
}

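// Builds the C parameter list and matching call argument list for the kernel, skipping state
// edges and memory bundles. Returns the number of parameters and the two lists as strings.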
std::tuple<size_t, std::string, std::string>
GetParameterListAsC(const rvsdg::LambdaNode & kernel)
{
  size_t argument_index = 0;
  std::ostringstream parameters;
  std::ostringstream arguments;

  for (auto & argType : kernel.GetOperation().type().Arguments())
  {
    if (argType->Kind() == rvsdg::TypeKind::State)
      continue;
    if (rvsdg::is<BundleType>(argType))
      continue;

    if (argument_index != 0)
    {
      parameters << ", ";
      arguments << ", ";
    }

    parameters << ConvertToCType(argType.get()) << " a" << argument_index;
    arguments << "a" << argument_index;
    argument_index++;
  }

  return std::make_tuple(argument_index, parameters.str(), arguments.str());
}

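// Generates the C++ source of the Verilator test harness for the HLS kernel in the given module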
std::string
VerilatorHarnessHLS::GetText(llvm::LlvmRvsdgModule & rm)
{
  std::ostringstream cpp;
  const auto & kernel = *get_hls_lambda(rm);
  const auto & function_name =
      dynamic_cast<llvm::LlvmLambdaOperation &>(kernel.GetOperation()).name();

  // The request and response parts of memory queues
  const auto mem_reqs = get_mem_reqs(kernel);
  const auto mem_resps = get_mem_resps(kernel);
  JLM_ASSERT(mem_reqs.size() == mem_resps.size());

  // All inputs that are not memory queues
  const auto reg_args = get_reg_args(kernel);

  // Extract info about the kernel's function signature in C
  const auto c_return_type = GetReturnTypeAsC(kernel);
  const auto [num_c_params, c_params, c_call_args] = GetParameterListAsC(kernel);

  cpp << R"(
#define TRACE_CHUNK_SIZE 100000
#define TIMEOUT 10000000

#ifndef MEMORY_LATENCY
#define MEMORY_LATENCY )"
      << MEMORY_RESPONSE_LATENCY << R"(
#endif

#include <algorithm>
#include <cassert>
#include <csignal>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <deque>
#include <iostream>
#include <unistd.h>
#include <vector>
#include <verilated.h>
#ifdef FST
#include "verilated_fst_c.h"
#else
#include "verilated_vcd_c.h"
#endif
#define xstr(s) str(s)
#define str(s) #s
)" << std::endl;

  cpp << "#include \"V" << VerilogFile_.base() << ".h\"" << std::endl;
  cpp << "#define V_NAME V" << VerilogFile_.base() << std::endl;

  cpp << R"(
// ======== Global variables used for simulating the model ========
// The verilated model being simulated
V_NAME *top;

// Current simulation time, in number of cycles
uint64_t main_time = 0;

// Can be set from signal handlers, to trigger graceful early termination
bool terminate = false;


// ======== Global variables imported from other modules ========
)";

  for (const auto arg : rm.Rvsdg().GetRootRegion().Arguments())
  {
    const auto graphImport = util::assertedCast<llvm::LlvmGraphImport>(arg);
    cpp << "extern \"C\" char " << graphImport->Name() << ";" << std::endl;
  }
  cpp << R"(

// ======== Tracing accesses to main memory ==========
struct mem_access {
  void * addr;
  bool write;
  uint8_t width; // 2^width bytes
  void* data;
  uint32_t port;
  uint64_t timestamp;

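  // Two accesses match if address, direction, width, and data agree; port and timestamp are ignored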
  bool operator==(const mem_access & other) const {
    return addr == other.addr && write == other.write && width == other.width && !memcmp(data, other.data, 1<<width);
  }
};

// A log of memory accesses made by the kernel
std::vector<mem_access> memory_accesses;
// Accesses to regions in this vector of (start, length) pairs are not traced
std::vector<std::pair<void*, size_t>> ignored_memory_regions;

static void ignore_memory_region(void* start, size_t length) {
  ignored_memory_regions.emplace_back(start, length);
}

static bool in_ignored_region(void* addr) {
  for (auto [start, length] : ignored_memory_regions) {
    if (addr >= start && addr < (char*)start + length)
      return true;
  }
  return false;
}

static void* instrumented_load(void* addr, uint8_t width, uint32_t port=0) {
  void * data = malloc(1 << width);
  memcpy(data, addr, 1 << width);
  if (!in_ignored_region(addr))
    memory_accesses.push_back({addr, false, width, data, port, main_time});
  return data;
}

static void instrumented_store(void* addr, void *data, uint8_t width, uint32_t port=0) {
  void * data_copy = malloc(1 << width);
  memcpy(data_copy, data, 1 << width);
  memcpy(addr, data_copy, 1 << width);
  if(!in_ignored_region(addr))
    memory_accesses.push_back({addr, true, width, data_copy, port, main_time});
}

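// Placeholder data driven on response data ports while no response is valid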
uint32_t dummy_data[16] = {
  0xDEADBEE0,
  0xDEADBEE1,
  0xDEADBEE2,
  0xDEADBEE3,
  0xDEADBEE4,
  0xDEADBEE5,
  0xDEADBEE6,
  0xDEADBEE7,
  0xDEADBEE8,
  0xDEADBEE9,
  0xDEADBEEA,
  0xDEADBEEB,
  0xDEADBEEC,
  0xDEADBEED,
  0xDEADBEEE,
  0xDEADBEEF,
};
// ======== Implementation of external memory queues, adding latency to loads ========
class MemoryQueue {
  struct Response {
    uint64_t request_time;
    void* data;
    uint8_t size;
    uint8_t id;
  };
  int latency;
  int width;
  int port;
  std::deque<Response> responses;

public:
  MemoryQueue(int latency, int width, int port) : latency(latency), width(width), port(port) {}

  // Called right before posedge, can only read from the model
  void accept_request(uint8_t req_ready, uint8_t req_valid, uint8_t req_write, uint64_t req_addr, uint8_t req_size, void* req_data, uint8_t req_id, uint8_t res_valid, uint8_t res_ready) {
    if (top->reset) {
      responses.clear();
      return;
    }

    // If a response was consumed this cycle, remove it
    if (res_ready && res_valid) {
      assert(!responses.empty());
      responses.pop_front();
    }

    if (!req_ready || !req_valid)
      return;

    if (req_write) {
      // Stores are performed immediately
      instrumented_store((void*) req_addr, req_data, req_size, port);
      responses.push_back({main_time, req_data, req_size, req_id});
    } else {
      // Loads are performed immediately, but their response is placed in the queue
      void* data = instrumented_load((void*) req_addr, req_size, port);
      responses.push_back({main_time, data, req_size, req_id});
    }
  }

  // Called right after posedge, can only write to the model
  void produce_response(uint8_t& req_ready, uint8_t& res_valid, void* res_data, uint8_t& res_id) {
    if (!responses.empty() && responses.front().request_time + latency <= main_time + 1) {
      res_valid = 1;
      memcpy(res_data, responses.front().data, 1<<responses.front().size);
      res_id = responses.front().id;
    } else {
      res_valid = 0;
      memcpy(res_data, dummy_data, width);
      res_id = 0;
    }

    // Always ready for requests
    req_ready = 1;
  }

  bool empty() const {
    return responses.empty();
  }
};
)" << std::endl;

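  // Emit one MemoryQueue per memory port, each sized to the width of its response data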
  cpp << "MemoryQueue memory_queues[] = {";
  for (size_t i = 0; i < mem_reqs.size(); i++)
  {
    auto bundle = dynamic_cast<const BundleType *>(mem_resps[i]->Type().get());
    auto size = JlmSize(&*bundle->get_element_type("data")) / 8;
    // int width =
    cpp << "{MEMORY_LATENCY, " << size << ", " << i << "}, ";
  }
  cpp << "};" << R"(

// ======== Variables and functions for tracing the verilated model ========
#ifdef TRACE_SIGNALS
#ifdef FST
VerilatedFstC *tfp;
#else
VerilatedVcdC *tfp;
#endif
#endif

static void init_tracing() {
  #ifdef TRACE_SIGNALS
  #ifdef FST
  tfp = new VerilatedFstC;
  top->trace(tfp, 99); // Trace 99 levels of hierarchy
  tfp->open(xstr(V_NAME) ".fst");
  #else
  tfp = new VerilatedVcdC;
  top->trace(tfp, 99); // Trace 99 levels of hierarchy
  tfp->open(xstr(V_NAME) ".vcd");
  #endif
  #endif
}

// Saves the current state of all wires and registers at the given timestep
static void capture_trace(uint64_t time) {
  #ifdef TRACE_SIGNALS
  tfp->dump(time);
  #ifdef VCD_FLUSH
  tfp->flush();
  #endif
  #endif
}

static void finish_trace() {
  // Coverage analysis (since test passed)
#if VM_COVERAGE
  Verilated::mkdir("logs");
  VerilatedCov::write("logs/coverage.dat");
#endif
#ifdef TRACE_SIGNALS
  tfp->close();
#endif
}

// ======== Setup and execution of the verilated model ========
static void posedge();
static void negedge();
static void verilator_finish();

// Called by $time in Verilog. Converts to real, to match SystemC
double sc_time_stamp() {
  return main_time;
}

// Called once to initialize the verilated model
static void verilator_init(int argc, char **argv) {
  // set up signaling so we can kill the program and still get waveforms
  struct sigaction action;
  memset(&action, 0, sizeof(struct sigaction));
  action.sa_handler = [](int sig){ terminate = true; };
  sigaction(SIGTERM, &action, NULL);
  sigaction(SIGKILL, &action, NULL);
  sigaction(SIGINT, &action, NULL);

  atexit(verilator_finish);

  // Set debug level, 0 is off, 9 is highest presently used
  // May be overridden by commandArgs
  Verilated::debug(0);

  // Randomization reset policy
  // May be overridden by commandArgs
  Verilated::randReset(2);

  // Verilator must compute traced signals
  Verilated::traceEverOn(true);

  // Pass arguments so Verilated code can see them, e.g., $value$plusargs
  // This needs to be called before you create any model
  Verilated::commandArgs(argc, argv);

  // Construct the Verilated model
  top = new V_NAME;
  main_time = 0;

  init_tracing();

  top->clk = 0;
  top->reset = 1;
  top->i_valid = 0;
  top->o_ready = 0;
)" << std::endl;

  // Zero out all kernel inputs, except for context variables
  size_t first_ctx_var = reg_args.size() - kernel.GetContextVars().size();
  for (size_t i = 0; i < first_ctx_var; i++)
  {
    // don't generate ports for state edges
    if (reg_args[i]->Type()->Kind() == rvsdg::TypeKind::State)
      continue;
    cpp << " top->i_data_" << i << " = 0;" << std::endl;
  }
  for (const auto & ctx : kernel.GetContextVars())
  {
    // Context variables should always be external symbols imported by name
    const auto import = util::assertedCast<rvsdg::GraphImport>(ctx.input->origin());
    cpp << " top->i_data_" << first_ctx_var << " = (uint64_t) &" << import->Name() << ";"
        << std::endl;
    first_ctx_var++;
  }

  cpp << R"(
  // Run some cycles with reset set HIGH
  posedge();
  negedge();
  posedge();
  negedge();
  posedge();
  negedge();
  posedge();
  negedge();
  posedge();
  negedge();
  posedge();
  top->reset = 0;
  negedge();
}

// Model outputs should be read right before posedge()
// Model inputs should be set right after posedge()
static void posedge() {
  if (terminate) {
    std::cout << "terminating\n";
    exit(-1);
  }
  assert(!Verilated::gotFinish());
  assert(top->clk == 0);

  // Read memory requests just before the rising edge
)";

  // Emit calls to MemoryQueue::accept_request()
  for (size_t i = 0; i < mem_reqs.size(); i++)
  {
    const auto req_bt = util::assertedCast<const BundleType>(mem_reqs[i]->Type().get());
    const auto has_write = req_bt->get_element_type("write") != nullptr;

    cpp << " memory_queues[" << i << "].accept_request(";
    cpp << "top->mem_" << i << "_req_ready, ";
    cpp << "top->mem_" << i << "_req_valid, ";
    if (has_write)
      cpp << " top->mem_" << i << "_req_data_write, ";
    else
      cpp << "0, ";
    cpp << "top->mem_" << i << "_req_data_addr, ";
    cpp << "top->mem_" << i << "_req_data_size, ";
    if (has_write)
      cpp << "&top->mem_" << i << "_req_data_data, ";
    else
      cpp << "nullptr, ";
    cpp << "top->mem_" << i << "_req_data_id, ";
    cpp << "top->mem_" << i << "_res_ready, ";
    cpp << "top->mem_" << i << "_res_valid);" << std::endl;
  }

  cpp << R"(
  top->clk = 1;
  top->eval();
  // Capturing the posedge trace here would make external inputs appear on negedge
  // capture_trace(main_time * 2);
}

static void negedge() {
  assert(!Verilated::gotFinish());
  assert(top->clk == 1);

  // Memory responses are ready before the negedge
)";

  // Emit calls to MemoryQueue::produce_response
  for (size_t i = 0; i < mem_reqs.size(); i++)
  {
    cpp << " memory_queues[" << i << "].produce_response(";
    cpp << "top->mem_" << i << "_req_ready, ";
    cpp << "top->mem_" << i << "_res_valid, ";
    cpp << "&top->mem_" << i << "_res_data_data, ";
    cpp << "top->mem_" << i << "_res_data_id);" << std::endl;
  }

  cpp << R"(
  top->eval();

  // Capturing the posedge trace here makes external inputs appear to update with the posedge
  capture_trace(main_time * 2);

  top->clk = 0;
  top->eval();
  capture_trace(main_time * 2 + 1);
  main_time++;
}

static void verilator_finish() {
  if (!top)
    return;
  top->final();
  finish_trace();
  // delete top;
}

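// Drives the verilated kernel once: waits for i_ready, presents the inputs for one cycle,
// then cycles the clock until o_valid is raised and the result can be read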
static )"
      << c_return_type.value_or("void") << " run_hls(" << std::endl;
  cpp << c_params << R"(
) {
  if(!top) {
    verilator_init(0, NULL);
  }
  int start = main_time;

  // Run cycles until i_ready becomes HIGH
  for (int i = 0; i < TIMEOUT && !top->i_ready; i++) {
    posedge();
    negedge();
  }
  if (!top->i_ready) {
    std::cout << "i_ready was not set within TIMEOUT" << std::endl;
    exit(-1);
  }

  posedge();

  // Pass in input data for one cycle
  top->i_valid = 1;
)";

  for (size_t i = 0; i < num_c_params; i++)
  {
    if (auto ft = dynamic_cast<const jlm::llvm::FloatingPointType *>(
            kernel.GetOperation().type().Arguments()[i].get()))
    {
      if (ft->size() == llvm::fpsize::flt)
        cpp << "top->i_data_" << i << " = *(uint32_t*) &a" << i << ";" << std::endl;
      else if (ft->size() == llvm::fpsize::dbl)
        cpp << "top->i_data_" << i << " = *(uint64_t*) &a" << i << ";" << std::endl;
    }
    else
    {
      cpp << "top->i_data_" << i << " = (uint64_t) a" << i << ";" << std::endl;
    }
  }

  cpp << R"(
  negedge();
  posedge();

  top->o_ready = 1;
  top->i_valid = 0;
)";

  // Zero out the kernel inputs again
  for (size_t i = 0; i < num_c_params; i++)
  {
    cpp << "top->i_data_" << i << " = 0;" << std::endl;
  }

  cpp << R"(
  negedge();

  // Cycle until o_valid becomes HIGH
  for (int i = 0; i < TIMEOUT && !top->o_valid; i++) {
    posedge();
    negedge();
  }
  if (!top->o_valid) {
    std::cout << "o_valid was not set within TIMEOUT" << std::endl;
    exit(-1);
  }

  std::cout << "finished - took " << (main_time - start) << " cycles" << std::endl;

  // Ensure all memory queues are empty
)";
  for (size_t i = 0; i < mem_reqs.size(); i++)
    cpp << "assert(memory_queues[" << i << "].empty());" << std::endl;

  if (c_return_type.has_value())
    cpp << "return *(" << c_return_type.value() << "*)&top->o_data_0;" << std::endl;

  cpp << R"(
}


// ======== Running the kernel compiled as C, with instrumentation ========
extern "C" )"
      << c_return_type.value_or("void") << " instrumented_ref(" << c_params << ");" << R"(

extern "C" void reference_load(void* addr, uint64_t width) {
  instrumented_load(addr, width);
}

extern "C" void reference_store(void* addr, uint64_t width) {
  instrumented_store(addr, addr, width);
}

extern "C" void reference_alloca(void* start, uint64_t length) {
  ignore_memory_region(start, length);
}

std::vector<mem_access> ref_memory_accesses;

// Calls instrumented_ref in a forked process and stores its memory accesses
static void run_ref(
)" << c_params
      << R"(
) {
  int fd[2]; // channel 0 for reading and 1 for writing
  size_t tmp = pipe(fd);
  int pid = fork();
  if(pid == 0) { // child
    close(fd[0]); // close fd[0] since child will only write

    instrumented_ref()"
      << c_call_args << R"();

    // Send all memory accesses to the parent
    size_t cnt = memory_accesses.size();
    tmp = write(fd[1], &cnt, sizeof(size_t));
    for (auto & access : memory_accesses){
      tmp = write(fd[1], &access, sizeof(mem_access));
      tmp = write(fd[1], access.data, 1<< access.width);
    }

    close(fd[1]);
    exit(0);
  } else { // parent
    close(fd[1]); // close fd[1] since parent will only read

    // Retrieve all memory_accesses from the child
    size_t cnt;
    tmp = read(fd[0], &cnt, sizeof(size_t));
    ref_memory_accesses.resize(cnt);
    for (auto & access : ref_memory_accesses) {
      tmp = read(fd[0], &access, sizeof(mem_access));
      access.data = malloc(1 << access.width);
      tmp = read(fd[0], access.data, 1 << access.width);
    }

    close(fd[0]);
  }
}

// Checks that memory_accesses and ref_memory_accesses are identical within each address
static void compare_memory_accesses() {
  assert (memory_accesses.size() == ref_memory_accesses.size());

  // Stable sort the memory accesses by only address, keeping order within each address.
  auto addr_sort = [](const mem_access & a, const mem_access & b) {
    return a.addr < b.addr;
  };
  std::stable_sort(memory_accesses.begin(), memory_accesses.end(), addr_sort);
  std::stable_sort(ref_memory_accesses.begin(), ref_memory_accesses.end(), addr_sort);
  assert(memory_accesses == ref_memory_accesses);
}

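// Frees the copied data buffers of all accesses in the vector and empties it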
static void empty_mem_acces_vector(std::vector<mem_access> &vec){
  for (auto &m: vec) {
    free(m.data);
  }
  vec.erase(vec.begin(), vec.end());
}

// ======== Entry point for calling kernel from host device (C code) ========
extern "C" )"
      << c_return_type.value_or("void") << " " << function_name << "(" << c_params << ")" << R"(
{
  // Execute instrumented version of kernel compiled for the host in a fork
  run_ref()"
      << c_call_args << R"();

  // Execute the verilated model in this process
  )";
  if (c_return_type.has_value())
    cpp << "auto result = ";
  cpp << "run_hls(" << c_call_args << ");" << std::endl;

  cpp << R"(
  // Compare traced memory accesses
  compare_memory_accesses();

  // Reset structures used for tracing memory operations
  empty_mem_acces_vector(memory_accesses);
  empty_mem_acces_vector(ref_memory_accesses);
  ignored_memory_regions.clear();
)";

  if (c_return_type.has_value())
    cpp << " return result;" << std::endl;

  cpp << "}" << std::endl;

  return cpp.str();
}

} // namespace jlm::hls