mem-queue.cpp
/*
 * Copyright 2021 David Metz <david.c.metz@ntnu.no>
 * See COPYING for terms of redistribution.
 */

// NOTE: several include lines are missing from this listing; the jlm/hls and
// jlm/llvm headers below are reconstructed from the identifiers used in this
// file and may be incomplete.
#include <jlm/hls/backend/rvsdg2rhls/mem-queue.hpp>
#include <jlm/hls/ir/hls.hpp>
#include <jlm/llvm/ir/operators/Load.hpp>
#include <jlm/llvm/ir/operators/Store.hpp>
#include <jlm/llvm/ir/operators/call.hpp>
#include <jlm/rvsdg/node.hpp>
#include <jlm/rvsdg/theta.hpp>
#include <jlm/rvsdg/view.hpp>
#include <jlm/util/common.hpp>

#include <deque>
#include <stdexcept>
#include <unordered_set>
#include <vector>

namespace jlm::hls
{
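/*
 * Recursively traces a memory-state output and collects all load and store
 * simple nodes reachable along it, descending into structural nodes and
 * following back-edges and region results; `visited` prevents re-processing
 * outputs reachable along multiple paths.
 */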
static void
find_load_store(
    jlm::rvsdg::Output * op,
    std::vector<jlm::rvsdg::SimpleNode *> & load_nodes,
    std::vector<jlm::rvsdg::SimpleNode *> & store_nodes,
    std::unordered_set<jlm::rvsdg::Output *> & visited)
{
  if (!jlm::rvsdg::is<jlm::llvm::MemoryStateType>(op->Type()))
  {
    return;
  }
  if (visited.count(op))
  {
    // skip already processed outputs
    return;
  }
  visited.insert(op);
  for (auto & user : op->Users())
  {
    if (auto simplenode = jlm::rvsdg::TryGetOwnerNode<jlm::rvsdg::SimpleNode>(user))
    {
      if (dynamic_cast<const jlm::llvm::StoreNonVolatileOperation *>(&simplenode->GetOperation()))
      {
        store_nodes.push_back(simplenode);
      }
      else if (dynamic_cast<const jlm::llvm::LoadNonVolatileOperation *>(
                   &simplenode->GetOperation()))
      {
        load_nodes.push_back(simplenode);
      }
      for (size_t i = 0; i < simplenode->noutputs(); ++i)
      {
        find_load_store(simplenode->output(i), load_nodes, store_nodes, visited);
      }
    }
    else if (auto sti = dynamic_cast<jlm::rvsdg::StructuralInput *>(&user))
    {
      for (auto & arg : sti->arguments)
      {
        find_load_store(&arg, load_nodes, store_nodes, visited);
      }
    }
    else if (auto r = dynamic_cast<jlm::rvsdg::RegionResult *>(&user))
    {
      if (auto ber = dynamic_cast<jlm::hls::BackEdgeResult *>(r))
      {
        find_load_store(ber->argument(), load_nodes, store_nodes, visited);
      }
      else
      {
        find_load_store(r->output(), load_nodes, store_nodes, visited);
      }
    }
    else
    {
      JLM_UNREACHABLE("THIS SHOULD BE COVERED");
    }
  }
}

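/*
 * Given the structural input through which a state edge enters an HLS loop,
 * follows the edge through the loop's entry mux, back-edge buffer, and loop
 * branch to find the structural output through which it leaves the loop.
 */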
static rvsdg::StructuralOutput *
find_loop_output(jlm::rvsdg::StructuralInput * sti)
{
  auto sti_arg = sti->arguments.first();
  JLM_ASSERT(sti_arg->nusers() == 1);
  auto & user = *sti_arg->Users().begin();
  auto [muxNode, muxOperation] =
      jlm::rvsdg::TryGetSimpleNodeAndOptionalOp<jlm::hls::MuxOperation>(user);
  JLM_ASSERT(muxNode && muxOperation);
  for (size_t i = 1; i < 3; ++i)
  {
    auto arg = muxNode->input(i)->origin();
    if (auto ba = dynamic_cast<jlm::hls::BackEdgeArgument *>(arg))
    {
      auto res = ba->result();
      JLM_ASSERT(res);
      auto [bufferNode, bufferOperation] =
          jlm::rvsdg::TryGetSimpleNodeAndOptionalOp<jlm::hls::BufferOperation>(*res->origin());
      JLM_ASSERT(bufferNode && bufferOperation);
      auto [branchNode, branchOperation] =
          jlm::rvsdg::TryGetSimpleNodeAndOptionalOp<jlm::hls::BranchOperation>(
              *bufferNode->input(0)->origin());
      JLM_ASSERT(branchNode && branchOperation);
      for (size_t j = 0; j < 2; ++j)
      {
        JLM_ASSERT(branchNode->output(j)->nusers() == 1);
        auto result =
            dynamic_cast<jlm::rvsdg::RegionResult *>(&*branchNode->output(j)->Users().begin());
        if (result)
        {
          return result->output();
        }
      }
    }
  }
  JLM_UNREACHABLE("This should never happen");
}

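/*
 * Follows `mem_edge` and routes `addr_edge` through the same regions. Every
 * store passed on the way taps its address onto the addr edge and its
 * completion onto store_dequeues; the supplied load is rebuilt without state
 * edges and attached to the addr edge instead. Returns the addr edge at the
 * end of a gamma (also setting *new_mem_edge), or nullptr at the end of a loop.
 */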
static rvsdg::Output *
separate_load_edge(
    jlm::rvsdg::Output * mem_edge,
    jlm::rvsdg::Output * addr_edge,
    jlm::rvsdg::SimpleNode ** load,
    jlm::rvsdg::Output ** new_mem_edge,
    std::vector<jlm::rvsdg::Output *> & store_addresses,
    std::vector<jlm::rvsdg::Output *> & store_dequeues,
    std::vector<bool> & store_precedes,
    bool * load_encountered)
{
  // Follows along the mem edge and routes the addr edge through the same regions.
  // Redirects the supplied load to the new edge and adds it to stores.
  // The new edge might be routed through unnecessary regions; this should be fixed by running DNE.
  while (true)
  {
    // each iteration should update common_edge and/or new_edge
    JLM_ASSERT(mem_edge->nusers() == 1);
    JLM_ASSERT(addr_edge->nusers() == 1);
    JLM_ASSERT(mem_edge != addr_edge);
    JLM_ASSERT(mem_edge->region() == addr_edge->region());
    auto user = &*mem_edge->Users().begin();
    auto & addr_edge_user = *addr_edge->Users().begin();
    if (dynamic_cast<jlm::rvsdg::RegionResult *>(user))
    {
      // end of region reached
      JLM_UNREACHABLE("THIS SHOULD NOT HAPPEN");
    }
    else if (auto sti = dynamic_cast<jlm::rvsdg::StructuralInput *>(user))
    {
      auto loop_node = jlm::util::assertedCast<jlm::hls::LoopNode>(sti->node());
      jlm::rvsdg::Output * buffer = nullptr;
      auto addr_edge_before_loop = addr_edge;
      addr_edge = loop_node->AddLoopVar(addr_edge, &buffer);
      addr_edge_user.divert_to(addr_edge);
      mem_edge = find_loop_output(sti);
      auto sti_arg = sti->arguments.first();
      JLM_ASSERT(sti_arg->nusers() == 1);
      auto & user = *sti_arg->Users().begin();
      auto [muxNode, muxOperation] =
          jlm::rvsdg::TryGetSimpleNodeAndOptionalOp<jlm::hls::MuxOperation>(user);
      JLM_ASSERT(muxNode && muxOperation);
      JLM_ASSERT(buffer->nusers() == 1);
      // use a separate vector to check if the loop contains stores
      std::vector<jlm::rvsdg::Output *> loop_store_addresses;
      separate_load_edge(
          muxNode->output(0),
          buffer,
          load,
          nullptr,
          loop_store_addresses,
          store_dequeues,
          store_precedes,
          load_encountered);
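      // With no stores in the loop, the address edge cannot be blocked by
      // in-loop stores, which is what allows its loop-state routing to be
      // replaced by a loop-constant buffer (LCB) below.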
      if (loop_store_addresses.empty())
      {
        jlm::hls::convert_loop_state_to_lcb(&*addr_edge_before_loop->Users().begin());
      }
      else
      {
        store_addresses.insert(
            store_addresses.cend(),
            loop_store_addresses.begin(),
            loop_store_addresses.end());
      }
    }
    else if (auto sn = jlm::rvsdg::TryGetOwnerNode<jlm::rvsdg::SimpleNode>(*user))
    {
      auto op = &sn->GetOperation();

      if (auto br = dynamic_cast<const jlm::hls::BranchOperation *>(op))
      {
        if (!br->loop)
        {
          // start of gamma
          auto load_branch_out =
              jlm::hls::BranchOperation::create(*sn->input(0)->origin(), *addr_edge, false);
          for (size_t i = 0; i < sn->noutputs(); ++i)
          {
            // dummy user for edge
            auto dummy_user_tmp = jlm::hls::SinkOperation::create(*load_branch_out[i]);
            // Sink operations have no outputs, so the returned vector is empty.
            // It may not be discarded and unused variables are not allowed, so
            // assert that it is empty to keep the compiler happy.
            JLM_ASSERT(dummy_user_tmp.size() == 0);
            auto dummy_user = &jlm::rvsdg::AssertGetOwnerNode<jlm::rvsdg::SimpleNode>(
                *load_branch_out[i]->Users().begin());
            // need both load and common edge here
            load_branch_out[i] = separate_load_edge(
                sn->output(i),
                load_branch_out[i],
                load,
                &mem_edge,
                store_addresses,
                store_dequeues,
                store_precedes,
                load_encountered);
            JLM_ASSERT(load_branch_out[i]->nusers() == 1);
            JLM_ASSERT(dummy_user->input(0)->origin() == load_branch_out[i]);
            remove(dummy_user);
          }
          // create mux
          JLM_ASSERT(mem_edge->nusers() == 1);
          auto [muxNode, muxOperation] =
              jlm::rvsdg::TryGetSimpleNodeAndOptionalOp<jlm::hls::MuxOperation>(
                  *mem_edge->Users().begin());
          JLM_ASSERT(muxNode && muxOperation);
          addr_edge = jlm::hls::MuxOperation::create(
              *muxNode->input(0)->origin(),
              load_branch_out,
              muxOperation->discarding,
              false)[0];
          addr_edge_user.divert_to(addr_edge);
          mem_edge = muxNode->output(0);
        }
        else
        {
          // end of loop
          JLM_ASSERT(jlm::rvsdg::IsOwnerNodeOperation<jlm::hls::BranchOperation>(addr_edge_user));
          return nullptr;
        }
      }
      else if (auto mx = dynamic_cast<const jlm::hls::MuxOperation *>(op))
      {
        JLM_ASSERT(!mx->loop);
        // end of gamma
        JLM_ASSERT(new_mem_edge);
        *new_mem_edge = mem_edge;
        return addr_edge;
      }
      else if (dynamic_cast<const jlm::llvm::StoreNonVolatileOperation *>(op))
      {
        auto sg_out = jlm::hls::StateGateOperation::create(*sn->input(0)->origin(), { addr_edge });
        addr_edge = sg_out[1];
        addr_edge_user.divert_to(addr_edge);
        store_addresses.push_back(jlm::hls::route_to_region_rhls((*load)->region(), sg_out[0]));
        store_precedes.push_back(!*load_encountered);
        mem_edge = sn->output(0);
        JLM_ASSERT(mem_edge->nusers() == 1);
        user = &*mem_edge->Users().begin();
        auto [mssNode, msso] =
            jlm::rvsdg::TryGetSimpleNodeAndOptionalOp<jlm::llvm::MemoryStateSplitOperation>(*user);
        if (mssNode && msso)
        {
          // handle the case where the output of the store is already connected to a
          // MemoryStateSplit by adding an output to the split
          auto store_split =
              jlm::llvm::MemoryStateSplitOperation::Create(*mem_edge, msso->nresults() + 1);
          for (size_t i = 0; i < msso->nresults(); ++i)
          {
            mssNode->output(i)->divert_users(store_split[i]);
          }
          remove(mssNode);
          mem_edge = store_split[0];
          store_dequeues.push_back(
              jlm::hls::route_to_region_rhls((*load)->region(), store_split.back()));
        }
        else
        {
          auto store_split = jlm::llvm::MemoryStateSplitOperation::Create(*mem_edge, 2);
          mem_edge = store_split[0];
          user->divert_to(mem_edge);
          store_dequeues.push_back(
              jlm::hls::route_to_region_rhls((*load)->region(), store_split[1]));
        }
      }
      else if (auto lo = dynamic_cast<const jlm::llvm::LoadNonVolatileOperation *>(op))
      {
        JLM_ASSERT(sn->noutputs() == 2);
        if (sn == *load)
        {
          // create state gate for addr edge
          auto addr_sg_out =
              jlm::hls::StateGateOperation::create(*sn->input(0)->origin(), { addr_edge });
          addr_edge = addr_sg_out[1];
          addr_edge_user.divert_to(addr_edge);
          auto addr_sg_out2 = jlm::hls::StateGateOperation::create(*addr_sg_out[0], { addr_edge });
          addr_edge = addr_sg_out2[1];
          addr_edge_user.divert_to(addr_edge);
          // remove state edges from load
          auto new_load_outputs = jlm::llvm::LoadNonVolatileOperation::Create(
              addr_sg_out2[0],
              {},
              lo->GetLoadedType(),
              lo->GetAlignment());
          // create state gate for mem edge and load data
          auto mem_sg_out =
              jlm::hls::StateGateOperation::create(*new_load_outputs[0], { mem_edge });
          mem_edge = mem_sg_out[1];

          sn->output(0)->divert_users(new_load_outputs[0]);
          user->divert_to(addr_edge);
          sn->output(1)->divert_users(mem_edge);
          remove(sn);
          *load = &jlm::rvsdg::AssertGetOwnerNode<jlm::rvsdg::SimpleNode>(*new_load_outputs[0]);
          *load_encountered = true;
        }
        else
        {
          mem_edge = sn->output(1);
        }
      }
      else if (dynamic_cast<const jlm::hls::StateGateOperation *>(op))
      {
        mem_edge = sn->output(1);
      }
      else if (dynamic_cast<const jlm::llvm::CallOperation *>(op))
      {
        JLM_UNREACHABLE("Decoupled nodes not implemented yet");
      }
      else if (dynamic_cast<const jlm::llvm::MemoryStateMergeOperation *>(op))
      {
        auto si_load_user = jlm::rvsdg::TryGetOwnerNode<jlm::rvsdg::SimpleNode>(addr_edge_user);
        auto & userNode = jlm::rvsdg::AssertGetOwnerNode<jlm::rvsdg::SimpleNode>(*user);
        if (si_load_user && &userNode == sn)
        {
          return nullptr;
        }
        // TODO: handle
        JLM_UNREACHABLE("THIS SHOULD NOT HAPPEN");
      }
      else
      {
        JLM_UNREACHABLE("THIS SHOULD NOT HAPPEN");
      }
    }
    else
    {
      JLM_UNREACHABLE("THIS SHOULD NOT HAPPEN");
    }
  }
}

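/*
 * Walks a memory state edge through the lambda region until it ends. For every
 * HLS loop the edge enters, splits the state in front of the loop into a common
 * "mem" edge plus one address edge per load (see separate_load_edge), merges
 * the edges again behind the loop, and guards each load's address input with
 * address queues for the stores it may conflict with.
 */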
jlm::rvsdg::Output *
process_loops(jlm::rvsdg::Output * state_edge)
{
  while (true)
  {
    // each iteration should update state_edge
    JLM_ASSERT(state_edge->nusers() == 1);
    auto & user = *state_edge->Users().begin();
    if (dynamic_cast<jlm::rvsdg::RegionResult *>(&user))
    {
      // End of region reached
      return user.origin();
    }
    else if (auto sn = jlm::rvsdg::TryGetOwnerNode<jlm::rvsdg::SimpleNode>(user))
    {
      auto op = &sn->GetOperation();
      auto br = dynamic_cast<const jlm::hls::BranchOperation *>(op);
      if (br && !br->loop)
      {
        // start of gamma
        for (size_t i = 0; i < sn->noutputs(); ++i)
        {
          state_edge = process_loops(sn->output(i));
        }
      }
      else if (jlm::rvsdg::is<jlm::hls::MuxOperation>(*op))
      {
        // end of gamma
        JLM_ASSERT(sn->noutputs() == 1);
        return sn->output(0);
      }
      else if (dynamic_cast<const jlm::llvm::LambdaExitMemoryStateMergeOperation *>(op))
      {
        // end of lambda
        JLM_ASSERT(sn->noutputs() == 1);
        return sn->output(0);
      }
      else if (dynamic_cast<const jlm::llvm::LoadNonVolatileOperation *>(op))
      {
        // load
        JLM_ASSERT(sn->noutputs() == 2);
        state_edge = sn->output(1);
      }
      else if (dynamic_cast<const jlm::llvm::CallOperation *>(op))
      {
        state_edge = sn->output(sn->noutputs() - 1);
      }
      else
      {
        JLM_ASSERT(sn->noutputs() == 1);
        state_edge = sn->output(0);
      }
    }
    else if (auto sti = dynamic_cast<jlm::rvsdg::StructuralInput *>(&user))
    {
      JLM_ASSERT(dynamic_cast<const jlm::hls::LoopNode *>(sti->node()));
      // update to output of loop
      auto mem_edge_after_loop = find_loop_output(sti);
      JLM_ASSERT(mem_edge_after_loop->nusers() == 1);
      auto & common_user = *mem_edge_after_loop->Users().begin();

      std::vector<jlm::rvsdg::SimpleNode *> load_nodes;
      std::vector<jlm::rvsdg::SimpleNode *> store_nodes;
      std::unordered_set<jlm::rvsdg::Output *> visited;
      // this is a hack to keep the search within the loop
      visited.insert(mem_edge_after_loop);
      find_load_store(&*sti->arguments.begin(), load_nodes, store_nodes, visited);
      auto split_states =
          jlm::llvm::MemoryStateSplitOperation::Create(*sti->origin(), load_nodes.size() + 1);
      // handle common edge
      auto mem_edge = split_states[0];
      sti->divert_to(mem_edge);
      split_states[0] = mem_edge_after_loop;
      state_edge = jlm::llvm::MemoryStateMergeOperation::Create(split_states);
      common_user.divert_to(state_edge);
      for (size_t i = 0; i < load_nodes.size(); ++i)
      {
        auto load = load_nodes[i];
        auto addr_edge = split_states[1 + i];
        std::vector<jlm::rvsdg::Output *> store_addresses;
        std::vector<jlm::rvsdg::Output *> store_dequeues;
        std::vector<bool> store_precedes;
        bool load_encountered = false;
        separate_load_edge(
            mem_edge,
            addr_edge,
            &load,
            nullptr,
            store_addresses,
            store_dequeues,
            store_precedes,
            &load_encountered);
        JLM_ASSERT(load_encountered);
        JLM_ASSERT(store_nodes.size() == store_addresses.size());
        JLM_ASSERT(store_nodes.size() == store_dequeues.size());
        auto state_gate_addr_in =
            jlm::rvsdg::AssertGetOwnerNode<jlm::rvsdg::SimpleNode>(*load->input(0)->origin())
                .input(0);
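        // One address queue per store guards the load's address input: the
        // store's address is enqueued via the load's addr edge and dequeued via
        // the store edge once the store completes; store_precedes records
        // whether the store came before the load in program order.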
        for (size_t j = 0; j < store_nodes.size(); ++j)
        {
          JLM_ASSERT(state_gate_addr_in->origin()->region() == store_addresses[j]->region());
          JLM_ASSERT(store_dequeues[j]->region() == store_addresses[j]->region());
          state_gate_addr_in->divert_to(jlm::hls::AddressQueueOperation::create(
              *state_gate_addr_in->origin(),
              *store_addresses[j],
              *store_dequeues[j],
              store_precedes[j]));
        }
      }
    }
    else
    {
      JLM_UNREACHABLE("THIS SHOULD NOT HAPPEN");
    }
  }
}

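/*
 * Pass entry point: expects the root region to contain a single lambda node,
 * finds its memory state argument, and runs process_loops on each memory state
 * edge (one per output of a LambdaEntryMemoryStateSplit if present, otherwise
 * the single state edge).
 */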
static void
mem_queue(rvsdg::RvsdgModule & rvsdgModule)
{
  const auto & graph = rvsdgModule.Rvsdg();
  const auto rootRegion = &graph.GetRootRegion();
  if (rootRegion->numNodes() != 1)
  {
    throw std::logic_error("Root should have only one node now");
  }

  const auto lambda = dynamic_cast<const rvsdg::LambdaNode *>(rootRegion->Nodes().begin().ptr());
  if (!lambda)
  {
    throw std::logic_error("Node needs to be a lambda");
  }

  auto state_arg = &llvm::GetMemoryStateRegionArgument(*lambda);
  if (!state_arg)
  {
    // No memstate, i.e., no memory used
    return;
  }
  // for each state edge:
  //   for each outer loop (theta/loop in lambda region):
  //     split the state edge before the loop
  //       * one edge for only stores (preserves store order)
  //       * a separate edge for each load, going through the stores as well
  //     merge the state edges after the loop
  //   for each load:
  //     insert a store address queue before the address input of the load
  //       * enq order of stores is guaranteed by the load edge, deq by the store edge
  //   for each store:
  //     insert a state gate that enqueues the address; it is dequeued after the store completes
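  // Illustrative sketch (added for exposition, not part of the original
  // source): for a loop whose state edge passes one store S and one load L,
  //
  //   before:  state ->[ loop: ... S ... L ... ]-> state
  //
  //   after:   state -> MemoryStateSplit ->(store edge) ... S ...     -> MemoryStateMerge -> state
  //                                     \->(addr edge)  ... S ... L ...  -/
  //
  // where L's address input is guarded by an AddressQueueOperation that is fed
  // S's address when it becomes known and drained when S completes.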
  // Check if there exists a memory state splitter
  if (state_arg->nusers() == 1)
  {
    auto entryNode = rvsdg::TryGetOwnerNode<rvsdg::Node>(*state_arg->Users().begin());
    if (jlm::rvsdg::is<const jlm::llvm::LambdaEntryMemoryStateSplitOperation>(
            entryNode->GetOperation()))
    {
      for (size_t i = 0; i < entryNode->noutputs(); ++i)
      {
        // Process each state edge separately
        jlm::rvsdg::Output * stateEdge = entryNode->output(i);
        process_loops(stateEdge);
      }
      return;
    }
  }
  // There is no memory state splitter, so process the single state edge in the graph
  process_loops(state_arg);
}

AddressQueueInsertion::~AddressQueueInsertion() noexcept
{}

void
AddressQueueInsertion::Run(rvsdg::RvsdgModule & rvsdgModule, util::StatisticsCollector & statisticsCollector)
{
  mem_queue(rvsdgModule);
}
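// A minimal usage sketch (hypothetical driver code; module construction and the
// surrounding rvsdg2rhls pipeline are elided):
//
//   jlm::hls::AddressQueueInsertion addressQueueInsertion;
//   jlm::util::StatisticsCollector statisticsCollector;
//   addressQueueInsertion.Run(rvsdgModule, statisticsCollector);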
}