Jlm
MlirToJlmConverter.cpp
Go to the documentation of this file.
1 /*
2  * Copyright 2024 Louis Maurin <louis7maurin@gmail.com>
3  * Copyright 2023 Magnus Sjalander <work@sjalander.com>
4  * See COPYING for terms of redistribution.
5  */
6 
21 #include <mlir/Parser/Parser.h>
22 #include <mlir/Transforms/TopologicalSortUtils.h>
23 
24 namespace jlm::mlir
25 {
26 
// Reads an MLIR source file from `filePath`, parses it into a fresh block
// using this converter's MLIR context, and hands the block to ConvertMlir().
// NOTE(review): the extraction dropped doxygen line 28 here — the line naming
// this function (presumably MlirToJlmConverter::ReadAndConvertMlir) is missing.
27 std::unique_ptr<llvm::LlvmRvsdgModule>
29 {
30  auto config = ::mlir::ParserConfig(Context_.get());
31  std::unique_ptr<::mlir::Block> block = std::make_unique<::mlir::Block>();
32  auto result = ::mlir::parseSourceFile(filePath.to_str(), block.get(), config);
33  if (result.failed())
34  {
// NOTE(review): JLM_ASSERT on a non-empty string literal is always true, so a
// parse failure is effectively ignored and conversion proceeds on an empty
// block. This likely should be
// JLM_UNREACHABLE("Parsing MLIR input file failed."); — confirm intent.
35  JLM_ASSERT("Parsing MLIR input file failed.");
36  }
37  return ConvertMlir(block);
38 }
39 
40 std::unique_ptr<llvm::LlvmRvsdgModule>
41 MlirToJlmConverter::ConvertMlir(std::unique_ptr<::mlir::Block> & block)
42 {
43  auto & topNode = block->front();
44  if (auto module = ::mlir::dyn_cast<::mlir::ModuleOp>(topNode))
45  {
46  auto & newTopNode = module.getBodyRegion().front().front();
47  auto omegaNode = ::mlir::dyn_cast<::mlir::rvsdg::OmegaNode>(newTopNode);
48  if (!omegaNode)
49  {
50  JLM_UNREACHABLE("frontend : Top node in module op is not an OmegaNode.");
51  }
52  return ConvertOmega(omegaNode);
53  }
54  auto omegaNode = ::mlir::dyn_cast<::mlir::rvsdg::OmegaNode>(topNode);
55  if (!omegaNode)
56  {
57  JLM_UNREACHABLE("frontend : Top node is not an OmegaNode.");
58  }
59  return ConvertOmega(omegaNode);
60 }
61 
62 std::unique_ptr<llvm::LlvmRvsdgModule>
63 MlirToJlmConverter::ConvertOmega(::mlir::rvsdg::OmegaNode & omegaNode)
64 {
65  auto rvsdgModule =
66  llvm::LlvmRvsdgModule::Create(util::FilePath(""), std::string(), std::string());
67  auto & graph = rvsdgModule->Rvsdg();
68  auto & root = graph.GetRootRegion();
69  ConvertRegion(omegaNode.getRegion(), root);
70 
71  return rvsdgModule;
72 }
73 
74 ::llvm::SmallVector<jlm::rvsdg::Output *>
75 MlirToJlmConverter::ConvertRegion(::mlir::Region & region, rvsdg::Region & rvsdgRegion)
76 {
77  // MLIR use blocks as the innermost "container"
78  // In the RVSDG Dialect a region should contain one and only one block
79  JLM_ASSERT(region.getBlocks().size() == 1);
80  return ConvertBlock(region.front(), rvsdgRegion);
81 }
82 
// Looks up, in operand order, the already-converted RVSDG output for each MLIR
// operand of `mlirOp`. Every operand must already be present in `outputMap`
// (guaranteed by the topological visit order in ConvertBlock).
// NOTE(review): extraction dropped doxygen line 84 — the line naming this
// function (presumably MlirToJlmConverter::GetConvertedInputs) is missing.
83 ::llvm::SmallVector<jlm::rvsdg::Output *>
85  ::mlir::Operation & mlirOp,
86  const std::unordered_map<void *, rvsdg::Output *> & outputMap)
87 {
88  ::llvm::SmallVector<jlm::rvsdg::Output *> inputs;
89  for (::mlir::Value operand : mlirOp.getOperands())
90  {
// The opaque pointer of the mlir::Value serves as the stable map key.
91  auto key = operand.getAsOpaquePointer();
92  JLM_ASSERT(outputMap.find(key) != outputMap.end());
93  inputs.push_back(outputMap.at(key));
94  }
95  return inputs;
96 }
97 
// Converts every operation of an MLIR block into the given RVSDG region.
// Operations are first sorted topologically so that each operation's operands
// are converted before the operation itself; the mapping from MLIR values to
// RVSDG outputs is kept in `outputMap`. Returns the converted operands of the
// block's terminator, i.e. the region's results.
98 ::llvm::SmallVector<jlm::rvsdg::Output *>
99 MlirToJlmConverter::ConvertBlock(::mlir::Block & block, rvsdg::Region & rvsdgRegion)
100 {
101  ::mlir::sortTopologically(&block);
102 
103  // Create an RVSDG node for each MLIR operation and store the mapping from
104  // MLIR values to RVSDG outputs in a hash map for easy lookup
105  std::unordered_map<void *, rvsdg::Output *> outputMap;
106 
// Pre-seed the map with the block arguments, which map 1:1 (by index) onto
// the RVSDG region's existing arguments.
107  for (size_t i = 0; i < block.getNumArguments(); i++)
108  {
109  auto arg = block.getArgument(i);
110  auto key = arg.getAsOpaquePointer();
111  outputMap[key] = rvsdgRegion.argument(i);
112  }
113 
114  for (auto & mlirOp : block.getOperations())
115  {
// Omega arguments become graph imports rather than ordinary nodes.
116  if (auto argument = ::mlir::dyn_cast<::mlir::rvsdg::OmegaArgument>(mlirOp))
117  {
118  auto valueType = argument.getValueType();
119  auto importedType = argument.getImportedValue().getType();
120  auto jlmValueType = ConvertType(valueType);
121  auto jlmImportedType = ConvertType(importedType);
122 
// NOTE(review): extraction dropped doxygen line 123 — the head of the call
// whose arguments follow (presumably a llvm::GraphImport::Create(...) that
// appends a new argument to the region) is missing; confirm against the
// repository source.
124  *rvsdgRegion.graph(),
125  jlmValueType,
126  jlmImportedType,
127  argument.getNameAttr().cast<::mlir::StringAttr>().str(),
128  llvm::linkageFromString(argument.getLinkageAttr().cast<::mlir::StringAttr>().str()));
129 
// The freshly appended region argument is the import's converted output.
130  auto key = argument.getResult().getAsOpaquePointer();
131  outputMap[key] = rvsdgRegion.argument(rvsdgRegion.narguments() - 1);
132  }
133  else
134  {
135  ::llvm::SmallVector<jlm::rvsdg::Output *> inputs = GetConvertedInputs(mlirOp, outputMap);
136 
137  auto outputs = ConvertOperation(mlirOp, rvsdgRegion, inputs);
138  JLM_ASSERT(outputs.size() == mlirOp.getNumResults());
// Record each MLIR result -> RVSDG output pairing for later operand lookups.
139  for (size_t i = 0; i < mlirOp.getNumResults(); i++)
140  {
141  auto result = mlirOp.getResult(i);
142  auto key = result.getAsOpaquePointer();
143  outputMap[key] = outputs[i];
144  }
145  }
146  }
147 
148  // The results of the region/block are encoded in the terminator operation
149  ::mlir::Operation * terminator = block.getTerminator();
150 
151  return GetConvertedInputs(*terminator, outputMap);
152 }
153 
// Converts an arith.cmpi operation on `nbits`-wide integer operands into the
// matching jlm::llvm integer comparison node (eq/ne and signed/unsigned
// ge/gt/le/lt). Aborts on any predicate not listed below.
// NOTE(review): extraction dropped doxygen line 155 — the line naming this
// function (presumably MlirToJlmConverter::ConvertCmpIOp) is missing.
154 rvsdg::Node *
156  ::mlir::arith::CmpIOp & CompOp,
157  const ::llvm::SmallVector<rvsdg::Output *> & inputs,
158  size_t nbits)
159 {
160  if (CompOp.getPredicate() == ::mlir::arith::CmpIPredicate::eq)
161  {
162  return &rvsdg::CreateOpNode<jlm::llvm::IntegerEqOperation>({ inputs[0], inputs[1] }, nbits);
163  }
164  else if (CompOp.getPredicate() == ::mlir::arith::CmpIPredicate::ne)
165  {
166  return &rvsdg::CreateOpNode<jlm::llvm::IntegerNeOperation>({ inputs[0], inputs[1] }, nbits);
167  }
168  else if (CompOp.getPredicate() == ::mlir::arith::CmpIPredicate::sge)
169  {
170  return &rvsdg::CreateOpNode<jlm::llvm::IntegerSgeOperation>({ inputs[0], inputs[1] }, nbits);
171  }
172  else if (CompOp.getPredicate() == ::mlir::arith::CmpIPredicate::sgt)
173  {
174  return &rvsdg::CreateOpNode<jlm::llvm::IntegerSgtOperation>({ inputs[0], inputs[1] }, nbits);
175  }
176  else if (CompOp.getPredicate() == ::mlir::arith::CmpIPredicate::sle)
177  {
178  return &rvsdg::CreateOpNode<jlm::llvm::IntegerSleOperation>({ inputs[0], inputs[1] }, nbits);
179  }
180  else if (CompOp.getPredicate() == ::mlir::arith::CmpIPredicate::slt)
181  {
182  return &rvsdg::CreateOpNode<jlm::llvm::IntegerSltOperation>({ inputs[0], inputs[1] }, nbits);
183  }
184  else if (CompOp.getPredicate() == ::mlir::arith::CmpIPredicate::uge)
185  {
186  return &rvsdg::CreateOpNode<jlm::llvm::IntegerUgeOperation>({ inputs[0], inputs[1] }, nbits);
187  }
188  else if (CompOp.getPredicate() == ::mlir::arith::CmpIPredicate::ugt)
189  {
190  return &rvsdg::CreateOpNode<jlm::llvm::IntegerUgtOperation>({ inputs[0], inputs[1] }, nbits);
191  }
192  else if (CompOp.getPredicate() == ::mlir::arith::CmpIPredicate::ule)
193  {
194  return &rvsdg::CreateOpNode<jlm::llvm::IntegerUleOperation>({ inputs[0], inputs[1] }, nbits);
195  }
196  else if (CompOp.getPredicate() == ::mlir::arith::CmpIPredicate::ult)
197  {
198  return &rvsdg::CreateOpNode<jlm::llvm::IntegerUltOperation>({ inputs[0], inputs[1] }, nbits);
199  }
200  else
201  {
202  JLM_UNREACHABLE("frontend : Unknown comparison predicate.");
203  }
204 }
205 
// Converts an llvm.icmp operation — used here for pointer comparisons — into
// a jlm::llvm PtrCmpOperation node in `rvsdgRegion`. Only eq/ne and the
// signed orderings are handled; anything else aborts.
// NOTE(review): extraction dropped doxygen line 207 (the line naming this
// function, presumably MlirToJlmConverter::ConvertICmpOp) as well as lines
// 216/225/234/243/252/261 — the statement heads consuming each `newOp`
// (presumably `return &rvsdg::SimpleNode::Create(`). Confirm against the
// repository source.
206 rvsdg::Node *
208  ::mlir::LLVM::ICmpOp & operation,
209  rvsdg::Region & rvsdgRegion,
210  const ::llvm::SmallVector<rvsdg::Output *> & inputs)
211 {
212  if (operation.getPredicate() == ::mlir::LLVM::ICmpPredicate::eq)
213  {
214  auto newOp =
215  std::make_unique<llvm::PtrCmpOperation>(llvm::PointerType::Create(), llvm::cmp::eq);
217  rvsdgRegion,
218  std::move(newOp),
219  std::vector<jlm::rvsdg::Output *>(inputs.begin(), inputs.end()));
220  }
221  else if (operation.getPredicate() == ::mlir::LLVM::ICmpPredicate::ne)
222  {
223  auto newOp =
224  std::make_unique<llvm::PtrCmpOperation>(llvm::PointerType::Create(), llvm::cmp::ne);
226  rvsdgRegion,
227  std::move(newOp),
228  std::vector<jlm::rvsdg::Output *>(inputs.begin(), inputs.end()));
229  }
230  else if (operation.getPredicate() == ::mlir::LLVM::ICmpPredicate::sge)
231  {
232  auto newOp =
233  std::make_unique<llvm::PtrCmpOperation>(llvm::PointerType::Create(), llvm::cmp::ge);
235  rvsdgRegion,
236  std::move(newOp),
237  std::vector<jlm::rvsdg::Output *>(inputs.begin(), inputs.end()));
238  }
239  else if (operation.getPredicate() == ::mlir::LLVM::ICmpPredicate::sgt)
240  {
241  auto newOp =
242  std::make_unique<llvm::PtrCmpOperation>(llvm::PointerType::Create(), llvm::cmp::gt);
244  rvsdgRegion,
245  std::move(newOp),
246  std::vector<jlm::rvsdg::Output *>(inputs.begin(), inputs.end()));
247  }
248  else if (operation.getPredicate() == ::mlir::LLVM::ICmpPredicate::sle)
249  {
250  auto newOp =
251  std::make_unique<llvm::PtrCmpOperation>(llvm::PointerType::Create(), llvm::cmp::le);
253  rvsdgRegion,
254  std::move(newOp),
255  std::vector<jlm::rvsdg::Output *>(inputs.begin(), inputs.end()));
256  }
257  else if (operation.getPredicate() == ::mlir::LLVM::ICmpPredicate::slt)
258  {
259  auto newOp =
260  std::make_unique<llvm::PtrCmpOperation>(llvm::PointerType::Create(), llvm::cmp::lt);
262  rvsdgRegion,
263  std::move(newOp),
264  std::vector<jlm::rvsdg::Output *>(inputs.begin(), inputs.end()));
265  }
266  else
267  {
268  JLM_UNREACHABLE("MLIR frontend: Unknown pointer compare operation");
269  }
270 }
271 
// Tries to convert a binary floating-point arith operation (addf/subf/mulf/
// divf/remf) into a jlm::llvm FBinaryOperation node. Returns nullptr when the
// operation is not a recognized two-operand FP operation so the caller can
// try other conversions.
// NOTE(review): extraction dropped doxygen line 273 — the line naming this
// function (presumably MlirToJlmConverter::ConvertFPBinaryNode) is missing.
272 rvsdg::Node *
274  const ::mlir::Operation & mlirOperation,
275  const ::llvm::SmallVector<rvsdg::Output *> & inputs)
276 {
277  if (inputs.size() != 2)
278  return nullptr;
// Defaults are placeholders; both are overwritten in every taken branch.
279  auto op = llvm::fpop::add;
280  auto size = llvm::fpsize::half;
281  if (auto castedOp = ::mlir::dyn_cast<::mlir::arith::AddFOp>(&mlirOperation))
282  {
283  op = llvm::fpop::add;
284  size = ConvertFPSize(castedOp.getType().cast<::mlir::FloatType>().getWidth());
285  }
286  else if (auto castedOp = ::mlir::dyn_cast<::mlir::arith::SubFOp>(&mlirOperation))
287  {
288  op = llvm::fpop::sub;
289  size = ConvertFPSize(castedOp.getType().cast<::mlir::FloatType>().getWidth());
290  }
291  else if (auto castedOp = ::mlir::dyn_cast<::mlir::arith::MulFOp>(&mlirOperation))
292  {
293  op = llvm::fpop::mul;
294  size = ConvertFPSize(castedOp.getType().cast<::mlir::FloatType>().getWidth());
295  }
296  else if (auto castedOp = ::mlir::dyn_cast<::mlir::arith::DivFOp>(&mlirOperation))
297  {
298  op = llvm::fpop::div;
299  size = ConvertFPSize(castedOp.getType().cast<::mlir::FloatType>().getWidth());
300  }
301  else if (auto castedOp = ::mlir::dyn_cast<::mlir::arith::RemFOp>(&mlirOperation))
302  {
303  op = llvm::fpop::mod;
304  size = ConvertFPSize(castedOp.getType().cast<::mlir::FloatType>().getWidth());
305  }
306  else
307  {
308  return nullptr;
309  }
310  return &rvsdg::CreateOpNode<llvm::FBinaryOperation>({ inputs[0], inputs[1] }, op, size);
311 }
312 
// Maps an arith.cmpf predicate to the corresponding jlm fcmp value via the
// shared predicate bijection table.
// NOTE(review): extraction dropped doxygen line 313 — the return-type line of
// this function is missing.
314 MlirToJlmConverter::TryConvertFPCMP(const ::mlir::arith::CmpFPredicate & op)
315 {
316  const auto & map = GetFpCmpPredicateMap();
317  return map.LookupKey(op);
318 }
319 
// Tries to convert a two-operand, single-result integer operation (arith
// add/sub/mul/div/rem, llvm shl/ashr/lshr, arith and/or/xor) into the
// matching jlm::llvm integer operation node of the result's bit width.
// Returns nullptr when the operation is not recognized so the caller can try
// other conversions.
// NOTE(review): extraction dropped doxygen line 321 — the line naming this
// function (presumably MlirToJlmConverter::ConvertBitBinaryNode) is missing.
320 rvsdg::Node *
322  ::mlir::Operation & mlirOperation,
323  const ::llvm::SmallVector<rvsdg::Output *> & inputs)
324 {
325  if (inputs.size() != 2 || mlirOperation.getNumResults() != 1)
326  return nullptr;
327 
328  auto type = mlirOperation.getResult(0).getType();
329 
330  size_t width = 0;
331  if (type.isa<::mlir::IntegerType>())
332  {
333  auto integerType = type.cast<::mlir::IntegerType>();
334  width = integerType.getWidth();
335  }
336  else if (type.isIndex())
337  {
// NOTE(review): extraction dropped doxygen line 338 — presumably
// `width = GetIndexBitWidth();` (index results use the fixed index width);
// confirm against the repository source.
339  }
340  else
341  {
342  return nullptr;
343  }
344 
345  if (::mlir::isa<::mlir::arith::AddIOp>(mlirOperation))
346  {
347  return &rvsdg::CreateOpNode<jlm::llvm::IntegerAddOperation>({ inputs[0], inputs[1] }, width);
348  }
349  else if (::mlir::isa<::mlir::arith::SubIOp>(mlirOperation))
350  {
351  return &rvsdg::CreateOpNode<jlm::llvm::IntegerSubOperation>({ inputs[0], inputs[1] }, width);
352  }
353  else if (::mlir::isa<::mlir::arith::MulIOp>(mlirOperation))
354  {
355  return &rvsdg::CreateOpNode<jlm::llvm::IntegerMulOperation>({ inputs[0], inputs[1] }, width);
356  }
357  else if (::mlir::isa<::mlir::arith::DivSIOp>(mlirOperation))
358  {
359  return &rvsdg::CreateOpNode<jlm::llvm::IntegerSDivOperation>({ inputs[0], inputs[1] }, width);
360  }
361  else if (::mlir::isa<::mlir::arith::DivUIOp>(mlirOperation))
362  {
363  return &rvsdg::CreateOpNode<jlm::llvm::IntegerUDivOperation>({ inputs[0], inputs[1] }, width);
364  }
365  else if (::mlir::isa<::mlir::arith::RemSIOp>(mlirOperation))
366  {
367  return &rvsdg::CreateOpNode<jlm::llvm::IntegerSRemOperation>({ inputs[0], inputs[1] }, width);
368  }
369  else if (::mlir::isa<::mlir::arith::RemUIOp>(mlirOperation))
370  {
371  return &rvsdg::CreateOpNode<jlm::llvm::IntegerURemOperation>({ inputs[0], inputs[1] }, width);
372  }
373  else if (::mlir::isa<::mlir::LLVM::ShlOp>(mlirOperation))
374  {
375  return &rvsdg::CreateOpNode<jlm::llvm::IntegerShlOperation>({ inputs[0], inputs[1] }, width);
376  }
377  else if (::mlir::isa<::mlir::LLVM::AShrOp>(mlirOperation))
378  {
379  return &rvsdg::CreateOpNode<jlm::llvm::IntegerAShrOperation>({ inputs[0], inputs[1] }, width);
380  }
381  else if (::mlir::isa<::mlir::LLVM::LShrOp>(mlirOperation))
382  {
383  return &rvsdg::CreateOpNode<jlm::llvm::IntegerLShrOperation>({ inputs[0], inputs[1] }, width);
384  }
385  else if (::mlir::isa<::mlir::arith::AndIOp>(mlirOperation))
386  {
387  return &rvsdg::CreateOpNode<jlm::llvm::IntegerAndOperation>({ inputs[0], inputs[1] }, width);
388  }
389  else if (::mlir::isa<::mlir::arith::OrIOp>(mlirOperation))
390  {
391  return &rvsdg::CreateOpNode<jlm::llvm::IntegerOrOperation>({ inputs[0], inputs[1] }, width);
392  }
393  else if (::mlir::isa<::mlir::arith::XOrIOp>(mlirOperation))
394  {
395  return &rvsdg::CreateOpNode<jlm::llvm::IntegerXorOperation>({ inputs[0], inputs[1] }, width);
396  }
397  else
398  {
399  return nullptr;
400  }
401 }
402 
403 static std::vector<llvm::MemoryNodeId>
404 arrayAttrToMemoryNodeIds(::mlir::ArrayAttr arrayAttr)
405 {
406  std::vector<llvm::MemoryNodeId> memoryNodeIds;
407  for (auto memoryNodeId : arrayAttr)
408  {
409  memoryNodeIds.push_back(memoryNodeId.cast<::mlir::IntegerAttr>().getInt());
410  }
411  return memoryNodeIds;
412 }
413 
414 std::vector<jlm::rvsdg::Output *>
416  ::mlir::Operation & mlirOperation,
417  rvsdg::Region & rvsdgRegion,
418  const ::llvm::SmallVector<rvsdg::Output *> & inputs)
419 {
420 
421  // ** region Arithmetic Integer Operation **
422  auto convertedBitBinaryNode = ConvertBitBinaryNode(mlirOperation, inputs);
423  // If the operation was converted it means it has been casted to a bit binary operation
424  if (convertedBitBinaryNode)
425  {
426  return rvsdg::outputs(convertedBitBinaryNode);
427  }
428  // ** endregion Arithmetic Integer Operation **
429 
430  // ** region Arithmetic Float Operation **
431  auto convertedFloatBinaryNode = ConvertFPBinaryNode(mlirOperation, inputs);
432  // If the operation was converted it means it has been casted to a fp binary operation
433  if (convertedFloatBinaryNode)
434  {
435  return rvsdg::outputs(convertedFloatBinaryNode);
436  }
437 
438  if (::mlir::isa<::mlir::LLVM::FMulAddOp>(&mlirOperation))
439  {
440  JLM_ASSERT(inputs.size() == 3);
441  return rvsdg::outputs(
442  &llvm::FMulAddIntrinsicOperation::CreateNode(*inputs[0], *inputs[1], *inputs[2]));
443  }
444  // ** endregion Arithmetic Float Operation **
445 
446  if (auto castedOp = ::mlir::dyn_cast<::mlir::arith::ExtUIOp>(&mlirOperation))
447  {
448  auto st = std::dynamic_pointer_cast<const rvsdg::BitType>(inputs[0]->Type());
449  if (!st)
450  JLM_UNREACHABLE("Expected bitstring type for ExtUIOp operation.");
451  ::mlir::Type type = castedOp.getType();
452  return { &llvm::ZExtOperation::Create(*(inputs[0]), ConvertType(type)) };
453  }
454  else if (auto castedOp = ::mlir::dyn_cast<::mlir::arith::ExtSIOp>(&mlirOperation))
455  {
456  auto outputType = castedOp.getOut().getType();
457  auto convertedOutputType = ConvertType(outputType);
458  if (!::mlir::isa<::mlir::IntegerType>(castedOp.getType()))
459  JLM_UNREACHABLE("Expected IntegerType for ExtSIOp operation output.");
461  castedOp.getType().cast<::mlir::IntegerType>().getWidth(),
462  inputs[0]) };
463  }
464  else if (auto sitofpOp = ::mlir::dyn_cast<::mlir::arith::SIToFPOp>(&mlirOperation))
465  {
466  auto st = std::dynamic_pointer_cast<const jlm::rvsdg::BitType>(inputs[0]->Type());
467  if (!st)
468  JLM_UNREACHABLE("Expected bits type for SIToFPOp operation.");
469 
470  auto mlirOutputType = sitofpOp.getType();
471  auto rt = ConvertType(mlirOutputType);
472 
473  return rvsdg::outputs(&rvsdg::CreateOpNode<llvm::SIToFPOperation>(
474  std::vector<jlm::rvsdg::Output *>(inputs.begin(), inputs.end()),
475  std::move(st),
476  std::move(rt)));
477  }
478 
479  else if (::mlir::isa<::mlir::rvsdg::OmegaNode>(&mlirOperation))
480  {
481  // Omega doesn't have a corresponding RVSDG node, so we return an empty vector
482  return {};
483  }
484  else if (::mlir::isa<::mlir::rvsdg::LambdaNode>(&mlirOperation))
485  {
486  return rvsdg::outputs(ConvertLambda(mlirOperation, rvsdgRegion, inputs));
487  }
488  else if (auto callOp = ::mlir::dyn_cast<::mlir::jlm::Call>(&mlirOperation))
489  {
490  std::vector<std::shared_ptr<const rvsdg::Type>> argumentTypes;
491  for (auto arg : callOp.getArgs())
492  {
493  auto type = arg.getType();
494  argumentTypes.push_back(ConvertType(type));
495  }
496  argumentTypes.push_back(llvm::IOStateType::Create());
497  argumentTypes.push_back(llvm::MemoryStateType::Create());
498  std::vector<std::shared_ptr<const rvsdg::Type>> resultTypes;
499  for (auto res : callOp.getResults())
500  {
501  auto type = res.getType();
502  resultTypes.push_back(ConvertType(type));
503  }
504 
505  return rvsdg::outputs(&rvsdg::CreateOpNode<llvm::CallOperation>(
506  std::vector<jlm::rvsdg::Output *>(inputs.begin(), inputs.end()),
507  std::make_shared<rvsdg::FunctionType>(argumentTypes, resultTypes)));
508  }
509  else if (auto constant = ::mlir::dyn_cast<::mlir::arith::ConstantIntOp>(&mlirOperation))
510  {
511  auto type = constant.getType();
512  JLM_ASSERT(type.getTypeID() == ::mlir::IntegerType::getTypeID());
513  auto integerType = ::mlir::cast<::mlir::IntegerType>(type);
514 
516  rvsdgRegion,
517  integerType.getWidth(),
518  constant.value()));
519  }
520  else if (auto constant = ::mlir::dyn_cast<::mlir::arith::ConstantFloatOp>(&mlirOperation))
521  {
522  auto type = constant.getType();
523  if (!::mlir::isa<::mlir::FloatType>(type))
524  JLM_UNREACHABLE("Expected FloatType for ConstantFloatOp operation.");
525  auto floatType = ::mlir::cast<::mlir::FloatType>(type);
526 
527  auto size = ConvertFPSize(floatType.getWidth());
528  return rvsdg::outputs(
529  &rvsdg::CreateOpNode<llvm::ConstantFP>(rvsdgRegion, size, constant.value()));
530  }
531 
532  // RVSDG does not have an index type. Indices are therefore converted to integers.
533 
534  else if (auto constant = ::mlir::dyn_cast<::mlir::arith::ConstantIndexOp>(&mlirOperation))
535  {
536  auto type = constant.getType();
537  JLM_ASSERT(type.getTypeID() == ::mlir::IndexType::getTypeID());
538 
540  rvsdgRegion,
542  constant.value()));
543  }
544  else if (auto indexCast = ::mlir::dyn_cast<::mlir::arith::IndexCastOp>(&mlirOperation))
545  {
546  auto outputType = indexCast.getResult().getType();
547  auto inputType = indexCast.getIn().getType();
548  unsigned inputBits = inputType.getIntOrFloatBitWidth();
549  unsigned outputBits = outputType.getIntOrFloatBitWidth();
550 
551  if (inputType.isIndex())
552  {
553  if (outputBits == MlirToJlmConverter::GetIndexBitWidth())
554  {
555  // Nothing is needed to be done so we simply pass on the inputs
556  return { inputs.begin(), inputs.end() };
557  }
558  else if (outputBits > MlirToJlmConverter::GetIndexBitWidth())
559  {
560  return { llvm::SExtOperation::create(outputBits, inputs[0]) };
561  }
562  else
563  {
564  return { llvm::TruncOperation::create(outputBits, inputs[0]) };
565  }
566  }
567  else
568  {
569  if (inputBits == MlirToJlmConverter::GetIndexBitWidth())
570  {
571  // Nothing to be done as indices are not supported and of default width
572  return { inputs.begin(), inputs.end() };
573  }
574  else if (inputBits > MlirToJlmConverter::GetIndexBitWidth())
575  {
577  }
578  else
579  {
580  return { &llvm::ZExtOperation::Create(
581  *(inputs[0]),
583  }
584  }
585  }
586 
587  else if (auto negOp = ::mlir::dyn_cast<::mlir::arith::NegFOp>(&mlirOperation))
588  {
589  auto type = negOp.getResult().getType();
590  auto floatType = ::mlir::cast<::mlir::FloatType>(type);
591 
592  llvm::fpsize size = ConvertFPSize(floatType.getWidth());
593  return rvsdg::outputs(&rvsdg::CreateOpNode<jlm::llvm::FNegOperation>({ inputs[0] }, size));
594  }
595 
596  else if (auto extOp = ::mlir::dyn_cast<::mlir::arith::ExtFOp>(&mlirOperation))
597  {
598  auto type = extOp.getResult().getType();
599  auto floatType = ::mlir::cast<::mlir::FloatType>(type);
600 
601  llvm::fpsize size = ConvertFPSize(floatType.getWidth());
602  return rvsdg::outputs(&rvsdg::CreateOpNode<jlm::llvm::FPExtOperation>(
603  { inputs[0] },
604  inputs[0]->Type(),
606  }
607 
608  else if (auto truncOp = ::mlir::dyn_cast<::mlir::arith::TruncIOp>(&mlirOperation))
609  {
610  auto type = truncOp.getResult().getType();
611  auto intType = ::mlir::cast<::mlir::IntegerType>(type);
612  return { llvm::TruncOperation::create(intType.getIntOrFloatBitWidth(), inputs[0]) };
613  }
614  else if (auto constant = ::mlir::dyn_cast<::mlir::arith::ConstantFloatOp>(&mlirOperation))
615  {
616  auto type = constant.getType();
617  auto floatType = ::mlir::cast<::mlir::FloatType>(type);
618 
619  llvm::fpsize size = ConvertFPSize(floatType.getWidth());
620  return rvsdg::outputs(&rvsdg::CreateOpNode<jlm::llvm::ConstantFP>({}, size, constant.value()));
621  }
622 
623  // Binary Integer Comparision operations
624  else if (auto ComOp = ::mlir::dyn_cast<::mlir::arith::CmpIOp>(&mlirOperation))
625  {
626  auto type = ComOp.getOperandTypes()[0];
627  if (type.isa<::mlir::IntegerType>())
628  {
629  auto integerType = ::mlir::cast<::mlir::IntegerType>(type);
630  return rvsdg::outputs(ConvertCmpIOp(ComOp, inputs, integerType.getWidth()));
631  }
632  else if (type.isIndex())
633  {
635  }
636  else
637  {
638  JLM_UNREACHABLE("Wrong type given to CmpIOp.");
639  }
640  }
641 
642  else if (auto ComOp = ::mlir::dyn_cast<::mlir::arith::CmpFOp>(&mlirOperation))
643  {
644  auto type = ComOp.getOperandTypes()[0];
645  auto floatType = ::mlir::cast<::mlir::FloatType>(type);
646  return rvsdg::outputs(&rvsdg::CreateOpNode<llvm::FCmpOperation>(
647  std::vector(inputs.begin(), inputs.end()),
648  TryConvertFPCMP(ComOp.getPredicate()),
649  ConvertFPSize(floatType.getWidth())));
650  }
651 
652  // Pointer compare is mapped to LLVM::ICmpOp
653  else if (auto iComOp = ::mlir::dyn_cast<::mlir::LLVM::ICmpOp>(&mlirOperation))
654  {
655  return rvsdg::outputs(ConvertICmpOp(iComOp, rvsdgRegion, inputs));
656  }
657 
658  else if (auto UndefOp = ::mlir::dyn_cast<::mlir::jlm::Undef>(&mlirOperation))
659  {
660  auto type = UndefOp.getResult().getType();
661  auto jlmType = ConvertType(type);
662  return { jlm::llvm::UndefValueOperation::Create(rvsdgRegion, jlmType) };
663  }
664 
665  else if (auto ArrayOp = ::mlir::dyn_cast<::mlir::jlm::ConstantDataArray>(&mlirOperation))
666  {
667  return { llvm::ConstantDataArray::Create(std::vector(inputs.begin(), inputs.end())) };
668  }
669 
670  else if (auto ZeroOp = ::mlir::dyn_cast<::mlir::LLVM::ZeroOp>(&mlirOperation))
671  {
672  auto type = ZeroOp.getType();
673  // NULL pointers are a special case of ZeroOp
674  if (::mlir::isa<::mlir::LLVM::LLVMPointerType>(type))
675  {
677  }
679  }
680 
681  else if (auto VarArgOp = ::mlir::dyn_cast<::mlir::jlm::CreateVarArgList>(&mlirOperation))
682  {
684  rvsdgRegion,
685  std::vector(inputs.begin(), inputs.end())) };
686  }
687 
688  // Memory operations
689 
690  else if (auto FreeOp = ::mlir::dyn_cast<::mlir::jlm::Free>(&mlirOperation))
691  {
692  return rvsdg::outputs(&rvsdg::CreateOpNode<llvm::FreeOperation>(
693  std::vector(inputs.begin(), inputs.end()),
694  inputs.size() - 2));
695  }
696 
697  else if (auto AllocaOp = ::mlir::dyn_cast<::mlir::jlm::Alloca>(&mlirOperation))
698  {
699  auto outputType = AllocaOp.getValueType();
700 
701  auto jlmType = ConvertType(outputType);
702  if (jlmType->Kind() != rvsdg::TypeKind::Value)
703  JLM_UNREACHABLE("Expected ValueType for AllocaOp operation.");
704 
705  if (!rvsdg::is<const rvsdg::BitType>(inputs[0]->Type()))
706  JLM_UNREACHABLE("Expected BitType for AllocaOp operation.");
707 
708  auto jlmBitType = std::dynamic_pointer_cast<const jlm::rvsdg::BitType>(inputs[0]->Type());
709 
710  return rvsdg::outputs(&rvsdg::CreateOpNode<llvm::AllocaOperation>(
711  std::vector(inputs.begin(), inputs.end()),
712  jlmType,
713  jlmBitType,
714  AllocaOp.getAlignment()));
715  }
716  else if (auto MemstateMergeOp = ::mlir::dyn_cast<::mlir::rvsdg::MemStateMerge>(&mlirOperation))
717  {
718  auto operands = std::vector(inputs.begin(), inputs.end());
720  }
721  else if (
722  auto LambdaEntryMemstateSplitOp =
723  ::mlir::dyn_cast<::mlir::rvsdg::LambdaEntryMemoryStateSplit>(&mlirOperation))
724  {
725  auto memoryNodeIds =
726  arrayAttrToMemoryNodeIds(LambdaEntryMemstateSplitOp.getMemoryStateIndices());
727 
728  auto operands = std::vector(inputs.begin(), inputs.end());
730  *operands.front(),
731  std::move(memoryNodeIds)));
732  }
733  if (auto LambdaExitMemstateMergeOp =
734  ::mlir::dyn_cast<::mlir::rvsdg::LambdaExitMemoryStateMerge>(&mlirOperation))
735  {
736  auto memoryNodeIds =
737  arrayAttrToMemoryNodeIds(LambdaExitMemstateMergeOp.getMemoryStateIndices());
738 
739  auto operands = std::vector(inputs.begin(), inputs.end());
741  rvsdgRegion,
742  operands,
743  std::move(memoryNodeIds)));
744  }
745  else if (
746  auto CallEntryMemstateMergeOp =
747  ::mlir::dyn_cast<::mlir::rvsdg::CallEntryMemoryStateMerge>(&mlirOperation))
748  {
749  auto memoryNodeIds = arrayAttrToMemoryNodeIds(CallEntryMemstateMergeOp.getMemoryStateIndices());
750 
751  auto operands = std::vector(inputs.begin(), inputs.end());
753  rvsdgRegion,
754  operands,
755  std::move(memoryNodeIds)));
756  }
757  else if (
758  auto CallExitMemstateSplitOp =
759  ::mlir::dyn_cast<::mlir::rvsdg::CallExitMemoryStateSplit>(&mlirOperation))
760  {
761  auto memoryNodeIds = arrayAttrToMemoryNodeIds(CallExitMemstateSplitOp.getMemoryStateIndices());
762 
763  auto operands = std::vector(inputs.begin(), inputs.end());
765  *operands.front(),
766  std::move(memoryNodeIds)));
767  }
768  else if (::mlir::isa<::mlir::rvsdg::MemoryStateJoin>(&mlirOperation))
769  {
770  std::vector operands(inputs.begin(), inputs.end());
772  }
773  else if (auto IOBarrierOp = ::mlir::dyn_cast<::mlir::jlm::IOBarrier>(&mlirOperation))
774  {
775  auto type = IOBarrierOp.getResult().getType();
776  return rvsdg::outputs(&rvsdg::CreateOpNode<llvm::IOBarrierOperation>(
777  std::vector(inputs.begin(), inputs.end()),
778  ConvertType(type)));
779  }
780  else if (auto MallocOp = ::mlir::dyn_cast<::mlir::jlm::Malloc>(&mlirOperation))
781  {
782  return outputs(&llvm::MallocOperation::createNode(*inputs[0], *inputs[1]));
783  }
784  else if (auto StoreOp = ::mlir::dyn_cast<::mlir::jlm::Store>(&mlirOperation))
785  {
786  auto address = inputs[0];
787  auto value = inputs[1];
788  auto memoryStateInputs = std::vector(std::next(inputs.begin(), 2), inputs.end());
790  *address,
791  *value,
792  memoryStateInputs,
793  StoreOp.getAlignment()));
794  }
795  else if (auto LoadOp = ::mlir::dyn_cast<::mlir::jlm::Load>(&mlirOperation))
796  {
797  auto address = inputs[0];
798  auto memoryStateInputs = std::vector(std::next(inputs.begin()), inputs.end());
799  auto outputType = LoadOp.getOutput().getType();
800  auto jlmType = ConvertType(outputType);
801  if (jlmType->Kind() != rvsdg::TypeKind::Value)
802  JLM_UNREACHABLE("Expected ValueType for LoadOp operation output.");
804  *address,
805  memoryStateInputs,
806  jlmType,
807  LoadOp.getAlignment()));
808  }
809  else if (auto GepOp = ::mlir::dyn_cast<::mlir::LLVM::GEPOp>(&mlirOperation))
810  {
811  auto elemType = GepOp.getElemType();
812  auto pointeeType = ConvertType(elemType);
813  if (pointeeType->Kind() != rvsdg::TypeKind::Value)
814  JLM_UNREACHABLE("Expected ValueType for GepOp operation pointee.");
815 
816  std::vector<rvsdg::Output *> indices;
817  // The first input is the base pointer
818  size_t dynamicInput = 1;
819  for (int32_t constant : GepOp.getRawConstantIndices())
820  {
821  // If magic number then its a dynamic index
822  if (constant == ::mlir::LLVM::GEPOp::kDynamicIndex)
823  {
824  indices.push_back(inputs[dynamicInput++]);
825  }
826  else
827  {
828  // Constant indices are not part of the inputs to a GEPOp,
829  // but they are required as explicit nodes in RVSDG
830  indices.push_back(
831  jlm::llvm::IntegerConstantOperation::Create(rvsdgRegion, 32, constant).output(0));
832  }
833  }
834 
836  inputs[0],
837  indices,
838  pointeeType,
840  }
841  // * region Structural nodes **
842  else if (auto MlirCtrlConst = ::mlir::dyn_cast<::mlir::rvsdg::ConstantCtrl>(&mlirOperation))
843  {
844  JLM_ASSERT(::mlir::isa<::mlir::rvsdg::RVSDG_CTRLType>(MlirCtrlConst.getType()));
846  rvsdgRegion,
847  ::mlir::cast<::mlir::rvsdg::RVSDG_CTRLType>(MlirCtrlConst.getType()).getNumOptions(),
848  MlirCtrlConst.getValue()) };
849  }
850  else if (auto mlirGammaNode = ::mlir::dyn_cast<::mlir::rvsdg::GammaNode>(&mlirOperation))
851  {
852  auto rvsdgGammaNode = rvsdg::GammaNode::create(
853  inputs[0], // predicate
854  mlirGammaNode.getNumRegions() // nalternatives
855  );
856 
857  // Add inputs to the gamma node and to all it's subregions
858  for (size_t i = 1; i < inputs.size(); i++)
859  {
860  rvsdgGammaNode->AddEntryVar(inputs[i]);
861  }
862 
863  ::llvm::SmallVector<::llvm::SmallVector<jlm::rvsdg::Output *>> regionResults;
864  for (size_t i = 0; i < mlirGammaNode.getNumRegions(); i++)
865  {
866  regionResults.push_back(
867  ConvertRegion(mlirGammaNode.getRegion(i), *rvsdgGammaNode->subregion(i)));
868  }
869 
870  // Connect the outputs
872  for (size_t exitvarIndex = 0; exitvarIndex < regionResults[0].size(); exitvarIndex++)
873  {
874  std::vector<rvsdg::Output *> exitvars;
875  for (size_t regionIndex = 0; regionIndex < mlirGammaNode.getNumRegions(); regionIndex++)
876  {
877  JLM_ASSERT(regionResults[regionIndex].size() == regionResults[0].size());
878  exitvars.push_back(regionResults[regionIndex][exitvarIndex]);
879  }
880  rvsdgGammaNode->AddExitVar(exitvars);
881  }
882 
883  return rvsdg::outputs(rvsdgGammaNode);
884  }
885  else if (auto mlirThetaNode = ::mlir::dyn_cast<::mlir::rvsdg::ThetaNode>(&mlirOperation))
886  {
887  auto rvsdgThetaNode = rvsdg::ThetaNode::create(&rvsdgRegion);
888 
889  // Add loop vars to the theta node
890  for (size_t i = 0; i < inputs.size(); i++)
891  {
892  rvsdgThetaNode->AddLoopVar(inputs[i]);
893  }
894 
895  auto regionResults = ConvertRegion(mlirThetaNode.getRegion(), *rvsdgThetaNode->subregion());
896 
897  rvsdgThetaNode->set_predicate(regionResults[0]);
898 
899  auto loopvars = rvsdgThetaNode->GetLoopVars();
900  for (size_t i = 1; i < regionResults.size(); i++)
901  {
902  loopvars[i - 1].post->divert_to(regionResults[i]);
903  }
904 
905  return rvsdg::outputs(rvsdgThetaNode);
906  }
907  else if (auto mlirDeltaNode = ::mlir::dyn_cast<::mlir::rvsdg::DeltaNode>(&mlirOperation))
908  {
909  auto & deltaRegion = mlirDeltaNode.getRegion();
910  auto & deltaBlock = deltaRegion.front();
911  auto terminator = deltaBlock.getTerminator();
912 
913  auto mlirOutputType = terminator->getOperand(0).getType();
914  auto outputType = ConvertType(mlirOutputType);
915  auto linakgeString = mlirDeltaNode.getLinkage().str();
916  auto rvsdgDeltaNode = rvsdg::DeltaNode::Create(
917  &rvsdgRegion,
919  outputType,
920  mlirDeltaNode.getName().str(),
921  ConvertLinkage(linakgeString),
922  mlirDeltaNode.getSection().str(),
923  mlirDeltaNode.getConstant()));
924 
925  auto outputVector = ConvertRegion(mlirDeltaNode.getRegion(), *rvsdgDeltaNode->subregion());
926 
927  if (outputVector.size() != 1)
928  JLM_UNREACHABLE("Expected 1 output for Delta operation.");
929 
930  rvsdgDeltaNode->finalize(outputVector[0]);
931 
932  return rvsdg::outputs(rvsdgDeltaNode);
933  }
934  else if (auto mlirMatch = ::mlir::dyn_cast<::mlir::rvsdg::Match>(&mlirOperation))
935  {
936  std::unordered_map<uint64_t, uint64_t> mapping;
937  uint64_t defaultAlternative = 0;
938  for (auto & attr : mlirMatch.getMapping())
939  {
940  JLM_ASSERT(attr.isa<::mlir::rvsdg::MatchRuleAttr>());
941  auto matchRuleAttr = attr.cast<::mlir::rvsdg::MatchRuleAttr>();
942  if (matchRuleAttr.isDefault())
943  {
944  defaultAlternative = matchRuleAttr.getIndex();
945  continue;
946  }
947  // In our Mlir implementation, an index is associated with a single value
948  mapping[matchRuleAttr.getValues().front()] = matchRuleAttr.getIndex();
949  }
950 
952  *(inputs[0]), // predicate
953  mapping, // mapping
954  defaultAlternative, // defaultAlternative
955  mlirMatch.getMapping().size() // numAlternatives
956  ) };
957  }
958  else if (auto selectOp = ::mlir::dyn_cast<::mlir::arith::SelectOp>(&mlirOperation))
959  {
960  auto type = selectOp.getType();
961  auto jlmType = ConvertType(type);
962  return rvsdg::outputs(&rvsdg::CreateOpNode<jlm::llvm::SelectOperation>(
963  std::vector(inputs.begin(), inputs.end()),
964  jlmType));
965  }
966  else if (auto mlirOmegaResult = ::mlir::dyn_cast<::mlir::rvsdg::OmegaResult>(&mlirOperation))
967  {
968  for (auto input : inputs)
969  {
970  auto origin = rvsdg::TryGetOwnerNode<rvsdg::Node>(*input);
971  if (auto lambda = dynamic_cast<rvsdg::LambdaNode *>(origin))
972  {
973  auto op = dynamic_cast<llvm::LlvmLambdaOperation *>(&lambda->GetOperation());
974  jlm::rvsdg::GraphExport::Create(*input, op->name());
975  }
976  else if (auto delta = dynamic_cast<rvsdg::DeltaNode *>(origin))
977  {
978  auto op = util::assertedCast<const llvm::DeltaOperation>(&delta->GetOperation());
979  jlm::rvsdg::GraphExport::Create(*input, op->name());
980  }
981  }
982  return {};
983  }
984  // ** endregion Structural nodes **
985 
986  else if (
987  ::mlir::isa<::mlir::rvsdg::LambdaResult>(&mlirOperation)
988  || ::mlir::isa<::mlir::rvsdg::GammaResult>(&mlirOperation)
989  || ::mlir::isa<::mlir::rvsdg::ThetaResult>(&mlirOperation)
990  || ::mlir::isa<::mlir::rvsdg::DeltaResult>(&mlirOperation)
991  // This is a terminating operation that doesn't have a corresponding RVSDG node
992  || ::mlir::isa<::mlir::rvsdg::OmegaArgument>(&mlirOperation)) // Handled at the top level
993  {
994  return {};
995  }
996  else
997  {
998  mlirOperation.dump();
999  auto message = util::strfmt(
1000  "Operation not implemented: ",
1001  mlirOperation.getName().getStringRef().str(),
1002  "\n");
1003  JLM_UNREACHABLE(message.c_str());
1004  }
1005 }
1006 
1009 {
1010  switch (size)
1011  {
1012  case 16:
1013  return llvm::fpsize::half;
1014  case 32:
1015  return llvm::fpsize::flt;
1016  case 64:
1017  return llvm::fpsize::dbl;
1018  case 80:
1019  return llvm::fpsize::x86fp80;
1020  case 128:
1021  return llvm::fpsize::fp128;
1022  default:
1023  auto message = util::strfmt("Unsupported floating point size: ", size, "\n");
1024  JLM_UNREACHABLE(message.c_str());
1025  break;
1026  }
1027 }
1028 
1030 MlirToJlmConverter::ConvertLinkage(std::string stringValue)
1031 {
1032  if (!stringValue.compare("external_linkage"))
1033  {
1035  }
1036  else if (!stringValue.compare("available_externally_linkage"))
1037  {
1039  }
1040  else if (!stringValue.compare("link_once_any_linkage"))
1041  {
1043  }
1044  else if (!stringValue.compare("link_once_odr_linkage"))
1045  {
1047  }
1048  else if (!stringValue.compare("weak_any_linkage"))
1049  {
1051  }
1052  else if (!stringValue.compare("weak_odr_linkage"))
1053  {
1055  }
1056  else if (!stringValue.compare("appending_linkage"))
1057  {
1059  }
1060  else if (!stringValue.compare("internal_linkage"))
1061  {
1063  }
1064  else if (!stringValue.compare("private_linkage"))
1065  {
1067  }
1068  else if (!stringValue.compare("external_weak_linkage"))
1069  {
1071  }
1072  else if (!stringValue.compare("common_linkage"))
1073  {
1075  }
1076  auto message = util::strfmt("Unsupported linkage: ", stringValue, "\n");
1077  JLM_UNREACHABLE(message.c_str());
1078 }
1079 
1082  ::mlir::Operation & mlirOperation,
1083  rvsdg::Region & rvsdgRegion,
1084  const ::llvm::SmallVector<rvsdg::Output *> & inputs)
1085 {
1086  // Get the name of the function
1087  auto functionNameAttribute = mlirOperation.getAttr(::llvm::StringRef("sym_name"));
1088  JLM_ASSERT(functionNameAttribute != nullptr);
1089  auto functionName = ::mlir::cast<::mlir::StringAttr>(functionNameAttribute);
1090 
1091  auto lambdaOp = ::mlir::dyn_cast<::mlir::rvsdg::LambdaNode>(&mlirOperation);
1092  auto & lambdaRegion = lambdaOp.getRegion();
1093  auto numNonContextVars = lambdaRegion.getNumArguments() - lambdaOp.getNumOperands();
1094  auto & lambdaBlock = lambdaRegion.front();
1095  auto lamdbaTerminator = lambdaBlock.getTerminator();
1096 
1097  // Create the RVSDG function signature
1098  std::vector<std::shared_ptr<const rvsdg::Type>> argumentTypes;
1099  for (size_t argumentIndex = 0; argumentIndex < numNonContextVars; argumentIndex++)
1100  {
1101  auto type = lambdaRegion.getArgument(argumentIndex).getType();
1102  argumentTypes.push_back(ConvertType(type));
1103  }
1104  std::vector<std::shared_ptr<const rvsdg::Type>> resultTypes;
1105  for (auto returnType : lamdbaTerminator->getOperandTypes())
1106  {
1107  resultTypes.push_back(ConvertType(returnType));
1108  }
1109  auto functionType = rvsdg::FunctionType::Create(std::move(argumentTypes), std::move(resultTypes));
1110 
1111  // FIXME
1112  // The linkage should be part of the MLIR attributes so it can be extracted here
1113  auto rvsdgLambda = rvsdg::LambdaNode::Create(
1114  rvsdgRegion,
1116  functionType,
1117  functionName.getValue().str(),
1119 
1120  for (auto input : inputs)
1121  {
1122  rvsdgLambda->AddContextVar(*input);
1123  }
1124 
1125  auto jlmLambdaRegion = rvsdgLambda->subregion();
1126  auto regionResults = ConvertRegion(lambdaRegion, *jlmLambdaRegion);
1127 
1128  rvsdgLambda->finalize(std::vector<rvsdg::Output *>(regionResults.begin(), regionResults.end()));
1129 
1130  return rvsdgLambda;
1131 }
1132 
1133 std::shared_ptr<const rvsdg::Type>
1135 {
1136  if (auto ctrlType = ::mlir::dyn_cast<::mlir::rvsdg::RVSDG_CTRLType>(type))
1137  {
1138  return rvsdg::ControlType::Create(ctrlType.getNumOptions());
1139  }
1140  else if (auto intType = ::mlir::dyn_cast<::mlir::IntegerType>(type))
1141  {
1142  return rvsdg::BitType::Create(intType.getWidth());
1143  }
1144  else if (::mlir::isa<::mlir::Float16Type>(type))
1145  {
1147  }
1148  else if (::mlir::isa<::mlir::Float32Type>(type))
1149  {
1151  }
1152  else if (::mlir::isa<::mlir::Float64Type>(type))
1153  {
1155  }
1156  else if (::mlir::isa<::mlir::Float80Type>(type))
1157  {
1159  }
1160  else if (::mlir::isa<::mlir::Float128Type>(type))
1161  {
1163  }
1164  else if (::mlir::isa<::mlir::rvsdg::MemStateEdgeType>(type))
1165  {
1167  }
1168  else if (::mlir::isa<::mlir::rvsdg::IOStateEdgeType>(type))
1169  {
1170  return llvm::IOStateType::Create();
1171  }
1172  else if (::mlir::isa<::mlir::LLVM::LLVMPointerType>(type))
1173  {
1174  return llvm::PointerType::Create();
1175  }
1176  else if (::mlir::isa<::mlir::jlm::VarargListType>(type))
1177  {
1179  }
1180  else if (auto arrayType = ::mlir::dyn_cast<::mlir::LLVM::LLVMArrayType>(type))
1181  {
1182  auto mlirElementType = arrayType.getElementType();
1183  std::shared_ptr<const rvsdg::Type> elementType = ConvertType(mlirElementType);
1184  return llvm::ArrayType::Create(elementType, arrayType.getNumElements());
1185  }
1186  else if (auto functionType = ::mlir::dyn_cast<::mlir::FunctionType>(type))
1187  {
1188  std::vector<std::shared_ptr<const rvsdg::Type>> argumentTypes;
1189  for (auto argumentType : functionType.getInputs())
1190  {
1191  argumentTypes.push_back(ConvertType(argumentType));
1192  }
1193  std::vector<std::shared_ptr<const rvsdg::Type>> resultTypes;
1194  for (auto resultType : functionType.getResults())
1195  {
1196  resultTypes.push_back(ConvertType(resultType));
1197  }
1198  return rvsdg::FunctionType::Create(argumentTypes, resultTypes);
1199  }
1200  else if (type.isIndex())
1201  {
1202  // RVSDG does not support indices, which are modeled as integers
1204  }
1205  else if (auto structType = ::mlir::dyn_cast<::mlir::LLVM::LLVMStructType>(type))
1206  {
1207  if (StructTypeMap_.HasKey(&structType))
1208  {
1209  return StructTypeMap_.LookupKey(&structType);
1210  }
1211 
1212  std::vector<std::shared_ptr<const rvsdg::Type>> types;
1213  for (auto element : structType.getBody())
1214  {
1215  types.push_back(ConvertType(element));
1216  }
1217 
1218  std::shared_ptr<const llvm::StructType> jlmStructType;
1219  if (structType.isIdentified())
1220  {
1222  structType.getName().str(),
1223  types,
1224  structType.isPacked());
1225  }
1226  else
1227  {
1228  jlmStructType = jlm::llvm::StructType::CreateLiteral(types, structType.isPacked());
1229  }
1230 
1231  StructTypeMap_.Insert(&structType, jlmStructType);
1232  return jlmStructType;
1233  }
1234  else
1235  {
1236  type.dump();
1237  JLM_UNREACHABLE("Type conversion not implemented\n");
1238  }
1239 }
1240 
} // namespace jlm::mlir
static std::shared_ptr< const ArrayType > Create(std::shared_ptr< const Type > type, size_t nelements)
Definition: types.hpp:98
static rvsdg::SimpleNode & CreateNode(rvsdg::Region &region, const std::vector< rvsdg::Output * > &operands, std::vector< MemoryNodeId > memoryNodeIds)
static rvsdg::SimpleNode & CreateNode(rvsdg::Output &operand, std::vector< MemoryNodeId > memoryNodeIds)
static jlm::rvsdg::Output * Create(rvsdg::Region &region, std::shared_ptr< const jlm::rvsdg::Type > type)
Definition: operators.hpp:1900
static jlm::rvsdg::Output * Create(const std::vector< jlm::rvsdg::Output * > &elements)
Definition: operators.hpp:668
static std::unique_ptr< llvm::ThreeAddressCode > Create(std::shared_ptr< const rvsdg::Type > type)
Definition: operators.hpp:454
static std::unique_ptr< DeltaOperation > Create(std::shared_ptr< const rvsdg::Type > type, const std::string &name, const llvm::Linkage &linkage, std::string section, bool constant)
Definition: delta.hpp:76
static rvsdg::SimpleNode & CreateNode(rvsdg::Output &multiplier, rvsdg::Output &multiplicand, rvsdg::Output &summand)
static std::shared_ptr< const FloatingPointType > Create(fpsize size)
Definition: types.cpp:117
static std::unique_ptr< llvm::ThreeAddressCode > Create(const Variable *baseAddress, const std::vector< const Variable * > &offsets, std::shared_ptr< const rvsdg::Type > pointeeType, std::shared_ptr< const rvsdg::Type > resultType)
static std::shared_ptr< const IOStateType > Create()
Definition: types.cpp:343
static rvsdg::Node & Create(rvsdg::Region &region, IntegerValueRepresentation representation)
static rvsdg::SimpleNode & CreateNode(rvsdg::Output &operand, std::vector< MemoryNodeId > memoryNodeIds)
static rvsdg::Node & CreateNode(rvsdg::Region &region, const std::vector< rvsdg::Output * > &operands, const std::vector< MemoryNodeId > &memoryNodeIds)
static LlvmGraphImport & Create(rvsdg::Graph &graph, std::shared_ptr< const rvsdg::Type > valueType, std::shared_ptr< const rvsdg::Type > importedType, std::string name, Linkage linkage, bool isConstant=false)
Definition: RvsdgModule.hpp:81
Lambda operation.
Definition: lambda.hpp:30
static std::unique_ptr< LlvmLambdaOperation > Create(std::shared_ptr< const jlm::rvsdg::FunctionType > type, std::string name, const jlm::llvm::Linkage &linkage, jlm::llvm::AttributeSet attributes)
Definition: lambda.hpp:77
static std::unique_ptr< LlvmRvsdgModule > Create(const util::FilePath &sourceFileName, const std::string &targetTriple, const std::string &dataLayout)
static rvsdg::SimpleNode & CreateNode(rvsdg::Region &region, std::unique_ptr< LoadNonVolatileOperation > loadOperation, const std::vector< rvsdg::Output * > &operands)
Definition: Load.hpp:451
static rvsdg::SimpleNode & createNode(rvsdg::Output &size, rvsdg::Output &ioState)
Definition: operators.hpp:2489
static rvsdg::SimpleNode & CreateNode(const std::vector< rvsdg::Output * > &operands)
static rvsdg::Output * Create(const std::vector< rvsdg::Output * > &operands)
static std::shared_ptr< const MemoryStateType > Create()
Definition: types.cpp:379
static std::shared_ptr< const PointerType > Create()
Definition: types.cpp:45
static std::unique_ptr< llvm::ThreeAddressCode > create(const Variable *operand, const std::shared_ptr< const rvsdg::Type > &type)
Definition: sext.hpp:75
static rvsdg::SimpleNode & CreateNode(rvsdg::Output &address, rvsdg::Output &value, const std::vector< rvsdg::Output * > &memoryStates, size_t alignment)
Definition: Store.hpp:300
static std::shared_ptr< const StructType > CreateIdentified(const std::string &name, std::vector< std::shared_ptr< const Type >> types, bool isPacked)
Definition: types.hpp:296
static std::shared_ptr< const StructType > CreateLiteral(std::vector< std::shared_ptr< const Type >> types, bool isPacked)
Definition: types.hpp:323
static std::unique_ptr< llvm::ThreeAddressCode > create(const Variable *operand, const std::shared_ptr< const jlm::rvsdg::Type > &type)
Definition: operators.hpp:1661
static jlm::rvsdg::Output * Create(rvsdg::Region &region, std::shared_ptr< const jlm::rvsdg::Type > type)
Definition: operators.hpp:1024
static std::shared_ptr< const VariableArgumentType > Create()
Definition: types.cpp:180
static rvsdg::Output * Create(rvsdg::Region &region, const std::vector< rvsdg::Output * > &operands)
Definition: operators.hpp:1447
static rvsdg::Output & Create(rvsdg::Output &operand, const std::shared_ptr< const rvsdg::Type > &resultType)
Definition: operators.hpp:822
static ::llvm::SmallVector< jlm::rvsdg::Output * > GetConvertedInputs(::mlir::Operation &mlirOp, const std::unordered_map< void *, rvsdg::Output * > &outputMap)
std::unique_ptr<::mlir::MLIRContext > Context_
std::vector< jlm::rvsdg::Output * > ConvertOperation(::mlir::Operation &mlirOperation, rvsdg::Region &rvsdgRegion, const ::llvm::SmallVector< rvsdg::Output * > &inputs)
rvsdg::Node * ConvertFPBinaryNode(const ::mlir::Operation &mlirOperation, const ::llvm::SmallVector< rvsdg::Output * > &inputs)
rvsdg::Node * ConvertICmpOp(::mlir::LLVM::ICmpOp &operation, rvsdg::Region &rvsdgRegion, const ::llvm::SmallVector< rvsdg::Output * > &inputs)
rvsdg::Node * ConvertLambda(::mlir::Operation &mlirLambda, rvsdg::Region &rvsdgRegion, const ::llvm::SmallVector< rvsdg::Output * > &inputs)
llvm::fpsize ConvertFPSize(unsigned int size)
llvm::Linkage ConvertLinkage(std::string stringValue)
util::BijectiveMap<::mlir::LLVM::LLVMStructType *, std::shared_ptr< const llvm::StructType > > StructTypeMap_
std::unique_ptr< llvm::LlvmRvsdgModule > ReadAndConvertMlir(const util::FilePath &filePath)
jlm::llvm::fpcmp TryConvertFPCMP(const ::mlir::arith::CmpFPredicate &op)
rvsdg::Node * ConvertCmpIOp(::mlir::arith::CmpIOp &CompOp, const ::llvm::SmallVector< rvsdg::Output * > &inputs, size_t nbits)
::llvm::SmallVector< jlm::rvsdg::Output * > ConvertBlock(::mlir::Block &block, rvsdg::Region &rvsdgRegion)
std::unique_ptr< llvm::LlvmRvsdgModule > ConvertOmega(::mlir::rvsdg::OmegaNode &omegaNode)
::llvm::SmallVector< jlm::rvsdg::Output * > ConvertRegion(::mlir::Region &region, rvsdg::Region &rvsdgRegion)
std::shared_ptr< const rvsdg::Type > ConvertType(const ::mlir::Type &type)
std::unique_ptr< llvm::LlvmRvsdgModule > ConvertMlir(std::unique_ptr<::mlir::Block > &block)
rvsdg::Node * ConvertBitBinaryNode(::mlir::Operation &mlirOperation, const ::llvm::SmallVector< rvsdg::Output * > &inputs)
static std::shared_ptr< const BitType > Create(std::size_t nbits)
Creates bit type of specified width.
Definition: type.cpp:45
static Output & create(Region &region, ControlValueRepresentation value)
Definition: control.hpp:122
static std::shared_ptr< const ControlType > Create(std::size_t nalternatives)
Instantiates control type.
Definition: control.cpp:50
Delta node.
Definition: delta.hpp:129
static DeltaNode * Create(rvsdg::Region *parent, std::unique_ptr< DeltaOperation > op)
Definition: delta.hpp:313
static std::shared_ptr< const FunctionType > Create(std::vector< std::shared_ptr< const jlm::rvsdg::Type >> argumentTypes, std::vector< std::shared_ptr< const jlm::rvsdg::Type >> resultTypes)
static GammaNode * create(jlm::rvsdg::Output *predicate, size_t nalternatives)
Definition: gamma.hpp:161
static GraphExport & Create(Output &origin, std::string name)
Definition: graph.cpp:62
Lambda node.
Definition: lambda.hpp:83
static LambdaNode * Create(rvsdg::Region &parent, std::unique_ptr< LambdaOperation > operation)
Definition: lambda.cpp:140
static Output * Create(Output &predicate, const std::unordered_map< uint64_t, uint64_t > &mapping, const uint64_t defaultAlternative, const size_t numAlternatives)
Definition: control.hpp:242
Represent acyclic RVSDG subgraphs.
Definition: region.hpp:213
Graph * graph() const noexcept
Definition: region.hpp:363
RegionArgument * argument(size_t index) const noexcept
Definition: region.hpp:437
size_t narguments() const noexcept
Definition: region.hpp:431
static SimpleNode & Create(Region &region, std::unique_ptr< Operation > operation, const std::vector< rvsdg::Output * > &operands)
Definition: simple-node.hpp:49
static ThetaNode * create(rvsdg::Region *parent)
Definition: theta.hpp:73
const std::string & to_str() const noexcept
Definition: file.hpp:275
#define JLM_ASSERT(x)
Definition: common.hpp:16
#define JLM_UNREACHABLE(msg)
Definition: common.hpp:43
Linkage linkageFromString(const std::string_view stringValue)
Definition: Linkage.cpp:42
const util::BijectiveMap<::mlir::arith::CmpFPredicate, llvm::fpcmp > & GetFpCmpPredicateMap()
static std::vector< llvm::MemoryNodeId > arrayAttrToMemoryNodeIds(::mlir::ArrayAttr arrayAttr)
static std::string type(const Node *n)
Definition: view.cpp:255
@ Value
Designate a value type.
static std::vector< jlm::rvsdg::Output * > operands(const Node *node)
Definition: node.hpp:1049
static std::vector< jlm::rvsdg::Output * > outputs(const Node *node)
Definition: node.hpp:1058
static std::string strfmt(Args... args)
Definition: strfmt.hpp:35