Skip to content

Commit

Permalink
Support multiple funcs in single mlir file (#298)
Browse files Browse the repository at this point in the history
  • Loading branch information
nsmithtt authored Aug 6, 2024
1 parent 7185429 commit da52460
Show file tree
Hide file tree
Showing 2 changed files with 62 additions and 11 deletions.
20 changes: 9 additions & 11 deletions lib/Target/TTNN/TTNNToFlatbuffer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -237,11 +237,6 @@ std::shared_ptr<void> ttnnToFlatbuffer(Operation *op) {
auto systemDesc =
toFlatbuffer(cache, mlir::cast<tt::SystemDescAttr>(
module->getAttr(tt::SystemDescAttr::name)));
func::FuncOp entry = dyn_cast<func::FuncOp>(*module.getRegion().op_begin());
assert(entry && "Expected an entry function");
Program<::tt::target::ttnn::Operation> program =
funcOpToProgram<::tt::target::ttnn::Operation>(cache, entry,
emitTTNNOperation);

auto mlir = toDebugInfo(fbb, "ttnn", module);
std::string cpp;
Expand All @@ -250,13 +245,16 @@ std::shared_ptr<void> ttnnToFlatbuffer(Operation *op) {
(void)result;

auto debugInfo = ::tt::target::CreateDebugInfoDirect(fbb, mlir, cpp.c_str());
auto programOffset = ::tt::target::ttnn::CreateProgramDirect(
fbb, program.name, &program.inputs, &program.outputs, &program.ops,
debugInfo);

std::vector<::flatbuffers::Offset<::tt::target::ttnn::Program>> programs = {
programOffset,
};
std::vector<::flatbuffers::Offset<::tt::target::ttnn::Program>> programs;
module->walk([&](func::FuncOp func) {
Program<::tt::target::ttnn::Operation> program =
funcOpToProgram<::tt::target::ttnn::Operation>(cache, func,
emitTTNNOperation);
programs.push_back(::tt::target::ttnn::CreateProgramDirect(
fbb, program.name, &program.inputs, &program.outputs, &program.ops,
debugInfo));
});

auto binary = ::tt::target::ttnn::CreateTTNNBinaryDirect(
fbb, &binaryVersion, ::ttmlir::getGitHash(), systemDesc, &programs);
Expand Down
53 changes: 53 additions & 0 deletions test/ttmlir/Silicon/TTNN/simple_eltwise.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
// RUN: ttmlir-opt --ttir-load-system-desc --ttir-implicit-device --ttir-layout --ttnn-open-device --convert-ttir-to-ttnn %s > %t.mlir
// RUN: FileCheck %s --input-file=%t.mlir
// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn

#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>

// Checks TTIR->TTNN lowering of elementwise subtract on two 64x128 f32
// tensors: device open, full (materializing the empty output tensor),
// memory-config conversions around the op, and device close.
func.func @subtract(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
  // CHECK: %[[C:.*]] = "ttnn.open_device"[[C:.*]]
  // CHECK: %[[C:.*]] = "ttnn.full"[[C:.*]]
  // Output buffer for the destination-passing-style TTIR op below.
  %0 = tensor.empty() : tensor<64x128xf32>
  // CHECK: %[[C:.*]] = "ttnn.to_memory_config"[[C:.*]]
  // CHECK: %[[C:.*]] = "ttnn.subtract"[[C:.*]]
  %1 = "ttir.subtract"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
  // CHECK: %[[C:.*]] = "ttnn.to_memory_config"[[C:.*]]
  // CHECK: "ttnn.close_device"[[C:.*]]
  return %1 : tensor<64x128xf32>
}

// Checks TTIR->TTNN lowering of elementwise multiply; a second function in
// the same file, so it also exercises multi-function flatbuffer translation
// via the ttmlir-translate RUN line above.
func.func @multiply(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
  // CHECK: %[[C:.*]] = "ttnn.open_device"[[C:.*]]
  // CHECK: %[[C:.*]] = "ttnn.full"[[C:.*]]
  // Output buffer for the destination-passing-style TTIR op below.
  %0 = tensor.empty() : tensor<64x128xf32>
  // CHECK: %[[C:.*]] = "ttnn.to_memory_config"[[C:.*]]
  // CHECK: %[[C:.*]] = "ttnn.multiply"[[C:.*]]
  %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
  // CHECK: %[[C:.*]] = "ttnn.to_memory_config"[[C:.*]]
  // CHECK: "ttnn.close_device"[[C:.*]]
  return %1 : tensor<64x128xf32>
}

// Checks TTIR->TTNN lowering of unary relu; note the operand segment sizes
// are 1 input + 1 output, unlike the binary ops in this file.
func.func @relu(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
  // CHECK: %[[C:.*]] = "ttnn.open_device"[[C:.*]]
  // CHECK: %[[C:.*]] = "ttnn.full"[[C:.*]]
  // Output buffer for the destination-passing-style TTIR op below.
  %0 = tensor.empty() : tensor<64x128xf32>
  // CHECK: %[[C:.*]] = "ttnn.to_memory_config"[[C:.*]]
  // CHECK: %[[C:.*]] = "ttnn.relu"[[C:.*]]
  %1 = "ttir.relu"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
  // CHECK: %[[C:.*]] = "ttnn.to_memory_config"[[C:.*]]
  // CHECK: "ttnn.close_device"[[C:.*]]
  return %1 : tensor<64x128xf32>
}

// Checks TTIR->TTNN lowering of elementwise greater-equal comparison
// (result is f32 here, matching the TTIR op's declared result type).
func.func @ge(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
  // CHECK: %[[C:.*]] = "ttnn.open_device"[[C:.*]]
  // CHECK: %[[C:.*]] = "ttnn.full"[[C:.*]]
  // Output buffer for the destination-passing-style TTIR op below.
  %0 = tensor.empty() : tensor<64x128xf32>
  // CHECK: %[[C:.*]] = "ttnn.to_memory_config"[[C:.*]]
  // CHECK: %[[C:.*]] = "ttnn.ge"[[C:.*]]
  %1 = "ttir.ge"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
  // CHECK: %[[C:.*]] = "ttnn.to_memory_config"[[C:.*]]
  // CHECK: "ttnn.close_device"[[C:.*]]
  return %1 : tensor<64x128xf32>
}

0 comments on commit da52460

Please sign in to comment.