Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

dlrm pytorch model support #261

Open
1 task
AmosLewis opened this issue Jun 12, 2024 · 0 comments
Open
1 task

dlrm pytorch model support #261

AmosLewis opened this issue Jun 12, 2024 · 0 comments
Labels

Comments

@AmosLewis
Copy link
Collaborator

python ./run.py --torchmlirbuild ../../torch-mlir/build --tolerance 0.001 0.001 --cachedir ./huggingface_cache --ireebuild ../../iree-build -f pytorch -g models --mode onnx --report --torchtolinalg --tests pytorch/models/dlrm

tests model-run onnx-import torch-mlir iree-compile inference
pytorch/models/dlrm passed passed failed notrun notrun
iree candidate-20240610.920

torch-mlir 
commit 7e0e23c66820d1db548103acbdf1337f701dc5a3 (upstream/main)
Author: Sambhav Jain <[email protected]>
Date:   Sun Jun 9 00:32:49 2024 -0700

    Test custom op import with symbolic shapes (#3431)
    
    Tests the basic constructs of registering a custom op and its abstract
    implementations (with FakeTensors) in python, going through TorchDynamo
    export, followed by importing the shape expressions in the Torch
    dialect.
    
    Also fixes the importer where previously the symbolic bind op insertion
    was not gated in one place.

commands.log

PYTHONPATH=/home/chi/src/torch-mlir/build/tools/torch-mlir/python_packages/torch_mlir python runmodel.py  --torchmlirimport fximport --todtype default --mode onnx --outfileprefix dlrm 1> model-run.log 2>&1
PYTHONPATH=/home/chi/src/torch-mlir/build/tools/torch-mlir/python_packages/torch_mlir python -m torch_mlir.tools.import_onnx dlrm.default.onnx -o dlrm.default.torch-onnx.mlir 1> onnx-import.log 2>&1
/home/chi/src/torch-mlir/build/bin/torch-mlir-opt -pass-pipeline='builtin.module(func.func(convert-torch-onnx-to-torch),torch-lower-to-backend-contract,func.func(cse,canonicalize),torch-backend-to-linalg-on-tensors-backend-pipeline)' dlrm.default.torch-onnx.mlir > dlrm.default.pytorch.linalg.mlir 2>torch-mlir.log

torch-mlir.log

dlrm.default.torch-onnx.mlir:149:12: error: failed to legalize operation 'torch.operator' that was explicitly marked illegal
    %145 = torch.operator "onnx.Loop"(%144, %129) : (!torch.vtensor<[],si64>, !torch.vtensor<[],i1>) -> !torch.vtensor<[?,16],f32> {
           ^
dlrm.default.torch-onnx.mlir:149:12: note: see current operation: 
%186 = "torch.operator"(%53, %65) <{name = "onnx.Loop"}> ({
^bb0(%arg53: !torch.vtensor<[],si64>, %arg54: !torch.vtensor<[],i1>):
  %972 = "torch.aten.lt.Scalar"(%arg53, %108) : (!torch.vtensor<[],si64>, !torch.int) -> !torch.vtensor<[],i1>
  %973 = "torch.aten.add.Scalar"(%arg53, %52, %109) : (!torch.vtensor<[],si64>, !torch.int, !torch.int) -> !torch.vtensor<[],si64>
  %974 = "torch.aten.where.self"(%972, %973, %arg53) : (!torch.vtensor<[],i1>, !torch.vtensor<[],si64>, !torch.vtensor<[],si64>) -> !torch.vtensor<[],si64>
  %975 = "torch.aten.unsqueeze"(%974, %108) : (!torch.vtensor<[],si64>, !torch.int) -> !torch.vtensor<[1],si64>
  %976 = "torch.aten.index_select"(%184, %108, %975) : (!torch.vtensor<[129],si64>, !torch.int, !torch.vtensor<[1],si64>) -> !torch.vtensor<[1],si64>
  %977 = "torch.aten.squeeze"(%976) : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[],si64>
  %978 = "torch.aten.lt.Scalar"(%arg53, %108) : (!torch.vtensor<[],si64>, !torch.int) -> !torch.vtensor<[],i1>
  %979 = "torch.aten.add.Scalar"(%arg53, %63, %109) : (!torch.vtensor<[],si64>, !torch.int, !torch.int) -> !torch.vtensor<[],si64>
  %980 = "torch.aten.where.self"(%978, %979, %arg53) : (!torch.vtensor<[],i1>, !torch.vtensor<[],si64>, !torch.vtensor<[],si64>) -> !torch.vtensor<[],si64>
  %981 = "torch.aten.unsqueeze"(%980, %108) : (!torch.vtensor<[],si64>, !torch.int) -> !torch.vtensor<[1],si64>
  %982 = "torch.aten.index_select"(%185, %108, %981) : (!torch.vtensor<[128],si64>, !torch.int, !torch.vtensor<[1],si64>) -> !torch.vtensor<[1],si64>
  %983 = "torch.aten.squeeze"(%982) : (!torch.vtensor<[1],si64>) -> !torch.vtensor<[],si64>
  %984 = "torch.aten.unsqueeze"(%977, %108) : (!torch.vtensor<[],si64>, !torch.int) -> !torch.vtensor<[1],si64>
  %985 = "torch.aten.unsqueeze"(%983, %108) : (!torch.vtensor<[],si64>, !torch.int) -> !torch.vtensor<[1],si64>
  %986 = "torch.aten.item"(%984) : (!torch.vtensor<[1],si64>) -> !torch.int
  %987 = "torch.aten.item"(%985) : (!torch.vtensor<[1],si64>) -> !torch.int
  %988 = "torch.aten.slice.Tensor"(%130, %108, %986, %987, %109) : (!torch.vtensor<[128],si64>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[?],si64>
  %989 = "torch.aten.lt.Scalar"(%988, %108) : (!torch.vtensor<[?],si64>, !torch.int) -> !torch.vtensor<[?],i1>
  %990 = "torch.aten.add.Scalar"(%988, %51, %109) : (!torch.vtensor<[?],si64>, !torch.int, !torch.int) -> !torch.vtensor<[?],si64>
  %991 = "torch.aten.where.self"(%989, %990, %988) : (!torch.vtensor<[?],i1>, !torch.vtensor<[?],si64>, !torch.vtensor<[?],si64>) -> !torch.vtensor<[?],si64>
  %992 = "torch.aten.size.int"(%991, %108) : (!torch.vtensor<[?],si64>, !torch.int) -> !torch.int
  %993 = "torch.aten.index_select"(%75, %108, %991) : (!torch.vtensor<[1460,16],f32>, !torch.int, !torch.vtensor<[?],si64>) -> !torch.vtensor<[?,16],f32>
  %994 = "torch.prim.ListConstruct"(%108) : (!torch.int) -> !torch.list<int>
  %995 = "torch.aten.sum.dim_IntList"(%993, %994, %64, %107) : (!torch.vtensor<[?,16],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[16],f32>
  "torch.operator_terminator"(%65, %995) : (!torch.vtensor<[],i1>, !torch.vtensor<[16],f32>) -> ()
}) : (!torch.vtensor<[],si64>, !torch.vtensor<[],i1>) -> !torch.vtensor<[?,16],f32>

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
Projects
None yet
Development

No branches or pull requests

1 participant