Fix code to support the latest MLIR as well as cvc5 1.2.0
The MLIR commit is 7eb3264
aqjune committed Oct 23, 2024
1 parent a661c37 commit 2c4224f
Showing 16 changed files with 397 additions and 351 deletions.
4 changes: 2 additions & 2 deletions CMakeLists.txt
@@ -30,9 +30,9 @@ if(USE_Z3)
 endif()
 
 if(USE_cvc5)
-  find_package(cvc5 0.0.3)
+  find_package(cvc5 1.2.0)
   if(cvc5_FOUND)
-    set(CVC5_LIBRARY cvc5::cvc5-shared)
+    set(CVC5_LIBRARY cvc5::cvc5)
     get_property(CVC5_INCLUDE_DIRS TARGET ${CVC5_LIBRARY} PROPERTY INTERFACE_INCLUDE_DIRECTORIES)
     add_compile_definitions(SOLVER_CVC5)
     message(STATUS "Found cvc5 ${cvc5_VERSION} from ${cvc5_DIR}/cvc5Config.cmake")
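As the diff above reflects, cvc5 1.x installs export the `cvc5::cvc5` CMake target in place of the old `cvc5::cvc5-shared`. As a quick sanity check that a cvc5 1.2.0 install is usable, something along these lines should build with `g++ smoke.cpp -lcvc5` (a minimal sketch using cvc5's C++ API; the choice of logic and the variable names are illustrative, not taken from this repository):

```cpp
// smoke.cpp: trivial satisfiability query against cvc5's C++ API.
#include <cvc5/cvc5.h>
#include <iostream>

int main() {
  cvc5::Solver solver;
  solver.setLogic("QF_BV");                       // quantifier-free bit-vectors
  cvc5::Sort bv32 = solver.mkBitVectorSort(32);
  cvc5::Term x = solver.mkConst(bv32, "x");
  solver.assertFormula(solver.mkTerm(cvc5::Kind::EQUAL, {x, x}));
  std::cout << solver.checkSat() << std::endl;    // expect: sat
  return 0;
}
```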
21 changes: 15 additions & 6 deletions README.md
@@ -11,21 +11,29 @@ Prerequisites: [CMake](https://cmake.org/download/)(>=3.13),
 [Python3](https://www.python.org/downloads/),
 Solvers (at least one of them must be used):
 [z3-4.8.13](https://github.com/Z3Prover/z3/releases/tag/z3-4.8.13),
-[cvc5-0.0.3(limited support)](https://github.com/cvc5/cvc5/releases/tag/cvc5-0.0.3)
+[cvc5-1.2.0](https://github.com/cvc5/cvc5/releases/tag/cvc5-1.2.0)
 
 Optional prerequisites: [Ninja](https://ninja-build.org/)
 
 ### Building dependencies

 You will need to build & install MLIR.
 Please follow LLVM's [Getting Started](https://llvm.org/docs/GettingStarted.html#getting-the-source-code-and-building-llvm), and run `cmake --build . --target install`.
+If you already have MLIR built but lack the sudo privilege needed to install it, you can update the `CMAKE_INSTALL_PREFIX` variable via
+`cmake -DCMAKE_INSTALL_PREFIX=<your local path> ../llvm` and run the install command.
 
-You will also need to build & install Z3.
-Please [build Z3 using CMake](https://github.com/Z3Prover/z3/blob/master/README-CMake.md) and install it to somewhere designated by `CMAKE_INSTALL_PREFIX`.
+<b>Z3.</b>
+Please [build Z3 using CMake](https://github.com/Z3Prover/z3/blob/master/README-CMake.md) and install it to
+somewhere designated by `CMAKE_INSTALL_PREFIX`.
+
+<b>cvc5.</b>
+Download the binary release (shared libs) and extract the compressed files.
 
 ### Building MLIR-TV
 
 ```bash
 cmake -Bbuild \
-# We recommend you use Ninja if you have it on your system
+# We recommend using Ninja if you have it on your system
 [-GNinja] \
 # At least one of USE_Z3 and USE_cvc5 should be set to ON. Build will fail otherwise.
 [-DUSE_Z3=ON|OFF] \
@@ -53,13 +61,14 @@ mlir-tv <.mlir before opt> <.mlir after opt>
 ```

 ## How to test MLIR-TV
 
 ```bash
 cd build
 # A detailed log will be written to build/Testing/Temporary/LastTest.log
 # If you want detailed output on the terminal, please add -V
-ctest -R Opts # Test IR transformation passes
-ctest -R Long # Test passes that take a lot of time
+ctest -R Litmus # Test litmus only
+ctest -R Opts # Test IR transformation passes
+ctest -R Long # Testcases that take a lot of time
 ```
 
 ## Contributions
15 changes: 11 additions & 4 deletions src/abstractops.cpp
@@ -118,6 +118,13 @@ namespace aop {
 
 UsedAbstractOps getUsedAbstractOps() { return usedOps; }
 
+void clearAbstractions() {
+  floatEnc.reset();
+  doubleEnc.reset();
+  int_sumfn.clear();
+  int_dotfn.clear();
+}
+
 void setAbstraction(
     Abstraction abs,
     bool addAssoc,
@@ -264,7 +271,7 @@ AbsFpEncoding::AbsFpEncoding(const llvm::fltSemantics &semantics,
     unsigned limit_bw, unsigned smaller_value_bw, unsigned prec_bw,
     bool useIEEE754Encoding,
     std::string &&fnsuffix)
-  :semantics(semantics), fn_suffix(move(fnsuffix)),
+  :semantics(semantics), fn_suffix(std::move(fnsuffix)),
   useIEEE754Encoding(useIEEE754Encoding) {
   assert(smaller_value_bw > 0);
   verbose("AbsFpEncoding") << fn_suffix << ": limit bits: " << limit_bw
@@ -491,7 +498,7 @@ void AbsFpEncoding::addConstants(const set<llvm::APFloat>& const_set) {
       auto limit_var = Expr::mkFreshVar(limit_bits, "fp_const_limit_bits_");
       limit_bits = Expr::mkIte(sv_prec_bits.uge(inf_sv_value),
           limit_var,
-          mkNonzero(move(limit_var)));
+          mkNonzero(std::move(limit_var)));
       e_value = limit_bits.concat(sv_prec_bits);
 
     } else if (!casting_info->zero_prec_bits) {
@@ -1081,7 +1088,7 @@ Expr AbsFpEncoding::dot(const Expr &a, const Expr &b,
     Expr ai = a.select(i), bi = b.select(i);
     Expr arr = Expr::mkLambda(i, mul(ai, bi));
 
-    return sum(arr, n, nullopt, move(initValue));
+    return sum(arr, n, nullopt, std::move(initValue));
   }
   llvm_unreachable("Unknown abstraction level for fp dot");
 }
@@ -1568,7 +1575,7 @@ Expr intDot(const Expr &a, const Expr &b,
     Expr ai = a.select(i), bi = b.select(i);
     Expr arr = Expr::mkLambda(i, ai * bi);
 
-    return intSum(arr, n, move(initValue));
+    return intSum(arr, n, std::move(initValue));
   }
   llvm_unreachable("Unknown abstraction level for int dot");
 }
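The four `move` → `std::move` edits in this file address a breakage with newer toolchains: an unqualified `move(x)` only resolves if a using-declaration or argument-dependent lookup happens to find `std::move`, and Clang 15+ flags it with `-Wunqualified-std-cast-call`. A standalone illustration of the distinction (not project code):

```cpp
#include <string>
#include <utility>

struct Encoding {
  std::string fn_suffix;
  // Qualified std::move always names the standard-library cast; an
  // unqualified move(suffix) would rely on a using-declaration or ADL.
  explicit Encoding(std::string &&suffix) : fn_suffix(std::move(suffix)) {}
};

int main() {
  Encoding e("fp_add");
  return e.fn_suffix.empty() ? 1 : 0;
}
```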
2 changes: 2 additions & 0 deletions src/abstractops.h
@@ -76,6 +76,8 @@ void setAbstraction(Abstraction abs,
 // A set of options that must not change the precision of validation.
 // useMultiset: To encode commutativity of fp summation, use multiset?
 void setEncodingOptions(bool useMultiset);
+// Release globally allocated objects for abstraction.
+void clearAbstractions();
 
 bool getFpAddAssociativity();
 bool getFpCastIsPrecise();
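The new declaration exposes the reset entry point in the public header. A hypothetical caller, sketching how a driver that validates several function pairs in one process might use it (only the `aop::` names come from `src/abstractops.h`; `validateOnePair` and the loop are assumptions for illustration):

```cpp
#include "abstractops.h"

void validateOnePair(unsigned pairIdx);  // assumed to exist elsewhere

void validateAll(unsigned numPairs) {
  for (unsigned i = 0; i < numPairs; ++i) {
    // ... aop::setAbstraction(...), encode both functions, run the solver ...
    validateOnePair(i);
    // Drop the global fp encodings and cached int sum/dot functions so the
    // next pair starts from a clean state.
    aop::clearAbstractions();
  }
}
```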
29 changes: 15 additions & 14 deletions src/analysis.cpp
@@ -1,5 +1,6 @@
 #include "analysis.h"
 #include "debug.h"
+#include "mlir/IR/BuiltinAttributes.h"
 #include "value.h"
 #include "utils.h"
 
@@ -101,7 +102,7 @@ void analyzeAPFloat(
 void analyzeAttr(const mlir::Attribute &a, AnalysisResult &res) {
   assert(!a.isa<mlir::ElementsAttr>());
 
-  auto fa = a.dyn_cast<mlir::FloatAttr>();
+  auto fa = mlir::dyn_cast<mlir::FloatAttr>(a);
   if (!fa)
     return;
 
@@ -111,7 +112,7 @@ void analyzeAttr(const mlir::Attribute &a, AnalysisResult &res) {
 
 bool analyzeElemAttr(
     const mlir::ElementsAttr &attr, AnalysisResult &res) {
-  if (auto denseAttr = attr.dyn_cast<mlir::DenseElementsAttr>()) {
+  if (auto denseAttr = mlir::dyn_cast<mlir::DenseElementsAttr>(attr)) {
     if (denseAttr.isSplat()) {
       analyzeAttr(denseAttr.getSplatValue<mlir::Attribute>(), res);
     } else {
@@ -123,7 +124,7 @@ bool analyzeElemAttr(
         analyzeAttr(attr, res);
       }
     }
-  } else if (auto sparseAttr = attr.dyn_cast<mlir::SparseElementsAttr>()) {
+  } else if (auto sparseAttr = mlir::dyn_cast<mlir::SparseElementsAttr>(attr)) {
     if (Tensor::MAX_CONST_SIZE >= 0 &&
         sparseAttr.getNumElements() > Tensor::MAX_CONST_SIZE)
       return false;
@@ -176,10 +177,10 @@ void analyzeVariable(
       f64Count++;
 
     return;
-  } else if (!ty.isa<mlir::TensorType>() && !ty.isa<mlir::MemRefType>())
+  } else if (!mlir::isa<mlir::TensorType>(ty) && !mlir::isa<mlir::MemRefType>(ty))
     return;
 
-  auto tensorty = ty.cast<mlir::ShapedType>();
+  auto tensorty = mlir::cast<mlir::ShapedType>(ty);
   auto elemty = tensorty.getElementType();
 
   if (doCount) {
@@ -201,12 +202,12 @@ void analyzeVariable(
       f64Count ++;
       if (cnt > 0)
         f64ElemsCount += cnt - 1;
-  } else if (elemty.isa<mlir::FloatType>()) {
+  } else if (mlir::isa<mlir::FloatType>(elemty)) {
     throw UnsupportedException(ty, "Unsupported type");
   }
 }
 
-  if (ty.isa<mlir::MemRefType>() && !config.isOperand)
+  if (mlir::isa<mlir::MemRefType>(ty) && !config.isOperand)
     memrefCnt[elemty]++;
 }
 
@@ -226,7 +227,7 @@ bool analyzeOp(mlir::memref::GetGlobalOp op, AnalysisResult &res) {
   res.memref.usedGlobals[glbName.str()] = glb;
 
   if (glb.getConstant() && glb.getInitialValue()) {
-    analyzeElemAttr(glb.getInitialValue()->cast<mlir::ElementsAttr>(), res);
+    analyzeElemAttr(mlir::cast<mlir::ElementsAttr>(glb.getInitialValue().value()), res);
   }
   return true;
 }
@@ -241,8 +242,8 @@ bool analyzeOp(mlir::arith::ConstantFloatOp op, AnalysisResult &res) {
 
 template<>
 bool analyzeOp(mlir::arith::ConstantOp op, AnalysisResult &res) {
-  auto tensorty = op.getType().dyn_cast<mlir::RankedTensorType>();
-  auto eattr = op.getValue().dyn_cast<mlir::ElementsAttr>();
+  auto tensorty = mlir::dyn_cast<mlir::RankedTensorType>(op.getType());
+  auto eattr = mlir::dyn_cast<mlir::ElementsAttr>(op.getValue());
   if (!tensorty || !eattr) return true;
 
   bool processed = analyzeElemAttr(eattr, res);
@@ -257,8 +258,8 @@ bool analyzeOp(mlir::arith::ConstantOp op, AnalysisResult &res) {
 
 template<>
 bool analyzeOp(mlir::tosa::ConstOp op, AnalysisResult &res) {
-  auto tensorty = op.getType().dyn_cast<mlir::RankedTensorType>();
-  auto eattr = op.getValue().dyn_cast<mlir::ElementsAttr>();
+  auto tensorty = mlir::dyn_cast<mlir::RankedTensorType>(op.getType());
+  auto eattr = mlir::dyn_cast<mlir::ElementsAttr>(op.getValue());
   if (!tensorty || !eattr) return true;
 
   bool processed = analyzeElemAttr(eattr, res);
@@ -283,7 +284,7 @@ template<>
 bool analyzeOp(mlir::linalg::GenericOp op, AnalysisResult &res) {
   // If generic loop has reduction loops, then result is not elementwise
   auto indexingMaps = op.getIndexingMaps().getValue();
-  auto outputMap = indexingMaps.back().cast<mlir::AffineMapAttr>().getValue();
+  auto outputMap = mlir::cast<mlir::AffineMapAttr>(indexingMaps.back()).getValue();
   bool isReductionLoop = !outputMap.isPermutation();
   if (isReductionLoop)
     res.isElementwiseFPOps = false;
@@ -337,7 +338,7 @@ void analyzeBlock(
     // GenericOp accepts memref argument as input & output
     // and newly created FPs can be stored to output memref.
     if (auto op2 = mlir::dyn_cast<mlir::linalg::GenericOp>(op)) {
-      if (op2.hasBufferSemantics()) {
+      if (op2.hasPureBufferSemantics()) {
         for (const auto &operand: op2.getOutputs()) {
           analyzeVariable(operand, res, VarAnalysisConfig::operand());
         }
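Nearly all of the analysis.cpp changes are one mechanical migration: recent MLIR removed the member-style casts on `Type` and `Attribute` in favor of the free functions `mlir::isa` / `mlir::cast` / `mlir::dyn_cast`, mirroring `llvm::cast`. A separate upstream rename turned linalg's `hasBufferSemantics()` into `hasPureBufferSemantics()`, and `GlobalOp::getInitialValue()` now yields a `std::optional` unwrapped with `.value()`. A compact before/after sketch of the cast migration (includes are illustrative and assume an MLIR tree on the include path):

```cpp
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypes.h"

void castMigrationSketch(mlir::Attribute attr, mlir::Type ty) {
  // Before: auto fa = attr.dyn_cast<mlir::FloatAttr>();
  if (auto fa = mlir::dyn_cast<mlir::FloatAttr>(attr)) {
    (void)fa;  // use the float attribute
  }
  // Before: ty.isa<mlir::TensorType>() and ty.cast<mlir::ShapedType>()
  if (mlir::isa<mlir::TensorType>(ty)) {
    auto shaped = mlir::cast<mlir::ShapedType>(ty);
    (void)shaped.getElementType();
  }
}
```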
