From 244f1cbdd310d88f5bf3a6141a70f80a28284d48 Mon Sep 17 00:00:00 2001
From: Jameson Nash
Date: Mon, 8 Jun 2015 23:48:47 -0400
Subject: [PATCH] store tuple and vector types to the stack eagerly [ci skip]

fix #11187, fix #11450, fix #11026, ref #10525, fix #11003
TODO: confirm all of those numbers were fixed
TODO: ensure the lazy-loaded objects have gc-roots
TODO: re-enable VectorType objects, so small objects still end up in registers in the calling convention
TODO: allow moving pointers sometimes rather than copying
TODO: teach the GC how it can re-use an existing pointer as a box

this also changes the julia specSig calling convention to pass non-primitive
types by pointer instead of by-value

this additionally fixes a bug in gen_cfunction that could be exposed by
turning off specSig

this additionally moves the alloca calls in ccall (and other places) to the
entry BasicBlock in the function, ensuring that llvm detects them as static
allocations and moves them into the function prologue

this additionally fixes some undefined behavior from changing a variable's
size through an alloca-cast instead of zext/sext/trunc
---
 base/inference.jl  |   5 +-
 src/builtins.c     |   2 +-
 src/ccall.cpp      | 172 +++++++++++++++++++++++++++++++--------------
 src/cgutils.cpp    | 132 ++++++++++++++++++++--------------
 src/codegen.cpp    | 160 +++++++++++++++++++++++++----------------
 src/intrinsics.cpp |  54 +++++++++-----
 test/ccall.jl      |  12 +++-
 7 files changed, 347 insertions(+), 190 deletions(-)

diff --git a/base/inference.jl b/base/inference.jl
index d6f3a81a10005..d3ea908603ebc 100644
--- a/base/inference.jl
+++ b/base/inference.jl
@@ -3038,6 +3038,7 @@ end
 function remove_redundant_temp_vars(ast, sa)
     varinfo = ast.args[2][2]
     gensym_types = ast.args[2][4]
+    body = ast.args[3]
     for (v,init) in sa
         if ((isa(init,Symbol) || isa(init,SymbolNode)) &&
             any(vi->symequal(vi[1],init), varinfo) &&
@@ -3046,7 +3047,7 @@ function remove_redundant_temp_vars(ast, sa)
             # this transformation is not valid for vars used before def.
             # we need to preserve the point of assignment to know where to
             # throw errors (issue #4645).
-            if !occurs_undef(v, ast.args[3], varinfo)
+            if !occurs_undef(v, body, varinfo)
                 # the transformation is not ideal if the assignment
                 # is present for the auto-unbox functionality
@@ -3055,7 +3056,7 @@ function remove_redundant_temp_vars(ast, sa)
                 # everywhere later in the function
                 if (isa(init,SymbolNode) ?
(init.typ <: (isa(v,GenSym)?gensym_types[(v::GenSym).id+1]:local_typeof(v, varinfo))) : true) delete_var!(ast, v) - sym_replace(ast.args[3], [v], [], [init], []) + sym_replace(body, Any[v], Void[], Any[init], Void[]) end end end diff --git a/src/builtins.c b/src/builtins.c index cbf85182e9866..78658572f918a 100644 --- a/src/builtins.c +++ b/src/builtins.c @@ -273,7 +273,7 @@ static int NOINLINE compare_fields(jl_value_t *a, jl_value_t *b, return 1; } -int jl_egal(jl_value_t *a, jl_value_t *b) +int jl_egal(jl_value_t *a, jl_value_t *b) // warning: a,b may NOT have been gc-rooted by the caller { if (a == b) return 1; diff --git a/src/ccall.cpp b/src/ccall.cpp index eeca6e1627ff8..085e00027d6a9 100644 --- a/src/ccall.cpp +++ b/src/ccall.cpp @@ -246,17 +246,68 @@ static Value *runtime_sym_lookup(PointerType *funcptype, char *f_lib, char *f_na # include "abi_llvm.cpp" #endif -Value *llvm_type_rewrite(Value *v, Type *target_type, jl_value_t *ty, bool isret) +Value *llvm_type_rewrite(Value *v, Type *from_type, Type *target_type, bool tojulia, bool byref, bool issigned) { - if (preferred_llvm_type(ty,isret) == NULL || target_type == NULL || target_type == v->getType()) + Type *ptarget_type = PointerType::get(target_type, 0); + + if (tojulia) { + if (byref) { + if (v->getType() != ptarget_type) { + v = builder.CreatePointerCast(v, ptarget_type); + } + return builder.CreateLoad(v); + } + } + else { + if (byref) { // client is supposed to have already done the alloca and store + if (v->getType() != target_type) { + v = builder.CreatePointerCast(v, target_type); + } + return v; + } + + if (v->getType() != from_type) { // this is already be a pointer in the codegen + if (v->getType() != ptarget_type) { + v = builder.CreatePointerCast(v, ptarget_type); + } + return builder.CreateLoad(v); + } + } + assert(v->getType() == from_type); + + if (target_type == from_type) { return v; + } - assert(!v->getType()->isPointerTy()); + if ((target_type->isIntegerTy() && from_type->isIntegerTy()) || + (target_type->isFloatingPointTy() && from_type->isFloatingPointTy()) || + (target_type->isPointerTy() && from_type->isPointerTy())) { + if (target_type->isPointerTy()) { + return builder.CreatePointerCast(v, target_type); + } + if (target_type->isFloatingPointTy()) { + if (target_type->getPrimitiveSizeInBits() > from_type->getPrimitiveSizeInBits()) { + return builder.CreateFPExt(v, target_type); + } + else if (target_type->getPrimitiveSizeInBits() < from_type->getPrimitiveSizeInBits()) { + return builder.CreateFPTrunc(v, target_type); + } + else { + return v; + } + } + assert(target_type->isIntegerTy()); + if (issigned) + return builder.CreateSExtOrTrunc(v, target_type); + else + return builder.CreateZExtOrTrunc(v, target_type); + } + // Vector or non-Aggregate types // LLVM doesn't allow us to cast values directly, so // we need to use this alloca trick - Value *mem = builder.CreateAlloca(target_type); - builder.CreateStore(v,builder.CreatePointerCast(mem,v->getType()->getPointerTo())); + Value *mem = builder.CreateAlloca(target_type); // XXX: don't frob the stack + builder.CreateStore(v, builder.CreatePointerCast(mem, from_type->getPointerTo())); return builder.CreateLoad(mem); } @@ -265,27 +316,36 @@ Value *llvm_type_rewrite(Value *v, Type *target_type, jl_value_t *ty, bool isret static Value *julia_to_native(Type *ty, jl_value_t *jt, Value *jv, jl_value_t *aty, bool addressOf, bool byRef, bool inReg, - bool needCopy, + bool needCopy, bool tojulia, int argn, jl_codectx_t *ctx, bool *needStackRestore) { Type *vt = 
jv->getType(); - // We're passing any + // We're passing Any if (ty == jl_pvalue_llvmt) { return boxed(jv,ctx); } + + if (!tojulia && julia_type_to_llvm(aty)->isAggregateType()) { + // this value is expected to be a pointer in the julia codegen, + // so it needs to be extracted first if not tojulia + vt = vt->getContainedType(0); + } + if (ty == vt && !addressOf && !byRef) { return jv; } + if (vt != jl_pvalue_llvmt) { // argument value is unboxed + if (vt != jv->getType()) + jv = builder.CreateLoad(jv); if (addressOf || (byRef && inReg)) { - if (ty->isPointerTy() && ty->getContainedType(0)==vt) { + if (ty->isPointerTy() && ty->getContainedType(0) == vt) { // pass the address of an alloca'd thing, not a box // since those are immutable. - *needStackRestore = true; - Value *slot = builder.CreateAlloca(vt); + Value *slot = emit_static_alloca(vt, ctx); builder.CreateStore(jv, slot); return builder.CreateBitCast(slot, ty); } @@ -299,29 +359,30 @@ static Value *julia_to_native(Type *ty, jl_value_t *jt, Value *jv, return builder.CreateBitCast(jv, ty); } else { - *needStackRestore = true; - Value *mem = builder.CreateAlloca(ty); + Value *mem = emit_static_alloca(ty, ctx); builder.CreateStore(jv,builder.CreateBitCast(mem,vt->getPointerTo())); return mem; } } } else if (vt->isStructTy()) { - if (!byRef) { - return jv; + if (byRef) { + Value *mem = emit_static_alloca(vt, ctx); + builder.CreateStore(jv, mem); + return mem; } else { - *needStackRestore = true; - Value *mem = builder.CreateAlloca(vt); - builder.CreateStore(jv,mem); - return mem; + return jv; } } emit_error("ccall: argument type did not match declaration", ctx); } + + // argument value is boxed if (jl_is_tuple(jt)) { - return emit_unbox(ty,jv,jt); + emit_error("ccall: unimplemented: boxed tuple argument type", ctx); + return jv; // TODO: this is wrong } if (jl_is_cpointer_type(jt) && addressOf) { assert(ty->isPointerTy()); @@ -349,7 +410,7 @@ static Value *julia_to_native(Type *ty, jl_value_t *jt, Value *jv, *needStackRestore = true; AllocaInst *ai = builder.CreateAlloca(T_int8, nbytes); ai->setAlignment(16); - builder.CreateMemCpy(ai, builder.CreateBitCast(jv, T_pint8), nbytes, 1); + builder.CreateMemCpy(ai, builder.CreateBitCast(jv, T_pint8), nbytes, 0); return builder.CreateBitCast(ai, ty); } // emit maybe copy @@ -375,7 +436,7 @@ static Value *julia_to_native(Type *ty, jl_value_t *jt, Value *jv, false)); AllocaInst *ai = builder.CreateAlloca(T_int8, nbytes); ai->setAlignment(16); - builder.CreateMemCpy(ai, builder.CreatePointerCast(jv, T_pint8), nbytes, 1); + builder.CreateMemCpy(ai, builder.CreatePointerCast(jv, T_pint8), nbytes, 0); Value *p2 = builder.CreatePointerCast(ai, ty); builder.CreateBr(afterBB); builder.SetInsertPoint(afterBB); @@ -393,21 +454,19 @@ static Value *julia_to_native(Type *ty, jl_value_t *jt, Value *jv, msg << argn; emit_typecheck(jv, jt, msg.str(), ctx); } - Value *p = data_pointer(jv); - Value *pjv = builder.CreatePointerCast(p, PointerType::get(ty,0)); + Value *pjv = builder.CreatePointerCast(jv, PointerType::get(ty,0)); if (byRef) { if (!needCopy) { return pjv; } else { - *needStackRestore = true; - Value *mem = builder.CreateAlloca(ty); - builder.CreateMemCpy(mem,pjv,(uint64_t)jl_datatype_size(jt),(uint64_t)((jl_datatype_t*)jt)->alignment); + Value *mem = emit_static_alloca(ty, ctx); + builder.CreateMemCpy(mem, pjv, (uint64_t)jl_datatype_size(jt), (uint64_t)((jl_datatype_t*)jt)->alignment); return mem; } } else { - return builder.CreateLoad(pjv,false); + return pjv; // lazy load by llvm_type_rewrite } } 
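
Editor's aside (not part of the patch): the llvm_type_rewrite hunk above falls back to an "alloca trick" when LLVM will not bitcast a value directly between two same-sized types (for example struct <-> vector). A minimal standalone sketch of that pattern follows; the helper name is invented for illustration, and `builder` is assumed to be an llvm::IRBuilder<> positioned at the current insertion point, exactly as in the surrounding codegen. Note the patch itself flags this call site with "XXX: don't frob the stack" because, unlike emit_static_alloca, it allocates at the current insertion point rather than in the entry block.

#include <llvm/IR/IRBuilder.h>

static llvm::Value *reinterpret_via_alloca(llvm::IRBuilder<> &builder,
                                           llvm::Value *v, llvm::Type *to)
{
    // Allocate a stack slot of the target type, store the value through a
    // pointer cast to its source type, then load it back as the target type.
    // mem2reg/SROA will normally fold this round-trip away after optimization.
    llvm::Value *mem = builder.CreateAlloca(to);
    builder.CreateStore(v, builder.CreatePointerCast(mem, v->getType()->getPointerTo()));
    return builder.CreateLoad(mem);
}
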
@@ -661,7 +720,9 @@ static Value *emit_llvmcall(jl_value_t **args, size_t nargs, jl_codectx_t *ctx) make_gcroot(arg, ctx); } #endif - argvals[i] = julia_to_native(t, tti, arg, expr_type(argi, ctx), false, false, false, false, i, ctx, NULL); + Value *v = julia_to_native(t, tti, arg, expr_type(argi, ctx), false, false, false, false, false, i, ctx, NULL); + bool issigned = jl_signed_type && jl_subtype(tti, (jl_value_t*)jl_signed_type, 0); + argvals[i] = llvm_type_rewrite(v, t, t, false, false, issigned); } Function *f; @@ -762,7 +823,7 @@ static Value *emit_llvmcall(jl_value_t **args, size_t nargs, jl_codectx_t *ctx) jl_error("Return type of llvmcall'ed function does not match declared return type"); } - return mark_julia_type(inst,rtt); + return mark_julia_type(emit_reg2mem(inst, ctx), rtt); } // --- code generator for ccall itself --- @@ -786,6 +847,7 @@ typedef AttributeSet attr_type; static std::string generate_func_sig(Type **lrt, Type **prt, int &sret, std::vector &fargt, std::vector &fargt_sig, + Type *&fargt_vasig, std::vector &inRegList, std::vector &byRefList, attr_type &attributes, jl_value_t *rt, jl_svec_t *tt) @@ -805,7 +867,7 @@ static std::string generate_func_sig(Type **lrt, Type **prt, int &sret, *prt = *lrt = T_void; } else { - *prt = preferred_llvm_type(rt,true); + *prt = preferred_llvm_type(rt, true); if (*prt == NULL) *prt = *lrt; @@ -892,7 +954,7 @@ static std::string generate_func_sig(Type **lrt, Type **prt, int &sret, fargt.push_back(t); - Type *pat = preferred_llvm_type(tti,false); + Type *pat = preferred_llvm_type(tti, false); if (pat != NULL) t = pat; else if (byRef) @@ -901,6 +963,9 @@ static std::string generate_func_sig(Type **lrt, Type **prt, int &sret, if (!current_isVa) { fargt_sig.push_back(t); } + else { + fargt_vasig = t; + } } if (retattrs.hasAttributes()) @@ -1128,12 +1193,13 @@ static Value *emit_ccall(jl_value_t **args, size_t nargs, jl_codectx_t *ctx) std::vector fargt(0); std::vector fargt_sig(0); + Type *fargt_vasig = NULL; std::vector inRegList(0); std::vector byRefList(0); attr_type attrs; Type *prt = NULL; int sret = 0; - std::string err_msg = generate_func_sig(&lrt, &prt, sret, fargt, fargt_sig, inRegList, byRefList, attrs, rt, tt); + std::string err_msg = generate_func_sig(&lrt, &prt, sret, fargt, fargt_sig, fargt_vasig, inRegList, byRefList, attrs, rt, tt); if (!err_msg.empty()) { JL_GC_POP(); emit_error(err_msg,ctx); @@ -1143,19 +1209,21 @@ static Value *emit_ccall(jl_value_t **args, size_t nargs, jl_codectx_t *ctx) // emit arguments Value **argvals = (Value**) alloca(((nargs-3)/2 + sret)*sizeof(Value*)); Value *result = NULL; + bool needStackRestore = false; // First, if the ABI requires us to provide the space for the return // argument, allocate the box and store that as the first argument type if (sret) { - result = emit_new_struct(rt,1,NULL,ctx); + result = emit_new_struct(rt,1,NULL,ctx); // TODO: is it valid to be creating an incomplete type this way? 
assert(result != NULL && "Type was not concrete"); if (!result->getType()->isPointerTy()) { - Value *mem = builder.CreateAlloca(lrt); + Value *mem = emit_static_alloca(lrt, ctx); builder.CreateStore(result, mem); result = mem; argvals[0] = result; } else { + // XXX: result needs a GC root here if result->getType() == jl_pvalue_llvmt argvals[0] = builder.CreateBitCast(result, fargt_sig[0]); } } @@ -1164,7 +1232,6 @@ static Value *emit_ccall(jl_value_t **args, size_t nargs, jl_codectx_t *ctx) int last_depth = ctx->gc.argDepth; // number of parameters to the c function - bool needStackRestore = false; for(i=4; i < nargs+1; i+=2) { // Current C function parameter size_t ai = (i-4)/2; @@ -1238,11 +1305,12 @@ static Value *emit_ccall(jl_value_t **args, size_t nargs, jl_codectx_t *ctx) #endif bool nSR=false; + bool issigned = jl_signed_type && jl_subtype(jargty, (jl_value_t*)jl_signed_type, 0); argvals[ai + sret] = llvm_type_rewrite( julia_to_native(largty, jargty, arg, expr_type(argi, ctx), addressOf, byRefList[ai], inRegList[ai], - need_private_copy(jargty, byRefList[ai]), ai + 1, ctx, &nSR), - fargt_sig.size() > ai + sret ? fargt_sig[ai + sret] : preferred_llvm_type(jargty, false), - jargty, false); + need_private_copy(jargty, byRefList[ai]), false, ai + 1, ctx, &nSR), + largty, ai + sret < fargt_sig.size() ? fargt_sig[ai + sret] : fargt_vasig, + false, byRefList[ai], issigned); needStackRestore |= nSR; } @@ -1330,7 +1398,7 @@ static Value *emit_ccall(jl_value_t **args, size_t nargs, jl_codectx_t *ctx) JL_GC_POP(); // Finally we need to box the result into julia type // However, if we have already created a box for the return - // type because we the ABI required us to pass a pointer (sret), + // type because the ABI required us to pass a pointer (sret), // then we do not need to do this. if (!sret) { if (lrt == T_void) @@ -1338,32 +1406,30 @@ static Value *emit_ccall(jl_value_t **args, size_t nargs, jl_codectx_t *ctx) else if (lrt->isStructTy()) { //fprintf(stderr, "ccall rt: %s -> %s\n", f_name, ((jl_tag_type_t*)rt)->name->name->name); assert(jl_is_structtype(rt)); - Value *newst = emit_new_struct(rt,1,NULL,ctx); assert(newst != NULL && "Type was not concrete"); - if (newst->getType()->isPointerTy()) { - builder.CreateStore(result,builder.CreateBitCast(newst, prt->getPointerTo())); - result = newst; - } - else if (lrt != prt) { - result = llvm_type_rewrite(result,lrt,rt,true); - } - // otherwise it's fine to pass this by value. Technically we could do alloca/store/load, - // but why should we? 
+ assert(newst->getType()->isPointerTy()); + builder.CreateStore(result, builder.CreateBitCast(newst, prt->getPointerTo())); + result = newst; } else { if (prt->getPrimitiveSizeInBits() == lrt->getPrimitiveSizeInBits()) { result = builder.CreateBitCast(result,lrt); } else { - Value *rloc = builder.CreateAlloca(prt); - builder.CreateStore(result, rloc); - result = builder.CreateLoad(builder.CreatePointerCast(rloc, PointerType::get(lrt,0))); + Value *rloc = emit_static_alloca(lrt, ctx); + builder.CreateStore(result, builder.CreatePointerCast(rloc, PointerType::get(prt,0))); + if (lrt->isAggregateType()) { + result = rloc; + } + else { + result = builder.CreateLoad(rloc); + } } } } else { - if (result->getType() != jl_pvalue_llvmt) + if (result->getType() != jl_pvalue_llvmt && !lrt->isAggregateType()) result = builder.CreateLoad(result); } diff --git a/src/cgutils.cpp b/src/cgutils.cpp index 71c6ec5320438..ed59610692b73 100644 --- a/src/cgutils.cpp +++ b/src/cgutils.cpp @@ -613,8 +613,6 @@ static Type *julia_struct_to_llvm(jl_value_t *jt) else { if (isvector && lasttype != T_int1 && lasttype != T_void) { // TODO: currently we get LLVM assertion failures for other vector sizes - // TODO: DataTypes do not yet support vector alignment requirements, - // so only use array ABI for tuples for now. /* bool validVectorSize = ntypes <= 6 || (ntypes&1)==0; if (lasttype->isSingleValueType() && !lasttype->isVectorTy() && validVectorSize) @@ -1032,8 +1030,9 @@ static Value *emit_bounds_check(Value *a, jl_value_t *ty, Value *i, Value *len, builder.CreateCall3(prepare_call(jlvboundserror_func), a, len, i); #endif } - else if (ty) { + else if (ty && a->getType() != jl_pvalue_llvmt) { if (!a->getType()->isPtrOrPtrVectorTy()) { + // CreateAlloca is OK here since we are on an error branch Value *tempSpace = builder.CreateAlloca(a->getType()); builder.CreateStore(a, tempSpace); a = tempSpace; @@ -1067,6 +1066,22 @@ static Value *emit_bounds_check(Value *a, jl_value_t *ty, Value *i, Value *len, // --- loading and storing --- +static AllocaInst *emit_static_alloca(Type *lty, jl_codectx_t *ctx) { + return new AllocaInst(lty, "", /*InsertBefore=*/ctx->gc.gcframe); +} + +static Value *emit_reg2mem(Value *v, jl_codectx_t *ctx) { + // eagerly put this back onto the stack + // llvm mem2reg pass will remove this if unneeded + if (v->getType()->isAggregateType() && !type_is_ghost(v->getType())) { + Value *loc = emit_static_alloca(v->getType(), ctx); + builder.CreateStore(v, loc); + return loc; + } + return v; +} + + static Value *ghostValue(jl_value_t *ty); static Value *typed_load(Value *ptr, Value *idx_0based, jl_value_t *jltype, @@ -1077,16 +1092,28 @@ static Value *typed_load(Value *ptr, Value *idx_0based, jl_value_t *jltype, if (elty == T_void) return ghostValue(jltype); bool isbool=false; - if (elty==T_int1) { elty = T_int8; isbool=true; } + if (elty == T_int1) { + elty = T_int8; + isbool = true; + } Value *data; if (ptr->getType()->getContainedType(0) != elty) data = builder.CreateBitCast(ptr, PointerType::get(elty, 0)); else data = ptr; - Value *elt = tbaa_decorate(tbaa, builder.CreateAlignedLoad(builder.CreateGEP(data, idx_0based), - alignment, false)); - if (elty == jl_pvalue_llvmt) - null_pointer_check(elt, ctx); + if (idx_0based) + data = builder.CreateGEP(data, idx_0based); + Value *elt; + if (elty->isAggregateType() && tbaa == tbaa_immut && !alignment) { // can lazy load on demand, no copy needed + elt = data; + } + else { + elt = tbaa_decorate(tbaa, builder.CreateAlignedLoad(data, alignment, false)); + if 
(elty == jl_pvalue_llvmt) { + null_pointer_check(elt, ctx); + } + elt = emit_reg2mem(elt, ctx); + } if (isbool) return builder.CreateTrunc(elt, T_int1); return mark_julia_type(elt, jltype); @@ -1103,7 +1130,9 @@ static void typed_store(Value *ptr, Value *idx_0based, Value *rhs, assert(elty != NULL); if (elty == T_void) return; - if (elty==T_int1) { elty = T_int8; } + if (elty == T_int1) { + elty = T_int8; + } if (jl_isbits(jltype) && ((jl_datatype_t*)jltype)->size > 0) { rhs = emit_unbox(elty, rhs, jltype); } @@ -1222,9 +1251,9 @@ static Value *emit_getfield_unknownidx(Value *strct, Value *idx, jl_datatype_t * { Type *llvm_st = strct->getType(); size_t nfields = jl_datatype_nfields(stt); - if (llvm_st == jl_pvalue_llvmt) { //boxed + if (llvm_st == jl_pvalue_llvmt || llvm_st->isPointerTy()) { //boxed if (is_datatype_all_pointers(stt)) { - idx = emit_bounds_check(strct, NULL, idx, ConstantInt::get(T_size, nfields), ctx); + idx = emit_bounds_check(strct, (jl_value_t*)stt, idx, ConstantInt::get(T_size, nfields), ctx); Value *fld = tbaa_decorate(tbaa_user, builder.CreateLoad( builder.CreateGEP( builder.CreateBitCast(strct, jl_ppvalue_llvmt), @@ -1234,13 +1263,13 @@ static Value *emit_getfield_unknownidx(Value *strct, Value *idx, jl_datatype_t * return fld; } else if (is_tupletype_homogeneous(stt->types)) { - assert(nfields > 0); // nf==0 trapped by all_pointers case + assert(nfields > 0); // nf == 0 trapped by all_pointers case jl_value_t *jt = jl_field_type(stt, 0); - idx = emit_bounds_check(strct, NULL, idx, ConstantInt::get(T_size, nfields), ctx); + idx = emit_bounds_check(strct, (jl_value_t*)stt, idx, ConstantInt::get(T_size, nfields), ctx); Value *ptr = data_pointer(strct); return typed_load(ptr, idx, jt, ctx, stt->mutabl ? tbaa_user : tbaa_immut); } - else { + else if (llvm_st == jl_pvalue_llvmt) { idx = builder.CreateSub(idx, ConstantInt::get(T_size, 1)); #ifdef LLVM37 Value *fld = builder.CreateCall(prepare_call(jlgetnthfieldchecked_func), { strct, idx }); @@ -1251,33 +1280,26 @@ static Value *emit_getfield_unknownidx(Value *strct, Value *idx, jl_datatype_t * } } else if (is_tupletype_homogeneous(stt->types)) { - // TODO: move these allocas to the first basic block instead of - // frobbing the stack - Value *fld; + assert(jl_isbits(stt)); if (nfields == 0) { // TODO: pass correct thing to emit_bounds_check ? idx = emit_bounds_check(tbaa_decorate(tbaa_const, builder.CreateLoad(prepare_global(jlemptysvec_var))), - NULL, idx, ConstantInt::get(T_size, nfields), ctx); - fld = UndefValue::get(jl_pvalue_llvmt); + (jl_value_t*)stt, idx, ConstantInt::get(T_size, nfields), ctx); + return UndefValue::get(jl_pvalue_llvmt); } - else { - Instruction *stacksave = - CallInst::Create(Intrinsic::getDeclaration(jl_Module,Intrinsic::stacksave)); - builder.Insert(stacksave); - Value *tempSpace = builder.CreateAlloca(llvm_st); - builder.CreateStore(strct, tempSpace); - jl_value_t *jt = jl_field_type(stt,0); - if (!stt->uid) { - // add root for types not cached - jl_add_linfo_root(ctx->linfo, (jl_value_t*)stt); - } - // TODO: pass correct thing to emit_bounds_check ? - idx = emit_bounds_check(tempSpace, (jl_value_t*)stt, idx, ConstantInt::get(T_size, nfields), ctx); - fld = typed_load(tempSpace, idx, jt, ctx, stt->mutabl ? 
tbaa_user : tbaa_immut); - builder.CreateCall(Intrinsic::getDeclaration(jl_Module,Intrinsic::stackrestore), - stacksave); + jl_value_t *jt = jl_field_type(stt,0); + if (!stt->uid) { + // add root for types not cached + jl_add_linfo_root(ctx->linfo, (jl_value_t*)stt); + } + // TODO: pass correct thing to emit_bounds_check ? + Value *idx0 = emit_bounds_check(strct, (jl_value_t*)stt, idx, ConstantInt::get(T_size, nfields), ctx); + if (type_is_ghost(llvm_st)) { + return mark_julia_type(UndefValue::get(NoopType), jt); } - return fld; + // llvm::VectorType + Value *fld = builder.CreateExtractElement(strct, idx0); + return mark_julia_type(fld, jt); } return NULL; } @@ -1305,13 +1327,14 @@ static Value *emit_getfield_knownidx(Value *strct, unsigned idx, jl_datatype_t * return typed_load(addr, ConstantInt::get(T_size, 0), jfty, ctx, tbaa); } } + else if (strct->getType()->isPointerTy()) { + Value *addr = builder.CreateConstInBoundsGEP2_32(strct, 0, idx); + assert(!jt->mutabl); + return typed_load(addr, NULL, jfty, ctx, tbaa_immut); + } else { - if (strct->getType()->isVectorTy()) { - fldv = builder.CreateExtractElement(strct, ConstantInt::get(T_int32, idx)); - } - else { - fldv = builder.CreateExtractValue(strct, ArrayRef(&idx,1)); - } + assert(strct->getType()->isVectorTy()); + fldv = builder.CreateExtractElement(strct, ConstantInt::get(T_int32, idx)); if (jfty == (jl_value_t*)jl_bool_type) { fldv = builder.CreateTrunc(fldv, T_int1); } @@ -1496,6 +1519,7 @@ static Value *emit_array_nd_index(Value *a, jl_value_t *ex, size_t nd, jl_value_ ctx->f->getBasicBlockList().push_back(failBB); builder.SetInsertPoint(failBB); + // CreateAlloca is OK here since we are on an error branch Value *tmp = builder.CreateAlloca(T_size, ConstantInt::get(T_size, nidxs)); for(size_t k=0; k < nidxs; k++) { builder.CreateStore(idxs[k], builder.CreateGEP(tmp, ConstantInt::get(T_size, k))); @@ -1529,8 +1553,7 @@ static Value *tpropagate(Value *a, Value *b) static Value *init_bits_value(Value *newv, Value *jt, Type *t, Value *v) { builder.CreateStore(jt, builder.CreateBitCast(emit_typeptr_addr(newv), jl_ppvalue_llvmt)); - // TODO: stricter alignment if possible - builder.CreateAlignedStore(v, builder.CreateBitCast(data_pointer(newv), PointerType::get(t,0)), sizeof(void*)); + builder.CreateStore(v, builder.CreateBitCast(data_pointer(newv), PointerType::get(t,0))); return newv; } @@ -1594,12 +1617,9 @@ static jl_value_t *static_constant_instance(Constant *constant, jl_value_t *jt) assert(jl_is_tuple_type(jt)); size_t nargs = 0; - ConstantArray *carr = NULL; ConstantStruct *cst = NULL; ConstantVector *cvec = NULL; - if ((carr = dyn_cast(constant)) != NULL) - nargs = carr->getType()->getNumElements(); - else if ((cst = dyn_cast(constant)) != NULL) + if ((cst = dyn_cast(constant)) != NULL) nargs = cst->getType()->getNumElements(); else if ((cvec = dyn_cast(constant)) != NULL) nargs = cvec->getType()->getNumElements(); @@ -1706,6 +1726,9 @@ static Value *boxed(Value *v, jl_codectx_t *ctx, jl_value_t *jt) if (jb == jl_char_type) return call_with_unsigned(box_char_func, v); if (jb == jl_gensym_type) { unsigned zero = 0; + if (v->getType()->isPointerTy()) { + v = builder.CreateLoad(v); + } v = builder.CreateExtractValue(v, ArrayRef(&zero,1)); return call_with_unsigned(box_gensym_func, v); } @@ -1719,7 +1742,12 @@ static Value *boxed(Value *v, jl_codectx_t *ctx, jl_value_t *jt) assert(jb->instance != NULL); return literal_pointer_val(jb->instance); } - return 
allocate_box_dynamic(literal_pointer_val(jt),ConstantInt::get(T_size,jl_datatype_size(jt)),v); + + Type *llvmt = julia_type_to_llvm(jt); + if (llvmt->isAggregateType() && v->getType()->isPointerTy()) { + v = builder.CreateLoad(v); + } + return allocate_box_dynamic(literal_pointer_val(jt), ConstantInt::get(T_size, jl_datatype_size(jt)), v); } static void emit_cpointercheck(Value *x, const std::string &msg, @@ -1852,13 +1880,13 @@ static Value *emit_new_struct(jl_value_t *ty, size_t nargs, jl_value_t **args, j Type *lt = julia_type_to_llvm(ty); size_t na = nargs-1 < nf ? nargs-1 : nf; if (lt == T_void) { - for(size_t i=0; i < na; i++) + for (size_t i=0; i < na; i++) emit_unboxed(args[i+1], ctx); // do side effects return mark_julia_type(UndefValue::get(NoopType),ty); } Value *strct = UndefValue::get(lt); unsigned idx = 0; - for(size_t i=0; i < na; i++) { + for (size_t i=0; i < na; i++) { jl_value_t *jtype = jl_svecref(sty->types,i); Type *fty = julia_type_to_llvm(jtype); Value *fval = emit_unboxed(args[i+1], ctx); @@ -1873,7 +1901,7 @@ static Value *emit_new_struct(jl_value_t *ty, size_t nargs, jl_value_t **args, j } idx++; } - return mark_julia_type(strct,ty); + return mark_julia_type(emit_reg2mem(strct, ctx), ty); } Value *f1 = NULL; int fieldStart = ctx->gc.argDepth; diff --git a/src/codegen.cpp b/src/codegen.cpp index 27ce56dec4c81..1aa0a2f26949e 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -303,6 +303,7 @@ static Function *jlalloc3w_func; static Function *jl_alloc_svec_func; static Function *jlsubtype_func; static Function *setjmp_func; +static Function *memcmp_func; static Function *box_int8_func; static Function *box_uint8_func; static Function *box_int16_func; @@ -641,7 +642,6 @@ static Function *to_function(jl_lambda_info_t *li) Function *f = NULL; JL_TRY { f = emit_function(li); - //jl_printf(JL_STDOUT, "emit %s\n", li->name->name); //n_emit++; } JL_CATCH { @@ -676,8 +676,10 @@ static Function *to_function(jl_lambda_info_t *li) FPM->run(*f); //n_compile++; // print out the function's LLVM code + //jl_static_show(JL_STDERR, (jl_value_t*)li); //jl_printf(JL_STDERR, "%s:%d\n", // ((jl_sym_t*)li->file)->name, li->line); + //f->dump(); //if (verifyFunction(*f,PrintMessageAction)) { // f->dump(); // abort(); @@ -1761,7 +1763,7 @@ static Value *emit_lambda_closure(jl_value_t *expr, jl_codectx_t *ctx) } } else { - val = builder.CreateLoad(l, false); + val = builder.CreateLoad(l); } } captured[i+1] = val; @@ -1898,13 +1900,25 @@ static Value *emit_f_is(jl_value_t *rt1, jl_value_t *rt2, Type *at2 = varg2->getType(); if (at1 != jl_pvalue_llvmt && at2 != jl_pvalue_llvmt) { assert(at1 == at2); + assert(!at1->isAggregateType()); + assert(!ptr_comparable); + if (at1->isPointerTy()) { + Type *elty = julia_type_to_llvm(rt1); + if (elty->isAggregateType()) { + answer = builder.CreateCall3(memcmp_func, + builder.CreatePointerCast(varg1, T_pint8), + builder.CreatePointerCast(varg2, T_pint8), + builder.CreateTrunc(ConstantExpr::getSizeOf(elty), T_size)); + answer = builder.CreateICmpEQ(answer, ConstantInt::get(T_int32, 0)); + goto done; + } + } if (at1->isIntegerTy() || at1->isPointerTy() || at1->isFloatingPointTy()) { answer = builder.CreateICmpEQ(JL_INT(varg1),JL_INT(varg2)); goto done; } - bool isStructOrArray = at1->isStructTy() || at1->isArrayTy(); - if ((isStructOrArray || at1->isVectorTy()) && !ptr_comparable) { + if (at1->isVectorTy()) { assert(jl_is_datatype(rt1)); jl_svec_t *types = ((jl_datatype_t*)rt1)->types; answer = ConstantInt::get(T_int1, 1); @@ -1912,25 +1926,11 @@ static 
Value *emit_f_is(jl_value_t *rt1, jl_value_t *rt2, for(unsigned i=0; i < l; i++) { jl_value_t *fldty = jl_svecref(types,i); Value *subAns; - if (isStructOrArray) { - if (julia_type_to_llvm(fldty) != T_void) { - subAns = - emit_f_is(fldty, fldty, NULL, NULL, - builder.CreateExtractValue(varg1, ArrayRef(&i,1)), - builder.CreateExtractValue(varg2, ArrayRef(&i,1)), - ctx); - } - else { - continue; - } - } - else { - subAns = - emit_f_is(fldty, fldty, NULL, NULL, - builder.CreateExtractElement(varg1, ConstantInt::get(T_int32,i)), - builder.CreateExtractElement(varg2, ConstantInt::get(T_int32,i)), - ctx); - } + subAns = + emit_f_is(fldty, fldty, NULL, NULL, + builder.CreateExtractElement(varg1, ConstantInt::get(T_int32,i)), + builder.CreateExtractElement(varg2, ConstantInt::get(T_int32,i)), + ctx); answer = builder.CreateAnd(answer, subAns); } goto done; @@ -2314,7 +2314,7 @@ static Value *emit_known_call(jl_value_t *ff, jl_value_t **args, size_t nargs, idx = builder.CreateAdd(idx, ConstantInt::get(T_size, ctx->nReqArgs)); JL_GC_POP(); return tbaa_decorate(tbaa_user, builder. - CreateLoad(builder.CreateGEP(ctx->argArray,idx),false)); + CreateLoad(builder.CreateGEP(ctx->argArray,idx))); } if (fldt == (jl_value_t*)jl_long_type && jl_is_leaf_type((jl_value_t*)stt)) { @@ -2513,16 +2513,24 @@ static Value *emit_call_function_object(jl_function_t *f, Value *theF, Value *th make_gcroot(argvals[idx], ctx); } } + else if (et->isAggregateType()) { + assert(at == PointerType::get(et, 0)); + Value *arg = emit_unboxed(args[i+1], ctx); + if (arg->getType() == at && jl_is_immutable_datatype(jt)) // can lazy load on demand, no copy needed + argvals[idx] = arg; + else + argvals[idx] = emit_reg2mem(emit_unbox(et, arg, jt), ctx); + } else { assert(at == et); - argvals[idx] = emit_unbox(at, emit_unboxed(args[i+1], ctx), jt); + argvals[idx] = emit_unbox(et, emit_unboxed(args[i+1], ctx), jt); assert(dyn_cast(argvals[idx]) == 0); } idx++; } assert(idx == nfargs); result = builder.CreateCall(prepare_call(cf), ArrayRef(&argvals[0],nfargs)); - result = mark_julia_type(result, jl_ast_rettype(f->linfo, f->linfo->ast)); + result = mark_julia_type(emit_reg2mem(result, ctx), jl_ast_rettype(f->linfo, f->linfo->ast)); } else { result = emit_jlcall(theFptr, theF, &args[1], nargs, ctx); @@ -2762,7 +2770,7 @@ static Value *var_binding_pointer(jl_sym_t *s, jl_binding_t **pbnd, Value *l = vi.memvalue; if (l == NULL) return NULL; if (isBoxed(s, ctx)) { - return builder.CreatePointerCast(builder.CreateLoad(l,false), jl_ppvalue_llvmt); + return builder.CreatePointerCast(builder.CreateLoad(l), jl_ppvalue_llvmt); } return l; } @@ -2817,7 +2825,7 @@ static Value *emit_var(jl_sym_t *sym, jl_value_t *ty, jl_codectx_t *ctx, bool is // double-check that a global variable is actually defined. this // can be a problem in parallel when a definition is missing on // one machine. 
- return tpropagate(bp, builder.CreateLoad(bp, false)); + return tpropagate(bp, builder.CreateLoad(bp)); } return emit_checked_var(bp, sym, ctx); } @@ -2843,6 +2851,8 @@ static Value *emit_var(jl_sym_t *sym, jl_value_t *ty, jl_codectx_t *ctx, bool is assert(jbp == NULL); if (arg != NULL || // arguments are always defined ((!is_var_closed(sym, ctx) || !vi.isAssigned) && !vi.usedUndef)) { + if (bp->getType()->getContainedType(0)->isAggregateType()) + return bp; Value *theLoad = builder.CreateLoad(bp, vi.isVolatile); if (vi.closureidx > -1 && !(vi.isAssigned && vi.isCaptured)) theLoad = tbaa_decorate(tbaa_const, (Instruction*)theLoad); @@ -2915,7 +2925,8 @@ static void emit_assignment(jl_value_t *l, jl_value_t *r, jl_codectx_t *ctx) jl_value_t *gensym_types = jl_lam_gensyms(ctx->ast); jl_value_t *declType = (jl_is_array(gensym_types) ? jl_cellref(gensym_types, idx) : (jl_value_t*)jl_any_type); Value *rval = emit_assignment(bp, r, declType, false, true, ctx); - ctx->gensym_SAvalues.at(idx) = rval; // now gensym_SAvalues[idx] actually contains the SAvalue + if (!julia_type_to_llvm(declType)->isAggregateType()) + ctx->gensym_SAvalues.at(idx) = rval; // now gensym_SAvalues[idx] actually contains the SAvalue assert(ctx->gensym_assigned.at(idx) = true); return; } @@ -2955,7 +2966,10 @@ static void emit_assignment(jl_value_t *l, jl_value_t *r, jl_codectx_t *ctx) (!vi.isCaptured && !vi.isArgument && !vi.usedUndef && !vi.isVolatile))) { // use SSA value instead of GC frame load for var access - vi.SAvalue = rval; + if (bp && bp->getType()->getContainedType(0)->isAggregateType()) + vi.SAvalue = bp; + else + vi.SAvalue = rval; } if (!isa(rval) && builder.GetInsertBlock()->getTerminator() == NULL) { @@ -3385,6 +3399,7 @@ static Value *alloc_local(jl_sym_t *s, jl_codectx_t *ctx) Type *vtype = julia_struct_to_llvm(jt); assert(vtype != jl_pvalue_llvmt); if (!type_is_ghost(vtype)) { + // CreateAlloca is OK here because alloc_local is only called during prologue setup lv = builder.CreateAlloca(vtype, 0, s->name); if (vtype != jl_pvalue_llvmt) lv = mark_julia_type(lv, jt); @@ -3422,6 +3437,7 @@ static void maybe_alloc_arrayvar(jl_sym_t *s, jl_codectx_t *ctx) Type *elt = julia_type_to_llvm(jl_tparam0(jt)); if (elt == T_void) return; + // CreateAlloca is OK here because maybe_alloc_arrayvar is only called in the prologue setup av.dataptr = builder.CreateAlloca(PointerType::get(elt,0)); av.len = builder.CreateAlloca(T_size); for(int i=0; i < ndims-1; i++) @@ -3613,12 +3629,13 @@ static Function *gen_cfun_wrapper(jl_function_t *ff, jl_value_t *jlrettype, jl_t std::vector fargt(0); std::vector fargt_sig(0); + Type* fargt_vasig; std::vector inRegList(0); std::vector byRefList(0); attr_type attrs; Type *prt = NULL; int sret = 0; - std::string err_msg = generate_func_sig(&crt, &prt, sret, fargt, fargt_sig, inRegList, byRefList, attrs, + std::string err_msg = generate_func_sig(&crt, &prt, sret, fargt, fargt_sig, fargt_vasig, inRegList, byRefList, attrs, ((isref&1) ? 
(jl_value_t*)jl_any_type : jlrettype), argt->parameters); if (!err_msg.empty()) jl_error(err_msg.c_str()); @@ -3705,30 +3722,36 @@ static Function *gen_cfun_wrapper(jl_function_t *ff, jl_value_t *jlrettype, jl_t } } else { - if (fargt[i+sret] != fargt_sig[i+sret]) { - // undo whatever we did to this poor argument - val = llvm_type_rewrite(val, fargt[i+sret], jargty, false); - } - if (byRefList[i]) { - val = builder.CreateLoad(val,false); - } + // undo whatever we might have done to this poor argument + bool issigned = jl_signed_type && jl_subtype(jargty, (jl_value_t*)jl_signed_type, 0); + val = llvm_type_rewrite(val, val->getType(), fargt[i+sret], true, byRefList[i], issigned); } // figure out how to repack this type Type *at = specsig ? theFptr->getFunctionType()->getParamType(i) : jl_pvalue_llvmt; if (val->getType() != at) { if (at == jl_pvalue_llvmt) { - Value *mem = emit_new_struct(jargty, 1, NULL, &ctx); - if (mem->getType() == jl_pvalue_llvmt) { - builder.CreateStore(val, builder.CreateBitCast(mem, val->getType()->getPointerTo())); - val = mem; + assert(jl_is_leaf_type(jargty)); + if (jl_datatype_nfields(jargty) == 0) { + val = literal_pointer_val(jl_new_struct_uninit((jl_datatype_t*)jargty)); + } + else if (jl_isbits(jargty)) { + val = boxed(val, &ctx, jargty); } else { - val = boxed(mem, &ctx, jargty); + Value *mem = emit_allocobj(jl_datatype_size(jargty)); + builder.CreateStore(literal_pointer_val((jl_value_t*)jargty), + emit_typeptr_addr(mem)); + builder.CreateStore(val, builder.CreateBitCast(mem, val->getType()->getPointerTo())); + val = mem; } if (specsig) make_gcroot(val, &ctx); } + else if (val->getType()->isAggregateType()) { + val = emit_reg2mem(val, &ctx); + assert(val->getType() == at); + } else { val = emit_unbox(at, val, jargty); assert(dyn_cast(val) == 0); @@ -3774,12 +3797,13 @@ static Function *gen_cfun_wrapper(jl_function_t *ff, jl_value_t *jlrettype, jl_t builder.CreateRetVoid(); } else { - Value *v = julia_to_native(crt, jlrettype, r, jlrettype, 0, false, false, false, 0, &ctx, NULL); + Value *v = julia_to_native(crt, jlrettype, r, jlrettype, false, false, false, false, true, 0, &ctx, NULL); + bool issigned = jl_signed_type && jl_subtype(jlrettype, (jl_value_t*)jl_signed_type, 0); if (!sret) { - builder.CreateRet(llvm_type_rewrite(v, prt, jlrettype, true)); + builder.CreateRet(llvm_type_rewrite(v, crt, prt, false, false, issigned)); } else { - Value *sretVal = llvm_type_rewrite(v, fargt_sig[0], jlrettype, true); + Value *sretVal = llvm_type_rewrite(v, crt, fargt_sig[0], false, false, issigned); builder.CreateStore(sretVal, sretPtr); builder.CreateRetVoid(); } @@ -3844,10 +3868,13 @@ static Function *gen_jlcall_wrapper(jl_lambda_info_t *lam, jl_expr_t *ast, Funct continue; Value *argPtr = builder.CreateGEP(argArray, ConstantInt::get(T_size, i)); - Value *theArg = builder.CreateLoad(argPtr, false); + Value *theArg = builder.CreateLoad(argPtr); Value *theNewArg = theArg; if (lty != NULL && lty != jl_pvalue_llvmt) { - theNewArg = emit_unbox(lty, theArg, ty); + if (lty->isAggregateType()) + theNewArg = builder.CreatePointerCast(theArg, PointerType::get(lty,0)); + else + theNewArg = emit_unbox(lty, theArg, ty); } assert(dyn_cast(theNewArg) == NULL); args[idx] = theNewArg; @@ -4045,10 +4072,11 @@ static Function *emit_function(jl_lambda_info_t *lam) if (type_is_ghost(ty)) { // mark as a ghost for now, we'll revise this later if needed as a local ctx.vars[jl_decl_var(jl_cellref(largs,i))].isGhost = true; + continue; } - else { - fsig.push_back(ty); - } + if 
(ty->isAggregateType()) + ty = PointerType::get(ty,0); + fsig.push_back(ty); } Type *rt = (jlrettype == (jl_value_t*)jl_void_type ? T_void : julia_type_to_llvm(jlrettype)); f = Function::Create(FunctionType::get(rt, fsig, false), @@ -4529,7 +4557,7 @@ static Function *emit_function(jl_lambda_info_t *lam) } else { assert(argPtr != NULL); - theArg = builder.CreateLoad(argPtr, false); + theArg = builder.CreateLoad(argPtr); } Value *lv = vi.memvalue; @@ -4724,12 +4752,10 @@ static Function *emit_function(jl_lambda_info_t *lam) emit_gcpop(&ctx); if (do_malloc_log && lno != -1) mallocVisitLine(filename, lno); - if (builder.GetInsertBlock()->getTerminator() == NULL) { - if (retty == T_void) - builder.CreateRetVoid(); - else - builder.CreateRet(retval); - } + if (retty == T_void) + builder.CreateRetVoid(); + else + builder.CreateRet(retval); if (i != stmtslen-1) { BasicBlock *bb = BasicBlock::Create(getGlobalContext(), "ret", ctx.f); @@ -4812,10 +4838,13 @@ extern "C" void jl_fptr_to_llvm(void *fptr, jl_lambda_info_t *lam, int specsig) if (specsig) { // assumes !va jl_value_t *jlrettype = jl_ast_rettype(lam, (jl_value_t*)lam->ast); std::vector fsig(0); - for(size_t i=0; i < jl_nparams(lam->specTypes); i++) { + for (size_t i=0; i < jl_nparams(lam->specTypes); i++) { Type *ty = julia_type_to_llvm(jl_tparam(lam->specTypes,i)); - if (!type_is_ghost(ty)) - fsig.push_back(ty); + if (type_is_ghost(ty)) + continue; + if (ty->isAggregateType()) + ty = PointerType::get(ty,0); + fsig.push_back(ty); } Type *rt = (jlrettype == (jl_value_t*)jl_void_type ? T_void : julia_type_to_llvm(jlrettype)); Function *f = Function::Create(FunctionType::get(rt, fsig, false), Function::ExternalLinkage, funcName, @@ -5146,6 +5175,15 @@ static void init_julia_llvm_env(Module *m) setjmp_func->addFnAttr(Attribute::ReturnsTwice); add_named_global(setjmp_func, (void*)&jl_setjmp_f); + std::vector args_memcmp(0); + args_memcmp.push_back(T_pint8); + args_memcmp.push_back(T_pint8); + args_memcmp.push_back(T_size); + memcmp_func = + Function::Create(FunctionType::get(T_int32, args_memcmp, false), + Function::ExternalLinkage, "memcmp", m); + add_named_global(memcmp_func, (void*)&memcmp); + std::vector te_args(0); te_args.push_back(T_pint8); te_args.push_back(T_pint8); diff --git a/src/intrinsics.cpp b/src/intrinsics.cpp index d69ec335c1b88..ef3ebdd5125d2 100644 --- a/src/intrinsics.cpp +++ b/src/intrinsics.cpp @@ -221,22 +221,24 @@ static Constant *julia_const_to_llvm(jl_value_t *e) } JL_GC_POP(); Type *t = julia_struct_to_llvm(jt); - if (t == T_void || t->isEmptyTy()) + if (type_is_ghost(t)) return UndefValue::get(NoopType); + if (t->isVectorTy()) + return ConstantVector::get(ArrayRef(fields,llvm_nf)); + + Constant *init; if (t->isStructTy()) { StructType *st = dyn_cast(t); assert(st); - return ConstantStruct::get(st, ArrayRef(fields,llvm_nf)); - } - else if (t->isVectorTy()) { - return ConstantVector::get(ArrayRef(fields,llvm_nf)); + init = ConstantStruct::get(st, ArrayRef(fields,llvm_nf)); } else { assert(t->isArrayTy()); ArrayType *at = dyn_cast(t); assert(at); - return ConstantArray::get(at, ArrayRef(fields,llvm_nf)); + init = ConstantArray::get(at, ArrayRef(fields,llvm_nf)); } + return new GlobalVariable(*jl_Module, t, true, GlobalVariable::ExternalLinkage, init); } return NULL; } @@ -263,13 +265,19 @@ static Value *emit_unbox(Type *to, Value *x, jl_value_t *jt) return UndefValue::get(to); } if (ty != jl_pvalue_llvmt) { - // bools are stored internally as int8 (for now) - if (ty == T_int1 && to == T_int8) - return 
builder.CreateZExt(x, T_int8); - if (ty->isPointerTy() && !to->isPointerTy()) - return builder.CreatePtrToInt(x, to); - if (!ty->isPointerTy() && to->isPointerTy()) - return builder.CreateIntToPtr(x, to); + if (to->isAggregateType()) { + x = builder.CreateLoad(x); + ty = x->getType(); + } + else { + // bools are stored internally as int8 (for now) + if (ty == T_int1 && to == T_int8) + return builder.CreateZExt(x, T_int8); + if (ty->isPointerTy() && !to->isPointerTy()) + return builder.CreatePtrToInt(x, to); + if (!ty->isPointerTy() && to->isPointerTy()) + return builder.CreateIntToPtr(x, to); + } if (ty != to) { // this can happen when a branch yielding a different type ends // up being dead code, and type inference knows that the other @@ -292,8 +300,7 @@ static Value *emit_unbox(Type *to, Value *x, jl_value_t *jt) assert(to != T_void); return UndefValue::get(to); } - // TODO: stricter alignment if possible - return builder.CreateAlignedLoad(builder.CreateBitCast(p, to->getPointerTo()), sizeof(void*), false); + return builder.CreateLoad(builder.CreateBitCast(p, to->getPointerTo()), false); } // unbox trying to determine type automatically @@ -327,7 +334,9 @@ static Value *auto_unbox(jl_value_t *x, jl_codectx_t *ctx) if (to == T_void) { return NULL; } - return emit_unbox(to, v, bt); + if (to->isAggregateType() && jl_is_immutable_datatype(bt)) // can lazy load on demand, no copy needed + return builder.CreateBitCast(v, to->getPointerTo()); + return emit_reg2mem(emit_unbox(to, v, bt), ctx); } // figure out how many bits a bitstype has at compile time, or -1 @@ -357,7 +366,10 @@ static Value *generic_unbox(jl_value_t *targ, jl_value_t *x, jl_codectx_t *ctx) jl_value_t *p = jl_tparam0(et); if (jl_is_leaf_type(p)) { Type *to = julia_type_to_llvm(p); - return emit_unbox(to, emit_unboxed(x,ctx), p); + Value *lx = emit_unboxed(x,ctx); + if (to->isAggregateType() && lx->getType() == PointerType::get(to,0) && jl_is_immutable(p)) // can lazy load on demand, no copy needed + return lx; + return emit_reg2mem(emit_unbox(to, lx, p), ctx); } } int nb = try_to_determine_bitstype_nbits(targ, ctx); @@ -429,6 +441,9 @@ static Value *generic_box(jl_value_t *targ, jl_value_t *x, jl_codectx_t *ctx) Value *vx = auto_unbox(x, ctx); Type *vxt = vx->getType(); + if (llvmt->isAggregateType() && vxt->isPointerTy()) { + vxt = vxt->getContainedType(0); + } //if (vx->getType()->getPrimitiveSizeInBits() != (unsigned)nb) // jl_errorf("box: expected argument with %d bits, got %d", nb, // vx->getType()->getPrimitiveSizeInBits()); @@ -472,7 +487,10 @@ static Value *generic_box(jl_value_t *targ, jl_value_t *x, jl_codectx_t *ctx) } // dynamically-determined type; evaluate. - return allocate_box_dynamic(emit_expr(targ, ctx), ConstantInt::get(T_size,(nb+7)/8), vx); + if (llvmt->isAggregateType()) { + vx = builder.CreateLoad(vx); + } + return allocate_box_dynamic(emit_expr(targ, ctx), ConstantInt::get(T_size,nb), vx); } static Type *staticeval_bitstype(jl_value_t *targ, const char *fname, jl_codectx_t *ctx) diff --git a/test/ccall.jl b/test/ccall.jl index a1d774f5db8f2..ab28a36bff83d 100644 --- a/test/ccall.jl +++ b/test/ccall.jl @@ -1,7 +1,7 @@ # This file is a part of Julia. 
License is MIT: http://julialang.org/license

 import Base.copy, Base.==

-const verbose = false
+const verbose = false
 ccall((:set_verbose, "./libccalltest"), Void, (Int32,), verbose)

 # Test for proper argument register truncation
@@ -115,7 +115,7 @@ b = ccall((:test_big, "./libccalltest"), Struct_Big, (Struct_Big,), a)
 @test b.y == sbig.y - 2
 @test b.z == sbig.z - Int('A')

-verbose && flush_cstdio()
+verbose && Libc.flush_cstdio()
 verbose && println("Testing cfunction roundtrip: ")
 # cfunction roundtrip
 for (t,v) in ((Complex{Int32},:ci32),(Complex{Int64},:ci64),
@@ -184,7 +184,13 @@ for (t,v) in ((Complex{Int32},:ci32),(Complex{Int64},:ci64),
         if ($(t).mutable)
             @test !(b === a)
         end
+        b = ccall(cfunction($fname,Any,(Ref{$t},)),Any,(Ref{$t},),$v)
+        verbose && println("C: ",b)
+        @test b == $v
+        @test b === c
+        if ($(t).mutable)
+            @test !(b === a)
+        end
         #b = ccall(cfunction($fname,Any,(Ref{Any},)),Any,(Ref{Any},),$v) # unimplemented
-        #b = ccall(cfunction($fname,Any,(Ref{$t},)),Any,(Ref{$t},),$v) # broken due to #2818
     end
 end
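
Editor's aside (not part of the patch): the commit message notes that alloca calls in ccall and elsewhere were moved to the entry BasicBlock so LLVM detects them as static allocations and folds them into the function prologue; the patch implements this with emit_static_alloca, which inserts the alloca before ctx->gc.gcframe. The sketch below shows the general pattern under the assumption that some anchor instruction is known to live in the entry block; the helper name and the anchor parameter are illustrative stand-ins, not the patch's API.

#include <llvm/IR/Instructions.h>

static llvm::AllocaInst *static_alloca_sketch(llvm::Type *lty,
                                              llvm::Instruction *entry_anchor)
{
    // Creating the alloca before an instruction that already lives in the
    // entry block keeps the slot out of loops and error branches, so repeated
    // execution of the emitting code path cannot grow the stack, and LLVM
    // treats the slot as a static prologue allocation.
    return new llvm::AllocaInst(lty, "", /*InsertBefore=*/entry_anchor);
}
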