diff --git a/examples/ROOT b/examples/ROOT index 1fe7b08..aa6c5c6 100644 --- a/examples/ROOT +++ b/examples/ROOT @@ -60,7 +60,7 @@ session "HOL-TestGen-MyKeOS" in "concurrency/MyKeOS" = "HOL-TestGen" + "main.tex" "titlepage.tex" -session "HOL-TestGen-MyKeOS2" in "concurrency/MyKeOS" = "HOL-TestGen" + +session "HOL-TestGen-MyKeOS2" in "concurrency/MyKeOS2" = "HOL-TestGen" + options [quick_and_dirty(*, document = pdf,document_variants="document:outline=/proof,/ML",document_output=output*)] theories MyKeOS diff --git a/src/ROOT b/src/ROOT index 570641d..fe0b41f 100644 --- a/src/ROOT +++ b/src/ROOT @@ -1,11 +1,16 @@ -session "HOL-TestGenLib" = HOL + +session "HOL-TestGenLib" in "test" = "HOL-Library" + description {* HOL-TestGen Basic Libraries without Code-Generators and Testgenerator *} + (*directories "/cygdrive/c/Program Files (x86)/Isabelle HOL/Isabelle2021/hol-testgen-1.9.1/hol-testgen-1.9.1/src"*) + theories "TestLib" -session "HOL-TestGen" (main) = "HOL-TestGenLib" + +session "HOL-TestGen" (main) in "main" = "HOL-TestGenLib" + description {* HOL-TestGen *} + (*directories "/cygdrive/c/Program Files (x86)/Isabelle HOL/Isabelle2021/hol-testgen-1.9.1/hol-testgen-1.9.1/src"*) + (*directories "../../../../Isabelle-linux/Isabelle2021-1/src/HOL"*) + directories "codegen_fsharp" "codegen_gdb" theories "codegen_fsharp/Code_String_Fsharp" "codegen_fsharp/Code_Char_chr_Fsharp" diff --git a/src/Automata.thy b/src/main/Automata.thy similarity index 100% rename from src/Automata.thy rename to src/main/Automata.thy diff --git a/src/BackendUtils.thy b/src/main/BackendUtils.thy similarity index 100% rename from src/BackendUtils.thy rename to src/main/BackendUtils.thy diff --git a/src/EFSM_Toolkit.thy b/src/main/EFSM_Toolkit.thy similarity index 100% rename from src/EFSM_Toolkit.thy rename to src/main/EFSM_Toolkit.thy diff --git a/src/IOCO.thy b/src/main/IOCO.thy similarity index 100% rename from src/IOCO.thy rename to src/main/IOCO.thy diff --git a/src/Interleaving.thy 
b/src/main/Interleaving.thy similarity index 100% rename from src/Interleaving.thy rename to src/main/Interleaving.thy diff --git a/src/Monads.thy b/src/main/Monads.thy similarity index 100% rename from src/Monads.thy rename to src/main/Monads.thy diff --git a/src/Observers.thy b/src/main/Observers.thy similarity index 100% rename from src/Observers.thy rename to src/main/Observers.thy diff --git a/src/main/Old_SMT/old_smt_builtin.ML b/src/main/Old_SMT/old_smt_builtin.ML new file mode 100644 index 0000000..e492ed4 --- /dev/null +++ b/src/main/Old_SMT/old_smt_builtin.ML @@ -0,0 +1,231 @@ +(* Title: HOL/Library/Old_SMT/old_smt_builtin.ML + Author: Sascha Boehme, TU Muenchen + +Tables of types and terms directly supported by SMT solvers. +*) + +signature OLD_SMT_BUILTIN = +sig + (*for experiments*) + val filter_builtins: (typ -> bool) -> Proof.context -> Proof.context + + (*built-in types*) + val add_builtin_typ: Old_SMT_Utils.class -> + typ * (typ -> string option) * (typ -> int -> string option) -> + Context.generic -> Context.generic + val add_builtin_typ_ext: typ * (typ -> bool) -> Context.generic -> + Context.generic + val dest_builtin_typ: Proof.context -> typ -> string option + val is_builtin_typ_ext: Proof.context -> typ -> bool + + (*built-in numbers*) + val dest_builtin_num: Proof.context -> term -> (string * typ) option + val is_builtin_num: Proof.context -> term -> bool + val is_builtin_num_ext: Proof.context -> term -> bool + + (*built-in functions*) + type 'a bfun = Proof.context -> typ -> term list -> 'a + type bfunr = string * int * term list * (term list -> term) + val add_builtin_fun: Old_SMT_Utils.class -> + (string * typ) * bfunr option bfun -> Context.generic -> Context.generic + val add_builtin_fun': Old_SMT_Utils.class -> term * string -> Context.generic -> + Context.generic + val add_builtin_fun_ext: (string * typ) * term list bfun -> + Context.generic -> Context.generic + val add_builtin_fun_ext': string * typ -> Context.generic -> 
Context.generic + val add_builtin_fun_ext'': string -> Context.generic -> Context.generic + val dest_builtin_fun: Proof.context -> string * typ -> term list -> + bfunr option + val dest_builtin_eq: Proof.context -> term -> term -> bfunr option + val dest_builtin_pred: Proof.context -> string * typ -> term list -> + bfunr option + val dest_builtin_conn: Proof.context -> string * typ -> term list -> + bfunr option + val dest_builtin: Proof.context -> string * typ -> term list -> bfunr option + val dest_builtin_ext: Proof.context -> string * typ -> term list -> + term list option + val is_builtin_fun: Proof.context -> string * typ -> term list -> bool + val is_builtin_fun_ext: Proof.context -> string * typ -> term list -> bool +end + +structure Old_SMT_Builtin: OLD_SMT_BUILTIN = +struct + + +(* built-in tables *) + +datatype ('a, 'b) kind = Ext of 'a | Int of 'b + +type ('a, 'b) ttab = ((typ * ('a, 'b) kind) Ord_List.T) Old_SMT_Utils.dict + +fun typ_ord ((T, _), (U, _)) = + let + fun tord (TVar _, Type _) = GREATER + | tord (Type _, TVar _) = LESS + | tord (Type (n, Ts), Type (m, Us)) = + if n = m then list_ord tord (Ts, Us) + else Term_Ord.typ_ord (T, U) + | tord TU = Term_Ord.typ_ord TU + in tord (T, U) end + +fun insert_ttab cs T f = + Old_SMT_Utils.dict_map_default (cs, []) + (Ord_List.insert typ_ord (perhaps (try Logic.varifyT_global) T, f)) + +fun merge_ttab ttabp = + Old_SMT_Utils.dict_merge (Ord_List.merge typ_ord) ttabp + +fun lookup_ttab ctxt ttab T = + let fun match (U, _) = Sign.typ_instance (Proof_Context.theory_of ctxt) (T, U) + in + get_first (find_first match) + (Old_SMT_Utils.dict_lookup ttab (Old_SMT_Config.solver_class_of ctxt)) + end + +type ('a, 'b) btab = ('a, 'b) ttab Symtab.table + +fun insert_btab cs n T f = + Symtab.map_default (n, []) (insert_ttab cs T f) + +fun merge_btab btabp = Symtab.join (K merge_ttab) btabp + +fun lookup_btab ctxt btab (n, T) = + (case Symtab.lookup btab n of + NONE => NONE + | SOME ttab => lookup_ttab ctxt ttab T) + 
+type 'a bfun = Proof.context -> typ -> term list -> 'a + +type bfunr = string * int * term list * (term list -> term) + +structure Builtins = Generic_Data +( + type T = + (typ -> bool, (typ -> string option) * (typ -> int -> string option)) ttab * + (term list bfun, bfunr option bfun) btab + val empty = ([], Symtab.empty) + val extend = I + fun merge ((t1, b1), (t2, b2)) = (merge_ttab (t1, t2), merge_btab (b1, b2)) +) + +fun filter_ttab keep_T = map (apsnd (filter (keep_T o fst))) + +fun filter_builtins keep_T = + Context.proof_map (Builtins.map (fn (ttab, btab) => + (filter_ttab keep_T ttab, Symtab.map (K (filter_ttab keep_T)) btab))) + + +(* built-in types *) + +fun add_builtin_typ cs (T, f, g) = + Builtins.map (apfst (insert_ttab cs T (Int (f, g)))) + +fun add_builtin_typ_ext (T, f) = + Builtins.map (apfst (insert_ttab Old_SMT_Utils.basicC T (Ext f))) + +fun lookup_builtin_typ ctxt = + lookup_ttab ctxt (fst (Builtins.get (Context.Proof ctxt))) + +fun dest_builtin_typ ctxt T = + (case lookup_builtin_typ ctxt T of + SOME (_, Int (f, _)) => f T + | _ => NONE) + +fun is_builtin_typ_ext ctxt T = + (case lookup_builtin_typ ctxt T of + SOME (_, Int (f, _)) => is_some (f T) + | SOME (_, Ext f) => f T + | NONE => false) + + +(* built-in numbers *) + +fun dest_builtin_num ctxt t = + (case try HOLogic.dest_number t of + NONE => NONE + | SOME (T, i) => + if i < 0 then NONE else + (case lookup_builtin_typ ctxt T of + SOME (_, Int (_, g)) => g T i |> Option.map (rpair T) + | _ => NONE)) + +val is_builtin_num = is_some oo dest_builtin_num + +fun is_builtin_num_ext ctxt t = + (case try HOLogic.dest_number t of + NONE => false + | SOME (T, _) => is_builtin_typ_ext ctxt T) + + +(* built-in functions *) + +fun add_builtin_fun cs ((n, T), f) = + Builtins.map (apsnd (insert_btab cs n T (Int f))) + +fun add_builtin_fun' cs (t, n) = + let + val c as (m, T) = Term.dest_Const t + fun app U ts = Term.list_comb (Const (m, U), ts) + fun bfun _ U ts = SOME (n, length (Term.binder_types T), 
ts, app U) + in add_builtin_fun cs (c, bfun) end + +fun add_builtin_fun_ext ((n, T), f) = + Builtins.map (apsnd (insert_btab Old_SMT_Utils.basicC n T (Ext f))) + +fun add_builtin_fun_ext' c = add_builtin_fun_ext (c, fn _ => fn _ => I) + +fun add_builtin_fun_ext'' n context = + let val thy = Context.theory_of context + in add_builtin_fun_ext' (n, Sign.the_const_type thy n) context end + +fun lookup_builtin_fun ctxt = + lookup_btab ctxt (snd (Builtins.get (Context.Proof ctxt))) + +fun dest_builtin_fun ctxt (c as (_, T)) ts = + (case lookup_builtin_fun ctxt c of + SOME (_, Int f) => f ctxt T ts + | _ => NONE) + +fun dest_builtin_eq ctxt t u = + let + val aT = TFree (Name.aT, @{sort type}) + val c = (@{const_name HOL.eq}, aT --> aT --> @{typ bool}) + fun mk ts = Term.list_comb (HOLogic.eq_const (Term.fastype_of (hd ts)), ts) + in + dest_builtin_fun ctxt c [] + |> Option.map (fn (n, i, _, _) => (n, i, [t, u], mk)) + end + +fun special_builtin_fun pred ctxt (c as (_, T)) ts = + if pred (Term.body_type T, Term.binder_types T) then + dest_builtin_fun ctxt c ts + else NONE + +fun dest_builtin_pred ctxt = special_builtin_fun (equal @{typ bool} o fst) ctxt + +fun dest_builtin_conn ctxt = + special_builtin_fun (forall (equal @{typ bool}) o (op ::)) ctxt + +fun dest_builtin ctxt c ts = + let val t = Term.list_comb (Const c, ts) + in + (case dest_builtin_num ctxt t of + SOME (n, _) => SOME (n, 0, [], K t) + | NONE => dest_builtin_fun ctxt c ts) + end + +fun dest_builtin_fun_ext ctxt (c as (_, T)) ts = + (case lookup_builtin_fun ctxt c of + SOME (_, Int f) => f ctxt T ts |> Option.map (fn (_, _, us, _) => us) + | SOME (_, Ext f) => SOME (f ctxt T ts) + | NONE => NONE) + +fun dest_builtin_ext ctxt c ts = + if is_builtin_num_ext ctxt (Term.list_comb (Const c, ts)) then SOME [] + else dest_builtin_fun_ext ctxt c ts + +fun is_builtin_fun ctxt c ts = is_some (dest_builtin_fun ctxt c ts) + +fun is_builtin_fun_ext ctxt c ts = is_some (dest_builtin_fun_ext ctxt c ts) + +end diff --git 
a/src/main/Old_SMT/old_smt_config.ML b/src/main/Old_SMT/old_smt_config.ML new file mode 100644 index 0000000..318b2ce --- /dev/null +++ b/src/main/Old_SMT/old_smt_config.ML @@ -0,0 +1,254 @@ +(* Title: HOL/Library/Old_SMT/old_smt_config.ML + Author: Sascha Boehme, TU Muenchen + +Configuration options and diagnostic tools for SMT. +*) + +signature OLD_SMT_CONFIG = +sig + (*solver*) + type solver_info = { + name: string, + class: Proof.context -> Old_SMT_Utils.class, + avail: unit -> bool, + options: Proof.context -> string list } + val add_solver: solver_info -> Context.generic -> Context.generic + val set_solver_options: string * string -> Context.generic -> Context.generic + val is_available: Proof.context -> string -> bool + val available_solvers_of: Proof.context -> string list + val select_solver: string -> Context.generic -> Context.generic + val solver_of: Proof.context -> string + val solver_class_of: Proof.context -> Old_SMT_Utils.class + val solver_options_of: Proof.context -> string list + + (*options*) + val oracle: bool Config.T + val datatypes: bool Config.T + val timeout: real Config.T + val random_seed: int Config.T + val read_only_certificates: bool Config.T + val verbose: bool Config.T + val trace: bool Config.T + val trace_used_facts: bool Config.T + val monomorph_limit: int Config.T + val monomorph_instances: int Config.T + val infer_triggers: bool Config.T + val filter_only_facts: bool Config.T + val debug_files: string Config.T + + (*tools*) + val with_timeout: Proof.context -> ('a -> 'b) -> 'a -> 'b + + (*diagnostics*) + val trace_msg: Proof.context -> ('a -> string) -> 'a -> unit + val verbose_msg: Proof.context -> ('a -> string) -> 'a -> unit + + (*certificates*) + val select_certificates: string -> Context.generic -> Context.generic + val certificates_of: Proof.context -> Cache_IO.cache option + + (*setup*) + val setup: theory -> theory + val print_setup: Proof.context -> unit +end + +structure Old_SMT_Config: OLD_SMT_CONFIG = +struct + +(* 
solver *) + +type solver_info = { + name: string, + class: Proof.context -> Old_SMT_Utils.class, + avail: unit -> bool, + options: Proof.context -> string list } + +(* FIXME just one data slot (record) per program unit *) +structure Solvers = Generic_Data +( + type T = (solver_info * string list) Symtab.table * string option + val empty = (Symtab.empty, NONE) + val extend = I + fun merge ((ss1, s1), (ss2, s2)) = + (Symtab.merge (K true) (ss1, ss2), merge_options (s1, s2)) +) + +fun set_solver_options (name, options) = + let val opts = String.tokens (Symbol.is_ascii_blank o str) options + in Solvers.map (apfst (Symtab.map_entry name (apsnd (K opts)))) end + +fun add_solver (info as {name, ...} : solver_info) context = + if Symtab.defined (fst (Solvers.get context)) name then + error ("Solver already registered: " ^ quote name) + else + context + |> Solvers.map (apfst (Symtab.update (name, (info, [])))) + |> Context.map_theory (Attrib.setup (Binding.name ("old_" ^ name ^ "_options")) + (Scan.lift (@{keyword "="} |-- Args.name) >> + (Thm.declaration_attribute o K o set_solver_options o pair name)) + ("additional command line options for SMT solver " ^ quote name)) + +fun all_solvers_of ctxt = Symtab.keys (fst (Solvers.get (Context.Proof ctxt))) + +fun solver_name_of ctxt = snd (Solvers.get (Context.Proof ctxt)) + +fun is_available ctxt name = + (case Symtab.lookup (fst (Solvers.get (Context.Proof ctxt))) name of + SOME ({avail, ...}, _) => avail () + | NONE => false) + +fun available_solvers_of ctxt = + filter (is_available ctxt) (all_solvers_of ctxt) + +fun warn_solver (Context.Proof ctxt) name = + if Context_Position.is_visible ctxt then + warning ("The SMT solver " ^ quote name ^ " is not installed.") + else () + | warn_solver _ _ = (); + +fun select_solver name context = + let + val ctxt = Context.proof_of context + val upd = Solvers.map (apsnd (K (SOME name))) + in + if not (member (op =) (all_solvers_of ctxt) name) then + error ("Trying to select unknown solver: 
" ^ quote name) + else if not (is_available ctxt name) then + (warn_solver context name; upd context) + else upd context + end + +fun no_solver_err () = error "No SMT solver selected" + +fun solver_of ctxt = + (case solver_name_of ctxt of + SOME name => name + | NONE => no_solver_err ()) + +fun solver_info_of default select ctxt = + (case Solvers.get (Context.Proof ctxt) of + (solvers, SOME name) => select (Symtab.lookup solvers name) + | (_, NONE) => default ()) + +fun solver_class_of ctxt = + let fun class_of ({class, ...}: solver_info, _) = class ctxt + in solver_info_of no_solver_err (class_of o the) ctxt end + +fun solver_options_of ctxt = + let + fun all_options NONE = [] + | all_options (SOME ({options, ...} : solver_info, opts)) = + opts @ options ctxt + in solver_info_of (K []) all_options ctxt end + +val setup_solver = + Attrib.setup @{binding old_smt_solver} + (Scan.lift (@{keyword "="} |-- Args.name) >> + (Thm.declaration_attribute o K o select_solver)) + "SMT solver configuration" + + +(* options *) + +val oracle = Attrib.setup_config_bool @{binding old_smt_oracle} (K true) +val datatypes = Attrib.setup_config_bool @{binding old_smt_datatypes} (K false) +val timeout = Attrib.setup_config_real @{binding old_smt_timeout} (K 30.0) +val random_seed = Attrib.setup_config_int @{binding old_smt_random_seed} (K 1) +val read_only_certificates = Attrib.setup_config_bool @{binding old_smt_read_only_certificates} (K false) +val verbose = Attrib.setup_config_bool @{binding old_smt_verbose} (K true) +val trace = Attrib.setup_config_bool @{binding old_smt_trace} (K false) +val trace_used_facts = Attrib.setup_config_bool @{binding old_smt_trace_used_facts} (K false) +val monomorph_limit = Attrib.setup_config_int @{binding old_smt_monomorph_limit} (K 10) +val monomorph_instances = Attrib.setup_config_int @{binding old_smt_monomorph_instances} (K 500) +val infer_triggers = Attrib.setup_config_bool @{binding old_smt_infer_triggers} (K false) +val filter_only_facts = 
Attrib.setup_config_bool @{binding old_smt_filter_only_facts} (K false) +val debug_files = Attrib.setup_config_string @{binding old_smt_debug_files} (K "") + + +(* diagnostics *) + +fun cond_trace flag f x = if flag then tracing ("SMT: " ^ f x) else () + +fun verbose_msg ctxt = cond_trace (Config.get ctxt verbose) + +fun trace_msg ctxt = cond_trace (Config.get ctxt trace) + + +(* tools *) + +fun with_timeout ctxt f x = + Timeout.apply (seconds (Config.get ctxt timeout)) f x + handle Timeout.TIMEOUT _ => raise Old_SMT_Failure.SMT Old_SMT_Failure.Time_Out + + +(* certificates *) + +(* FIXME just one data slot (record) per program unit *) +structure Certificates = Generic_Data +( + type T = Cache_IO.cache option + val empty = NONE + val extend = I + fun merge (s, _) = s (* FIXME merge options!? *) +) + +val get_certificates_path = + Option.map (Cache_IO.cache_path_of) o Certificates.get o Context.Proof + +fun select_certificates name context = context |> Certificates.put ( + if name = "" then NONE + else + Path.explode name + |> Path.append (Resources.master_directory (Context.theory_of context)) + |> SOME o Cache_IO.unsynchronized_init) + +val certificates_of = Certificates.get o Context.Proof + +val setup_certificates = + Attrib.setup @{binding old_smt_certificates} + (Scan.lift (@{keyword "="} |-- Args.name) >> + (Thm.declaration_attribute o K o select_certificates)) + "SMT certificates configuration" + + +(* setup *) + +val setup = + setup_solver #> + setup_certificates + +fun print_setup ctxt = + let + fun string_of_bool b = if b then "true" else "false" + + val names = available_solvers_of ctxt + val ns = if null names then ["(none)"] else sort_strings names + val n = the_default "(none)" (solver_name_of ctxt) + val opts = solver_options_of ctxt + + val t = string_of_real (Config.get ctxt timeout) + + val certs_filename = + (case get_certificates_path ctxt of + SOME path => Path.print path + | NONE => "(disabled)") + in + Pretty.writeln (Pretty.big_list "SMT 
setup:" [ + Pretty.str ("Current SMT solver: " ^ n), + Pretty.str ("Current SMT solver options: " ^ space_implode " " opts), + Pretty.str_list "Available SMT solvers: " "" ns, + Pretty.str ("Current timeout: " ^ t ^ " seconds"), + Pretty.str ("With proofs: " ^ + string_of_bool (not (Config.get ctxt oracle))), + Pretty.str ("Certificates cache: " ^ certs_filename), + Pretty.str ("Fixed certificates: " ^ + string_of_bool (Config.get ctxt read_only_certificates))]) + end + +val _ = + Outer_Syntax.command @{command_keyword old_smt_status} + "show the available SMT solvers, the currently selected SMT solver, \ + \and the values of SMT configuration options" + (Scan.succeed (Toplevel.keep (print_setup o Toplevel.context_of))) + +end diff --git a/src/main/Old_SMT/old_smt_datatypes.ML b/src/main/Old_SMT/old_smt_datatypes.ML new file mode 100644 index 0000000..971dc74 --- /dev/null +++ b/src/main/Old_SMT/old_smt_datatypes.ML @@ -0,0 +1,94 @@ +(* Title: HOL/Library/Old_SMT/old_smt_datatypes.ML + Author: Sascha Boehme, TU Muenchen + +Collector functions for common type declarations and their representation +as algebraic datatypes. 
+*) + +signature OLD_SMT_DATATYPES = +sig + val add_decls: typ -> + (typ * (term * term list) list) list list * Proof.context -> + (typ * (term * term list) list) list list * Proof.context +end + +structure Old_SMT_Datatypes: OLD_SMT_DATATYPES = +struct + +fun mk_selectors T Ts = + Variable.variant_fixes (replicate (length Ts) "select") + #>> map2 (fn U => fn n => Free (n, T --> U)) Ts + + +(* free constructor type declarations *) + +fun get_ctr_sugar_decl ({ctrs, ...} : Ctr_Sugar.ctr_sugar) T Ts ctxt = + let + fun mk_constr ctr0 = + let val ctr = Ctr_Sugar.mk_ctr Ts ctr0 in + mk_selectors T (binder_types (fastype_of ctr)) #>> pair ctr + end + in + fold_map mk_constr ctrs ctxt + |>> (pair T #> single) + end + + +(* typedef declarations *) + +fun get_typedef_decl (({Abs_name, Rep_name, abs_type, rep_type, ...}, {Abs_inverse, ...}) + : Typedef.info) T Ts = + if can (curry (op RS) @{thm UNIV_I}) Abs_inverse then + let + val env = snd (Term.dest_Type abs_type) ~~ Ts + val instT = Term.map_atyps (perhaps (AList.lookup (op =) env)) + + val constr = Const (Abs_name, instT (rep_type --> abs_type)) + val select = Const (Rep_name, instT (abs_type --> rep_type)) + in [(T, [(constr, [select])])] end + else + [] + + +(* collection of declarations *) + +fun declared declss T = exists (exists (equal T o fst)) declss +fun declared' dss T = exists (exists (equal T o fst) o snd) dss + +fun get_decls T n Ts ctxt = + (case Ctr_Sugar.ctr_sugar_of ctxt n of + SOME ctr_sugar => get_ctr_sugar_decl ctr_sugar T Ts ctxt + | NONE => + (case Typedef.get_info ctxt n of + [] => ([], ctxt) + | info :: _ => (get_typedef_decl info T Ts, ctxt))) + +fun add_decls T (declss, ctxt) = + let + fun depends Ts ds = exists (member (op =) (map fst ds)) Ts + + fun add (TFree _) = I + | add (TVar _) = I + | add (T as Type (@{type_name fun}, _)) = + fold add (Term.body_type T :: Term.binder_types T) + | add @{typ bool} = I + | add (T as Type (n, Ts)) = (fn (dss, ctxt1) => + if declared declss T orelse declared' 
dss T then (dss, ctxt1) + else if Old_SMT_Builtin.is_builtin_typ_ext ctxt1 T then (dss, ctxt1) + else + (case get_decls T n Ts ctxt1 of + ([], _) => (dss, ctxt1) + | (ds, ctxt2) => + let + val constrTs = + maps (map (snd o Term.dest_Const o fst) o snd) ds + val Us = fold (union (op =) o Term.binder_types) constrTs [] + + fun ins [] = [(Us, ds)] + | ins ((Uds as (Us', _)) :: Udss) = + if depends Us' ds then (Us, ds) :: Uds :: Udss + else Uds :: ins Udss + in fold add Us (ins dss, ctxt2) end)) + in add T ([], ctxt) |>> append declss o map snd end + +end diff --git a/src/main/Old_SMT/old_smt_failure.ML b/src/main/Old_SMT/old_smt_failure.ML new file mode 100644 index 0000000..394287c --- /dev/null +++ b/src/main/Old_SMT/old_smt_failure.ML @@ -0,0 +1,61 @@ +(* Title: HOL/Library/Old_SMT/old_smt_failure.ML + Author: Sascha Boehme, TU Muenchen + +Failures and exception of SMT. +*) + +signature OLD_SMT_FAILURE = +sig + type counterexample = { + is_real_cex: bool, + free_constraints: term list, + const_defs: term list} + datatype failure = + Counterexample of counterexample | + Time_Out | + Out_Of_Memory | + Abnormal_Termination of int | + Other_Failure of string + val pretty_counterexample: Proof.context -> counterexample -> Pretty.T + val string_of_failure: Proof.context -> failure -> string + exception SMT of failure +end + +structure Old_SMT_Failure: OLD_SMT_FAILURE = +struct + +type counterexample = { + is_real_cex: bool, + free_constraints: term list, + const_defs: term list} + +datatype failure = + Counterexample of counterexample | + Time_Out | + Out_Of_Memory | + Abnormal_Termination of int | + Other_Failure of string + +fun pretty_counterexample ctxt {is_real_cex, free_constraints, const_defs} = + let + val msg = + if is_real_cex then "Counterexample found (possibly spurious)" + else "Potential counterexample found" + in + if null free_constraints andalso null const_defs then Pretty.str msg + else + Pretty.big_list (msg ^ ":") + (map (Syntax.pretty_term ctxt) 
(free_constraints @ const_defs)) + end + +fun string_of_failure ctxt (Counterexample cex) = + Pretty.string_of (pretty_counterexample ctxt cex) + | string_of_failure _ Time_Out = "Timed out" + | string_of_failure _ Out_Of_Memory = "Ran out of memory" + | string_of_failure _ (Abnormal_Termination err) = + "Solver terminated abnormally with error code " ^ string_of_int err + | string_of_failure _ (Other_Failure msg) = msg + +exception SMT of failure + +end diff --git a/src/main/Old_SMT/old_smt_normalize.ML b/src/main/Old_SMT/old_smt_normalize.ML new file mode 100644 index 0000000..18cf0b7 --- /dev/null +++ b/src/main/Old_SMT/old_smt_normalize.ML @@ -0,0 +1,652 @@ +(* Title: HOL/Library/Old_SMT/old_smt_normalize.ML + Author: Sascha Boehme, TU Muenchen + +Normalization steps on theorems required by SMT solvers. +*) + +signature OLD_SMT_NORMALIZE = +sig + val drop_fact_warning: Proof.context -> thm -> unit + val atomize_conv: Proof.context -> conv + type extra_norm = Proof.context -> thm list * thm list -> thm list * thm list + val add_extra_norm: Old_SMT_Utils.class * extra_norm -> Context.generic -> + Context.generic + val normalize: (int * (int option * thm)) list -> Proof.context -> + (int * thm) list * Proof.context + val setup: theory -> theory +end + +structure Old_SMT_Normalize: OLD_SMT_NORMALIZE = +struct + +fun drop_fact_warning ctxt = + Old_SMT_Config.verbose_msg ctxt (prefix "Warning: dropping assumption: " o + Thm.string_of_thm ctxt) + + +(* general theorem normalizations *) + +(** instantiate elimination rules **) + +local + val (cpfalse, cfalse) = + `Old_SMT_Utils.mk_cprop (Thm.cterm_of @{context} @{const False}) + + fun inst f ct thm = + let val cv = f (Drule.strip_imp_concl (Thm.cprop_of thm)) + in Thm.instantiate ([], [(dest_Var (Thm.term_of cv), ct)]) thm end +in + +fun instantiate_elim thm = + (case Thm.concl_of thm of + @{const Trueprop} $ Var (_, @{typ bool}) => inst Thm.dest_arg cfalse thm + | Var _ => inst I cpfalse thm + | _ => thm) + +end + + 
+(** normalize definitions **) + +fun norm_def thm = + (case Thm.prop_of thm of + @{const Trueprop} $ (Const (@{const_name HOL.eq}, _) $ _ $ Abs _) => + norm_def (thm RS @{thm fun_cong}) + | Const (@{const_name Pure.eq}, _) $ _ $ Abs _ => + norm_def (thm RS @{thm meta_eq_to_obj_eq}) + | _ => thm) + + +(** atomization **) + +fun atomize_conv ctxt ct = + (case Thm.term_of ct of + @{const Pure.imp} $ _ $ _ => + Conv.binop_conv (atomize_conv ctxt) then_conv + Conv.rewr_conv @{thm atomize_imp} + | Const (@{const_name Pure.eq}, _) $ _ $ _ => + Conv.binop_conv (atomize_conv ctxt) then_conv + Conv.rewr_conv @{thm atomize_eq} + | Const (@{const_name Pure.all}, _) $ Abs _ => + Conv.binder_conv (atomize_conv o snd) ctxt then_conv + Conv.rewr_conv @{thm atomize_all} + | _ => Conv.all_conv) ct + +val setup_atomize = + fold Old_SMT_Builtin.add_builtin_fun_ext'' [@{const_name Pure.imp}, + @{const_name Pure.eq}, @{const_name Pure.all}, @{const_name Trueprop}] + + +(** unfold special quantifiers **) + +local + val ex1_def = mk_meta_eq @{lemma + "Ex1 = (%P. EX x. P x & (ALL y. P y --> y = x))" + by (rule ext) (simp only: Ex1_def)} + + val ball_def = mk_meta_eq @{lemma "Ball = (%A P. ALL x. x : A --> P x)" + by (rule ext)+ (rule Ball_def)} + + val bex_def = mk_meta_eq @{lemma "Bex = (%A P. EX x. 
x : A & P x)" + by (rule ext)+ (rule Bex_def)} + + val special_quants = [(@{const_name Ex1}, ex1_def), + (@{const_name Ball}, ball_def), (@{const_name Bex}, bex_def)] + + fun special_quant (Const (n, _)) = AList.lookup (op =) special_quants n + | special_quant _ = NONE + + fun special_quant_conv _ ct = + (case special_quant (Thm.term_of ct) of + SOME thm => Conv.rewr_conv thm + | NONE => Conv.all_conv) ct +in + +fun unfold_special_quants_conv ctxt = + Old_SMT_Utils.if_exists_conv (is_some o special_quant) + (Conv.top_conv special_quant_conv ctxt) + +val setup_unfolded_quants = + fold (Old_SMT_Builtin.add_builtin_fun_ext'' o fst) special_quants + +end + + +(** trigger inference **) + +local + (*** check trigger syntax ***) + + fun dest_trigger (Const (@{const_name pat}, _) $ _) = SOME true + | dest_trigger (Const (@{const_name nopat}, _) $ _) = SOME false + | dest_trigger _ = NONE + + fun eq_list [] = false + | eq_list (b :: bs) = forall (equal b) bs + + fun proper_trigger t = + t + |> these o try HOLogic.dest_list + |> map (map_filter dest_trigger o these o try HOLogic.dest_list) + |> (fn [] => false | bss => forall eq_list bss) + + fun proper_quant inside f t = + (case t of + Const (@{const_name All}, _) $ Abs (_, _, u) => proper_quant true f u + | Const (@{const_name Ex}, _) $ Abs (_, _, u) => proper_quant true f u + | @{const trigger} $ p $ u => + (if inside then f p else false) andalso proper_quant false f u + | Abs (_, _, u) => proper_quant false f u + | u1 $ u2 => proper_quant false f u1 andalso proper_quant false f u2 + | _ => true) + + fun check_trigger_error ctxt t = + error ("SMT triggers must only occur under quantifier and multipatterns " ^ + "must have the same kind: " ^ Syntax.string_of_term ctxt t) + + fun check_trigger_conv ctxt ct = + if proper_quant false proper_trigger (Old_SMT_Utils.term_of ct) then + Conv.all_conv ct + else check_trigger_error ctxt (Thm.term_of ct) + + + (*** infer simple triggers ***) + + fun dest_cond_eq ct = + (case 
Thm.term_of ct of + Const (@{const_name HOL.eq}, _) $ _ $ _ => Thm.dest_binop ct + | @{const HOL.implies} $ _ $ _ => dest_cond_eq (Thm.dest_arg ct) + | _ => raise CTERM ("no equation", [ct])) + + fun get_constrs thy (Type (n, _)) = these (Old_Datatype_Data.get_constrs thy n) + | get_constrs _ _ = [] + + fun is_constr thy (n, T) = + let fun match (m, U) = m = n andalso Sign.typ_instance thy (T, U) + in can (the o find_first match o get_constrs thy o Term.body_type) T end + + fun is_constr_pat thy t = + (case Term.strip_comb t of + (Free _, []) => true + | (Const c, ts) => is_constr thy c andalso forall (is_constr_pat thy) ts + | _ => false) + + fun is_simp_lhs ctxt t = + (case Term.strip_comb t of + (Const c, ts as _ :: _) => + not (Old_SMT_Builtin.is_builtin_fun_ext ctxt c ts) andalso + forall (is_constr_pat (Proof_Context.theory_of ctxt)) ts + | _ => false) + + fun has_all_vars vs t = + subset (op aconv) (vs, map Free (Term.add_frees t [])) + + fun minimal_pats vs ct = + if has_all_vars vs (Thm.term_of ct) then + (case Thm.term_of ct of + _ $ _ => + (case apply2 (minimal_pats vs) (Thm.dest_comb ct) of + ([], []) => [[ct]] + | (ctss, ctss') => union (eq_set (op aconvc)) ctss ctss') + | _ => []) + else [] + + fun proper_mpat _ _ _ [] = false + | proper_mpat thy gen u cts = + let + val tps = (op ~~) (`gen (map Thm.term_of cts)) + fun some_match u = tps |> exists (fn (t', t) => + Pattern.matches thy (t', u) andalso not (t aconv u)) + in not (Term.exists_subterm some_match u) end + + val pat = + Old_SMT_Utils.mk_const_pat @{theory} @{const_name pat} Old_SMT_Utils.destT1 + fun mk_pat ct = Thm.apply (Old_SMT_Utils.instT' ct pat) ct + + fun mk_clist T = apply2 (Thm.cterm_of @{context}) (HOLogic.cons_const T, HOLogic.nil_const T) + fun mk_list (ccons, cnil) f cts = fold_rev (Thm.mk_binop ccons o f) cts cnil + val mk_pat_list = mk_list (mk_clist @{typ pattern}) + val mk_mpat_list = mk_list (mk_clist @{typ "pattern list"}) + fun mk_trigger ctss = mk_mpat_list (mk_pat_list 
mk_pat) ctss + + val trigger_eq = + mk_meta_eq @{lemma "p = trigger t p" by (simp add: trigger_def)} + + fun insert_trigger_conv [] ct = Conv.all_conv ct + | insert_trigger_conv ctss ct = + let val (ctr, cp) = Thm.dest_binop (Thm.rhs_of trigger_eq) ||> rpair ct + in Thm.instantiate ([], map (apfst (dest_Var o Thm.term_of)) [cp, (ctr, mk_trigger ctss)]) trigger_eq end + + fun infer_trigger_eq_conv outer_ctxt (ctxt, cvs) ct = + let + val (lhs, rhs) = dest_cond_eq ct + + val vs = map Thm.term_of cvs + val thy = Proof_Context.theory_of ctxt + + fun get_mpats ct = + if is_simp_lhs ctxt (Thm.term_of ct) then minimal_pats vs ct + else [] + val gen = Variable.export_terms ctxt outer_ctxt + val filter_mpats = filter (proper_mpat thy gen (Thm.term_of rhs)) + + in insert_trigger_conv (filter_mpats (get_mpats lhs)) ct end + + fun has_trigger (@{const trigger} $ _ $ _) = true + | has_trigger _ = false + + fun try_trigger_conv cv ct = + if Old_SMT_Utils.under_quant has_trigger (Old_SMT_Utils.term_of ct) then + Conv.all_conv ct + else Conv.try_conv cv ct + + fun infer_trigger_conv ctxt = + if Config.get ctxt Old_SMT_Config.infer_triggers then + try_trigger_conv + (Old_SMT_Utils.under_quant_conv (infer_trigger_eq_conv ctxt) ctxt) + else Conv.all_conv +in + +fun trigger_conv ctxt = + Old_SMT_Utils.prop_conv + (check_trigger_conv ctxt then_conv infer_trigger_conv ctxt) + +val setup_trigger = + fold Old_SMT_Builtin.add_builtin_fun_ext'' + [@{const_name pat}, @{const_name nopat}, @{const_name trigger}] + +end + + +(** adding quantifier weights **) + +local + (*** check weight syntax ***) + + val has_no_weight = + not o Term.exists_subterm (fn @{const weight} => true | _ => false) + + fun is_weight (@{const weight} $ w $ t) = + (case try HOLogic.dest_number w of + SOME (_, i) => i >= 0 andalso has_no_weight t + | _ => false) + | is_weight t = has_no_weight t + + fun proper_trigger (@{const trigger} $ _ $ t) = is_weight t + | proper_trigger t = is_weight t + + fun check_weight_error 
ctxt t = + error ("SMT weight must be a non-negative number and must only occur " ^ + "under the top-most quantifier and an optional trigger: " ^ + Syntax.string_of_term ctxt t) + + fun check_weight_conv ctxt ct = + if Old_SMT_Utils.under_quant proper_trigger (Old_SMT_Utils.term_of ct) then + Conv.all_conv ct + else check_weight_error ctxt (Thm.term_of ct) + + + (*** insertion of weights ***) + + fun under_trigger_conv cv ct = + (case Thm.term_of ct of + @{const trigger} $ _ $ _ => Conv.arg_conv cv + | _ => cv) ct + + val weight_eq = + mk_meta_eq @{lemma "p = weight i p" by (simp add: weight_def)} + fun mk_weight_eq w = + let val cv = Thm.dest_arg1 (Thm.rhs_of weight_eq) + in + Thm.instantiate ([], [(dest_Var (Thm.term_of cv), Numeral.mk_cnumber @{ctyp int} w)]) + weight_eq + end + + fun add_weight_conv NONE _ = Conv.all_conv + | add_weight_conv (SOME weight) ctxt = + let val cv = Conv.rewr_conv (mk_weight_eq weight) + in Old_SMT_Utils.under_quant_conv (K (under_trigger_conv cv)) ctxt end +in + +fun weight_conv weight ctxt = + Old_SMT_Utils.prop_conv + (check_weight_conv ctxt then_conv add_weight_conv weight ctxt) + +val setup_weight = Old_SMT_Builtin.add_builtin_fun_ext'' @{const_name weight} + +end + + +(** combined general normalizations **) + +fun gen_normalize1_conv ctxt weight = + atomize_conv ctxt then_conv + unfold_special_quants_conv ctxt then_conv + Thm.beta_conversion true then_conv + trigger_conv ctxt then_conv + weight_conv weight ctxt + +fun gen_normalize1 ctxt weight thm = + thm + |> instantiate_elim + |> norm_def + |> Conv.fconv_rule (Thm.beta_conversion true then_conv Thm.eta_conversion) + |> Drule.forall_intr_vars + |> Conv.fconv_rule (gen_normalize1_conv ctxt weight) + +fun gen_norm1_safe ctxt (i, (weight, thm)) = + (case try (gen_normalize1 ctxt weight) thm of + SOME thm' => SOME (i, thm') + | NONE => (drop_fact_warning ctxt thm; NONE)) + +fun gen_normalize ctxt iwthms = map_filter (gen_norm1_safe ctxt) iwthms + + + +(* unfolding of definitions 
and theory-specific rewritings *) + +fun expand_head_conv cv ct = + (case Thm.term_of ct of + _ $ _ => + Conv.fun_conv (expand_head_conv cv) then_conv + Conv.try_conv (Thm.beta_conversion false) + | _ => cv) ct + + +(** rewrite bool case expressions as if expressions **) + +local + fun is_case_bool (Const (@{const_name "bool.case_bool"}, _)) = true + | is_case_bool _ = false + + val thm = mk_meta_eq @{lemma + "case_bool = (%x y P. if P then x else y)" by (rule ext)+ simp} + + fun unfold_conv _ = + Old_SMT_Utils.if_true_conv (is_case_bool o Term.head_of) + (expand_head_conv (Conv.rewr_conv thm)) +in + +fun rewrite_case_bool_conv ctxt = + Old_SMT_Utils.if_exists_conv is_case_bool (Conv.top_conv unfold_conv ctxt) + +val setup_case_bool = + Old_SMT_Builtin.add_builtin_fun_ext'' @{const_name "bool.case_bool"} + +end + + +(** unfold abs, min and max **) + +local + val abs_def = mk_meta_eq @{lemma + "abs = (%a::'a::abs_if. if a < 0 then - a else a)" + by (rule ext) (rule abs_if)} + + val min_def = mk_meta_eq @{lemma "min = (%a b. if a <= b then a else b)" + by (rule ext)+ (rule min_def)} + + val max_def = mk_meta_eq @{lemma "max = (%a b. 
if a <= b then b else a)" + by (rule ext)+ (rule max_def)} + + val defs = [(@{const_name min}, min_def), (@{const_name max}, max_def), + (@{const_name abs}, abs_def)] + + fun is_builtinT ctxt T = + Old_SMT_Builtin.is_builtin_typ_ext ctxt (Term.domain_type T) + + fun abs_min_max ctxt (Const (n, T)) = + (case AList.lookup (op =) defs n of + NONE => NONE + | SOME thm => if is_builtinT ctxt T then SOME thm else NONE) + | abs_min_max _ _ = NONE + + fun unfold_amm_conv ctxt ct = + (case abs_min_max ctxt (Term.head_of (Thm.term_of ct)) of + SOME thm => expand_head_conv (Conv.rewr_conv thm) + | NONE => Conv.all_conv) ct +in + +fun unfold_abs_min_max_conv ctxt = + Old_SMT_Utils.if_exists_conv (is_some o abs_min_max ctxt) + (Conv.top_conv unfold_amm_conv ctxt) + +val setup_abs_min_max = fold (Old_SMT_Builtin.add_builtin_fun_ext'' o fst) defs + +end + + +(** embedding of standard natural number operations into integer operations **) + +local + val nat_embedding = @{lemma + "ALL n. nat (int n) = n" + "ALL i. i >= 0 --> int (nat i) = i" + "ALL i. i < 0 --> int (nat i) = 0" + by simp_all} + + val simple_nat_ops = [ + @{const less (nat)}, @{const less_eq (nat)}, + @{const Suc}, @{const plus (nat)}, @{const minus (nat)}] + + val mult_nat_ops = + [@{const times (nat)}, @{const divide (nat)}, @{const modulo (nat)}] + + val nat_ops = simple_nat_ops @ mult_nat_ops + + val nat_consts = nat_ops @ [@{const numeral (nat)}, + @{const zero_class.zero (nat)}, @{const one_class.one (nat)}] + + val nat_int_coercions = [@{const of_nat (int)}, @{const nat}] + + val builtin_nat_ops = nat_int_coercions @ simple_nat_ops + + val is_nat_const = member (op aconv) nat_consts + + fun is_nat_const' @{const of_nat (int)} = true + | is_nat_const' t = is_nat_const t + + val expands = map mk_meta_eq @{lemma + "0 = nat 0" + "1 = nat 1" + "(numeral :: num => nat) = (%i. nat (numeral i))" + "op < = (%a b. int a < int b)" + "op <= = (%a b. int a <= int b)" + "Suc = (%a. nat (int a + 1))" + "op + = (%a b. 
nat (int a + int b))" + "op - = (%a b. nat (int a - int b))" + "op * = (%a b. nat (int a * int b))" + "op div = (%a b. nat (int a div int b))" + "op mod = (%a b. nat (int a mod int b))" + by (fastforce simp add: nat_mult_distrib nat_div_distrib nat_mod_distrib)+} + + val ints = map mk_meta_eq @{lemma + "int 0 = 0" + "int 1 = 1" + "int (Suc n) = int n + 1" + "int (n + m) = int n + int m" + "int (n - m) = int (nat (int n - int m))" + "int (n * m) = int n * int m" + "int (n div m) = int n div int m" + "int (n mod m) = int n mod int m" + by (auto simp add: of_nat_mult zdiv_int zmod_int)} + + val int_if = mk_meta_eq @{lemma + "int (if P then n else m) = (if P then int n else int m)" + by simp} + + fun mk_number_eq ctxt i lhs = + let + val eq = Old_SMT_Utils.mk_cequals lhs (Numeral.mk_cnumber @{ctyp int} i) + val tac = + Simplifier.simp_tac (put_simpset HOL_ss ctxt addsimps [@{thm of_nat_numeral}]) 1 + in Goal.norm_result ctxt (Goal.prove_internal ctxt [] eq (K tac)) end + + fun ite_conv cv1 cv2 = + Conv.combination_conv (Conv.combination_conv (Conv.arg_conv cv1) cv2) cv2 + + fun int_conv ctxt ct = + (case Thm.term_of ct of + @{const of_nat (int)} $ (n as (@{const numeral (nat)} $ _)) => + Conv.rewr_conv (mk_number_eq ctxt (snd (HOLogic.dest_number n)) ct) + | @{const of_nat (int)} $ _ => + (Conv.rewrs_conv ints then_conv Conv.sub_conv ints_conv ctxt) else_conv + (Conv.rewr_conv int_if then_conv + ite_conv (nat_conv ctxt) (int_conv ctxt)) else_conv + Conv.sub_conv (Conv.top_sweep_conv nat_conv) ctxt + | _ => Conv.no_conv) ct + + and ints_conv ctxt = Conv.top_sweep_conv int_conv ctxt + + and expand_conv ctxt = + Old_SMT_Utils.if_conv (is_nat_const o Term.head_of) + (expand_head_conv (Conv.rewrs_conv expands) then_conv ints_conv ctxt) + (int_conv ctxt) + + and nat_conv ctxt = Old_SMT_Utils.if_exists_conv is_nat_const' + (Conv.top_sweep_conv expand_conv ctxt) + + val uses_nat_int = Term.exists_subterm (member (op aconv) nat_int_coercions) +in + +val nat_as_int_conv = 
nat_conv + +fun add_nat_embedding thms = + if exists (uses_nat_int o Thm.prop_of) thms then (thms, nat_embedding) + else (thms, []) + +val setup_nat_as_int = + Old_SMT_Builtin.add_builtin_typ_ext (@{typ nat}, K true) #> + fold (Old_SMT_Builtin.add_builtin_fun_ext' o Term.dest_Const) builtin_nat_ops + +end + + +(** normalize numerals **) + +local + (* + rewrite Numeral1 into 1 + rewrite - 0 into 0 + *) + + fun is_irregular_number (Const (@{const_name numeral}, _) $ Const (@{const_name num.One}, _)) = + true + | is_irregular_number (Const (@{const_name uminus}, _) $ Const (@{const_name Groups.zero}, _)) = + true + | is_irregular_number _ = + false; + + fun is_strange_number ctxt t = is_irregular_number t andalso Old_SMT_Builtin.is_builtin_num ctxt t; + + val proper_num_ss = + simpset_of (put_simpset HOL_ss @{context} + addsimps @{thms Num.numeral_One minus_zero}) + + fun norm_num_conv ctxt = + Old_SMT_Utils.if_conv (is_strange_number ctxt) + (Simplifier.rewrite (put_simpset proper_num_ss ctxt)) Conv.no_conv +in + +fun normalize_numerals_conv ctxt = + Old_SMT_Utils.if_exists_conv (is_strange_number ctxt) + (Conv.top_sweep_conv norm_num_conv ctxt) + +end + + +(** combined unfoldings and rewritings **) + +fun unfold_conv ctxt = + rewrite_case_bool_conv ctxt then_conv + unfold_abs_min_max_conv ctxt then_conv + nat_as_int_conv ctxt then_conv + Thm.beta_conversion true + +fun unfold1 ctxt = map (apsnd (Conv.fconv_rule (unfold_conv ctxt))) + +fun burrow_ids f ithms = + let + val (is, thms) = split_list ithms + val (thms', extra_thms) = f thms + in (is ~~ thms') @ map (pair ~1) extra_thms end + +fun unfold2 ctxt ithms = + ithms + |> map (apsnd (Conv.fconv_rule (normalize_numerals_conv ctxt))) + |> burrow_ids add_nat_embedding + + + +(* overall normalization *) + +type extra_norm = Proof.context -> thm list * thm list -> thm list * thm list + +structure Extra_Norms = Generic_Data +( + type T = extra_norm Old_SMT_Utils.dict + val empty = [] + val extend = I + fun merge data = 
Old_SMT_Utils.dict_merge fst data +) + +fun add_extra_norm (cs, norm) = + Extra_Norms.map (Old_SMT_Utils.dict_update (cs, norm)) + +fun apply_extra_norms ctxt ithms = + let + val cs = Old_SMT_Config.solver_class_of ctxt + val es = Old_SMT_Utils.dict_lookup (Extra_Norms.get (Context.Proof ctxt)) cs + in burrow_ids (fold (fn e => e ctxt) es o rpair []) ithms end + +local + val ignored = member (op =) [@{const_name All}, @{const_name Ex}, + @{const_name Let}, @{const_name If}, @{const_name HOL.eq}] + + val schematic_consts_of = + let + fun collect (@{const trigger} $ p $ t) = + collect_trigger p #> collect t + | collect (t $ u) = collect t #> collect u + | collect (Abs (_, _, t)) = collect t + | collect (t as Const (n, _)) = + if not (ignored n) then Monomorph.add_schematic_consts_of t else I + | collect _ = I + and collect_trigger t = + let val dest = these o try HOLogic.dest_list + in fold (fold collect_pat o dest) (dest t) end + and collect_pat (Const (@{const_name pat}, _) $ t) = collect t + | collect_pat (Const (@{const_name nopat}, _) $ t) = collect t + | collect_pat _ = I + in (fn t => collect t Symtab.empty) end +in + +fun monomorph ctxt xthms = + let val (xs, thms) = split_list xthms + in + map (pair 1) thms + |> Monomorph.monomorph schematic_consts_of ctxt + |> maps (uncurry (map o pair)) o map2 pair xs o map (map snd) + end + +end + +fun normalize iwthms ctxt = + iwthms + |> gen_normalize ctxt + |> unfold1 ctxt + |> monomorph ctxt + |> unfold2 ctxt + |> apply_extra_norms ctxt + |> rpair ctxt + +val setup = Context.theory_map ( + setup_atomize #> + setup_unfolded_quants #> + setup_trigger #> + setup_weight #> + setup_case_bool #> + setup_abs_min_max #> + setup_nat_as_int) + +end diff --git a/src/main/Old_SMT/old_smt_real.ML b/src/main/Old_SMT/old_smt_real.ML new file mode 100644 index 0000000..6a2a793 --- /dev/null +++ b/src/main/Old_SMT/old_smt_real.ML @@ -0,0 +1,134 @@ +(* Title: HOL/Library/Old_SMT/old_smt_real.ML + Author: Sascha Boehme, TU Muenchen + 
+SMT setup for reals. +*) + +structure Old_SMT_Real: sig end = +struct + + +(* SMT-LIB logic *) + +fun smtlib_logic ts = + if exists (Term.exists_type (Term.exists_subtype (equal @{typ real}))) ts + then SOME "AUFLIRA" + else NONE + + +(* SMT-LIB and Z3 built-ins *) + +local + fun real_num _ i = SOME (string_of_int i ^ ".0") + + fun is_linear [t] = Old_SMT_Utils.is_number t + | is_linear [t, u] = Old_SMT_Utils.is_number t orelse Old_SMT_Utils.is_number u + | is_linear _ = false + + fun mk_times ts = Term.list_comb (@{const times (real)}, ts) + + fun times _ _ ts = if is_linear ts then SOME ("*", 2, ts, mk_times) else NONE +in + +val setup_builtins = + Old_SMT_Builtin.add_builtin_typ Old_SMTLIB_Interface.smtlibC + (@{typ real}, K (SOME "Real"), real_num) #> + fold (Old_SMT_Builtin.add_builtin_fun' Old_SMTLIB_Interface.smtlibC) [ + (@{const less (real)}, "<"), + (@{const less_eq (real)}, "<="), + (@{const uminus (real)}, "~"), + (@{const plus (real)}, "+"), + (@{const minus (real)}, "-") ] #> + Old_SMT_Builtin.add_builtin_fun Old_SMTLIB_Interface.smtlibC + (Term.dest_Const @{const times (real)}, times) #> + Old_SMT_Builtin.add_builtin_fun' Old_Z3_Interface.smtlib_z3C + (@{const times (real)}, "*") #> + Old_SMT_Builtin.add_builtin_fun' Old_Z3_Interface.smtlib_z3C + (@{const divide (real)}, "/") + +end + + +(* Z3 constructors *) + +local + fun z3_mk_builtin_typ (Old_Z3_Interface.Sym ("Real", _)) = SOME @{typ real} + | z3_mk_builtin_typ (Old_Z3_Interface.Sym ("real", _)) = SOME @{typ real} + (*FIXME: delete*) + | z3_mk_builtin_typ _ = NONE + + fun z3_mk_builtin_num _ i T = + if T = @{typ real} then SOME (Numeral.mk_cnumber @{ctyp real} i) + else NONE + + fun mk_nary _ cu [] = cu + | mk_nary ct _ cts = uncurry (fold_rev (Thm.mk_binop ct)) (split_last cts) + + val mk_uminus = Thm.apply (Thm.cterm_of @{context} @{const uminus (real)}) + val add = Thm.cterm_of @{context} @{const plus (real)} + val real0 = Numeral.mk_cnumber @{ctyp real} 0 + val mk_sub = Thm.mk_binop 
(Thm.cterm_of @{context} @{const minus (real)}) + val mk_mul = Thm.mk_binop (Thm.cterm_of @{context} @{const times (real)}) + val mk_div = Thm.mk_binop (Thm.cterm_of @{context} @{const divide (real)}) + val mk_lt = Thm.mk_binop (Thm.cterm_of @{context} @{const less (real)}) + val mk_le = Thm.mk_binop (Thm.cterm_of @{context} @{const less_eq (real)}) + + fun z3_mk_builtin_fun (Old_Z3_Interface.Sym ("-", _)) [ct] = SOME (mk_uminus ct) + | z3_mk_builtin_fun (Old_Z3_Interface.Sym ("+", _)) cts = + SOME (mk_nary add real0 cts) + | z3_mk_builtin_fun (Old_Z3_Interface.Sym ("-", _)) [ct, cu] = + SOME (mk_sub ct cu) + | z3_mk_builtin_fun (Old_Z3_Interface.Sym ("*", _)) [ct, cu] = + SOME (mk_mul ct cu) + | z3_mk_builtin_fun (Old_Z3_Interface.Sym ("/", _)) [ct, cu] = + SOME (mk_div ct cu) + | z3_mk_builtin_fun (Old_Z3_Interface.Sym ("<", _)) [ct, cu] = + SOME (mk_lt ct cu) + | z3_mk_builtin_fun (Old_Z3_Interface.Sym ("<=", _)) [ct, cu] = + SOME (mk_le ct cu) + | z3_mk_builtin_fun (Old_Z3_Interface.Sym (">", _)) [ct, cu] = + SOME (mk_lt cu ct) + | z3_mk_builtin_fun (Old_Z3_Interface.Sym (">=", _)) [ct, cu] = + SOME (mk_le cu ct) + | z3_mk_builtin_fun _ _ = NONE +in + +val z3_mk_builtins = { + mk_builtin_typ = z3_mk_builtin_typ, + mk_builtin_num = z3_mk_builtin_num, + mk_builtin_fun = (fn _ => fn sym => fn cts => + (case try (Thm.typ_of_cterm o hd) cts of + SOME @{typ real} => z3_mk_builtin_fun sym cts + | _ => NONE)) } + +end + + +(* Z3 proof reconstruction *) + +val real_rules = @{lemma + "0 + (x::real) = x" + "x + 0 = x" + "0 * x = 0" + "1 * x = x" + "x + y = y + x" + by auto} + +val real_linarith_proc = + Simplifier.make_simproc @{context} "fast_real_arith" + {lhss = [@{term "(m::real) < n"}, @{term "(m::real) \ n"}, @{term "(m::real) = n"}], + proc = K Lin_Arith.simproc} + + +(* setup *) + +val _ = + Theory.setup + (Context.theory_map ( + Old_SMTLIB_Interface.add_logic (10, smtlib_logic) #> + setup_builtins #> + Old_Z3_Interface.add_mk_builtins z3_mk_builtins #> + fold 
Old_Z3_Proof_Reconstruction.add_z3_rule real_rules #> + Old_Z3_Proof_Tools.add_simproc real_linarith_proc)) + +end diff --git a/src/main/Old_SMT/old_smt_setup_solvers.ML b/src/main/Old_SMT/old_smt_setup_solvers.ML new file mode 100644 index 0000000..15e01db --- /dev/null +++ b/src/main/Old_SMT/old_smt_setup_solvers.ML @@ -0,0 +1,189 @@ +(* Title: HOL/Library/Old_SMT/old_smt_setup_solvers.ML + Author: Sascha Boehme, TU Muenchen + +Setup SMT solvers. +*) + +signature OLD_SMT_SETUP_SOLVERS = +sig + datatype z3_non_commercial = + Z3_Non_Commercial_Unknown | + Z3_Non_Commercial_Accepted | + Z3_Non_Commercial_Declined + val z3_non_commercial: unit -> z3_non_commercial + val z3_with_extensions: bool Config.T + val setup: theory -> theory +end + +structure Old_SMT_Setup_Solvers: OLD_SMT_SETUP_SOLVERS = +struct + +(* helper functions *) + +fun make_avail name () = getenv ("OLD_" ^ name ^ "_SOLVER") <> "" +fun make_command name () = [getenv ("OLD_" ^ name ^ "_SOLVER")] + +fun outcome_of unsat sat unknown solver_name line = + if String.isPrefix unsat line then Old_SMT_Solver.Unsat + else if String.isPrefix sat line then Old_SMT_Solver.Sat + else if String.isPrefix unknown line then Old_SMT_Solver.Unknown + else raise Old_SMT_Failure.SMT (Old_SMT_Failure.Other_Failure ("Solver " ^ + quote solver_name ^ " failed. 
Enable SMT tracing by setting the " ^ + "configuration option " ^ quote (Config.name_of Old_SMT_Config.trace) ^ " and " ^ + "see the trace for details.")) + +fun on_first_line test_outcome solver_name lines = + let + val empty_line = (fn "" => true | _ => false) + val split_first = (fn [] => ("", []) | l :: ls => (l, ls)) + val (l, ls) = split_first (snd (take_prefix empty_line lines)) + in (test_outcome solver_name l, ls) end + + +(* CVC3 *) + +local + fun cvc3_options ctxt = [ + "-seed", string_of_int (Config.get ctxt Old_SMT_Config.random_seed), + "-lang", "smtlib", "-output-lang", "presentation", + "-timeout", string_of_int (Real.ceil (Config.get ctxt Old_SMT_Config.timeout))] +in + +val cvc3: Old_SMT_Solver.solver_config = { + name = "cvc3", + class = K Old_SMTLIB_Interface.smtlibC, + avail = make_avail "CVC3", + command = make_command "CVC3", + options = cvc3_options, + default_max_relevant = 400 (* FUDGE *), + supports_filter = false, + outcome = + on_first_line (outcome_of "Unsatisfiable." "Satisfiable." 
"Unknown."), + cex_parser = NONE, + reconstruct = NONE } + +end + + +(* Yices *) + +val yices: Old_SMT_Solver.solver_config = { + name = "yices", + class = K Old_SMTLIB_Interface.smtlibC, + avail = make_avail "YICES", + command = make_command "YICES", + options = (fn ctxt => [ + "--rand-seed=" ^ string_of_int (Config.get ctxt Old_SMT_Config.random_seed), + "--timeout=" ^ + string_of_int (Real.ceil (Config.get ctxt Old_SMT_Config.timeout)), + "--smtlib"]), + default_max_relevant = 350 (* FUDGE *), + supports_filter = false, + outcome = on_first_line (outcome_of "unsat" "sat" "unknown"), + cex_parser = NONE, + reconstruct = NONE } + + +(* Z3 *) + +datatype z3_non_commercial = + Z3_Non_Commercial_Unknown | + Z3_Non_Commercial_Accepted | + Z3_Non_Commercial_Declined + + +local + val accepted = member (op =) ["yes", "Yes", "YES"] + val declined = member (op =) ["no", "No", "NO"] +in + +fun z3_non_commercial () = + let + val flag2 = getenv "OLD_Z3_NON_COMMERCIAL" + in + if accepted flag2 then Z3_Non_Commercial_Accepted + else if declined flag2 then Z3_Non_Commercial_Declined + else Z3_Non_Commercial_Unknown + end + +fun if_z3_non_commercial f = + (case z3_non_commercial () of + Z3_Non_Commercial_Accepted => f () + | Z3_Non_Commercial_Declined => + error (Pretty.string_of (Pretty.para + "The SMT solver Z3 may only be used for non-commercial applications.")) + | Z3_Non_Commercial_Unknown => + error + (Pretty.string_of + (Pretty.para + ("The SMT solver Z3 is not activated. To activate it, set the Isabelle \ + \system option \"z3_non_commercial\" to \"yes\" (e.g. 
via \ + \the Isabelle/jEdit menu Plugin Options / Isabelle / General).")) ^ + "\n\nSee also " ^ Url.print (Url.explode "http://z3.codeplex.com/license"))) + +end + + +val z3_with_extensions = + Attrib.setup_config_bool @{binding old_z3_with_extensions} (K false) + +local + fun z3_make_command name () = if_z3_non_commercial (make_command name) + + fun z3_options ctxt = + ["-rs:" ^ string_of_int (Config.get ctxt Old_SMT_Config.random_seed), + "MODEL=true", + "SOFT_TIMEOUT=" ^ + string_of_int (Real.ceil (1000.0 * Config.get ctxt Old_SMT_Config.timeout)), + "-smt"] + |> not (Config.get ctxt Old_SMT_Config.oracle) ? + append ["DISPLAY_PROOF=true", "PROOF_MODE=2"] + + fun z3_on_first_or_last_line solver_name lines = + let + fun junk l = + if String.isPrefix "WARNING: Out of allocated virtual memory" l + then raise Old_SMT_Failure.SMT Old_SMT_Failure.Out_Of_Memory + else + String.isPrefix "WARNING" l orelse + String.isPrefix "ERROR" l orelse + forall Symbol.is_ascii_blank (Symbol.explode l) + val lines = filter_out junk lines + fun outcome split = + the_default ("", []) (try split lines) + |>> outcome_of "unsat" "sat" "unknown" solver_name + in + (* Starting with version 4.0, Z3 puts the outcome on the first line of the + output rather than on the last line. 
*) + outcome (fn lines => (hd lines, tl lines)) + handle Old_SMT_Failure.SMT _ => outcome (swap o split_last) + end + + fun select_class ctxt = + if Config.get ctxt z3_with_extensions then Old_Z3_Interface.smtlib_z3C + else Old_SMTLIB_Interface.smtlibC +in + +val z3: Old_SMT_Solver.solver_config = { + name = "z3", + class = select_class, + avail = make_avail "Z3", + command = z3_make_command "Z3", + options = z3_options, + default_max_relevant = 350 (* FUDGE *), + supports_filter = true, + outcome = z3_on_first_or_last_line, + cex_parser = SOME Old_Z3_Model.parse_counterex, + reconstruct = SOME Old_Z3_Proof_Reconstruction.reconstruct } + +end + + +(* overall setup *) + +val setup = + Old_SMT_Solver.add_solver cvc3 #> + Old_SMT_Solver.add_solver yices #> + Old_SMT_Solver.add_solver z3 + +end diff --git a/src/main/Old_SMT/old_smt_solver.ML b/src/main/Old_SMT/old_smt_solver.ML new file mode 100644 index 0000000..da7b8e4 --- /dev/null +++ b/src/main/Old_SMT/old_smt_solver.ML @@ -0,0 +1,374 @@ +(* Title: HOL/Library/Old_SMT/old_smt_solver.ML + Author: Sascha Boehme, TU Muenchen + +SMT solvers registry and SMT tactic. 
+*) + +signature OLD_SMT_SOLVER = +sig + (*configuration*) + datatype outcome = Unsat | Sat | Unknown + type solver_config = { + name: string, + class: Proof.context -> Old_SMT_Utils.class, + avail: unit -> bool, + command: unit -> string list, + options: Proof.context -> string list, + default_max_relevant: int, + supports_filter: bool, + outcome: string -> string list -> outcome * string list, + cex_parser: (Proof.context -> Old_SMT_Translate.recon -> string list -> + term list * term list) option, + reconstruct: (Proof.context -> Old_SMT_Translate.recon -> string list -> + int list * thm) option } + + (*registry*) + val add_solver: solver_config -> theory -> theory + val solver_name_of: Proof.context -> string + val available_solvers_of: Proof.context -> string list + val apply_solver: Proof.context -> (int * (int option * thm)) list -> + int list * thm + val default_max_relevant: Proof.context -> string -> int + + (*filter*) + type 'a smt_filter_data = + ('a * thm) list * ((int * thm) list * Proof.context) + val smt_filter_preprocess: Proof.context -> thm list -> thm -> + ('a * (int option * thm)) list -> int -> 'a smt_filter_data + val smt_filter_apply: Time.time -> 'a smt_filter_data -> + {outcome: Old_SMT_Failure.failure option, used_facts: ('a * thm) list} + + (*tactic*) + val smt_tac: Proof.context -> thm list -> int -> tactic + val smt_tac': Proof.context -> thm list -> int -> tactic +end + +structure Old_SMT_Solver: OLD_SMT_SOLVER = +struct + + +(* interface to external solvers *) + +local + +fun make_cmd command options problem_path proof_path = + space_implode " " + ("(exec 2>&1;" :: map Bash.string (command () @ options) @ + [File.bash_path problem_path, ")", ">", File.bash_path proof_path]) + +fun trace_and ctxt msg f x = + let val _ = Old_SMT_Config.trace_msg ctxt (fn () => msg) () + in f x end + +fun run ctxt name mk_cmd input = + (case Old_SMT_Config.certificates_of ctxt of + NONE => + if not (Old_SMT_Config.is_available ctxt name) then + error 
("The SMT solver " ^ quote name ^ " is not installed.") + else if Config.get ctxt Old_SMT_Config.debug_files = "" then + trace_and ctxt ("Invoking SMT solver " ^ quote name ^ " ...") + (Cache_IO.run mk_cmd) input + else + let + val base_path = Path.explode (Config.get ctxt Old_SMT_Config.debug_files) + val in_path = Path.ext "smt_in" base_path + val out_path = Path.ext "smt_out" base_path + in Cache_IO.raw_run mk_cmd input in_path out_path end + | SOME certs => + (case Cache_IO.lookup certs input of + (NONE, key) => + if Config.get ctxt Old_SMT_Config.read_only_certificates then + error ("Bad certificate cache: missing certificate") + else + Cache_IO.run_and_cache certs key mk_cmd input + | (SOME output, _) => + trace_and ctxt ("Using cached certificate from " ^ + Path.print (Cache_IO.cache_path_of certs) ^ " ...") + I output)) + +fun run_solver ctxt name mk_cmd input = + let + fun pretty tag ls = Pretty.string_of (Pretty.big_list tag + (map Pretty.str ls)) + + val _ = Old_SMT_Config.trace_msg ctxt (pretty "Problem:" o split_lines) input + + val {redirected_output=res, output=err, return_code} = + Old_SMT_Config.with_timeout ctxt (run ctxt name mk_cmd) input + val _ = Old_SMT_Config.trace_msg ctxt (pretty "Solver:") err + + val ls = fst (take_suffix (equal "") res) + val _ = Old_SMT_Config.trace_msg ctxt (pretty "Result:") ls + + val _ = return_code <> 0 andalso + raise Old_SMT_Failure.SMT (Old_SMT_Failure.Abnormal_Termination return_code) + in ls end + +fun trace_assms ctxt = + Old_SMT_Config.trace_msg ctxt (Pretty.string_of o + Pretty.big_list "Assertions:" o map (Thm.pretty_thm ctxt o snd)) + +fun trace_recon_data ({context=ctxt, typs, terms, ...} : Old_SMT_Translate.recon) = + let + fun pretty_eq n p = Pretty.block [Pretty.str n, Pretty.str " = ", p] + fun p_typ (n, T) = pretty_eq n (Syntax.pretty_typ ctxt T) + fun p_term (n, t) = pretty_eq n (Syntax.pretty_term ctxt t) + in + Old_SMT_Config.trace_msg ctxt (fn () => + Pretty.string_of (Pretty.big_list "Names:" 
[ + Pretty.big_list "sorts:" (map p_typ (Symtab.dest typs)), + Pretty.big_list "functions:" (map p_term (Symtab.dest terms))])) () + end + +in + +fun invoke name command ithms ctxt = + let + val options = Old_SMT_Config.solver_options_of ctxt + val comments = ("solver: " ^ name) :: + ("timeout: " ^ string_of_real (Config.get ctxt Old_SMT_Config.timeout)) :: + ("random seed: " ^ + string_of_int (Config.get ctxt Old_SMT_Config.random_seed)) :: + "arguments:" :: options + + val (str, recon as {context=ctxt', ...}) = + ithms + |> tap (trace_assms ctxt) + |> Old_SMT_Translate.translate ctxt comments + ||> tap trace_recon_data + in (run_solver ctxt' name (make_cmd command options) str, recon) end + +end + + +(* configuration *) + +datatype outcome = Unsat | Sat | Unknown + +type solver_config = { + name: string, + class: Proof.context -> Old_SMT_Utils.class, + avail: unit -> bool, + command: unit -> string list, + options: Proof.context -> string list, + default_max_relevant: int, + supports_filter: bool, + outcome: string -> string list -> outcome * string list, + cex_parser: (Proof.context -> Old_SMT_Translate.recon -> string list -> + term list * term list) option, + reconstruct: (Proof.context -> Old_SMT_Translate.recon -> string list -> + int list * thm) option } + + +(* registry *) + +type solver_info = { + command: unit -> string list, + default_max_relevant: int, + supports_filter: bool, + reconstruct: Proof.context -> string list * Old_SMT_Translate.recon -> + int list * thm } + +structure Solvers = Generic_Data +( + type T = solver_info Symtab.table + val empty = Symtab.empty + val extend = I + fun merge data = Symtab.merge (K true) data +) + +local + fun finish outcome cex_parser reconstruct ocl outer_ctxt + (output, (recon as {context=ctxt, ...} : Old_SMT_Translate.recon)) = + (case outcome output of + (Unsat, ls) => + if not (Config.get ctxt Old_SMT_Config.oracle) andalso is_some reconstruct + then the reconstruct outer_ctxt recon ls + else ([], ocl ()) + | 
(result, ls) => + let + val (ts, us) = + (case cex_parser of SOME f => f ctxt recon ls | _ => ([], [])) + in + raise Old_SMT_Failure.SMT (Old_SMT_Failure.Counterexample { + is_real_cex = (result = Sat), + free_constraints = ts, + const_defs = us}) + end) + + val cfalse = Thm.cterm_of @{context} (@{const Trueprop} $ @{const False}) +in + +fun add_solver cfg = + let + val {name, class, avail, command, options, default_max_relevant, + supports_filter, outcome, cex_parser, reconstruct} = cfg + + fun core_oracle () = cfalse + + fun solver ocl = { + command = command, + default_max_relevant = default_max_relevant, + supports_filter = supports_filter, + reconstruct = finish (outcome name) cex_parser reconstruct ocl } + + val info = {name=name, class=class, avail=avail, options=options} + in + Thm.add_oracle (Binding.name name, core_oracle) #-> (fn (_, ocl) => + Context.theory_map (Solvers.map (Symtab.update_new (name, solver ocl)))) #> + Context.theory_map (Old_SMT_Config.add_solver info) + end + +end + +fun get_info ctxt name = + the (Symtab.lookup (Solvers.get (Context.Proof ctxt)) name) + +val solver_name_of = Old_SMT_Config.solver_of + +val available_solvers_of = Old_SMT_Config.available_solvers_of + +fun name_and_info_of ctxt = + let val name = solver_name_of ctxt + in (name, get_info ctxt name) end + +fun gen_preprocess ctxt iwthms = Old_SMT_Normalize.normalize iwthms ctxt + +fun gen_apply (ithms, ctxt) = + let val (name, {command, reconstruct, ...}) = name_and_info_of ctxt + in + (ithms, ctxt) + |-> invoke name command + |> reconstruct ctxt + |>> distinct (op =) + end + +fun apply_solver ctxt = gen_apply o gen_preprocess ctxt + +val default_max_relevant = #default_max_relevant oo get_info + +val supports_filter = #supports_filter o snd o name_and_info_of + + +(* check well-sortedness *) + +val has_topsort = Term.exists_type (Term.exists_subtype (fn + TFree (_, []) => true + | TVar (_, []) => true + | _ => false)) + +(* without this test, we would run into problems 
when atomizing the rules: *) +fun check_topsort ctxt thm = + if has_topsort (Thm.prop_of thm) then + (Old_SMT_Normalize.drop_fact_warning ctxt thm; TrueI) + else + thm + +fun check_topsorts ctxt iwthms = map (apsnd (apsnd (check_topsort ctxt))) iwthms + + +(* filter *) + +val cnot = Thm.cterm_of @{context} @{const Not} + +fun mk_result outcome xrules = { outcome = outcome, used_facts = xrules } + +type 'a smt_filter_data = ('a * thm) list * ((int * thm) list * Proof.context) + +fun smt_filter_preprocess ctxt facts goal xwthms i = + let + val ctxt = + ctxt + |> Config.put Old_SMT_Config.oracle false + |> Config.put Old_SMT_Config.filter_only_facts true + + val ({context=ctxt', prems, concl, ...}, _) = Subgoal.focus ctxt i NONE goal + fun negate ct = Thm.dest_comb ct ||> Thm.apply cnot |-> Thm.apply + val cprop = + (case try negate (Thm.rhs_of (Old_SMT_Normalize.atomize_conv ctxt' concl)) of + SOME ct => ct + | NONE => raise Old_SMT_Failure.SMT (Old_SMT_Failure.Other_Failure ( + "goal is not a HOL term"))) + in + map snd xwthms + |> map_index I + |> append (map (pair ~1 o pair NONE) (Thm.assume cprop :: prems @ facts)) + |> check_topsorts ctxt' + |> gen_preprocess ctxt' + |> pair (map (apsnd snd) xwthms) + end + +fun smt_filter_apply time_limit (xthms, (ithms, ctxt)) = + let + val ctxt' = + ctxt + |> Config.put Old_SMT_Config.timeout (Time.toReal time_limit) + + fun filter_thms false = K xthms + | filter_thms true = map_filter (try (nth xthms)) o fst + in + (ithms, ctxt') + |> gen_apply + |> filter_thms (supports_filter ctxt') + |> mk_result NONE + end + handle Old_SMT_Failure.SMT fail => mk_result (SOME fail) [] + + +(* SMT tactic *) + +local + fun trace_assumptions ctxt iwthms idxs = + let + val wthms = + idxs + |> filter (fn i => i >= 0) + |> map_filter (AList.lookup (op =) iwthms) + in + if Config.get ctxt Old_SMT_Config.trace_used_facts andalso length wthms > 0 + then + tracing (Pretty.string_of (Pretty.big_list "SMT used facts:" + (map (Thm.pretty_thm ctxt o 
snd) wthms))) + else () + end + + fun solve ctxt iwthms = + iwthms + |> check_topsorts ctxt + |> apply_solver ctxt + |>> trace_assumptions ctxt iwthms + |> snd + + fun str_of ctxt fail = + Old_SMT_Failure.string_of_failure ctxt fail + |> prefix ("Solver " ^ Old_SMT_Config.solver_of ctxt ^ ": ") + + fun safe_solve ctxt iwthms = SOME (solve ctxt iwthms) + handle + Old_SMT_Failure.SMT (fail as Old_SMT_Failure.Counterexample _) => + (Old_SMT_Config.verbose_msg ctxt (str_of ctxt) fail; NONE) + | Old_SMT_Failure.SMT (fail as Old_SMT_Failure.Time_Out) => + error ("SMT: Solver " ^ quote (Old_SMT_Config.solver_of ctxt) ^ ": " ^ + Old_SMT_Failure.string_of_failure ctxt fail ^ " (setting the " ^ + "configuration option " ^ quote (Config.name_of Old_SMT_Config.timeout) ^ " might help)") + | Old_SMT_Failure.SMT fail => error (str_of ctxt fail) + + fun tag_rules thms = map_index (apsnd (pair NONE)) thms + fun tag_prems thms = map (pair ~1 o pair NONE) thms + + fun resolve ctxt (SOME thm) = resolve_tac ctxt [thm] 1 + | resolve _ NONE = no_tac + + fun tac prove ctxt rules = + CONVERSION (Old_SMT_Normalize.atomize_conv ctxt) + THEN' resolve_tac ctxt @{thms ccontr} + THEN' SUBPROOF (fn {context = ctxt', prems, ...} => + resolve ctxt' (prove ctxt' (tag_rules rules @ tag_prems prems))) ctxt +in + +val smt_tac = tac safe_solve +val smt_tac' = tac (SOME oo solve) + +end + +end diff --git a/src/main/Old_SMT/old_smt_translate.ML b/src/main/Old_SMT/old_smt_translate.ML new file mode 100644 index 0000000..ab4a2a2 --- /dev/null +++ b/src/main/Old_SMT/old_smt_translate.ML @@ -0,0 +1,589 @@ +(* Title: HOL/Library/Old_SMT/old_smt_translate.ML + Author: Sascha Boehme, TU Muenchen + +Translate theorems into an SMT intermediate format and serialize them. 
+*) + +signature OLD_SMT_TRANSLATE = +sig + (*intermediate term structure*) + datatype squant = SForall | SExists + datatype 'a spattern = SPat of 'a list | SNoPat of 'a list + datatype sterm = + SVar of int | + SApp of string * sterm list | + SLet of string * sterm * sterm | + SQua of squant * string list * sterm spattern list * int option * sterm + + (*translation configuration*) + type prefixes = {sort_prefix: string, func_prefix: string} + type sign = { + header: string list, + sorts: string list, + dtyps: (string * (string * (string * string) list) list) list list, + funcs: (string * (string list * string)) list } + type config = { + prefixes: prefixes, + header: term list -> string list, + is_fol: bool, + has_datatypes: bool, + serialize: string list -> sign -> sterm list -> string } + type recon = { + context: Proof.context, + typs: typ Symtab.table, + terms: term Symtab.table, + rewrite_rules: thm list, + assms: (int * thm) list } + + (*translation*) + val add_config: Old_SMT_Utils.class * (Proof.context -> config) -> + Context.generic -> Context.generic + val translate: Proof.context -> string list -> (int * thm) list -> + string * recon +end + +structure Old_SMT_Translate: OLD_SMT_TRANSLATE = +struct + + +(* intermediate term structure *) + +datatype squant = SForall | SExists + +datatype 'a spattern = SPat of 'a list | SNoPat of 'a list + +datatype sterm = + SVar of int | + SApp of string * sterm list | + SLet of string * sterm * sterm | + SQua of squant * string list * sterm spattern list * int option * sterm + + + +(* translation configuration *) + +type prefixes = {sort_prefix: string, func_prefix: string} + +type sign = { + header: string list, + sorts: string list, + dtyps: (string * (string * (string * string) list) list) list list, + funcs: (string * (string list * string)) list } + +type config = { + prefixes: prefixes, + header: term list -> string list, + is_fol: bool, + has_datatypes: bool, + serialize: string list -> sign -> sterm list -> 
string } + +type recon = { + context: Proof.context, + typs: typ Symtab.table, + terms: term Symtab.table, + rewrite_rules: thm list, + assms: (int * thm) list } + + + +(* translation context *) + +fun make_tr_context {sort_prefix, func_prefix} = + (sort_prefix, 1, Typtab.empty, func_prefix, 1, Termtab.empty) + +fun string_of_index pre i = pre ^ string_of_int i + +fun add_typ T proper (cx as (sp, Tidx, typs, fp, idx, terms)) = + (case Typtab.lookup typs T of + SOME (n, _) => (n, cx) + | NONE => + let + val n = string_of_index sp Tidx + val typs' = Typtab.update (T, (n, proper)) typs + in (n, (sp, Tidx+1, typs', fp, idx, terms)) end) + +fun add_fun t sort (cx as (sp, Tidx, typs, fp, idx, terms)) = + (case Termtab.lookup terms t of + SOME (n, _) => (n, cx) + | NONE => + let + val n = string_of_index fp idx + val terms' = Termtab.update (t, (n, sort)) terms + in (n, (sp, Tidx, typs, fp, idx+1, terms')) end) + +fun sign_of header dtyps (_, _, typs, _, _, terms) = { + header = header, + sorts = Typtab.fold (fn (_, (n, true)) => cons n | _ => I) typs [], + dtyps = dtyps, + funcs = Termtab.fold (fn (_, (n, SOME ss)) => cons (n,ss) | _ => I) terms []} + +fun recon_of ctxt rules thms ithms (_, _, typs, _, _, terms) = + let + fun add_typ (T, (n, _)) = Symtab.update (n, T) + val typs' = Typtab.fold add_typ typs Symtab.empty + + fun add_fun (t, (n, _)) = Symtab.update (n, t) + val terms' = Termtab.fold add_fun terms Symtab.empty + + val assms = map (pair ~1) thms @ ithms + in + {context=ctxt, typs=typs', terms=terms', rewrite_rules=rules, assms=assms} + end + + + +(* preprocessing *) + +(** datatype declarations **) + +fun collect_datatypes_and_records (tr_context, ctxt) ts = + let + val (declss, ctxt') = + fold (Term.fold_types Old_SMT_Datatypes.add_decls) ts ([], ctxt) + + fun is_decl_typ T = exists (exists (equal T o fst)) declss + + fun add_typ' T proper = + (case Old_SMT_Builtin.dest_builtin_typ ctxt' T of + SOME n => pair n + | NONE => add_typ T proper) + + fun tr_select 
sel = + let val T = Term.range_type (Term.fastype_of sel) + in add_fun sel NONE ##>> add_typ' T (not (is_decl_typ T)) end + fun tr_constr (constr, selects) = + add_fun constr NONE ##>> fold_map tr_select selects + fun tr_typ (T, cases) = add_typ' T false ##>> fold_map tr_constr cases + val (declss', tr_context') = fold_map (fold_map tr_typ) declss tr_context + + fun add (constr, selects) = + Termtab.update (constr, length selects) #> + fold (Termtab.update o rpair 1) selects + val funcs = fold (fold (fold add o snd)) declss Termtab.empty + in ((funcs, declss', tr_context', ctxt'), ts) end + (* FIXME: also return necessary datatype and record theorems *) + + +(** eta-expand quantifiers, let expressions and built-ins *) + +local + fun eta f T t = Abs (Name.uu, T, f (Term.incr_boundvars 1 t $ Bound 0)) + + fun exp f T = eta f (Term.domain_type (Term.domain_type T)) + + fun exp2 T q = + let val U = Term.domain_type T + in Abs (Name.uu, U, q $ eta I (Term.domain_type U) (Bound 0)) end + + fun exp2' T l = + let val (U1, U2) = Term.dest_funT T ||> Term.domain_type + in Abs (Name.uu, U1, eta I U2 (l $ Bound 0)) end + + fun expf k i T t = + let val Ts = drop i (fst (Old_SMT_Utils.dest_funT k T)) + in + Term.incr_boundvars (length Ts) t + |> fold_rev (fn i => fn u => u $ Bound i) (0 upto length Ts - 1) + |> fold_rev (fn T => fn u => Abs (Name.uu, T, u)) Ts + end +in + +fun eta_expand ctxt is_fol funcs = + let + fun exp_func t T ts = + (case Termtab.lookup funcs t of + SOME k => + Term.list_comb (t, ts) + |> k <> length ts ? 
expf k (length ts) T + | NONE => Term.list_comb (t, ts)) + + fun expand ((q as Const (@{const_name All}, _)) $ Abs a) = q $ abs_expand a + | expand ((q as Const (@{const_name All}, T)) $ t) = q $ exp expand T t + | expand (q as Const (@{const_name All}, T)) = exp2 T q + | expand ((q as Const (@{const_name Ex}, _)) $ Abs a) = q $ abs_expand a + | expand ((q as Const (@{const_name Ex}, T)) $ t) = q $ exp expand T t + | expand (q as Const (@{const_name Ex}, T)) = exp2 T q + | expand ((l as Const (@{const_name Let}, _)) $ t $ Abs a) = + if is_fol then expand (Term.betapply (Abs a, t)) + else l $ expand t $ abs_expand a + | expand ((l as Const (@{const_name Let}, T)) $ t $ u) = + if is_fol then expand (u $ t) + else l $ expand t $ exp expand (Term.range_type T) u + | expand ((l as Const (@{const_name Let}, T)) $ t) = + if is_fol then + let val U = Term.domain_type (Term.range_type T) + in Abs (Name.uu, U, Bound 0 $ Term.incr_boundvars 1 t) end + else exp2 T (l $ expand t) + | expand (l as Const (@{const_name Let}, T)) = + if is_fol then + let val U = Term.domain_type (Term.range_type T) + in + Abs (Name.uu, Term.domain_type T, Abs (Name.uu, U, + Bound 0 $ Bound 1)) + end + else exp2' T l + | expand t = + (case Term.strip_comb t of + (u as Const (c as (_, T)), ts) => + (case Old_SMT_Builtin.dest_builtin ctxt c ts of + SOME (_, k, us, mk) => + if k = length us then mk (map expand us) + else if k < length us then + chop k (map expand us) |>> mk |> Term.list_comb + else expf k (length ts) T (mk (map expand us)) + | NONE => exp_func u T (map expand ts)) + | (u as Free (_, T), ts) => exp_func u T (map expand ts) + | (Abs a, ts) => Term.list_comb (abs_expand a, map expand ts) + | (u, ts) => Term.list_comb (u, map expand ts)) + + and abs_expand (n, T, t) = Abs (n, T, expand t) + + in map expand end + +end + + +(** introduce explicit applications **) + +local + (* + Make application explicit for functions with varying number of arguments. 
+ *) + + fun add t i = apfst (Termtab.map_default (t, i) (Integer.min i)) + fun add_type T = apsnd (Typtab.update (T, ())) + + fun min_arities t = + (case Term.strip_comb t of + (u as Const _, ts) => add u (length ts) #> fold min_arities ts + | (u as Free _, ts) => add u (length ts) #> fold min_arities ts + | (Abs (_, T, u), ts) => add_type T #> min_arities u #> fold min_arities ts + | (_, ts) => fold min_arities ts) + + fun minimize types t i = + let + fun find_min j [] _ = j + | find_min j (U :: Us) T = + if Typtab.defined types T then j + else find_min (j + 1) Us (U --> T) + + val (Ts, T) = Term.strip_type (Term.type_of t) + in find_min 0 (take i (rev Ts)) T end + + fun app u (t, T) = + (Const (@{const_name fun_app}, T --> T) $ t $ u, Term.range_type T) + + fun apply i t T ts = + let + val (ts1, ts2) = chop i ts + val (_, U) = Old_SMT_Utils.dest_funT i T + in fst (fold app ts2 (Term.list_comb (t, ts1), U)) end +in + +fun intro_explicit_application ctxt funcs ts = + let + val (arities, types) = fold min_arities ts (Termtab.empty, Typtab.empty) + val arities' = Termtab.map (minimize types) arities + + fun app_func t T ts = + if is_some (Termtab.lookup funcs t) then Term.list_comb (t, ts) + else apply (the (Termtab.lookup arities' t)) t T ts + + fun in_list T f t = HOLogic.mk_list T (map f (HOLogic.dest_list t)) + + fun traverse Ts t = + (case Term.strip_comb t of + (q as Const (@{const_name All}, _), [Abs (x, T, u)]) => + q $ Abs (x, T, in_trigger (T :: Ts) u) + | (q as Const (@{const_name Ex}, _), [Abs (x, T, u)]) => + q $ Abs (x, T, in_trigger (T :: Ts) u) + | (q as Const (@{const_name Let}, _), [u1, u2 as Abs _]) => + q $ traverse Ts u1 $ traverse Ts u2 + | (u as Const (c as (_, T)), ts) => + (case Old_SMT_Builtin.dest_builtin ctxt c ts of + SOME (_, k, us, mk) => + let + val (ts1, ts2) = chop k (map (traverse Ts) us) + val U = Term.strip_type T |>> snd o chop k |> (op --->) + in apply 0 (mk ts1) U ts2 end + | NONE => app_func u T (map (traverse Ts) ts)) + | (u 
as Free (_, T), ts) => app_func u T (map (traverse Ts) ts) + | (u as Bound i, ts) => apply 0 u (nth Ts i) (map (traverse Ts) ts) + | (Abs (n, T, u), ts) => traverses Ts (Abs (n, T, traverse (T::Ts) u)) ts + | (u, ts) => traverses Ts u ts) + and in_trigger Ts ((c as @{const trigger}) $ p $ t) = + c $ in_pats Ts p $ in_weight Ts t + | in_trigger Ts t = in_weight Ts t + and in_pats Ts ps = + in_list @{typ "pattern list"} + (in_list @{typ pattern} (in_pat Ts)) ps + and in_pat Ts ((p as Const (@{const_name pat}, _)) $ t) = + p $ traverse Ts t + | in_pat Ts ((p as Const (@{const_name nopat}, _)) $ t) = + p $ traverse Ts t + | in_pat _ t = raise TERM ("bad pattern", [t]) + and in_weight Ts ((c as @{const weight}) $ w $ t) = + c $ w $ traverse Ts t + | in_weight Ts t = traverse Ts t + and traverses Ts t ts = Term.list_comb (t, map (traverse Ts) ts) + in map (traverse []) ts end + +val fun_app_eq = mk_meta_eq @{thm fun_app_def} + +end + + +(** map HOL formulas to FOL formulas (i.e., separate formulas froms terms) **) + +local + val term_bool = @{lemma "term_true ~= term_false" + by (simp add: term_true_def term_false_def)} + + val is_quant = member (op =) [@{const_name All}, @{const_name Ex}] + + val fol_rules = [ + Let_def, + mk_meta_eq @{thm term_true_def}, + mk_meta_eq @{thm term_false_def}, + @{lemma "P = True == P" by (rule eq_reflection) simp}, + @{lemma "if P then True else False == P" by (rule eq_reflection) simp}] + + fun as_term t = @{const HOL.eq (bool)} $ t $ @{const term_true} + + exception BAD_PATTERN of unit + + fun wrap_in_if pat t = + if pat then + raise BAD_PATTERN () + else + @{const If (bool)} $ t $ @{const term_true} $ @{const term_false} + + fun is_builtin_conn_or_pred ctxt c ts = + is_some (Old_SMT_Builtin.dest_builtin_conn ctxt c ts) orelse + is_some (Old_SMT_Builtin.dest_builtin_pred ctxt c ts) + + fun builtin b ctxt c ts = + (case (Const c, ts) of + (@{const HOL.eq (bool)}, [t, u]) => + if t = @{const term_true} orelse u = @{const term_true} then + 
Old_SMT_Builtin.dest_builtin_eq ctxt t u + else b ctxt c ts + | _ => b ctxt c ts) +in + +fun folify ctxt = + let + fun in_list T f t = HOLogic.mk_list T (map_filter f (HOLogic.dest_list t)) + + fun in_term pat t = + (case Term.strip_comb t of + (@{const True}, []) => @{const term_true} + | (@{const False}, []) => @{const term_false} + | (u as Const (@{const_name If}, _), [t1, t2, t3]) => + if pat then raise BAD_PATTERN () + else u $ in_form t1 $ in_term pat t2 $ in_term pat t3 + | (Const (c as (n, _)), ts) => + if is_builtin_conn_or_pred ctxt c ts then wrap_in_if pat (in_form t) + else if is_quant n then wrap_in_if pat (in_form t) + else Term.list_comb (Const c, map (in_term pat) ts) + | (Free c, ts) => Term.list_comb (Free c, map (in_term pat) ts) + | _ => t) + + and in_weight ((c as @{const weight}) $ w $ t) = c $ w $ in_form t + | in_weight t = in_form t + + and in_pat ((p as Const (@{const_name pat}, _)) $ t) = + p $ in_term true t + | in_pat ((p as Const (@{const_name nopat}, _)) $ t) = + p $ in_term true t + | in_pat t = raise TERM ("bad pattern", [t]) + + and in_pats ps = + in_list @{typ "pattern list"} + (SOME o in_list @{typ pattern} (try in_pat)) ps + + and in_trigger ((c as @{const trigger}) $ p $ t) = + c $ in_pats p $ in_weight t + | in_trigger t = in_weight t + + and in_form t = + (case Term.strip_comb t of + (q as Const (qn, _), [Abs (n, T, u)]) => + if is_quant qn then q $ Abs (n, T, in_trigger u) + else as_term (in_term false t) + | (Const c, ts) => + (case Old_SMT_Builtin.dest_builtin_conn ctxt c ts of + SOME (_, _, us, mk) => mk (map in_form us) + | NONE => + (case Old_SMT_Builtin.dest_builtin_pred ctxt c ts of + SOME (_, _, us, mk) => mk (map (in_term false) us) + | NONE => as_term (in_term false t))) + | _ => as_term (in_term false t)) + in + map in_form #> + cons (Old_SMT_Utils.prop_of term_bool) #> + pair (fol_rules, [term_bool], builtin) + end + +end + + +(* translation into intermediate format *) + +(** utility functions **) + +val 
quantifier = (fn + @{const_name All} => SOME SForall + | @{const_name Ex} => SOME SExists + | _ => NONE) + +fun group_quant qname Ts (t as Const (q, _) $ Abs (_, T, u)) = + if q = qname then group_quant qname (T :: Ts) u else (Ts, t) + | group_quant _ Ts t = (Ts, t) + +fun dest_weight (@{const weight} $ w $ t) = + (SOME (snd (HOLogic.dest_number w)), t) + | dest_weight t = (NONE, t) + +fun dest_pat (Const (@{const_name pat}, _) $ t) = (t, true) + | dest_pat (Const (@{const_name nopat}, _) $ t) = (t, false) + | dest_pat t = raise TERM ("bad pattern", [t]) + +fun dest_pats [] = I + | dest_pats ts = + (case map dest_pat ts |> split_list ||> distinct (op =) of + (ps, [true]) => cons (SPat ps) + | (ps, [false]) => cons (SNoPat ps) + | _ => raise TERM ("bad multi-pattern", ts)) + +fun dest_trigger (@{const trigger} $ tl $ t) = + (rev (fold (dest_pats o HOLogic.dest_list) (HOLogic.dest_list tl) []), t) + | dest_trigger t = ([], t) + +fun dest_quant qn T t = quantifier qn |> Option.map (fn q => + let + val (Ts, u) = group_quant qn [T] t + val (ps, p) = dest_trigger u + val (w, b) = dest_weight p + in (q, rev Ts, ps, w, b) end) + +fun fold_map_pat f (SPat ts) = fold_map f ts #>> SPat + | fold_map_pat f (SNoPat ts) = fold_map f ts #>> SNoPat + + +(** translation from Isabelle terms into SMT intermediate terms **) + +fun intermediate header dtyps builtin ctxt ts trx = + let + fun transT (T as TFree _) = add_typ T true + | transT (T as TVar _) = (fn _ => raise TYPE ("bad SMT type", [T], [])) + | transT (T as Type _) = + (case Old_SMT_Builtin.dest_builtin_typ ctxt T of + SOME n => pair n + | NONE => add_typ T true) + + fun app n ts = SApp (n, ts) + + fun trans t = + (case Term.strip_comb t of + (Const (qn, _), [Abs (_, T, t1)]) => + (case dest_quant qn T t1 of + SOME (q, Ts, ps, w, b) => + fold_map transT Ts ##>> fold_map (fold_map_pat trans) ps ##>> + trans b #>> (fn ((Ts', ps'), b') => SQua (q, Ts', ps', w, b')) + | NONE => raise TERM ("unsupported quantifier", [t])) + | 
(Const (@{const_name Let}, _), [t1, Abs (_, T, t2)]) => + transT T ##>> trans t1 ##>> trans t2 #>> + (fn ((U, u1), u2) => SLet (U, u1, u2)) + | (u as Const (c as (_, T)), ts) => + (case builtin ctxt c ts of + SOME (n, _, us, _) => fold_map trans us #>> app n + | NONE => transs u T ts) + | (u as Free (_, T), ts) => transs u T ts + | (Bound i, []) => pair (SVar i) + | _ => raise TERM ("bad SMT term", [t])) + + and transs t T ts = + let val (Us, U) = Old_SMT_Utils.dest_funT (length ts) T + in + fold_map transT Us ##>> transT U #-> (fn Up => + add_fun t (SOME Up) ##>> fold_map trans ts #>> SApp) + end + + val (us, trx') = fold_map trans ts trx + in ((sign_of (header ts) dtyps trx', us), trx') end + + + +(* translation *) + +structure Configs = Generic_Data +( + type T = (Proof.context -> config) Old_SMT_Utils.dict + val empty = [] + val extend = I + fun merge data = Old_SMT_Utils.dict_merge fst data +) + +fun add_config (cs, cfg) = Configs.map (Old_SMT_Utils.dict_update (cs, cfg)) + +fun get_config ctxt = + let val cs = Old_SMT_Config.solver_class_of ctxt + in + (case Old_SMT_Utils.dict_get (Configs.get (Context.Proof ctxt)) cs of + SOME cfg => cfg ctxt + | NONE => error ("SMT: no translation configuration found " ^ + "for solver class " ^ quote (Old_SMT_Utils.string_of_class cs))) + end + +fun translate ctxt comments ithms = + let + val {prefixes, is_fol, header, has_datatypes, serialize} = get_config ctxt + + val with_datatypes = + has_datatypes andalso Config.get ctxt Old_SMT_Config.datatypes + + fun no_dtyps (tr_context, ctxt) ts = + ((Termtab.empty, [], tr_context, ctxt), ts) + + val ts1 = map (Envir.beta_eta_contract o Old_SMT_Utils.prop_of o snd) ithms + + val ((funcs, dtyps, tr_context, ctxt1), ts2) = + ((make_tr_context prefixes, ctxt), ts1) + |-> (if with_datatypes then collect_datatypes_and_records else no_dtyps) + + fun is_binder (Const (@{const_name Let}, _) $ _) = true + | is_binder t = Lambda_Lifting.is_quantifier t + + fun mk_trigger ((q as Const 
(@{const_name All}, _)) $ Abs (n, T, t)) = + q $ Abs (n, T, mk_trigger t) + | mk_trigger (eq as (Const (@{const_name HOL.eq}, T) $ lhs $ _)) = + Term.domain_type T --> @{typ pattern} + |> (fn T => Const (@{const_name pat}, T) $ lhs) + |> HOLogic.mk_list @{typ pattern} o single + |> HOLogic.mk_list @{typ "pattern list"} o single + |> (fn t => @{const trigger} $ t $ eq) + | mk_trigger t = t + + val (ctxt2, ts3) = + ts2 + |> eta_expand ctxt1 is_fol funcs + |> rpair ctxt1 + |-> Lambda_Lifting.lift_lambdas NONE is_binder + |-> (fn (ts', defs) => fn ctxt' => + map mk_trigger defs @ ts' + |> intro_explicit_application ctxt' funcs + |> pair ctxt') + + val ((rewrite_rules, extra_thms, builtin), ts4) = + (if is_fol then folify ctxt2 else pair ([], [], I)) ts3 + + val rewrite_rules' = fun_app_eq :: rewrite_rules + in + (ts4, tr_context) + |-> intermediate header dtyps (builtin Old_SMT_Builtin.dest_builtin) ctxt2 + |>> uncurry (serialize comments) + ||> recon_of ctxt2 rewrite_rules' extra_thms ithms + end + +end diff --git a/src/main/Old_SMT/old_smt_utils.ML b/src/main/Old_SMT/old_smt_utils.ML new file mode 100644 index 0000000..8603f1a --- /dev/null +++ b/src/main/Old_SMT/old_smt_utils.ML @@ -0,0 +1,221 @@ +(* Title: HOL/Library/Old_SMT/old_smt_utils.ML + Author: Sascha Boehme, TU Muenchen + +General utility functions. 
+*) + +signature OLD_SMT_UTILS = +sig + (*basic combinators*) + val repeat: ('a -> 'a option) -> 'a -> 'a + val repeat_yield: ('a -> 'b -> ('a * 'b) option) -> 'a -> 'b -> 'a * 'b + + (*class dictionaries*) + type class = string list + val basicC: class + val string_of_class: class -> string + type 'a dict = (class * 'a) Ord_List.T + val dict_map_default: class * 'a -> ('a -> 'a) -> 'a dict -> 'a dict + val dict_update: class * 'a -> 'a dict -> 'a dict + val dict_merge: ('a * 'a -> 'a) -> 'a dict * 'a dict -> 'a dict + val dict_lookup: 'a dict -> class -> 'a list + val dict_get: 'a dict -> class -> 'a option + + (*types*) + val dest_funT: int -> typ -> typ list * typ + + (*terms*) + val dest_conj: term -> term * term + val dest_disj: term -> term * term + val under_quant: (term -> 'a) -> term -> 'a + val is_number: term -> bool + + (*patterns and instantiations*) + val mk_const_pat: theory -> string -> (ctyp -> 'a) -> 'a * cterm + val destT1: ctyp -> ctyp + val destT2: ctyp -> ctyp + val instTs: ctyp list -> ctyp list * cterm -> cterm + val instT: ctyp -> ctyp * cterm -> cterm + val instT': cterm -> ctyp * cterm -> cterm + + (*certified terms*) + val dest_cabs: cterm -> Proof.context -> cterm * Proof.context + val dest_all_cabs: cterm -> Proof.context -> cterm * Proof.context + val dest_cbinder: cterm -> Proof.context -> cterm * Proof.context + val dest_all_cbinders: cterm -> Proof.context -> cterm * Proof.context + val mk_cprop: cterm -> cterm + val dest_cprop: cterm -> cterm + val mk_cequals: cterm -> cterm -> cterm + val term_of: cterm -> term + val prop_of: thm -> term + + (*conversions*) + val if_conv: (term -> bool) -> conv -> conv -> conv + val if_true_conv: (term -> bool) -> conv -> conv + val if_exists_conv: (term -> bool) -> conv -> conv + val binders_conv: (Proof.context -> conv) -> Proof.context -> conv + val under_quant_conv: (Proof.context * cterm list -> conv) -> + Proof.context -> conv + val prop_conv: conv -> conv +end + +structure Old_SMT_Utils: 
OLD_SMT_UTILS = +struct + +(* basic combinators *) + +fun repeat f = + let fun rep x = (case f x of SOME y => rep y | NONE => x) + in rep end + +fun repeat_yield f = + let fun rep x y = (case f x y of SOME (x', y') => rep x' y' | NONE => (x, y)) + in rep end + + +(* class dictionaries *) + +type class = string list + +val basicC = [] + +fun string_of_class [] = "basic" + | string_of_class cs = "basic." ^ space_implode "." cs + +type 'a dict = (class * 'a) Ord_List.T + +fun class_ord ((cs1, _), (cs2, _)) = + rev_order (list_ord fast_string_ord (cs1, cs2)) + +fun dict_insert (cs, x) d = + if AList.defined (op =) d cs then d + else Ord_List.insert class_ord (cs, x) d + +fun dict_map_default (cs, x) f = + dict_insert (cs, x) #> AList.map_entry (op =) cs f + +fun dict_update (e as (_, x)) = dict_map_default e (K x) + +fun dict_merge val_merge = sort class_ord o AList.join (op =) (K val_merge) + +fun dict_lookup d cs = + let fun match (cs', x) = if is_prefix (op =) cs' cs then SOME x else NONE + in map_filter match d end + +fun dict_get d cs = + (case AList.lookup (op =) d cs of + NONE => (case cs of [] => NONE | _ => dict_get d (take (length cs - 1) cs)) + | SOME x => SOME x) + + +(* types *) + +val dest_funT = + let + fun dest Ts 0 T = (rev Ts, T) + | dest Ts i (Type ("fun", [T, U])) = dest (T::Ts) (i-1) U + | dest _ _ T = raise TYPE ("not a function type", [T], []) + in dest [] end + + +(* terms *) + +fun dest_conj (@{const HOL.conj} $ t $ u) = (t, u) + | dest_conj t = raise TERM ("not a conjunction", [t]) + +fun dest_disj (@{const HOL.disj} $ t $ u) = (t, u) + | dest_disj t = raise TERM ("not a disjunction", [t]) + +fun under_quant f t = + (case t of + Const (@{const_name All}, _) $ Abs (_, _, u) => under_quant f u + | Const (@{const_name Ex}, _) $ Abs (_, _, u) => under_quant f u + | _ => f t) + +val is_number = + let + fun is_num env (Const (@{const_name If}, _) $ _ $ t $ u) = + is_num env t andalso is_num env u + | is_num env (Const (@{const_name Let}, _) $ t $ 
Abs (_, _, u)) = + is_num (t :: env) u + | is_num env (Bound i) = i < length env andalso is_num env (nth env i) + | is_num _ t = can HOLogic.dest_number t + in is_num [] end + + +(* patterns and instantiations *) + +fun mk_const_pat thy name destT = + let val cpat = Thm.global_cterm_of thy (Const (name, Sign.the_const_type thy name)) + in (destT (Thm.ctyp_of_cterm cpat), cpat) end + +val destT1 = hd o Thm.dest_ctyp +val destT2 = hd o tl o Thm.dest_ctyp + +fun instTs cUs (cTs, ct) = Thm.instantiate_cterm (map (dest_TVar o Thm.typ_of) cTs ~~ cUs, []) ct +fun instT cU (cT, ct) = instTs [cU] ([cT], ct) +fun instT' ct = instT (Thm.ctyp_of_cterm ct) + + +(* certified terms *) + +fun dest_cabs ct ctxt = + (case Thm.term_of ct of + Abs _ => + let val (n, ctxt') = yield_singleton Variable.variant_fixes Name.uu ctxt + in (snd (Thm.dest_abs (SOME n) ct), ctxt') end + | _ => raise CTERM ("no abstraction", [ct])) + +val dest_all_cabs = repeat_yield (try o dest_cabs) + +fun dest_cbinder ct ctxt = + (case Thm.term_of ct of + Const _ $ Abs _ => dest_cabs (Thm.dest_arg ct) ctxt + | _ => raise CTERM ("not a binder", [ct])) + +val dest_all_cbinders = repeat_yield (try o dest_cbinder) + +val mk_cprop = Thm.apply (Thm.cterm_of @{context} @{const Trueprop}) + +fun dest_cprop ct = + (case Thm.term_of ct of + @{const Trueprop} $ _ => Thm.dest_arg ct + | _ => raise CTERM ("not a property", [ct])) + +val equals = mk_const_pat @{theory} @{const_name Pure.eq} destT1 +fun mk_cequals ct cu = Thm.mk_binop (instT' ct equals) ct cu + +val dest_prop = (fn @{const Trueprop} $ t => t | t => t) +fun term_of ct = dest_prop (Thm.term_of ct) +fun prop_of thm = dest_prop (Thm.prop_of thm) + + +(* conversions *) + +fun if_conv pred cv1 cv2 ct = if pred (Thm.term_of ct) then cv1 ct else cv2 ct + +fun if_true_conv pred cv = if_conv pred cv Conv.all_conv + +fun if_exists_conv pred = if_true_conv (Term.exists_subterm pred) + +fun binders_conv cv ctxt = + Conv.binder_conv (binders_conv cv o snd) ctxt else_conv 
cv ctxt + +fun under_quant_conv cv ctxt = + let + fun quant_conv inside ctxt cvs ct = + (case Thm.term_of ct of + Const (@{const_name All}, _) $ Abs _ => + Conv.binder_conv (under_conv cvs) ctxt + | Const (@{const_name Ex}, _) $ Abs _ => + Conv.binder_conv (under_conv cvs) ctxt + | _ => if inside then cv (ctxt, cvs) else Conv.all_conv) ct + and under_conv cvs (cv, ctxt) = quant_conv true ctxt (cv :: cvs) + in quant_conv false ctxt [] end + +fun prop_conv cv ct = + (case Thm.term_of ct of + @{const Trueprop} $ _ => Conv.arg_conv cv ct + | _ => raise CTERM ("not a property", [ct])) + +end diff --git a/src/main/Old_SMT/old_smt_word.ML b/src/main/Old_SMT/old_smt_word.ML new file mode 100644 index 0000000..4303aba --- /dev/null +++ b/src/main/Old_SMT/old_smt_word.ML @@ -0,0 +1,146 @@ +(* Title: HOL/Library/Old_SMT/old_smt_word.ML + Author: Sascha Boehme, TU Muenchen + +SMT setup for words. +*) + +structure Old_SMT_Word: sig end = +struct + +open Word_Lib + +(* SMT-LIB logic *) + +fun smtlib_logic ts = + if exists (Term.exists_type (Term.exists_subtype is_wordT)) ts + then SOME "QF_AUFBV" + else NONE + + +(* SMT-LIB builtins *) + +local + val smtlibC = Old_SMTLIB_Interface.smtlibC + + val wordT = @{typ "'a::len word"} + + fun index1 n i = n ^ "[" ^ string_of_int i ^ "]" + fun index2 n i j = n ^ "[" ^ string_of_int i ^ ":" ^ string_of_int j ^ "]" + + fun word_typ (Type (@{type_name word}, [T])) = + Option.map (index1 "BitVec") (try dest_binT T) + | word_typ _ = NONE + + fun word_num (Type (@{type_name word}, [T])) i = + Option.map (index1 ("bv" ^ string_of_int i)) (try dest_binT T) + | word_num _ _ = NONE + + fun if_fixed pred m n T ts = + let val (Us, U) = Term.strip_type T + in + if pred (U, Us) then + SOME (n, length Us, ts, Term.list_comb o pair (Const (m, T))) + else NONE + end + + fun if_fixed_all m = if_fixed (forall (can dest_wordT) o (op ::)) m + fun if_fixed_args m = if_fixed (forall (can dest_wordT) o snd) m + + fun add_word_fun f (t, n) = + let val (m, _) = 
Term.dest_Const t + in Old_SMT_Builtin.add_builtin_fun smtlibC (Term.dest_Const t, K (f m n)) end + + fun hd2 xs = hd (tl xs) + + fun mk_nat i = @{const nat} $ HOLogic.mk_number @{typ nat} i + + fun dest_nat (@{const nat} $ n) = snd (HOLogic.dest_number n) + | dest_nat t = raise TERM ("not a natural number", [t]) + + fun mk_shift c [t, u] = Const c $ t $ mk_nat (snd (HOLogic.dest_number u)) + | mk_shift c ts = raise TERM ("bad arguments", Const c :: ts) + + fun shift m n T ts = + let val U = Term.domain_type T + in + (case (can dest_wordT U, try (dest_nat o hd2) ts) of + (true, SOME i) => + SOME (n, 2, [hd ts, HOLogic.mk_number U i], mk_shift (m, T)) + | _ => NONE) (* FIXME: also support non-numerical shifts *) + end + + fun mk_extract c i ts = Term.list_comb (Const c, mk_nat i :: ts) + + fun extract m n T ts = + let val U = Term.range_type (Term.range_type T) + in + (case (try (dest_nat o hd) ts, try dest_wordT U) of + (SOME lb, SOME i) => + SOME (index2 n (i + lb - 1) lb, 1, tl ts, mk_extract (m, T) lb) + | _ => NONE) + end + + fun mk_extend c ts = Term.list_comb (Const c, ts) + + fun extend m n T ts = + let val (U1, U2) = Term.dest_funT T + in + (case (try dest_wordT U1, try dest_wordT U2) of + (SOME i, SOME j) => + if j-i >= 0 then SOME (index1 n (j-i), 1, ts, mk_extend (m, T)) + else NONE + | _ => NONE) + end + + fun mk_rotate c i ts = Term.list_comb (Const c, mk_nat i :: ts) + + fun rotate m n T ts = + let val U = Term.domain_type (Term.range_type T) + in + (case (can dest_wordT U, try (dest_nat o hd) ts) of + (true, SOME i) => SOME (index1 n i, 1, tl ts, mk_rotate (m, T) i) + | _ => NONE) + end +in + +val setup_builtins = + Old_SMT_Builtin.add_builtin_typ smtlibC (wordT, word_typ, word_num) #> + fold (add_word_fun if_fixed_all) [ + (@{term "uminus :: 'a::len word => _"}, "bvneg"), + (@{term "plus :: 'a::len word => _"}, "bvadd"), + (@{term "minus :: 'a::len word => _"}, "bvsub"), + (@{term "times :: 'a::len word => _"}, "bvmul"), + (@{term "bitNOT :: 'a::len 
word => _"}, "bvnot"), + (@{term "bitAND :: 'a::len word => _"}, "bvand"), + (@{term "bitOR :: 'a::len word => _"}, "bvor"), + (@{term "bitXOR :: 'a::len word => _"}, "bvxor"), + (@{term "word_cat :: 'a::len word => _"}, "concat") ] #> + fold (add_word_fun shift) [ + (@{term "shiftl :: 'a::len word => _ "}, "bvshl"), + (@{term "shiftr :: 'a::len word => _"}, "bvlshr"), + (@{term "sshiftr :: 'a::len word => _"}, "bvashr") ] #> + add_word_fun extract + (@{term "slice :: _ => 'a::len word => _"}, "extract") #> + fold (add_word_fun extend) [ + (@{term "ucast :: 'a::len word => _"}, "zero_extend"), + (@{term "scast :: 'a::len word => _"}, "sign_extend") ] #> + fold (add_word_fun rotate) [ + (@{term word_rotl}, "rotate_left"), + (@{term word_rotr}, "rotate_right") ] #> + fold (add_word_fun if_fixed_args) [ + (@{term "less :: 'a::len word => _"}, "bvult"), + (@{term "less_eq :: 'a::len word => _"}, "bvule"), + (@{term word_sless}, "bvslt"), + (@{term word_sle}, "bvsle") ] + +end + + +(* setup *) + +val _ = + Theory.setup + (Context.theory_map + (Old_SMTLIB_Interface.add_logic (20, smtlib_logic) #> setup_builtins)) + +end diff --git a/src/main/Old_SMT/old_smtlib_interface.ML b/src/main/Old_SMT/old_smtlib_interface.ML new file mode 100644 index 0000000..dc00faa --- /dev/null +++ b/src/main/Old_SMT/old_smtlib_interface.ML @@ -0,0 +1,161 @@ +(* Title: HOL/Library/Old_SMT/old_smtlib_interface.ML + Author: Sascha Boehme, TU Muenchen + +Interface to SMT solvers based on the SMT-LIB format. 
+*) + +signature OLD_SMTLIB_INTERFACE = +sig + val smtlibC: Old_SMT_Utils.class + val add_logic: int * (term list -> string option) -> Context.generic -> + Context.generic + val translate_config: Proof.context -> Old_SMT_Translate.config + val setup: theory -> theory +end + +structure Old_SMTLIB_Interface: OLD_SMTLIB_INTERFACE = +struct + + +val smtlibC = ["smtlib"] + + +(* builtins *) + +local + fun int_num _ i = SOME (string_of_int i) + + fun is_linear [t] = Old_SMT_Utils.is_number t + | is_linear [t, u] = Old_SMT_Utils.is_number t orelse Old_SMT_Utils.is_number u + | is_linear _ = false + + fun times _ _ ts = + let val mk = Term.list_comb o pair @{const times (int)} + in if is_linear ts then SOME ("*", 2, ts, mk) else NONE end +in + +val setup_builtins = + Old_SMT_Builtin.add_builtin_typ smtlibC (@{typ int}, K (SOME "Int"), int_num) #> + fold (Old_SMT_Builtin.add_builtin_fun' smtlibC) [ + (@{const True}, "true"), + (@{const False}, "false"), + (@{const Not}, "not"), + (@{const HOL.conj}, "and"), + (@{const HOL.disj}, "or"), + (@{const HOL.implies}, "implies"), + (@{const HOL.eq (bool)}, "iff"), + (@{const HOL.eq ('a)}, "="), + (@{const If (bool)}, "if_then_else"), + (@{const If ('a)}, "ite"), + (@{const less (int)}, "<"), + (@{const less_eq (int)}, "<="), + (@{const uminus (int)}, "~"), + (@{const plus (int)}, "+"), + (@{const minus (int)}, "-") ] #> + Old_SMT_Builtin.add_builtin_fun smtlibC + (Term.dest_Const @{const times (int)}, times) + +end + + +(* serialization *) + +(** header **) + +fun fst_int_ord ((i1, _), (i2, _)) = int_ord (i1, i2) + +structure Logics = Generic_Data +( + type T = (int * (term list -> string option)) list + val empty = [] + val extend = I + fun merge data = Ord_List.merge fst_int_ord data +) + +fun add_logic pf = Logics.map (Ord_List.insert fst_int_ord pf) + +fun choose_logic ctxt ts = + let + fun choose [] = "AUFLIA" + | choose ((_, f) :: fs) = (case f ts of SOME s => s | NONE => choose fs) + in [":logic " ^ choose (Logics.get 
(Context.Proof ctxt))] end + + +(** serialization **) + +val add = Buffer.add +fun sep f = add " " #> f +fun enclose l r f = sep (add l #> f #> add r) +val par = enclose "(" ")" +fun app n f = (fn [] => sep (add n) | xs => par (add n #> fold f xs)) +fun line f = f #> add "\n" + +fun var i = add "?v" #> add (string_of_int i) + +fun sterm l (Old_SMT_Translate.SVar i) = sep (var (l - i - 1)) + | sterm l (Old_SMT_Translate.SApp (n, ts)) = app n (sterm l) ts + | sterm _ (Old_SMT_Translate.SLet _) = + raise Fail "SMT-LIB: unsupported let expression" + | sterm l (Old_SMT_Translate.SQua (q, ss, ps, w, t)) = + let + fun quant Old_SMT_Translate.SForall = add "forall" + | quant Old_SMT_Translate.SExists = add "exists" + val vs = map_index (apfst (Integer.add l)) ss + fun var_decl (i, s) = par (var i #> sep (add s)) + val sub = sterm (l + length ss) + fun pat kind ts = sep (add kind #> enclose "{" " }" (fold sub ts)) + fun pats (Old_SMT_Translate.SPat ts) = pat ":pat" ts + | pats (Old_SMT_Translate.SNoPat ts) = pat ":nopat" ts + fun weight NONE = I + | weight (SOME i) = + sep (add ":weight { " #> add (string_of_int i) #> add " }") + in + par (quant q #> fold var_decl vs #> sub t #> fold pats ps #> weight w) + end + +fun ssort sorts = sort fast_string_ord sorts +fun fsort funcs = sort (prod_ord fast_string_ord (K EQUAL)) funcs + +fun sdatatypes decls = + let + fun con (n, []) = sep (add n) + | con (n, sels) = par (add n #> + fold (fn (n, s) => par (add n #> sep (add s))) sels) + fun dtyp (n, decl) = add n #> fold con decl + in line (add ":datatypes " #> par (fold (par o dtyp) decls)) end + +fun serialize comments {header, sorts, dtyps, funcs} ts = + Buffer.empty + |> line (add "(benchmark Isabelle") + |> line (add ":status unknown") + |> fold (line o add) header + |> length sorts > 0 ? + line (add ":extrasorts" #> par (fold (sep o add) (ssort sorts))) + |> fold sdatatypes dtyps + |> length funcs > 0 ? 
( + line (add ":extrafuns" #> add " (") #> + fold (fn (f, (ss, s)) => + line (sep (app f (sep o add) (ss @ [s])))) (fsort funcs) #> + line (add ")")) + |> fold (fn t => line (add ":assumption" #> sterm 0 t)) ts + |> line (add ":formula true)") + |> fold (fn str => line (add "; " #> add str)) comments + |> Buffer.content + + +(* interface *) + +fun translate_config ctxt = { + prefixes = { + sort_prefix = "S", + func_prefix = "f"}, + header = choose_logic ctxt, + is_fol = true, + has_datatypes = false, + serialize = serialize} + +val setup = Context.theory_map ( + setup_builtins #> + Old_SMT_Translate.add_config (smtlibC, translate_config)) + +end diff --git a/src/main/Old_SMT/old_z3_interface.ML b/src/main/Old_SMT/old_z3_interface.ML new file mode 100644 index 0000000..ec9f3d6 --- /dev/null +++ b/src/main/Old_SMT/old_z3_interface.ML @@ -0,0 +1,239 @@ +(* Title: HOL/Library/Old_SMT/old_z3_interface.ML + Author: Sascha Boehme, TU Muenchen + +Interface to Z3 based on a relaxed version of SMT-LIB. 
+*) + +signature OLD_Z3_INTERFACE = +sig + val smtlib_z3C: Old_SMT_Utils.class + val setup: theory -> theory + + datatype sym = Sym of string * sym list + type mk_builtins = { + mk_builtin_typ: sym -> typ option, + mk_builtin_num: theory -> int -> typ -> cterm option, + mk_builtin_fun: theory -> sym -> cterm list -> cterm option } + val add_mk_builtins: mk_builtins -> Context.generic -> Context.generic + val mk_builtin_typ: Proof.context -> sym -> typ option + val mk_builtin_num: Proof.context -> int -> typ -> cterm option + val mk_builtin_fun: Proof.context -> sym -> cterm list -> cterm option + + val is_builtin_theory_term: Proof.context -> term -> bool +end + +structure Old_Z3_Interface: OLD_Z3_INTERFACE = +struct + +val smtlib_z3C = Old_SMTLIB_Interface.smtlibC @ ["z3"] + + + +(* interface *) + +local + fun translate_config ctxt = + let + val {prefixes, header, is_fol, serialize, ...} = + Old_SMTLIB_Interface.translate_config ctxt + in + {prefixes=prefixes, header=header, is_fol=is_fol, serialize=serialize, + has_datatypes=true} + end + + fun is_div_mod @{const divide (int)} = true + | is_div_mod @{const modulo (int)} = true + | is_div_mod _ = false + + val div_by_z3div = @{lemma + "ALL k l. k div l = ( + if k = 0 | l = 0 then 0 + else if (0 < k & 0 < l) | (k < 0 & 0 < l) then z3div k l + else z3div (-k) (-l))" + by (simp add: z3div_def)} + + val mod_by_z3mod = @{lemma + "ALL k l. 
k mod l = ( + if l = 0 then k + else if k = 0 then 0 + else if (0 < k & 0 < l) | (k < 0 & 0 < l) then z3mod k l + else - z3mod (-k) (-l))" + by (simp add: z3mod_def)} + + val have_int_div_mod = + exists (Term.exists_subterm is_div_mod o Thm.prop_of) + + fun add_div_mod _ (thms, extra_thms) = + if have_int_div_mod thms orelse have_int_div_mod extra_thms then + (thms, div_by_z3div :: mod_by_z3mod :: extra_thms) + else (thms, extra_thms) + + val setup_builtins = + Old_SMT_Builtin.add_builtin_fun' smtlib_z3C (@{const times (int)}, "*") #> + Old_SMT_Builtin.add_builtin_fun' smtlib_z3C (@{const z3div}, "div") #> + Old_SMT_Builtin.add_builtin_fun' smtlib_z3C (@{const z3mod}, "mod") +in + +val setup = Context.theory_map ( + setup_builtins #> + Old_SMT_Normalize.add_extra_norm (smtlib_z3C, add_div_mod) #> + Old_SMT_Translate.add_config (smtlib_z3C, translate_config)) + +end + + + +(* constructors *) + +datatype sym = Sym of string * sym list + + +(** additional constructors **) + +type mk_builtins = { + mk_builtin_typ: sym -> typ option, + mk_builtin_num: theory -> int -> typ -> cterm option, + mk_builtin_fun: theory -> sym -> cterm list -> cterm option } + +fun chained _ [] = NONE + | chained f (b :: bs) = (case f b of SOME y => SOME y | NONE => chained f bs) + +fun chained_mk_builtin_typ bs sym = + chained (fn {mk_builtin_typ=mk, ...} : mk_builtins => mk sym) bs + +fun chained_mk_builtin_num ctxt bs i T = + let val thy = Proof_Context.theory_of ctxt + in chained (fn {mk_builtin_num=mk, ...} : mk_builtins => mk thy i T) bs end + +fun chained_mk_builtin_fun ctxt bs s cts = + let val thy = Proof_Context.theory_of ctxt + in chained (fn {mk_builtin_fun=mk, ...} : mk_builtins => mk thy s cts) bs end + +fun fst_int_ord ((i1, _), (i2, _)) = int_ord (i1, i2) + +structure Mk_Builtins = Generic_Data +( + type T = (int * mk_builtins) list + val empty = [] + val extend = I + fun merge data = Ord_List.merge fst_int_ord data +) + +fun add_mk_builtins mk = + Mk_Builtins.map 
(Ord_List.insert fst_int_ord (serial (), mk)) + +fun get_mk_builtins ctxt = map snd (Mk_Builtins.get (Context.Proof ctxt)) + + +(** basic and additional constructors **) + +fun mk_builtin_typ _ (Sym ("Bool", _)) = SOME @{typ bool} + | mk_builtin_typ _ (Sym ("Int", _)) = SOME @{typ int} + | mk_builtin_typ _ (Sym ("bool", _)) = SOME @{typ bool} (*FIXME: legacy*) + | mk_builtin_typ _ (Sym ("int", _)) = SOME @{typ int} (*FIXME: legacy*) + | mk_builtin_typ ctxt sym = chained_mk_builtin_typ (get_mk_builtins ctxt) sym + +fun mk_builtin_num _ i @{typ int} = SOME (Numeral.mk_cnumber @{ctyp int} i) + | mk_builtin_num ctxt i T = + chained_mk_builtin_num ctxt (get_mk_builtins ctxt) i T + +val mk_true = Thm.cterm_of @{context} (@{const Not} $ @{const False}) +val mk_false = Thm.cterm_of @{context} @{const False} +val mk_not = Thm.apply (Thm.cterm_of @{context} @{const Not}) +val mk_implies = Thm.mk_binop (Thm.cterm_of @{context} @{const HOL.implies}) +val mk_iff = Thm.mk_binop (Thm.cterm_of @{context} @{const HOL.eq (bool)}) +val conj = Thm.cterm_of @{context} @{const HOL.conj} +val disj = Thm.cterm_of @{context} @{const HOL.disj} + +fun mk_nary _ cu [] = cu + | mk_nary ct _ cts = uncurry (fold_rev (Thm.mk_binop ct)) (split_last cts) + +val eq = Old_SMT_Utils.mk_const_pat @{theory} @{const_name HOL.eq} Old_SMT_Utils.destT1 +fun mk_eq ct cu = Thm.mk_binop (Old_SMT_Utils.instT' ct eq) ct cu + +val if_term = + Old_SMT_Utils.mk_const_pat @{theory} @{const_name If} + (Old_SMT_Utils.destT1 o Old_SMT_Utils.destT2) +fun mk_if cc ct cu = + Thm.mk_binop (Thm.apply (Old_SMT_Utils.instT' ct if_term) cc) ct cu + +val nil_term = + Old_SMT_Utils.mk_const_pat @{theory} @{const_name Nil} Old_SMT_Utils.destT1 +val cons_term = + Old_SMT_Utils.mk_const_pat @{theory} @{const_name Cons} Old_SMT_Utils.destT1 +fun mk_list cT cts = + fold_rev (Thm.mk_binop (Old_SMT_Utils.instT cT cons_term)) cts + (Old_SMT_Utils.instT cT nil_term) + +val distinct = Old_SMT_Utils.mk_const_pat @{theory} @{const_name 
distinct} + (Old_SMT_Utils.destT1 o Old_SMT_Utils.destT1) +fun mk_distinct [] = mk_true + | mk_distinct (cts as (ct :: _)) = + Thm.apply (Old_SMT_Utils.instT' ct distinct) + (mk_list (Thm.ctyp_of_cterm ct) cts) + +val access = + Old_SMT_Utils.mk_const_pat @{theory} @{const_name fun_app} Old_SMT_Utils.destT1 +fun mk_access array = Thm.apply (Old_SMT_Utils.instT' array access) array + +val update = Old_SMT_Utils.mk_const_pat @{theory} @{const_name fun_upd} + (Thm.dest_ctyp o Old_SMT_Utils.destT1) +fun mk_update array index value = + let val cTs = Thm.dest_ctyp (Thm.ctyp_of_cterm array) + in + Thm.apply (Thm.mk_binop (Old_SMT_Utils.instTs cTs update) array index) value + end + +val mk_uminus = Thm.apply (Thm.cterm_of @{context} @{const uminus (int)}) +val add = Thm.cterm_of @{context} @{const plus (int)} +val int0 = Numeral.mk_cnumber @{ctyp int} 0 +val mk_sub = Thm.mk_binop (Thm.cterm_of @{context} @{const minus (int)}) +val mk_mul = Thm.mk_binop (Thm.cterm_of @{context} @{const times (int)}) +val mk_div = Thm.mk_binop (Thm.cterm_of @{context} @{const z3div}) +val mk_mod = Thm.mk_binop (Thm.cterm_of @{context} @{const z3mod}) +val mk_lt = Thm.mk_binop (Thm.cterm_of @{context} @{const less (int)}) +val mk_le = Thm.mk_binop (Thm.cterm_of @{context} @{const less_eq (int)}) + +fun mk_builtin_fun ctxt sym cts = + (case (sym, cts) of + (Sym ("true", _), []) => SOME mk_true + | (Sym ("false", _), []) => SOME mk_false + | (Sym ("not", _), [ct]) => SOME (mk_not ct) + | (Sym ("and", _), _) => SOME (mk_nary conj mk_true cts) + | (Sym ("or", _), _) => SOME (mk_nary disj mk_false cts) + | (Sym ("implies", _), [ct, cu]) => SOME (mk_implies ct cu) + | (Sym ("iff", _), [ct, cu]) => SOME (mk_iff ct cu) + | (Sym ("~", _), [ct, cu]) => SOME (mk_iff ct cu) + | (Sym ("xor", _), [ct, cu]) => SOME (mk_not (mk_iff ct cu)) + | (Sym ("if", _), [ct1, ct2, ct3]) => SOME (mk_if ct1 ct2 ct3) + | (Sym ("ite", _), [ct1, ct2, ct3]) => SOME (mk_if ct1 ct2 ct3) (* FIXME: remove *) + | (Sym ("=", _), 
[ct, cu]) => SOME (mk_eq ct cu) + | (Sym ("distinct", _), _) => SOME (mk_distinct cts) + | (Sym ("select", _), [ca, ck]) => SOME (Thm.apply (mk_access ca) ck) + | (Sym ("store", _), [ca, ck, cv]) => SOME (mk_update ca ck cv) + | _ => + (case (sym, try (Thm.typ_of_cterm o hd) cts, cts) of + (Sym ("+", _), SOME @{typ int}, _) => SOME (mk_nary add int0 cts) + | (Sym ("-", _), SOME @{typ int}, [ct]) => SOME (mk_uminus ct) + | (Sym ("-", _), SOME @{typ int}, [ct, cu]) => SOME (mk_sub ct cu) + | (Sym ("*", _), SOME @{typ int}, [ct, cu]) => SOME (mk_mul ct cu) + | (Sym ("div", _), SOME @{typ int}, [ct, cu]) => SOME (mk_div ct cu) + | (Sym ("mod", _), SOME @{typ int}, [ct, cu]) => SOME (mk_mod ct cu) + | (Sym ("<", _), SOME @{typ int}, [ct, cu]) => SOME (mk_lt ct cu) + | (Sym ("<=", _), SOME @{typ int}, [ct, cu]) => SOME (mk_le ct cu) + | (Sym (">", _), SOME @{typ int}, [ct, cu]) => SOME (mk_lt cu ct) + | (Sym (">=", _), SOME @{typ int}, [ct, cu]) => SOME (mk_le cu ct) + | _ => chained_mk_builtin_fun ctxt (get_mk_builtins ctxt) sym cts)) + + + +(* abstraction *) + +fun is_builtin_theory_term ctxt t = + if Old_SMT_Builtin.is_builtin_num ctxt t then true + else + (case Term.strip_comb t of + (Const c, ts) => Old_SMT_Builtin.is_builtin_fun ctxt c ts + | _ => false) + +end diff --git a/src/main/Old_SMT/old_z3_model.ML b/src/main/Old_SMT/old_z3_model.ML new file mode 100644 index 0000000..b61f104 --- /dev/null +++ b/src/main/Old_SMT/old_z3_model.ML @@ -0,0 +1,337 @@ +(* Title: HOL/Library/Old_SMT/old_z3_model.ML + Author: Sascha Boehme and Philipp Meyer, TU Muenchen + +Parser for counterexamples generated by Z3. 
+*) + +signature OLD_Z3_MODEL = +sig + val parse_counterex: Proof.context -> Old_SMT_Translate.recon -> string list -> + term list * term list +end + +structure Old_Z3_Model: OLD_Z3_MODEL = +struct + + +(* counterexample expressions *) + +datatype expr = True | False | Number of int * int option | Value of int | + Array of array | App of string * expr list +and array = Fresh of expr | Store of (array * expr) * expr + + +(* parsing *) + +val space = Scan.many Symbol.is_ascii_blank +fun spaced p = p --| space +fun in_parens p = spaced (Scan.$$ "(") |-- p --| spaced (Scan.$$ ")") +fun in_braces p = spaced (Scan.$$ "{") |-- p --| spaced (Scan.$$ "}") + +val digit = (fn + "0" => SOME 0 | "1" => SOME 1 | "2" => SOME 2 | "3" => SOME 3 | + "4" => SOME 4 | "5" => SOME 5 | "6" => SOME 6 | "7" => SOME 7 | + "8" => SOME 8 | "9" => SOME 9 | _ => NONE) + +val nat_num = spaced (Scan.repeat1 (Scan.some digit) >> + (fn ds => fold (fn d => fn i => i * 10 + d) ds 0)) +val int_num = spaced (Scan.optional ($$ "-" >> K (fn i => ~i)) I :|-- + (fn sign => nat_num >> sign)) + +val is_char = Symbol.is_ascii_letter orf Symbol.is_ascii_digit orf + member (op =) (raw_explode "_+*-/%~=<>$&|?!.@^#") +val name = spaced (Scan.many1 is_char >> implode) + +fun $$$ s = spaced (Scan.this_string s) + +fun array_expr st = st |> in_parens ( + $$$ "const" |-- expr >> Fresh || + $$$ "store" |-- array_expr -- expr -- expr >> Store) + +and expr st = st |> ( + $$$ "true" >> K True || + $$$ "false" >> K False || + int_num -- Scan.option ($$$ "/" |-- int_num) >> Number || + $$$ "val!" 
|-- nat_num >> Value || + name >> (App o rpair []) || + array_expr >> Array || + in_parens (name -- Scan.repeat1 expr) >> App) + +fun args st = ($$$ "->" >> K [] || expr ::: args) st +val args_case = args -- expr +val else_case = $$$ "else" -- $$$ "->" |-- expr >> pair ([] : expr list) + +val func = + let fun cases st = (else_case >> single || args_case ::: cases) st + in in_braces cases end + +val cex = space |-- + Scan.repeat (name --| $$$ "->" -- (func || expr >> (single o pair []))) + +fun resolve terms ((n, k), cases) = + (case Symtab.lookup terms n of + NONE => NONE + | SOME t => SOME ((t, k), cases)) + +fun annotate _ (_, []) = NONE + | annotate terms (n, [([], c)]) = resolve terms ((n, 0), (c, [])) + | annotate _ (_, [_]) = NONE + | annotate terms (n, cases as (args, _) :: _) = + let val (cases', (_, else_case)) = split_last cases + in resolve terms ((n, length args), (else_case, cases')) end + +fun read_cex terms ls = + maps (cons "\n" o raw_explode) ls + |> try (fst o Scan.finite Symbol.stopper cex) + |> the_default [] + |> map_filter (annotate terms) + + +(* translation into terms *) + +fun max_value vs = + let + fun max_val_expr (Value i) = Integer.max i + | max_val_expr (App (_, es)) = fold max_val_expr es + | max_val_expr (Array a) = max_val_array a + | max_val_expr _ = I + + and max_val_array (Fresh e) = max_val_expr e + | max_val_array (Store ((a, e1), e2)) = + max_val_array a #> max_val_expr e1 #> max_val_expr e2 + + fun max_val (_, (ec, cs)) = + max_val_expr ec #> fold (fn (es, e) => fold max_val_expr (e :: es)) cs + + in fold max_val vs ~1 end + +fun with_context terms f vs = fst (fold_map f vs (terms, max_value vs + 1)) + +fun get_term n T es (cx as (terms, next_val)) = + (case Symtab.lookup terms n of + SOME t => ((t, es), cx) + | NONE => + let val t = Var (("skolem", next_val), T) + in ((t, []), (Symtab.update (n, t) terms, next_val + 1)) end) + +fun trans_expr _ True = pair @{const True} + | trans_expr _ False = pair @{const False} + | 
trans_expr T (Number (i, NONE)) = pair (HOLogic.mk_number T i) + | trans_expr T (Number (i, SOME j)) = + pair (Const (@{const_name divide}, [T, T] ---> T) $ + HOLogic.mk_number T i $ HOLogic.mk_number T j) + | trans_expr T (Value i) = pair (Var (("value", i), T)) + | trans_expr T (Array a) = trans_array T a + | trans_expr T (App (n, es)) = get_term n T es #-> (fn (t, es') => + let val Ts = fst (Old_SMT_Utils.dest_funT (length es') (Term.fastype_of t)) + in + fold_map (uncurry trans_expr) (Ts ~~ es') #>> Term.list_comb o pair t + end) + +and trans_array T a = + let val (dT, rT) = Term.dest_funT T + in + (case a of + Fresh e => trans_expr rT e #>> (fn t => Abs ("x", dT, t)) + | Store ((a', e1), e2) => + trans_array T a' ##>> trans_expr dT e1 ##>> trans_expr rT e2 #>> + (fn ((m, k), v) => + Const (@{const_name fun_upd}, [T, dT, rT] ---> T) $ m $ k $ v)) + end + +fun trans_pattern T ([], e) = trans_expr T e #>> pair [] + | trans_pattern T (arg :: args, e) = + trans_expr (Term.domain_type T) arg ##>> + trans_pattern (Term.range_type T) (args, e) #>> + (fn (arg', (args', e')) => (arg' :: args', e')) + +fun mk_fun_upd T U = Const (@{const_name fun_upd}, [T --> U, T, U, T] ---> U) + +fun mk_update ([], u) _ = u + | mk_update ([t], u) f = + uncurry mk_fun_upd (Term.dest_funT (Term.fastype_of f)) $ f $ t $ u + | mk_update (t :: ts, u) f = + let + val (dT, rT) = Term.dest_funT (Term.fastype_of f) + val (dT', rT') = Term.dest_funT rT + in + mk_fun_upd dT rT $ f $ t $ + mk_update (ts, u) (absdummy dT' (Const ("_", rT'))) + end + +fun mk_lambda Ts (t, pats) = + fold_rev absdummy Ts t |> fold mk_update pats + +fun translate ((t, k), (e, cs)) = + let + val T = Term.fastype_of t + val (Us, U) = Old_SMT_Utils.dest_funT k (Term.fastype_of t) + + fun mk_full_def u' pats = + pats + |> filter_out (fn (_, u) => u aconv u') + |> HOLogic.mk_eq o pair t o mk_lambda Us o pair u' + + fun mk_eq (us, u) = HOLogic.mk_eq (Term.list_comb (t, us), u) + fun mk_eqs u' [] = [HOLogic.mk_eq (t, u')] + | 
mk_eqs _ pats = map mk_eq pats + in + trans_expr U e ##>> + (if k = 0 then pair [] else fold_map (trans_pattern T) cs) #>> + (fn (u', pats) => (mk_eqs u' pats, mk_full_def u' pats)) + end + + +(* normalization *) + +fun partition_eqs f = + let + fun part t (xs, ts) = + (case try HOLogic.dest_eq t of + SOME (l, r) => (case f l r of SOME x => (x::xs, ts) | _ => (xs, t::ts)) + | NONE => (xs, t :: ts)) + in (fn ts => fold part ts ([], [])) end + +fun first_eq pred = + let + fun part _ [] = NONE + | part us (t :: ts) = + (case try (pred o HOLogic.dest_eq) t of + SOME (SOME lr) => SOME (lr, fold cons us ts) + | _ => part (t :: us) ts) + in (fn ts => part [] ts) end + +fun replace_vars tab = + let + fun repl v = the_default v (AList.lookup (op aconv) tab v) + fun replace (v as Var _) = repl v + | replace (v as Free _) = repl v + | replace t = t + in map (Term.map_aterms replace) end + +fun remove_int_nat_coercions (eqs, defs) = + let + fun mk_nat_num t i = + (case try HOLogic.dest_number i of + SOME (_, n) => SOME (t, HOLogic.mk_number @{typ nat} n) + | NONE => NONE) + fun nat_of (@{const of_nat (int)} $ (t as Var _)) i = mk_nat_num t i + | nat_of (@{const nat} $ i) (t as Var _) = mk_nat_num t i + | nat_of _ _ = NONE + val (nats, eqs') = partition_eqs nat_of eqs + + fun is_coercion t = + (case try HOLogic.dest_eq t of + SOME (@{const of_nat (int)}, _) => true + | SOME (@{const nat}, _) => true + | _ => false) + in apply2 (replace_vars nats) (eqs', filter_out is_coercion defs) end + +fun unfold_funapp (eqs, defs) = + let + fun unfold_app (Const (@{const_name fun_app}, _) $ f $ t) = f $ t + | unfold_app t = t + fun unfold_eq ((eq as Const (@{const_name HOL.eq}, _)) $ t $ u) = + eq $ unfold_app t $ u + | unfold_eq t = t + + fun is_fun_app t = + (case try HOLogic.dest_eq t of + SOME (Const (@{const_name fun_app}, _), _) => true + | _ => false) + + in (map unfold_eq eqs, filter_out is_fun_app defs) end + +val unfold_eqs = + let + val is_ground = not o Term.exists_subterm 
Term.is_Var + fun is_non_rec (v, t) = not (Term.exists_subterm (equal v) t) + + fun rewr_var (l as Var _, r) = if is_ground r then SOME (l, r) else NONE + | rewr_var (r, l as Var _) = if is_ground r then SOME (l, r) else NONE + | rewr_var _ = NONE + + fun rewr_free' e = if is_non_rec e then SOME e else NONE + fun rewr_free (e as (Free _, _)) = rewr_free' e + | rewr_free (e as (_, Free _)) = rewr_free' (swap e) + | rewr_free _ = NONE + + fun is_trivial (Const (@{const_name HOL.eq}, _) $ t $ u) = t aconv u + | is_trivial _ = false + + fun replace r = replace_vars [r] #> filter_out is_trivial + + fun unfold_vars (es, ds) = + (case first_eq rewr_var es of + SOME (lr, es') => unfold_vars (apply2 (replace lr) (es', ds)) + | NONE => (es, ds)) + + fun unfold_frees ues (es, ds) = + (case first_eq rewr_free es of + SOME (lr, es') => + apply2 (replace lr) (es', ds) + |> unfold_frees (HOLogic.mk_eq lr :: replace lr ues) + | NONE => (ues @ es, ds)) + + in unfold_vars #> unfold_frees [] end + +fun swap_free ((eq as Const (@{const_name HOL.eq}, _)) $ t $ (u as Free _)) = + eq $ u $ t + | swap_free t = t + +fun frees_for_vars ctxt (eqs, defs) = + let + fun fresh_free i T (cx as (frees, ctxt)) = + (case Inttab.lookup frees i of + SOME t => (t, cx) + | NONE => + let + val (n, ctxt') = yield_singleton Variable.variant_fixes "" ctxt + val t = Free (n, T) + in (t, (Inttab.update (i, t) frees, ctxt')) end) + + fun repl_var (Var ((_, i), T)) = fresh_free i T + | repl_var (t $ u) = repl_var t ##>> repl_var u #>> op $ + | repl_var (Abs (n, T, t)) = repl_var t #>> (fn t' => Abs (n, T, t')) + | repl_var t = pair t + in + (Inttab.empty, ctxt) + |> fold_map repl_var eqs + ||>> fold_map repl_var defs + |> fst + end + + +(* overall procedure *) + +val is_free_constraint = Term.exists_subterm (fn Free _ => true | _ => false) + +fun is_free_def (Const (@{const_name HOL.eq}, _) $ Free _ $ _) = true + | is_free_def _ = false + +fun defined tp = + try (apply2 (fst o HOLogic.dest_eq)) tp + |> 
the_default false o Option.map (op aconv) + +fun add_free_defs free_cs defs = + let val (free_defs, defs') = List.partition is_free_def defs + in (free_cs @ filter_out (member defined free_cs) free_defs, defs') end + +fun is_const_def (Const (@{const_name HOL.eq}, _) $ Const _ $ _) = true + | is_const_def _ = false + +fun parse_counterex ctxt ({terms, ...} : Old_SMT_Translate.recon) ls = + read_cex terms ls + |> with_context terms translate + |> apfst flat o split_list + |> remove_int_nat_coercions + |> unfold_funapp + |> unfold_eqs + |>> map swap_free + |>> filter is_free_constraint + |-> add_free_defs + |> frees_for_vars ctxt + ||> filter is_const_def + +end + diff --git a/src/main/Old_SMT/old_z3_proof_literals.ML b/src/main/Old_SMT/old_z3_proof_literals.ML new file mode 100644 index 0000000..89ce7d1 --- /dev/null +++ b/src/main/Old_SMT/old_z3_proof_literals.ML @@ -0,0 +1,363 @@ +(* Title: HOL/Library/Old_SMT/old_z3_proof_literals.ML + Author: Sascha Boehme, TU Muenchen + +Proof tools related to conjunctions and disjunctions. 
+*) + +signature OLD_Z3_PROOF_LITERALS = +sig + (*literal table*) + type littab = thm Termtab.table + val make_littab: thm list -> littab + val insert_lit: thm -> littab -> littab + val delete_lit: thm -> littab -> littab + val lookup_lit: littab -> term -> thm option + val get_first_lit: (term -> bool) -> littab -> thm option + + (*rules*) + val true_thm: thm + val rewrite_true: thm + + (*properties*) + val is_conj: term -> bool + val is_disj: term -> bool + val exists_lit: bool -> (term -> bool) -> term -> bool + val negate: cterm -> cterm + + (*proof tools*) + val explode: bool -> bool -> bool -> term list -> thm -> thm list + val join: bool -> littab -> term -> thm + val prove_conj_disj_eq: cterm -> thm +end + +structure Old_Z3_Proof_Literals: OLD_Z3_PROOF_LITERALS = +struct + + + +(* literal table *) + +type littab = thm Termtab.table + +fun make_littab thms = + fold (Termtab.update o `Old_SMT_Utils.prop_of) thms Termtab.empty + +fun insert_lit thm = Termtab.update (`Old_SMT_Utils.prop_of thm) +fun delete_lit thm = Termtab.delete (Old_SMT_Utils.prop_of thm) +fun lookup_lit lits = Termtab.lookup lits +fun get_first_lit f = + Termtab.get_first (fn (t, thm) => if f t then SOME thm else NONE) + + + +(* rules *) + +val true_thm = @{lemma "~False" by simp} +val rewrite_true = @{lemma "True == ~ False" by simp} + + + +(* properties and term operations *) + +val is_neg = (fn @{const Not} $ _ => true | _ => false) +fun is_neg' f = (fn @{const Not} $ t => f t | _ => false) +val is_dneg = is_neg' is_neg +val is_conj = (fn @{const HOL.conj} $ _ $ _ => true | _ => false) +val is_disj = (fn @{const HOL.disj} $ _ $ _ => true | _ => false) + +fun dest_disj_term' f = (fn + @{const Not} $ (@{const HOL.disj} $ t $ u) => SOME (f t, f u) + | _ => NONE) + +val dest_conj_term = (fn @{const HOL.conj} $ t $ u => SOME (t, u) | _ => NONE) +val dest_disj_term = + dest_disj_term' (fn @{const Not} $ t => t | t => @{const Not} $ t) + +fun exists_lit is_conj P = + let + val dest = if is_conj 
then dest_conj_term else dest_disj_term + fun exists t = P t orelse + (case dest t of + SOME (t1, t2) => exists t1 orelse exists t2 + | NONE => false) + in exists end + +val negate = Thm.apply (Thm.cterm_of @{context} @{const Not}) + + + +(* proof tools *) + +(** explosion of conjunctions and disjunctions **) + +local + val precomp = Old_Z3_Proof_Tools.precompose2 + + fun destc ct = Thm.dest_binop (Thm.dest_arg ct) + val dest_conj1 = precomp destc @{thm conjunct1} + val dest_conj2 = precomp destc @{thm conjunct2} + fun dest_conj_rules t = + dest_conj_term t |> Option.map (K (dest_conj1, dest_conj2)) + + fun destd f ct = f (Thm.dest_binop (Thm.dest_arg (Thm.dest_arg ct))) + val dn1 = apfst Thm.dest_arg and dn2 = apsnd Thm.dest_arg + val dest_disj1 = precomp (destd I) @{lemma "~(P | Q) ==> ~P" by fast} + val dest_disj2 = precomp (destd dn1) @{lemma "~(~P | Q) ==> P" by fast} + val dest_disj3 = precomp (destd I) @{lemma "~(P | Q) ==> ~Q" by fast} + val dest_disj4 = precomp (destd dn2) @{lemma "~(P | ~Q) ==> Q" by fast} + + fun dest_disj_rules t = + (case dest_disj_term' is_neg t of + SOME (true, true) => SOME (dest_disj2, dest_disj4) + | SOME (true, false) => SOME (dest_disj2, dest_disj3) + | SOME (false, true) => SOME (dest_disj1, dest_disj4) + | SOME (false, false) => SOME (dest_disj1, dest_disj3) + | NONE => NONE) + + fun destn ct = [Thm.dest_arg (Thm.dest_arg (Thm.dest_arg ct))] + val dneg_rule = Old_Z3_Proof_Tools.precompose destn @{thm notnotD} +in + +(* + explode a term into literals and collect all rules to be able to deduce + particular literals afterwards +*) +fun explode_term is_conj = + let + val dest = if is_conj then dest_conj_term else dest_disj_term + val dest_rules = if is_conj then dest_conj_rules else dest_disj_rules + + fun add (t, rs) = Termtab.map_default (t, rs) + (fn rs' => if length rs' < length rs then rs' else rs) + + fun explode1 rules t = + (case dest t of + SOME (t1, t2) => + let val (rule1, rule2) = the (dest_rules t) + in + explode1 
(rule1 :: rules) t1 #> + explode1 (rule2 :: rules) t2 #> + add (t, rev rules) + end + | NONE => add (t, rev rules)) + + fun explode0 (@{const Not} $ (@{const Not} $ t)) = + Termtab.make [(t, [dneg_rule])] + | explode0 t = explode1 [] t Termtab.empty + + in explode0 end + +(* + extract a literal by applying previously collected rules +*) +fun extract_lit thm rules = fold Old_Z3_Proof_Tools.compose rules thm + + +(* + explode a theorem into its literals +*) +fun explode is_conj full keep_intermediate stop_lits = + let + val dest_rules = if is_conj then dest_conj_rules else dest_disj_rules + val tab = fold (Termtab.update o rpair ()) stop_lits Termtab.empty + + fun explode1 thm = + if Termtab.defined tab (Old_SMT_Utils.prop_of thm) then cons thm + else + (case dest_rules (Old_SMT_Utils.prop_of thm) of + SOME (rule1, rule2) => + explode2 rule1 thm #> + explode2 rule2 thm #> + keep_intermediate ? cons thm + | NONE => cons thm) + + and explode2 dest_rule thm = + if full orelse + exists_lit is_conj (Termtab.defined tab) (Old_SMT_Utils.prop_of thm) + then explode1 (Old_Z3_Proof_Tools.compose dest_rule thm) + else cons (Old_Z3_Proof_Tools.compose dest_rule thm) + + fun explode0 thm = + if not is_conj andalso is_dneg (Old_SMT_Utils.prop_of thm) + then [Old_Z3_Proof_Tools.compose dneg_rule thm] + else explode1 thm [] + + in explode0 end + +end + + + +(** joining of literals to conjunctions or disjunctions **) + +local + fun on_cprem i f thm = f (Thm.cprem_of thm i) + fun on_cprop f thm = f (Thm.cprop_of thm) + fun precomp2 f g thm = (on_cprem 1 f thm, on_cprem 2 g thm, f, g, thm) + fun comp2 (cv1, cv2, f, g, rule) thm1 thm2 = + Thm.instantiate ([], + [(dest_Var (Thm.term_of cv1), on_cprop f thm1), + (dest_Var (Thm.term_of cv2), on_cprop g thm2)]) rule + |> Old_Z3_Proof_Tools.discharge thm1 |> Old_Z3_Proof_Tools.discharge thm2 + + fun d1 ct = Thm.dest_arg ct and d2 ct = Thm.dest_arg (Thm.dest_arg ct) + + val conj_rule = precomp2 d1 d1 @{thm conjI} + fun comp_conj ((_, thm1), 
(_, thm2)) = comp2 conj_rule thm1 thm2 + + val disj1 = precomp2 d2 d2 @{lemma "~P ==> ~Q ==> ~(P | Q)" by fast} + val disj2 = precomp2 d2 d1 @{lemma "~P ==> Q ==> ~(P | ~Q)" by fast} + val disj3 = precomp2 d1 d2 @{lemma "P ==> ~Q ==> ~(~P | Q)" by fast} + val disj4 = precomp2 d1 d1 @{lemma "P ==> Q ==> ~(~P | ~Q)" by fast} + + fun comp_disj ((false, thm1), (false, thm2)) = comp2 disj1 thm1 thm2 + | comp_disj ((false, thm1), (true, thm2)) = comp2 disj2 thm1 thm2 + | comp_disj ((true, thm1), (false, thm2)) = comp2 disj3 thm1 thm2 + | comp_disj ((true, thm1), (true, thm2)) = comp2 disj4 thm1 thm2 + + fun dest_conj (@{const HOL.conj} $ t $ u) = ((false, t), (false, u)) + | dest_conj t = raise TERM ("dest_conj", [t]) + + val neg = (fn @{const Not} $ t => (true, t) | t => (false, @{const Not} $ t)) + fun dest_disj (@{const Not} $ (@{const HOL.disj} $ t $ u)) = (neg t, neg u) + | dest_disj t = raise TERM ("dest_disj", [t]) + + val precomp = Old_Z3_Proof_Tools.precompose + val dnegE = precomp (single o d2 o d1) @{thm notnotD} + val dnegI = precomp (single o d1) @{lemma "P ==> ~~P" by fast} + fun as_dneg f t = f (@{const Not} $ (@{const Not} $ t)) + + val precomp2 = Old_Z3_Proof_Tools.precompose2 + fun dni f = apsnd f o Thm.dest_binop o f o d1 + val negIffE = precomp2 (dni d1) @{lemma "~(P = (~Q)) ==> Q = P" by fast} + val negIffI = precomp2 (dni I) @{lemma "P = Q ==> ~(Q = (~P))" by fast} + val iff_const = @{const HOL.eq (bool)} + fun as_negIff f (@{const HOL.eq (bool)} $ t $ u) = + f (@{const Not} $ (iff_const $ u $ (@{const Not} $ t))) + | as_negIff _ _ = NONE +in + +fun join is_conj littab t = + let + val comp = if is_conj then comp_conj else comp_disj + val dest = if is_conj then dest_conj else dest_disj + + val lookup = lookup_lit littab + + fun lookup_rule t = + (case t of + @{const Not} $ (@{const Not} $ t) => + (Old_Z3_Proof_Tools.compose dnegI, lookup t) + | @{const Not} $ (@{const HOL.eq (bool)} $ t $ (@{const Not} $ u)) => + (Old_Z3_Proof_Tools.compose negIffI, 
lookup (iff_const $ u $ t)) + | @{const Not} $ ((eq as Const (@{const_name HOL.eq}, _)) $ t $ u) => + let fun rewr lit = lit COMP @{thm not_sym} + in (rewr, lookup (@{const Not} $ (eq $ u $ t))) end + | _ => + (case as_dneg lookup t of + NONE => (Old_Z3_Proof_Tools.compose negIffE, as_negIff lookup t) + | x => (Old_Z3_Proof_Tools.compose dnegE, x))) + + fun join1 (s, t) = + (case lookup t of + SOME lit => (s, lit) + | NONE => + (case lookup_rule t of + (rewrite, SOME lit) => (s, rewrite lit) + | (_, NONE) => (s, comp (apply2 join1 (dest t))))) + + in snd (join1 (if is_conj then (false, t) else (true, t))) end + +end + + + +(** proving equality of conjunctions or disjunctions **) + +fun iff_intro thm1 thm2 = thm2 COMP (thm1 COMP @{thm iffI}) + +local + val cp1 = @{lemma "(~P) = (~Q) ==> P = Q" by simp} + val cp2 = @{lemma "(~P) = Q ==> P = (~Q)" by fastforce} + val cp3 = @{lemma "P = (~Q) ==> (~P) = Q" by simp} +in +fun contrapos1 prove (ct, cu) = prove (negate ct, negate cu) COMP cp1 +fun contrapos2 prove (ct, cu) = prove (negate ct, Thm.dest_arg cu) COMP cp2 +fun contrapos3 prove (ct, cu) = prove (Thm.dest_arg ct, negate cu) COMP cp3 +end + + +local + val contra_rule = @{lemma "P ==> ~P ==> False" by (rule notE)} + fun contra_left conj thm = + let + val rules = explode_term conj (Old_SMT_Utils.prop_of thm) + fun contra_lits (t, rs) = + (case t of + @{const Not} $ u => Termtab.lookup rules u |> Option.map (pair rs) + | _ => NONE) + in + (case Termtab.lookup rules @{const False} of + SOME rs => extract_lit thm rs + | NONE => + the (Termtab.get_first contra_lits rules) + |> apply2 (extract_lit thm) + |> (fn (nlit, plit) => nlit COMP (plit COMP contra_rule))) + end + + val falseE_v = dest_Var (Thm.term_of (Thm.dest_arg (Thm.dest_arg (Thm.cprop_of @{thm FalseE})))) + fun contra_right ct = Thm.instantiate ([], [(falseE_v, ct)]) @{thm FalseE} +in +fun contradict conj ct = + iff_intro (Old_Z3_Proof_Tools.under_assumption (contra_left conj) ct) + (contra_right ct) +end + + 
+local + fun prove_eq l r (cl, cr) = + let + fun explode' is_conj = explode is_conj true (l <> r) [] + fun make_tab is_conj thm = make_littab (true_thm :: explode' is_conj thm) + fun prove is_conj ct tab = join is_conj tab (Thm.term_of ct) + + val thm1 = Old_Z3_Proof_Tools.under_assumption (prove r cr o make_tab l) cl + val thm2 = Old_Z3_Proof_Tools.under_assumption (prove l cl o make_tab r) cr + in iff_intro thm1 thm2 end + + datatype conj_disj = CONJ | DISJ | NCON | NDIS + fun kind_of t = + if is_conj t then SOME CONJ + else if is_disj t then SOME DISJ + else if is_neg' is_conj t then SOME NCON + else if is_neg' is_disj t then SOME NDIS + else NONE +in + +fun prove_conj_disj_eq ct = + let val cp as (cl, cr) = Thm.dest_binop (Thm.dest_arg ct) + in + (case (kind_of (Thm.term_of cl), Thm.term_of cr) of + (SOME CONJ, @{const False}) => contradict true cl + | (SOME DISJ, @{const Not} $ @{const False}) => + contrapos2 (contradict false o fst) cp + | (kl, _) => + (case (kl, kind_of (Thm.term_of cr)) of + (SOME CONJ, SOME CONJ) => prove_eq true true cp + | (SOME CONJ, SOME NDIS) => prove_eq true false cp + | (SOME CONJ, _) => prove_eq true true cp + | (SOME DISJ, SOME DISJ) => contrapos1 (prove_eq false false) cp + | (SOME DISJ, SOME NCON) => contrapos2 (prove_eq false true) cp + | (SOME DISJ, _) => contrapos1 (prove_eq false false) cp + | (SOME NCON, SOME NCON) => contrapos1 (prove_eq true true) cp + | (SOME NCON, SOME DISJ) => contrapos3 (prove_eq true false) cp + | (SOME NCON, NONE) => contrapos3 (prove_eq true false) cp + | (SOME NDIS, SOME NDIS) => prove_eq false false cp + | (SOME NDIS, SOME CONJ) => prove_eq false true cp + | (SOME NDIS, NONE) => prove_eq false true cp + | _ => raise CTERM ("prove_conj_disj_eq", [ct]))) + end + +end + +end diff --git a/src/main/Old_SMT/old_z3_proof_methods.ML b/src/main/Old_SMT/old_z3_proof_methods.ML new file mode 100644 index 0000000..c27174d --- /dev/null +++ b/src/main/Old_SMT/old_z3_proof_methods.ML @@ -0,0 +1,149 @@ +(* 
Title: HOL/Library/Old_SMT/old_z3_proof_methods.ML + Author: Sascha Boehme, TU Muenchen + +Proof methods for Z3 proof reconstruction. +*) + +signature OLD_Z3_PROOF_METHODS = +sig + val prove_injectivity: Proof.context -> cterm -> thm + val prove_ite: Proof.context -> cterm -> thm +end + +structure Old_Z3_Proof_Methods: OLD_Z3_PROOF_METHODS = +struct + + +fun apply tac st = + (case Seq.pull (tac 1 st) of + NONE => raise THM ("tactic failed", 1, [st]) + | SOME (st', _) => st') + + + +(* if-then-else *) + +val pull_ite = mk_meta_eq + @{lemma "f (if P then x else y) = (if P then f x else f y)" by simp} + +fun pull_ites_conv ct = + (Conv.rewr_conv pull_ite then_conv + Conv.binop_conv (Conv.try_conv pull_ites_conv)) ct + +fun prove_ite ctxt = + Old_Z3_Proof_Tools.by_tac ctxt ( + CONVERSION (Conv.arg_conv (Conv.arg1_conv pull_ites_conv)) + THEN' resolve_tac ctxt @{thms refl}) + + + +(* injectivity *) + +local + +val B = @{typ bool} +fun mk_univ T = Const (@{const_name top}, HOLogic.mk_setT T) +fun mk_inj_on T U = + Const (@{const_name inj_on}, (T --> U) --> HOLogic.mk_setT T --> B) +fun mk_inv_into T U = + Const (@{const_name inv_into}, [HOLogic.mk_setT T, T --> U, U] ---> T) + +fun mk_inv_of ctxt ct = + let + val (dT, rT) = Term.dest_funT (Thm.typ_of_cterm ct) + val inv = Thm.cterm_of ctxt (mk_inv_into dT rT) + val univ = Thm.cterm_of ctxt (mk_univ dT) + in Thm.mk_binop inv univ ct end + +fun mk_inj_prop ctxt ct = + let + val (dT, rT) = Term.dest_funT (Thm.typ_of_cterm ct) + val inj = Thm.cterm_of ctxt (mk_inj_on dT rT) + val univ = Thm.cterm_of ctxt (mk_univ dT) + in Old_SMT_Utils.mk_cprop (Thm.mk_binop inj ct univ) end + + +val disjE = @{lemma "~P | Q ==> P ==> Q" by fast} + +fun prove_inj_prop ctxt def lhs = + let + val (ct, ctxt') = Old_SMT_Utils.dest_all_cabs (Thm.rhs_of def) ctxt + val rule = disjE OF [Object_Logic.rulify ctxt' (Thm.assume lhs)] + in + Goal.init (mk_inj_prop ctxt' (Thm.dest_arg ct)) + |> apply (resolve_tac ctxt' @{thms injI}) + |> apply 
(Tactic.solve_tac ctxt' [rule, rule RS @{thm sym}]) + |> Goal.norm_result ctxt' o Goal.finish ctxt' + |> singleton (Variable.export ctxt' ctxt) + end + +fun prove_rhs ctxt def lhs = + Old_Z3_Proof_Tools.by_tac ctxt ( + CONVERSION (Conv.top_sweep_conv (K (Conv.rewr_conv def)) ctxt) + THEN' REPEAT_ALL_NEW (match_tac ctxt @{thms allI}) + THEN' resolve_tac ctxt [@{thm inv_f_f} OF [prove_inj_prop ctxt def lhs]]) + + +fun expand thm ct = + let + val cpat = Thm.dest_arg (Thm.rhs_of thm) + val (cl, cr) = Thm.dest_binop (Thm.dest_arg (Thm.dest_arg1 ct)) + val thm1 = Thm.instantiate (Thm.match (cpat, cl)) thm + val thm2 = Thm.instantiate (Thm.match (cpat, cr)) thm + in Conv.arg_conv (Conv.binop_conv (Conv.rewrs_conv [thm1, thm2])) ct end + +fun prove_lhs ctxt rhs = + let + val eq = Thm.symmetric (mk_meta_eq (Object_Logic.rulify ctxt (Thm.assume rhs))) + val conv = Old_SMT_Utils.binders_conv (K (expand eq)) ctxt + in + Old_Z3_Proof_Tools.by_tac ctxt ( + CONVERSION (Old_SMT_Utils.prop_conv conv) + THEN' Simplifier.simp_tac (put_simpset HOL_ss ctxt)) + end + + +fun mk_inv_def ctxt rhs = + let + val (ct, ctxt') = + Old_SMT_Utils.dest_all_cbinders (Old_SMT_Utils.dest_cprop rhs) ctxt + val (cl, cv) = Thm.dest_binop ct + val (cg, (cargs, cf)) = Drule.strip_comb cl ||> split_last + val cu = fold_rev Thm.lambda cargs (mk_inv_of ctxt' (Thm.lambda cv cf)) + in Thm.assume (Old_SMT_Utils.mk_cequals cg cu) end + +fun prove_inj_eq ctxt ct = + let + val (lhs, rhs) = + apply2 Old_SMT_Utils.mk_cprop (Thm.dest_binop (Old_SMT_Utils.dest_cprop ct)) + val lhs_thm = Thm.implies_intr rhs (prove_lhs ctxt rhs lhs) + val rhs_thm = + Thm.implies_intr lhs (prove_rhs ctxt (mk_inv_def ctxt rhs) lhs rhs) + in lhs_thm COMP (rhs_thm COMP @{thm iffI}) end + + +val swap_eq_thm = mk_meta_eq @{thm eq_commute} +val swap_disj_thm = mk_meta_eq @{thm disj_commute} + +fun swap_conv dest eq = + Old_SMT_Utils.if_true_conv ((op <) o apply2 Term.size_of_term o dest) + (Conv.rewr_conv eq) + +val swap_eq_conv = swap_conv 
HOLogic.dest_eq swap_eq_thm +val swap_disj_conv = swap_conv Old_SMT_Utils.dest_disj swap_disj_thm + +fun norm_conv ctxt = + swap_eq_conv then_conv + Conv.arg1_conv (Old_SMT_Utils.binders_conv (K swap_disj_conv) ctxt) then_conv + Conv.arg_conv (Old_SMT_Utils.binders_conv (K swap_eq_conv) ctxt) + +in + +fun prove_injectivity ctxt = + Old_Z3_Proof_Tools.by_tac ctxt ( + CONVERSION (Old_SMT_Utils.prop_conv (norm_conv ctxt)) + THEN' CSUBGOAL (uncurry (resolve_tac ctxt o single o prove_inj_eq ctxt))) + +end + +end diff --git a/src/main/Old_SMT/old_z3_proof_parser.ML b/src/main/Old_SMT/old_z3_proof_parser.ML new file mode 100644 index 0000000..aa44b11 --- /dev/null +++ b/src/main/Old_SMT/old_z3_proof_parser.ML @@ -0,0 +1,446 @@ +(* Title: HOL/Library/Old_SMT/old_z3_proof_parser.ML + Author: Sascha Boehme, TU Muenchen + +Parser for Z3 proofs. +*) + +signature OLD_Z3_PROOF_PARSER = +sig + (*proof rules*) + datatype rule = True_Axiom | Asserted | Goal | Modus_Ponens | Reflexivity | + Symmetry | Transitivity | Transitivity_Star | Monotonicity | Quant_Intro | + Distributivity | And_Elim | Not_Or_Elim | Rewrite | Rewrite_Star | + Pull_Quant | Pull_Quant_Star | Push_Quant | Elim_Unused_Vars | + Dest_Eq_Res | Quant_Inst | Hypothesis | Lemma | Unit_Resolution | + Iff_True | Iff_False | Commutativity | Def_Axiom | Intro_Def | Apply_Def | + Iff_Oeq | Nnf_Pos | Nnf_Neg | Nnf_Star | Cnf_Star | Skolemize | + Modus_Ponens_Oeq | Th_Lemma of string list + val string_of_rule: rule -> string + + (*proof parser*) + datatype proof_step = Proof_Step of { + rule: rule, + args: cterm list, + prems: int list, + prop: cterm } + val parse: Proof.context -> typ Symtab.table -> term Symtab.table -> + string list -> + (int * cterm) list * (int * proof_step) list * string list * Proof.context +end + +structure Old_Z3_Proof_Parser: OLD_Z3_PROOF_PARSER = +struct + + +(* proof rules *) + +datatype rule = True_Axiom | Asserted | Goal | Modus_Ponens | Reflexivity | + Symmetry | Transitivity | 
Transitivity_Star | Monotonicity | Quant_Intro | + Distributivity | And_Elim | Not_Or_Elim | Rewrite | Rewrite_Star | + Pull_Quant | Pull_Quant_Star | Push_Quant | Elim_Unused_Vars | Dest_Eq_Res | + Quant_Inst | Hypothesis | Lemma | Unit_Resolution | Iff_True | Iff_False | + Commutativity | Def_Axiom | Intro_Def | Apply_Def | Iff_Oeq | Nnf_Pos | + Nnf_Neg | Nnf_Star | Cnf_Star | Skolemize | Modus_Ponens_Oeq | + Th_Lemma of string list + +val rule_names = Symtab.make [ + ("true-axiom", True_Axiom), + ("asserted", Asserted), + ("goal", Goal), + ("mp", Modus_Ponens), + ("refl", Reflexivity), + ("symm", Symmetry), + ("trans", Transitivity), + ("trans*", Transitivity_Star), + ("monotonicity", Monotonicity), + ("quant-intro", Quant_Intro), + ("distributivity", Distributivity), + ("and-elim", And_Elim), + ("not-or-elim", Not_Or_Elim), + ("rewrite", Rewrite), + ("rewrite*", Rewrite_Star), + ("pull-quant", Pull_Quant), + ("pull-quant*", Pull_Quant_Star), + ("push-quant", Push_Quant), + ("elim-unused", Elim_Unused_Vars), + ("der", Dest_Eq_Res), + ("quant-inst", Quant_Inst), + ("hypothesis", Hypothesis), + ("lemma", Lemma), + ("unit-resolution", Unit_Resolution), + ("iff-true", Iff_True), + ("iff-false", Iff_False), + ("commutativity", Commutativity), + ("def-axiom", Def_Axiom), + ("intro-def", Intro_Def), + ("apply-def", Apply_Def), + ("iff~", Iff_Oeq), + ("nnf-pos", Nnf_Pos), + ("nnf-neg", Nnf_Neg), + ("nnf*", Nnf_Star), + ("cnf*", Cnf_Star), + ("sk", Skolemize), + ("mp~", Modus_Ponens_Oeq), + ("th-lemma", Th_Lemma [])] + +fun string_of_rule (Th_Lemma args) = space_implode " " ("th-lemma" :: args) + | string_of_rule r = + let fun eq_rule (s, r') = if r = r' then SOME s else NONE + in the (Symtab.get_first eq_rule rule_names) end + + + +(* certified terms and variables *) + +val (var_prefix, decl_prefix) = ("v", "sk") +(* + "decl_prefix" is for skolem constants (represented by free variables), + "var_prefix" is for pseudo-schematic variables (schematic with respect + to the 
Z3 proof, but represented by free variables). + + Both prefixes must be distinct to avoid name interferences. + More precisely, the naming of pseudo-schematic variables must be + context-independent modulo the current proof context to be able to + use fast inference kernel rules during proof reconstruction. +*) + +fun mk_inst ctxt vars = + let + val max = fold (Integer.max o fst) vars 0 + val ns = fst (Variable.variant_fixes (replicate (max + 1) var_prefix) ctxt) + fun mk (i, v) = + (dest_Var (Thm.term_of v), Thm.cterm_of ctxt (Free (nth ns i, Thm.typ_of_cterm v))) + in map mk vars end + +fun close ctxt (ct, vars) = + let + val inst = mk_inst ctxt vars + val names = fold (Term.add_free_names o Thm.term_of o snd) inst [] + in (Thm.instantiate_cterm ([], inst) ct, names) end + + +fun mk_bound ctxt (i, T) = + let val ct = Thm.cterm_of ctxt (Var ((Name.uu, 0), T)) + in (ct, [(i, ct)]) end + +local + fun mk_quant1 ctxt q T (ct, vars) = + let + val cv = + (case AList.lookup (op =) vars 0 of + SOME cv => cv + | _ => Thm.cterm_of ctxt (Var ((Name.uu, Thm.maxidx_of_cterm ct + 1), T))) + fun dec (i, v) = if i = 0 then NONE else SOME (i-1, v) + val vars' = map_filter dec vars + in (Thm.apply (Old_SMT_Utils.instT' cv q) (Thm.lambda cv ct), vars') end + + fun quant name = + Old_SMT_Utils.mk_const_pat @{theory} name (Old_SMT_Utils.destT1 o Old_SMT_Utils.destT1) + val forall = quant @{const_name All} + val exists = quant @{const_name Ex} +in + +fun mk_quant is_forall ctxt = + fold_rev (mk_quant1 ctxt (if is_forall then forall else exists)) + +end + +local + fun prep (ct, vars) (maxidx, all_vars) = + let + val maxidx' = maxidx + Thm.maxidx_of_cterm ct + 1 + + fun part (i, cv) = + (case AList.lookup (op =) all_vars i of + SOME cu => apfst (if cu aconvc cv then I else cons (cv, cu)) + | NONE => + let val cv' = Thm.incr_indexes_cterm maxidx cv + in apfst (cons (cv, cv')) #> apsnd (cons (i, cv')) end) + + val (inst, vars') = + if null vars then ([], vars) + else fold part vars ([], 
[]) + + in + (Thm.instantiate_cterm ([], map (apfst (dest_Var o Thm.term_of)) inst) ct, + (maxidx', vars' @ all_vars)) + end +in +fun mk_fun f ts = + let val (cts, (_, vars)) = fold_map prep ts (0, []) + in f cts |> Option.map (rpair vars) end +end + + + +(* proof parser *) + +datatype proof_step = Proof_Step of { + rule: rule, + args: cterm list, + prems: int list, + prop: cterm } + + +(** parser context **) + +val not_false = Thm.cterm_of @{context} (@{const Not} $ @{const False}) + +fun make_context ctxt typs terms = + let + val ctxt' = + ctxt + |> Symtab.fold (Variable.declare_typ o snd) typs + |> Symtab.fold (Variable.declare_term o snd) terms + + fun cert @{const True} = not_false + | cert t = Thm.cterm_of ctxt' t + + in (typs, Symtab.map (K cert) terms, Inttab.empty, [], [], ctxt') end + +fun fresh_name n (typs, terms, exprs, steps, vars, ctxt) = + let val (n', ctxt') = yield_singleton Variable.variant_fixes n ctxt + in (n', (typs, terms, exprs, steps, vars, ctxt')) end + +fun context_of (_, _, _, _, _, ctxt) = ctxt + +fun add_decl (n, T) (cx as (_, terms, _, _, _, _)) = + (case Symtab.lookup terms n of + SOME _ => cx + | NONE => cx |> fresh_name (decl_prefix ^ n) + |> (fn (m, (typs, terms, exprs, steps, vars, ctxt)) => + let + val upd = Symtab.update (n, Thm.cterm_of ctxt (Free (m, T))) + in (typs, upd terms, exprs, steps, vars, ctxt) end)) + +fun mk_typ (typs, _, _, _, _, ctxt) (s as Old_Z3_Interface.Sym (n, _)) = + (case Old_Z3_Interface.mk_builtin_typ ctxt s of + SOME T => SOME T + | NONE => Symtab.lookup typs n) + +fun mk_num (_, _, _, _, _, ctxt) (i, T) = + mk_fun (K (Old_Z3_Interface.mk_builtin_num ctxt i T)) [] + +fun mk_app (_, terms, _, _, _, ctxt) (s as Old_Z3_Interface.Sym (n, _), es) = + mk_fun (fn cts => + (case Old_Z3_Interface.mk_builtin_fun ctxt s cts of + SOME ct => SOME ct + | NONE => + Symtab.lookup terms n |> Option.map (Drule.list_comb o rpair cts))) es + +fun add_expr k t (typs, terms, exprs, steps, vars, ctxt) = + (typs, terms, 
Inttab.update (k, t) exprs, steps, vars, ctxt) + +fun lookup_expr (_, _, exprs, _, _, _) = Inttab.lookup exprs + +fun add_proof_step k ((r, args), prop) cx = + let + val (typs, terms, exprs, steps, vars, ctxt) = cx + val (ct, vs) = close ctxt prop + fun part (SOME e, _) (cts, ps) = (close ctxt e :: cts, ps) + | part (NONE, i) (cts, ps) = (cts, i :: ps) + val (args', prems) = fold (part o `(lookup_expr cx)) args ([], []) + val (cts, vss) = split_list args' + val step = Proof_Step {rule=r, args=rev cts, prems=rev prems, + prop = Old_SMT_Utils.mk_cprop ct} + val vars' = fold (union (op =)) (vs :: vss) vars + in (typs, terms, exprs, (k, step) :: steps, vars', ctxt) end + +fun finish (_, _, _, steps, vars, ctxt) = + let + fun coll (p as (k, Proof_Step {prems, rule, prop, ...})) (ars, ps, ids) = + (case rule of + Asserted => ((k, prop) :: ars, ps, ids) + | Goal => ((k, prop) :: ars, ps, ids) + | _ => + if Inttab.defined ids k then + (ars, p :: ps, fold (Inttab.update o rpair ()) prems ids) + else (ars, ps, ids)) + + val (ars, steps', _) = fold coll steps ([], [], Inttab.make [(~1, ())]) + in (ars, steps', vars, ctxt) end + + +(** core parser **) + +fun parse_exn line_no msg = raise Old_SMT_Failure.SMT (Old_SMT_Failure.Other_Failure + ("Z3 proof parser (line " ^ string_of_int line_no ^ "): " ^ msg)) + +fun scan_exn msg ((line_no, _), _) = parse_exn line_no msg + +fun with_info f cx = + (case f ((NONE, 1), cx) of + ((SOME _, _), cx') => cx' + | ((_, line_no), _) => parse_exn line_no "bad proof") + +fun parse_line _ _ (st as ((SOME _, _), _)) = st + | parse_line scan line ((_, line_no), cx) = + let val st = ((line_no, cx), raw_explode line) + in + (case Scan.catch (Scan.finite' Symbol.stopper (Scan.option scan)) st of + (SOME r, ((_, cx'), _)) => ((r, line_no+1), cx') + | (NONE, _) => parse_exn line_no ("bad proof line: " ^ quote line)) + end + +fun with_context f x ((line_no, cx), st) = + let val (y, cx') = f x cx + in (y, ((line_no, cx'), st)) end + + +fun lookup_context 
f x (st as ((_, cx), _)) = (f cx x, st) + + +(** parser combinators and parsers for basic entities **) + +fun $$ s = Scan.lift (Scan.$$ s) +fun this s = Scan.lift (Scan.this_string s) +val is_blank = Symbol.is_ascii_blank +fun blank st = Scan.lift (Scan.many1 is_blank) st +fun sep scan = blank |-- scan +fun seps scan = Scan.repeat (sep scan) +fun seps1 scan = Scan.repeat1 (sep scan) +fun seps_by scan_sep scan = scan ::: Scan.repeat (scan_sep |-- scan) + +val lpar = "(" and rpar = ")" +val lbra = "[" and rbra = "]" +fun par scan = $$ lpar |-- scan --| $$ rpar +fun bra scan = $$ lbra |-- scan --| $$ rbra + +val digit = (fn + "0" => SOME 0 | "1" => SOME 1 | "2" => SOME 2 | "3" => SOME 3 | + "4" => SOME 4 | "5" => SOME 5 | "6" => SOME 6 | "7" => SOME 7 | + "8" => SOME 8 | "9" => SOME 9 | _ => NONE) + +fun digits st = (Scan.lift (Scan.many1 Symbol.is_ascii_digit) >> implode) st + +fun nat_num st = (Scan.lift (Scan.repeat1 (Scan.some digit)) >> (fn ds => + fold (fn d => fn i => i * 10 + d) ds 0)) st + +fun int_num st = (Scan.optional ($$ "-" >> K (fn i => ~i)) I :|-- + (fn sign => nat_num >> sign)) st + +val is_char = Symbol.is_ascii_letter orf Symbol.is_ascii_digit orf + member (op =) (raw_explode "_+*-/%~=<>$&|?!.@^#") + +fun name st = (Scan.lift (Scan.many1 is_char) >> implode) st + +fun sym st = (name -- + Scan.optional (bra (seps_by ($$ ":") sym)) [] >> Old_Z3_Interface.Sym) st + +fun id st = ($$ "#" |-- nat_num) st + + +(** parsers for various parts of Z3 proofs **) + +fun sort st = Scan.first [ + this "array" |-- bra (sort --| $$ ":" -- sort) >> (op -->), + par (this "->" |-- seps1 sort) >> ((op --->) o split_last), + sym :|-- (fn s as Old_Z3_Interface.Sym (n, _) => lookup_context mk_typ s :|-- (fn + SOME T => Scan.succeed T + | NONE => scan_exn ("unknown sort: " ^ quote n)))] st + +fun bound st = (par (this ":var" |-- sep nat_num -- sep sort) :|-- + lookup_context (mk_bound o context_of)) st + +fun numb (n as (i, _)) = lookup_context mk_num n :|-- (fn + SOME n' 
=> Scan.succeed n' + | NONE => scan_exn ("unknown number: " ^ quote (string_of_int i))) + +fun appl (app as (Old_Z3_Interface.Sym (n, _), _)) = + lookup_context mk_app app :|-- (fn + SOME app' => Scan.succeed app' + | NONE => scan_exn ("unknown function symbol: " ^ quote n)) + +fun bv_size st = (digits >> (fn sz => + Old_Z3_Interface.Sym ("bv", [Old_Z3_Interface.Sym (sz, [])]))) st + +fun bv_number_sort st = (bv_size :|-- lookup_context mk_typ :|-- (fn + SOME cT => Scan.succeed cT + | NONE => scan_exn ("unknown sort: " ^ quote "bv"))) st + +fun bv_number st = + (this "bv" |-- bra (nat_num --| $$ ":" -- bv_number_sort) :|-- numb) st + +fun frac_number st = ( + int_num --| $$ "/" -- int_num --| this "::" -- sort :|-- (fn ((i, j), T) => + numb (i, T) -- numb (j, T) :|-- (fn (n, m) => + appl (Old_Z3_Interface.Sym ("/", []), [n, m])))) st + +fun plain_number st = (int_num --| this "::" -- sort :|-- numb) st + +fun number st = Scan.first [bv_number, frac_number, plain_number] st + +fun constant st = ((sym >> rpair []) :|-- appl) st + +fun expr_id st = (id :|-- (fn i => lookup_context lookup_expr i :|-- (fn + SOME e => Scan.succeed e + | NONE => scan_exn ("unknown term id: " ^ quote (string_of_int i))))) st + +fun arg st = Scan.first [expr_id, number, constant] st + +fun application st = par ((sym -- Scan.repeat1 (sep arg)) :|-- appl) st + +fun variables st = par (this "vars" |-- seps1 (par (name |-- sep sort))) st + +fun pats st = seps (par ((this ":pat" || this ":nopat") |-- seps1 id)) st + +val ctrue = Thm.cterm_of @{context} @{const True} + +fun pattern st = par (this "pattern" |-- Scan.repeat1 (sep arg) >> + (the o mk_fun (K (SOME ctrue)))) st + +fun quant_kind st = st |> ( + this "forall" >> K (mk_quant true o context_of) || + this "exists" >> K (mk_quant false o context_of)) + +fun quantifier st = + (par (quant_kind -- sep variables --| pats -- sep arg) :|-- + lookup_context (fn cx => fn ((mk_q, Ts), body) => mk_q cx Ts body)) st + +fun expr k = + Scan.first 
[bound, quantifier, pattern, application, number, constant] :|-- + with_context (pair NONE oo add_expr k) + +val rule_arg = id + (* if this is modified, then 'th_lemma_arg' needs reviewing *) + +fun th_lemma_arg st = Scan.unless (sep rule_arg >> K "" || $$ rbra) (sep name) st + +fun rule_name st = ((name >> `(Symtab.lookup rule_names)) :|-- (fn + (SOME (Th_Lemma _), _) => Scan.repeat th_lemma_arg >> Th_Lemma + | (SOME r, _) => Scan.succeed r + | (NONE, n) => scan_exn ("unknown proof rule: " ^ quote n))) st + +fun rule f k = + bra (rule_name -- seps id) --| $$ ":" -- sep arg #-> + with_context (pair (f k) oo add_proof_step k) + +fun decl st = (this "decl" |-- sep name --| sep (this "::") -- sep sort :|-- + with_context (pair NONE oo add_decl)) st + +fun def st = (id --| sep (this ":=")) st + +fun node st = st |> ( + decl || + def :|-- (fn k => sep (expr k) || sep (rule (K NONE) k)) || + rule SOME ~1) + + +(** overall parser **) + +(* + Currently, terms are parsed bottom-up (i.e., along with parsing the proof + text line by line), but proofs are reconstructed top-down (i.e. by an + in-order top-down traversal of the proof tree/graph). The latter approach + was taken because some proof texts comprise irrelevant proof steps which + will thus not be reconstructed. This approach might also be beneficial + for constructing terms, but it would also increase the complexity of the + (otherwise rather modular) code. +*) + +fun parse ctxt typs terms proof_text = + make_context ctxt typs terms + |> with_info (fold (parse_line node) proof_text) + |> finish + +end diff --git a/src/main/Old_SMT/old_z3_proof_reconstruction.ML b/src/main/Old_SMT/old_z3_proof_reconstruction.ML new file mode 100644 index 0000000..e2302cd --- /dev/null +++ b/src/main/Old_SMT/old_z3_proof_reconstruction.ML @@ -0,0 +1,891 @@ +(* Title: HOL/Library/Old_SMT/old_z3_proof_reconstruction.ML + Author: Sascha Boehme, TU Muenchen + +Proof reconstruction for proofs found by Z3. 
+*) + +signature OLD_Z3_PROOF_RECONSTRUCTION = +sig + val add_z3_rule: thm -> Context.generic -> Context.generic + val reconstruct: Proof.context -> Old_SMT_Translate.recon -> string list -> int list * thm +end + +structure Old_Z3_Proof_Reconstruction: OLD_Z3_PROOF_RECONSTRUCTION = +struct + + +fun z3_exn msg = raise Old_SMT_Failure.SMT (Old_SMT_Failure.Other_Failure + ("Z3 proof reconstruction: " ^ msg)) + + + +(* net of schematic rules *) + +local + val description = "declaration of Z3 proof rules" + + val eq = Thm.eq_thm + + structure Old_Z3_Rules = Generic_Data + ( + type T = thm Net.net + val empty = Net.empty + val extend = I + val merge = Net.merge eq + ) + + fun prep context = + `Thm.prop_of o rewrite_rule (Context.proof_of context) [Old_Z3_Proof_Literals.rewrite_true] + + fun ins thm context = + context |> Old_Z3_Rules.map (fn net => Net.insert_term eq (prep context thm) net handle Net.INSERT => net) + fun rem thm context = + context |> Old_Z3_Rules.map (fn net => Net.delete_term eq (prep context thm) net handle Net.DELETE => net) + + val add = Thm.declaration_attribute ins + val del = Thm.declaration_attribute rem +in + +val add_z3_rule = ins + +fun by_schematic_rule ctxt ct = + the (Old_Z3_Proof_Tools.net_instance (Old_Z3_Rules.get (Context.Proof ctxt)) ct) + +val _ = Theory.setup + (Attrib.setup @{binding old_z3_rule} (Attrib.add_del add del) description #> + Global_Theory.add_thms_dynamic (@{binding old_z3_rule}, Net.content o Old_Z3_Rules.get)) + +end + + + +(* proof tools *) + +fun named ctxt name prover ct = + let val _ = Old_SMT_Config.trace_msg ctxt I ("Z3: trying " ^ name ^ " ...") + in prover ct end + +fun NAMED ctxt name tac i st = + let val _ = Old_SMT_Config.trace_msg ctxt I ("Z3: trying " ^ name ^ " ...") + in tac i st end + +fun pretty_goal ctxt thms t = + [Pretty.block [Pretty.str "proposition: ", Syntax.pretty_term ctxt t]] + |> not (null thms) ? 
cons (Pretty.big_list "assumptions:" + (map (Thm.pretty_thm ctxt) thms)) + +fun try_apply ctxt thms = + let + fun try_apply_err ct = Pretty.string_of (Pretty.chunks [ + Pretty.big_list ("Z3 found a proof," ^ + " but proof reconstruction failed at the following subgoal:") + (pretty_goal ctxt thms (Thm.term_of ct)), + Pretty.str ("Declaring a rule as [old_z3_rule] might solve this problem.")]) + + fun apply [] ct = error (try_apply_err ct) + | apply (prover :: provers) ct = + (case try prover ct of + SOME thm => (Old_SMT_Config.trace_msg ctxt I "Z3: succeeded"; thm) + | NONE => apply provers ct) + + fun schematic_label full = "schematic rules" |> full ? suffix " (full)" + fun schematic ctxt full ct = + ct + |> full ? fold_rev (curry Drule.mk_implies o Thm.cprop_of) thms + |> named ctxt (schematic_label full) (by_schematic_rule ctxt) + |> fold Thm.elim_implies thms + + in apply o cons (schematic ctxt false) o cons (schematic ctxt true) end + +local + val rewr_if = + @{lemma "(if P then Q1 else Q2) = ((P --> Q1) & (~P --> Q2))" by simp} +in + +fun HOL_fast_tac ctxt = + Classical.fast_tac (put_claset HOL_cs ctxt) + +fun simp_fast_tac ctxt = + Simplifier.simp_tac (put_simpset HOL_ss ctxt addsimps [rewr_if]) + THEN_ALL_NEW HOL_fast_tac ctxt + +end + + + +(* theorems and proofs *) + +(** theorem incarnations **) + +datatype theorem = + Thm of thm | (* theorem without special features *) + MetaEq of thm | (* meta equality "t == s" *) + Literals of thm * Old_Z3_Proof_Literals.littab + (* "P1 & ... 
& Pn" and table of all literals P1, ..., Pn *) + +fun thm_of (Thm thm) = thm + | thm_of (MetaEq thm) = thm COMP @{thm meta_eq_to_obj_eq} + | thm_of (Literals (thm, _)) = thm + +fun meta_eq_of (MetaEq thm) = thm + | meta_eq_of p = mk_meta_eq (thm_of p) + +fun literals_of (Literals (_, lits)) = lits + | literals_of p = Old_Z3_Proof_Literals.make_littab [thm_of p] + + + +(** core proof rules **) + +(* assumption *) + +local + val remove_trigger = mk_meta_eq @{thm trigger_def} + val remove_weight = mk_meta_eq @{thm weight_def} + val remove_fun_app = mk_meta_eq @{thm fun_app_def} + + fun rewrite_conv _ [] = Conv.all_conv + | rewrite_conv ctxt eqs = Simplifier.full_rewrite (empty_simpset ctxt addsimps eqs) + + val prep_rules = [@{thm Let_def}, remove_trigger, remove_weight, + remove_fun_app, Old_Z3_Proof_Literals.rewrite_true] + + fun rewrite _ [] = I + | rewrite ctxt eqs = Conv.fconv_rule (rewrite_conv ctxt eqs) + + fun lookup_assm assms_net ct = + Old_Z3_Proof_Tools.net_instances assms_net ct + |> map (fn ithm as (_, thm) => (ithm, Thm.cprop_of thm aconvc ct)) +in + +fun add_asserted outer_ctxt rewrite_rules assms asserted ctxt = + let + val eqs = map (rewrite ctxt [Old_Z3_Proof_Literals.rewrite_true]) rewrite_rules + val eqs' = union Thm.eq_thm eqs prep_rules + + val assms_net = + assms + |> map (apsnd (rewrite ctxt eqs')) + |> map (apsnd (Conv.fconv_rule Thm.eta_conversion)) + |> Old_Z3_Proof_Tools.thm_net_of snd + + fun revert_conv ctxt = rewrite_conv ctxt eqs' then_conv Thm.eta_conversion + + fun assume thm ctxt = + let + val ct = Thm.cprem_of thm 1 + val (thm', ctxt') = yield_singleton Assumption.add_assumes ct ctxt + in (Thm.implies_elim thm thm', ctxt') end + + fun add1 idx thm1 ((i, th), exact) ((is, thms), (ctxt, ptab)) = + let + val (thm, ctxt') = + if exact then (Thm.implies_elim thm1 th, ctxt) + else assume thm1 ctxt + val thms' = if exact then thms else th :: thms + in + ((insert (op =) i is, thms'), + (ctxt', Inttab.update (idx, Thm thm) ptab)) + end + + 
fun add (idx, ct) (cx as ((is, thms), (ctxt, ptab))) = + let + val thm1 = + Thm.trivial ct + |> Conv.fconv_rule (Conv.arg1_conv (revert_conv outer_ctxt)) + val thm2 = singleton (Variable.export ctxt outer_ctxt) thm1 + in + (case lookup_assm assms_net (Thm.cprem_of thm2 1) of + [] => + let val (thm, ctxt') = assume thm1 ctxt + in ((is, thms), (ctxt', Inttab.update (idx, Thm thm) ptab)) end + | ithms => fold (add1 idx thm1) ithms cx) + end + in fold add asserted (([], []), (ctxt, Inttab.empty)) end + +end + + +(* P = Q ==> P ==> Q or P --> Q ==> P ==> Q *) +local + val precomp = Old_Z3_Proof_Tools.precompose2 + val comp = Old_Z3_Proof_Tools.compose + + val meta_iffD1 = @{lemma "P == Q ==> P ==> (Q::bool)" by simp} + val meta_iffD1_c = precomp Thm.dest_binop meta_iffD1 + + val iffD1_c = precomp (Thm.dest_binop o Thm.dest_arg) @{thm iffD1} + val mp_c = precomp (Thm.dest_binop o Thm.dest_arg) @{thm mp} +in +fun mp (MetaEq thm) p = Thm (Thm.implies_elim (comp meta_iffD1_c thm) p) + | mp p_q p = + let + val pq = thm_of p_q + val thm = comp iffD1_c pq handle THM _ => comp mp_c pq + in Thm (Thm.implies_elim thm p) end +end + + +(* and_elim: P1 & ... & Pn ==> Pi *) +(* not_or_elim: ~(P1 | ... 
| Pn) ==> ~Pi *) +local + fun is_sublit conj t = Old_Z3_Proof_Literals.exists_lit conj (fn u => u aconv t) + + fun derive conj t lits idx ptab = + let + val lit = the (Old_Z3_Proof_Literals.get_first_lit (is_sublit conj t) lits) + val ls = Old_Z3_Proof_Literals.explode conj false false [t] lit + val lits' = fold Old_Z3_Proof_Literals.insert_lit ls + (Old_Z3_Proof_Literals.delete_lit lit lits) + + fun upd thm = Literals (thm_of thm, lits') + val ptab' = Inttab.map_entry idx upd ptab + in (the (Old_Z3_Proof_Literals.lookup_lit lits' t), ptab') end + + fun lit_elim conj (p, idx) ct ptab = + let val lits = literals_of p + in + (case Old_Z3_Proof_Literals.lookup_lit lits (Old_SMT_Utils.term_of ct) of + SOME lit => (Thm lit, ptab) + | NONE => apfst Thm (derive conj (Old_SMT_Utils.term_of ct) lits idx ptab)) + end +in +val and_elim = lit_elim true +val not_or_elim = lit_elim false +end + + +(* P1, ..., Pn |- False ==> |- ~P1 | ... | ~Pn *) +local + fun step lit thm = + Thm.implies_elim (Thm.implies_intr (Thm.cprop_of lit) thm) lit + val explode_disj = Old_Z3_Proof_Literals.explode false false false + fun intro hyps thm th = fold step (explode_disj hyps th) thm + + fun dest_ccontr ct = [Thm.dest_arg (Thm.dest_arg (Thm.dest_arg1 ct))] + val ccontr = Old_Z3_Proof_Tools.precompose dest_ccontr @{thm ccontr} +in +fun lemma thm ct = + let + val cu = Old_Z3_Proof_Literals.negate (Thm.dest_arg ct) + val hyps = map_filter (try HOLogic.dest_Trueprop) (Thm.hyps_of thm) + val th = Old_Z3_Proof_Tools.under_assumption (intro hyps thm) cu + in Thm (Old_Z3_Proof_Tools.compose ccontr th) end +end + + +(* \/{P1, ..., Pn, Q1, ..., Qn}, ~P1, ..., ~Pn ==> \/{Q1, ..., Qn} *) +local + val explode_disj = Old_Z3_Proof_Literals.explode false true false + val join_disj = Old_Z3_Proof_Literals.join false + fun unit thm thms th = + let + val t = @{const Not} $ Old_SMT_Utils.prop_of thm + val ts = map Old_SMT_Utils.prop_of thms + in + join_disj (Old_Z3_Proof_Literals.make_littab (thms @ explode_disj ts 
th)) t + end + + fun dest_arg2 ct = Thm.dest_arg (Thm.dest_arg ct) + fun dest ct = apply2 dest_arg2 (Thm.dest_binop ct) + val contrapos = + Old_Z3_Proof_Tools.precompose2 dest @{lemma "(~P ==> ~Q) ==> Q ==> P" by fast} +in +fun unit_resolution thm thms ct = + Old_Z3_Proof_Literals.negate (Thm.dest_arg ct) + |> Old_Z3_Proof_Tools.under_assumption (unit thm thms) + |> Thm o Old_Z3_Proof_Tools.discharge thm o Old_Z3_Proof_Tools.compose contrapos +end + + +(* P ==> P == True or P ==> P == False *) +local + val iff1 = @{lemma "P ==> P == (~ False)" by simp} + val iff2 = @{lemma "~P ==> P == False" by simp} +in +fun iff_true thm = MetaEq (thm COMP iff1) +fun iff_false thm = MetaEq (thm COMP iff2) +end + + +(* distributivity of | over & *) +fun distributivity ctxt = Thm o try_apply ctxt [] [ + named ctxt "fast" (Old_Z3_Proof_Tools.by_tac ctxt (HOL_fast_tac ctxt))] + (* FIXME: not very well tested *) + + +(* Tseitin-like axioms *) +local + val disjI1 = @{lemma "(P ==> Q) ==> ~P | Q" by fast} + val disjI2 = @{lemma "(~P ==> Q) ==> P | Q" by fast} + val disjI3 = @{lemma "(~Q ==> P) ==> P | Q" by fast} + val disjI4 = @{lemma "(Q ==> P) ==> P | ~Q" by fast} + + fun prove' conj1 conj2 ct2 thm = + let + val littab = + Old_Z3_Proof_Literals.explode conj1 true (conj1 <> conj2) [] thm + |> cons Old_Z3_Proof_Literals.true_thm + |> Old_Z3_Proof_Literals.make_littab + in Old_Z3_Proof_Literals.join conj2 littab (Thm.term_of ct2) end + + fun prove rule (ct1, conj1) (ct2, conj2) = + Old_Z3_Proof_Tools.under_assumption (prove' conj1 conj2 ct2) ct1 COMP rule + + fun prove_def_axiom ct = + let val (ct1, ct2) = Thm.dest_binop (Thm.dest_arg ct) + in + (case Thm.term_of ct1 of + @{const Not} $ (@{const HOL.conj} $ _ $ _) => + prove disjI1 (Thm.dest_arg ct1, true) (ct2, true) + | @{const HOL.conj} $ _ $ _ => + prove disjI3 (Old_Z3_Proof_Literals.negate ct2, false) (ct1, true) + | @{const Not} $ (@{const HOL.disj} $ _ $ _) => + prove disjI3 (Old_Z3_Proof_Literals.negate ct2, false) (ct1, false) 
+ | @{const HOL.disj} $ _ $ _ => + prove disjI2 (Old_Z3_Proof_Literals.negate ct1, false) (ct2, true) + | Const (@{const_name distinct}, _) $ _ => + let + fun dis_conv cv = Conv.arg_conv (Conv.arg1_conv cv) + val unfold_dis_conv = dis_conv Old_Z3_Proof_Tools.unfold_distinct_conv + fun prv cu = + let val (cu1, cu2) = Thm.dest_binop (Thm.dest_arg cu) + in prove disjI4 (Thm.dest_arg cu2, true) (cu1, true) end + in Old_Z3_Proof_Tools.with_conv unfold_dis_conv prv ct end + | @{const Not} $ (Const (@{const_name distinct}, _) $ _) => + let + fun dis_conv cv = Conv.arg_conv (Conv.arg1_conv (Conv.arg_conv cv)) + val unfold_dis_conv = dis_conv Old_Z3_Proof_Tools.unfold_distinct_conv + fun prv cu = + let val (cu1, cu2) = Thm.dest_binop (Thm.dest_arg cu) + in prove disjI1 (Thm.dest_arg cu1, true) (cu2, true) end + in Old_Z3_Proof_Tools.with_conv unfold_dis_conv prv ct end + | _ => raise CTERM ("prove_def_axiom", [ct])) + end +in +fun def_axiom ctxt = Thm o try_apply ctxt [] [ + named ctxt "conj/disj/distinct" prove_def_axiom, + Old_Z3_Proof_Tools.by_abstraction 0 (true, false) ctxt [] (fn ctxt' => + named ctxt' "simp+fast" (Old_Z3_Proof_Tools.by_tac ctxt (simp_fast_tac ctxt')))] +end + + +(* local definitions *) +local + val intro_rules = [ + @{lemma "n == P ==> (~n | P) & (n | ~P)" by simp}, + @{lemma "n == (if P then s else t) ==> (~P | n = s) & (P | n = t)" + by simp}, + @{lemma "n == P ==> n = P" by (rule meta_eq_to_obj_eq)} ] + + val apply_rules = [ + @{lemma "(~n | P) & (n | ~P) ==> P == n" by (atomize(full)) fast}, + @{lemma "(~P | n = s) & (P | n = t) ==> (if P then s else t) == n" + by (atomize(full)) fastforce} ] + + val inst_rule = Old_Z3_Proof_Tools.match_instantiate Thm.dest_arg + + fun apply_rule ct = + (case get_first (try (inst_rule ct)) intro_rules of + SOME thm => thm + | NONE => raise CTERM ("intro_def", [ct])) +in +fun intro_def ct = Old_Z3_Proof_Tools.make_hyp_def (apply_rule ct) #>> Thm + +fun apply_def thm = + get_first (try (fn rule => MetaEq (thm COMP 
rule))) apply_rules + |> the_default (Thm thm) +end + + +(* negation normal form *) +local + val quant_rules1 = ([ + @{lemma "(!!x. P x == Q) ==> ALL x. P x == Q" by simp}, + @{lemma "(!!x. P x == Q) ==> EX x. P x == Q" by simp}], [ + @{lemma "(!!x. P x == Q x) ==> ALL x. P x == ALL x. Q x" by simp}, + @{lemma "(!!x. P x == Q x) ==> EX x. P x == EX x. Q x" by simp}]) + + val quant_rules2 = ([ + @{lemma "(!!x. ~P x == Q) ==> ~(ALL x. P x) == Q" by simp}, + @{lemma "(!!x. ~P x == Q) ==> ~(EX x. P x) == Q" by simp}], [ + @{lemma "(!!x. ~P x == Q x) ==> ~(ALL x. P x) == EX x. Q x" by simp}, + @{lemma "(!!x. ~P x == Q x) ==> ~(EX x. P x) == ALL x. Q x" by simp}]) + + fun nnf_quant_tac ctxt thm (qs as (qs1, qs2)) i st = ( + resolve_tac ctxt [thm] ORELSE' + (match_tac ctxt qs1 THEN' nnf_quant_tac ctxt thm qs) ORELSE' + (match_tac ctxt qs2 THEN' nnf_quant_tac ctxt thm qs)) i st + + fun nnf_quant_tac_varified ctxt vars eq = + nnf_quant_tac ctxt (Old_Z3_Proof_Tools.varify vars eq) + + fun nnf_quant ctxt vars qs p ct = + Old_Z3_Proof_Tools.as_meta_eq ct + |> Old_Z3_Proof_Tools.by_tac ctxt (nnf_quant_tac_varified ctxt vars (meta_eq_of p) qs) + + fun prove_nnf ctxt = try_apply ctxt [] [ + named ctxt "conj/disj" Old_Z3_Proof_Literals.prove_conj_disj_eq, + Old_Z3_Proof_Tools.by_abstraction 0 (true, false) ctxt [] (fn ctxt' => + named ctxt' "simp+fast" (Old_Z3_Proof_Tools.by_tac ctxt' (simp_fast_tac ctxt')))] +in +fun nnf ctxt vars ps ct = + (case Old_SMT_Utils.term_of ct of + _ $ (l as Const _ $ Abs _) $ (r as Const _ $ Abs _) => + if l aconv r + then MetaEq (Thm.reflexive (Thm.dest_arg (Thm.dest_arg ct))) + else MetaEq (nnf_quant ctxt vars quant_rules1 (hd ps) ct) + | _ $ (@{const Not} $ (Const _ $ Abs _)) $ (Const _ $ Abs _) => + MetaEq (nnf_quant ctxt vars quant_rules2 (hd ps) ct) + | _ => + let + val nnf_rewr_conv = Conv.arg_conv (Conv.arg_conv + (Old_Z3_Proof_Tools.unfold_eqs ctxt + (map (Thm.symmetric o meta_eq_of) ps))) + in Thm (Old_Z3_Proof_Tools.with_conv nnf_rewr_conv 
(prove_nnf ctxt) ct) end) +end + + + +(** equality proof rules **) + +(* |- t = t *) +fun refl ct = MetaEq (Thm.reflexive (Thm.dest_arg (Thm.dest_arg ct))) + + +(* s = t ==> t = s *) +local + val symm_rule = @{lemma "s = t ==> t == s" by simp} +in +fun symm (MetaEq thm) = MetaEq (Thm.symmetric thm) + | symm p = MetaEq (thm_of p COMP symm_rule) +end + + +(* s = t ==> t = u ==> s = u *) +local + val trans1 = @{lemma "s == t ==> t = u ==> s == u" by simp} + val trans2 = @{lemma "s = t ==> t == u ==> s == u" by simp} + val trans3 = @{lemma "s = t ==> t = u ==> s == u" by simp} +in +fun trans (MetaEq thm1) (MetaEq thm2) = MetaEq (Thm.transitive thm1 thm2) + | trans (MetaEq thm) q = MetaEq (thm_of q COMP (thm COMP trans1)) + | trans p (MetaEq thm) = MetaEq (thm COMP (thm_of p COMP trans2)) + | trans p q = MetaEq (thm_of q COMP (thm_of p COMP trans3)) +end + + +(* t1 = s1 ==> ... ==> tn = sn ==> f t1 ... tn = f s1 .. sn + (reflexive antecendents are droppped) *) +local + exception MONO + + fun prove_refl (ct, _) = Thm.reflexive ct + fun prove_comb f g cp = + let val ((ct1, ct2), (cu1, cu2)) = apply2 Thm.dest_comb cp + in Thm.combination (f (ct1, cu1)) (g (ct2, cu2)) end + fun prove_arg f = prove_comb prove_refl f + + fun prove f cp = prove_comb (prove f) f cp handle CTERM _ => prove_refl cp + + fun prove_nary is_comb f = + let + fun prove (cp as (ct, _)) = f cp handle MONO => + if is_comb (Thm.term_of ct) + then prove_comb (prove_arg prove) prove cp + else prove_refl cp + in prove end + + fun prove_list f n cp = + if n = 0 then prove_refl cp + else prove_comb (prove_arg f) (prove_list f (n-1)) cp + + fun with_length f (cp as (cl, _)) = + f (length (HOLogic.dest_list (Thm.term_of cl))) cp + + fun prove_distinct f = prove_arg (with_length (prove_list f)) + + fun prove_eq exn lookup cp = + (case lookup (Logic.mk_equals (apply2 Thm.term_of cp)) of + SOME eq => eq + | NONE => if exn then raise MONO else prove_refl cp) + + val prove_exn = prove_eq true + and prove_safe = 
prove_eq false + + fun mono f (cp as (cl, _)) = + (case Term.head_of (Thm.term_of cl) of + @{const HOL.conj} => prove_nary Old_Z3_Proof_Literals.is_conj (prove_exn f) + | @{const HOL.disj} => prove_nary Old_Z3_Proof_Literals.is_disj (prove_exn f) + | Const (@{const_name distinct}, _) => prove_distinct (prove_safe f) + | _ => prove (prove_safe f)) cp +in +fun monotonicity eqs ct = + let + fun and_symmetric (t, thm) = [(t, thm), (t, Thm.symmetric thm)] + val teqs = maps (and_symmetric o `Thm.prop_of o meta_eq_of) eqs + val lookup = AList.lookup (op aconv) teqs + val cp = Thm.dest_binop (Thm.dest_arg ct) + in MetaEq (prove_exn lookup cp handle MONO => mono lookup cp) end +end + + +(* |- f a b = f b a (where f is equality) *) +local + val rule = @{lemma "a = b == b = a" by (atomize(full)) (rule eq_commute)} +in +fun commutativity ct = + MetaEq (Old_Z3_Proof_Tools.match_instantiate I + (Old_Z3_Proof_Tools.as_meta_eq ct) rule) +end + + + +(** quantifier proof rules **) + +(* P ?x = Q ?x ==> (ALL x. P x) = (ALL x. Q x) + P ?x = Q ?x ==> (EX x. P x) = (EX x. Q x) *) +local + val rules = [ + @{lemma "(!!x. P x == Q x) ==> (ALL x. P x) == (ALL x. Q x)" by simp}, + @{lemma "(!!x. P x == Q x) ==> (EX x. P x) == (EX x. Q x)" by simp}] +in +fun quant_intro ctxt vars p ct = + let + val thm = meta_eq_of p + val rules' = Old_Z3_Proof_Tools.varify vars thm :: rules + val cu = Old_Z3_Proof_Tools.as_meta_eq ct + val tac = REPEAT_ALL_NEW (match_tac ctxt rules') + in MetaEq (Old_Z3_Proof_Tools.by_tac ctxt tac cu) end +end + + +(* |- ((ALL x. P x) | Q) = (ALL x. P x | Q) *) +fun pull_quant ctxt = Thm o try_apply ctxt [] [ + named ctxt "fast" (Old_Z3_Proof_Tools.by_tac ctxt (HOL_fast_tac ctxt))] + (* FIXME: not very well tested *) + + +(* |- (ALL x. P x & Q x) = ((ALL x. P x) & (ALL x. Q x)) *) +fun push_quant ctxt = Thm o try_apply ctxt [] [ + named ctxt "fast" (Old_Z3_Proof_Tools.by_tac ctxt (HOL_fast_tac ctxt))] + (* FIXME: not very well tested *) + + +(* |- (ALL x1 ... xn y1 ... yn. 
P x1 ... xn) = (ALL x1 ... xn. P x1 ... xn) *) +local + val elim_all = @{lemma "P = Q ==> (ALL x. P) = Q" by fast} + val elim_ex = @{lemma "P = Q ==> (EX x. P) = Q" by fast} + + fun elim_unused_tac ctxt i st = ( + match_tac ctxt [@{thm refl}] + ORELSE' (match_tac ctxt [elim_all, elim_ex] THEN' elim_unused_tac ctxt) + ORELSE' ( + match_tac ctxt [@{thm iff_allI}, @{thm iff_exI}] + THEN' elim_unused_tac ctxt)) i st +in + +fun elim_unused_vars ctxt = Thm o Old_Z3_Proof_Tools.by_tac ctxt (elim_unused_tac ctxt) + +end + + +(* |- (ALL x1 ... xn. ~(x1 = t1 & ... xn = tn) | P x1 ... xn) = P t1 ... tn *) +fun dest_eq_res ctxt = Thm o try_apply ctxt [] [ + named ctxt "fast" (Old_Z3_Proof_Tools.by_tac ctxt (HOL_fast_tac ctxt))] + (* FIXME: not very well tested *) + + +(* |- ~(ALL x1...xn. P x1...xn) | P a1...an *) +local + val rule = @{lemma "~ P x | Q ==> ~(ALL x. P x) | Q" by fast} +in +fun quant_inst ctxt = Thm o Old_Z3_Proof_Tools.by_tac ctxt ( + REPEAT_ALL_NEW (match_tac ctxt [rule]) + THEN' resolve_tac ctxt @{thms excluded_middle}) +end + + +(* |- (EX x. P x) = P c |- ~(ALL x. P x) = ~ P c *) +local + val forall = + Old_SMT_Utils.mk_const_pat @{theory} @{const_name Pure.all} + (Old_SMT_Utils.destT1 o Old_SMT_Utils.destT1) + fun mk_forall cv ct = + Thm.apply (Old_SMT_Utils.instT' cv forall) (Thm.lambda cv ct) + + fun get_vars f mk pred ctxt t = + Term.fold_aterms f t [] + |> map_filter (fn v => + if pred v then SOME (Thm.cterm_of ctxt (mk v)) else NONE) + + fun close vars f ct ctxt = + let + val frees_of = get_vars Term.add_frees Free (member (op =) vars o fst) + val vs = frees_of ctxt (Thm.term_of ct) + val (thm, ctxt') = f (fold_rev mk_forall vs ct) ctxt + val vars_of = get_vars Term.add_vars Var (K true) ctxt' + in + (Thm.instantiate ([], map (dest_Var o Thm.term_of) (vars_of (Thm.prop_of thm)) ~~ vs) thm, + ctxt') + end + + val sk_rules = @{lemma + "c = (SOME x. P x) ==> (EX x. P x) = P c" + "c = (SOME x. ~P x) ==> (~(ALL x. 
P x)) = (~P c)" + by (metis someI_ex)+} +in + +fun skolemize vars = + apfst Thm oo close vars (yield_singleton Assumption.add_assumes) + +fun discharge_sk_tac ctxt i st = ( + resolve_tac ctxt @{thms trans} i + THEN resolve_tac ctxt sk_rules i + THEN (resolve_tac ctxt @{thms refl} ORELSE' discharge_sk_tac ctxt) (i+1) + THEN resolve_tac ctxt @{thms refl} i) st + +end + + + +(** theory proof rules **) + +(* theory lemmas: linear arithmetic, arrays *) +fun th_lemma ctxt simpset thms = Thm o try_apply ctxt thms [ + Old_Z3_Proof_Tools.by_abstraction 0 (false, true) ctxt thms (fn ctxt' => + Old_Z3_Proof_Tools.by_tac ctxt' ( + NAMED ctxt' "arith" (Arith_Data.arith_tac ctxt') + ORELSE' NAMED ctxt' "simp+arith" ( + Simplifier.asm_full_simp_tac (put_simpset simpset ctxt') + THEN_ALL_NEW Arith_Data.arith_tac ctxt')))] + + +(* rewriting: prove equalities: + * ACI of conjunction/disjunction + * contradiction, excluded middle + * logical rewriting rules (for negation, implication, equivalence, + distinct) + * normal forms for polynoms (integer/real arithmetic) + * quantifier elimination over linear arithmetic + * ... ? 
**) +local + fun spec_meta_eq_of thm = + (case try (fn th => th RS @{thm spec}) thm of + SOME thm' => spec_meta_eq_of thm' + | NONE => mk_meta_eq thm) + + fun prep (Thm thm) = spec_meta_eq_of thm + | prep (MetaEq thm) = thm + | prep (Literals (thm, _)) = spec_meta_eq_of thm + + fun unfold_conv ctxt ths = + Conv.arg_conv (Conv.binop_conv (Old_Z3_Proof_Tools.unfold_eqs ctxt + (map prep ths))) + + fun with_conv _ [] prv = prv + | with_conv ctxt ths prv = + Old_Z3_Proof_Tools.with_conv (unfold_conv ctxt ths) prv + + val unfold_conv = + Conv.arg_conv (Conv.binop_conv + (Conv.try_conv Old_Z3_Proof_Tools.unfold_distinct_conv)) + val prove_conj_disj_eq = + Old_Z3_Proof_Tools.with_conv unfold_conv Old_Z3_Proof_Literals.prove_conj_disj_eq + + fun declare_hyps ctxt thm = + (thm, snd (Assumption.add_assumes (Thm.chyps_of thm) ctxt)) +in + +val abstraction_depth = 3 + (* + This value was chosen large enough to potentially catch exceptions, + yet small enough to not cause too much harm. The value might be + increased in the future, if reconstructing 'rewrite' fails on problems + that get too much abstracted to be reconstructable. 
+ *) + +fun rewrite simpset ths ct ctxt = + apfst Thm (declare_hyps ctxt (with_conv ctxt ths (try_apply ctxt [] [ + named ctxt "conj/disj/distinct" prove_conj_disj_eq, + named ctxt "pull-ite" Old_Z3_Proof_Methods.prove_ite ctxt, + Old_Z3_Proof_Tools.by_abstraction 0 (true, false) ctxt [] (fn ctxt' => + Old_Z3_Proof_Tools.by_tac ctxt' ( + NAMED ctxt' "simp (logic)" (Simplifier.simp_tac (put_simpset simpset ctxt')) + THEN_ALL_NEW NAMED ctxt' "fast (logic)" (fast_tac ctxt'))), + Old_Z3_Proof_Tools.by_abstraction 0 (false, true) ctxt [] (fn ctxt' => + Old_Z3_Proof_Tools.by_tac ctxt' ( + (resolve_tac ctxt' @{thms iff_allI} ORELSE' K all_tac) + THEN' NAMED ctxt' "simp (theory)" (Simplifier.simp_tac (put_simpset simpset ctxt')) + THEN_ALL_NEW ( + NAMED ctxt' "fast (theory)" (HOL_fast_tac ctxt') + ORELSE' NAMED ctxt' "arith (theory)" (Arith_Data.arith_tac ctxt')))), + Old_Z3_Proof_Tools.by_abstraction 0 (true, true) ctxt [] (fn ctxt' => + Old_Z3_Proof_Tools.by_tac ctxt' ( + (resolve_tac ctxt' @{thms iff_allI} ORELSE' K all_tac) + THEN' NAMED ctxt' "simp (full)" (Simplifier.simp_tac (put_simpset simpset ctxt')) + THEN_ALL_NEW ( + NAMED ctxt' "fast (full)" (HOL_fast_tac ctxt') + ORELSE' NAMED ctxt' "arith (full)" (Arith_Data.arith_tac ctxt')))), + named ctxt "injectivity" (Old_Z3_Proof_Methods.prove_injectivity ctxt), + Old_Z3_Proof_Tools.by_abstraction abstraction_depth (true, true) ctxt [] + (fn ctxt' => + Old_Z3_Proof_Tools.by_tac ctxt' ( + (resolve_tac ctxt' @{thms iff_allI} ORELSE' K all_tac) + THEN' NAMED ctxt' "simp (deepen)" (Simplifier.simp_tac (put_simpset simpset ctxt')) + THEN_ALL_NEW ( + NAMED ctxt' "fast (deepen)" (HOL_fast_tac ctxt') + ORELSE' NAMED ctxt' "arith (deepen)" (Arith_Data.arith_tac + ctxt'))))]) ct)) + +end + + + +(* proof reconstruction *) + +(** tracing and checking **) + +fun trace_before ctxt idx = Old_SMT_Config.trace_msg ctxt (fn r => + "Z3: #" ^ string_of_int idx ^ ": " ^ Old_Z3_Proof_Parser.string_of_rule r) + +fun check_after idx r ps ct 
(p, (ctxt, _)) = + if not (Config.get ctxt Old_SMT_Config.trace) then () + else + let val thm = thm_of p |> tap (Thm.join_proofs o single) + in + if (Thm.cprop_of thm) aconvc ct then () + else + z3_exn (Pretty.string_of (Pretty.big_list + ("proof step failed: " ^ quote (Old_Z3_Proof_Parser.string_of_rule r) ^ + " (#" ^ string_of_int idx ^ ")") + (pretty_goal ctxt (map (thm_of o fst) ps) (Thm.prop_of thm) @ + [Pretty.block [Pretty.str "expected: ", + Syntax.pretty_term ctxt (Thm.term_of ct)]]))) + end + + +(** overall reconstruction procedure **) + +local + fun not_supported r = raise Fail ("Z3: proof rule not implemented: " ^ + quote (Old_Z3_Proof_Parser.string_of_rule r)) + + fun prove_step simpset vars r ps ct (cxp as (cx, ptab)) = + (case (r, ps) of + (* core rules *) + (Old_Z3_Proof_Parser.True_Axiom, _) => (Thm Old_Z3_Proof_Literals.true_thm, cxp) + | (Old_Z3_Proof_Parser.Asserted, _) => raise Fail "bad assertion" + | (Old_Z3_Proof_Parser.Goal, _) => raise Fail "bad assertion" + | (Old_Z3_Proof_Parser.Modus_Ponens, [(p, _), (q, _)]) => + (mp q (thm_of p), cxp) + | (Old_Z3_Proof_Parser.Modus_Ponens_Oeq, [(p, _), (q, _)]) => + (mp q (thm_of p), cxp) + | (Old_Z3_Proof_Parser.And_Elim, [(p, i)]) => + and_elim (p, i) ct ptab ||> pair cx + | (Old_Z3_Proof_Parser.Not_Or_Elim, [(p, i)]) => + not_or_elim (p, i) ct ptab ||> pair cx + | (Old_Z3_Proof_Parser.Hypothesis, _) => (Thm (Thm.assume ct), cxp) + | (Old_Z3_Proof_Parser.Lemma, [(p, _)]) => (lemma (thm_of p) ct, cxp) + | (Old_Z3_Proof_Parser.Unit_Resolution, (p, _) :: ps) => + (unit_resolution (thm_of p) (map (thm_of o fst) ps) ct, cxp) + | (Old_Z3_Proof_Parser.Iff_True, [(p, _)]) => (iff_true (thm_of p), cxp) + | (Old_Z3_Proof_Parser.Iff_False, [(p, _)]) => (iff_false (thm_of p), cxp) + | (Old_Z3_Proof_Parser.Distributivity, _) => (distributivity cx ct, cxp) + | (Old_Z3_Proof_Parser.Def_Axiom, _) => (def_axiom cx ct, cxp) + | (Old_Z3_Proof_Parser.Intro_Def, _) => intro_def ct cx ||> rpair ptab + | 
(Old_Z3_Proof_Parser.Apply_Def, [(p, _)]) => (apply_def (thm_of p), cxp) + | (Old_Z3_Proof_Parser.Iff_Oeq, [(p, _)]) => (p, cxp) + | (Old_Z3_Proof_Parser.Nnf_Pos, _) => (nnf cx vars (map fst ps) ct, cxp) + | (Old_Z3_Proof_Parser.Nnf_Neg, _) => (nnf cx vars (map fst ps) ct, cxp) + + (* equality rules *) + | (Old_Z3_Proof_Parser.Reflexivity, _) => (refl ct, cxp) + | (Old_Z3_Proof_Parser.Symmetry, [(p, _)]) => (symm p, cxp) + | (Old_Z3_Proof_Parser.Transitivity, [(p, _), (q, _)]) => (trans p q, cxp) + | (Old_Z3_Proof_Parser.Monotonicity, _) => (monotonicity (map fst ps) ct, cxp) + | (Old_Z3_Proof_Parser.Commutativity, _) => (commutativity ct, cxp) + + (* quantifier rules *) + | (Old_Z3_Proof_Parser.Quant_Intro, [(p, _)]) => (quant_intro cx vars p ct, cxp) + | (Old_Z3_Proof_Parser.Pull_Quant, _) => (pull_quant cx ct, cxp) + | (Old_Z3_Proof_Parser.Push_Quant, _) => (push_quant cx ct, cxp) + | (Old_Z3_Proof_Parser.Elim_Unused_Vars, _) => (elim_unused_vars cx ct, cxp) + | (Old_Z3_Proof_Parser.Dest_Eq_Res, _) => (dest_eq_res cx ct, cxp) + | (Old_Z3_Proof_Parser.Quant_Inst, _) => (quant_inst cx ct, cxp) + | (Old_Z3_Proof_Parser.Skolemize, _) => skolemize vars ct cx ||> rpair ptab + + (* theory rules *) + | (Old_Z3_Proof_Parser.Th_Lemma _, _) => (* FIXME: use arguments *) + (th_lemma cx simpset (map (thm_of o fst) ps) ct, cxp) + | (Old_Z3_Proof_Parser.Rewrite, _) => rewrite simpset [] ct cx ||> rpair ptab + | (Old_Z3_Proof_Parser.Rewrite_Star, ps) => + rewrite simpset (map fst ps) ct cx ||> rpair ptab + + | (Old_Z3_Proof_Parser.Nnf_Star, _) => not_supported r + | (Old_Z3_Proof_Parser.Cnf_Star, _) => not_supported r + | (Old_Z3_Proof_Parser.Transitivity_Star, _) => not_supported r + | (Old_Z3_Proof_Parser.Pull_Quant_Star, _) => not_supported r + + | _ => raise Fail ("Z3: proof rule " ^ + quote (Old_Z3_Proof_Parser.string_of_rule r) ^ + " has an unexpected number of arguments.")) + + fun lookup_proof ptab idx = + (case Inttab.lookup ptab idx of + SOME p => (p, idx) + | NONE => 
z3_exn ("unknown proof id: " ^ quote (string_of_int idx))) + + fun prove simpset vars (idx, step) (_, cxp as (ctxt, ptab)) = + let + val Old_Z3_Proof_Parser.Proof_Step {rule=r, prems, prop, ...} = step + val ps = map (lookup_proof ptab) prems + val _ = trace_before ctxt idx r + val (thm, (ctxt', ptab')) = + cxp + |> prove_step simpset vars r ps prop + |> tap (check_after idx r ps prop) + in (thm, (ctxt', Inttab.update (idx, thm) ptab')) end + + fun make_discharge_rules rules = rules @ [@{thm allI}, @{thm refl}, + @{thm reflexive}, Old_Z3_Proof_Literals.true_thm] + + fun discharge_assms_tac ctxt rules = + REPEAT (HEADGOAL (resolve_tac ctxt rules ORELSE' SOLVED' (discharge_sk_tac ctxt))) + + fun discharge_assms ctxt rules thm = + if Thm.nprems_of thm = 0 then Goal.norm_result ctxt thm + else + (case Seq.pull (discharge_assms_tac ctxt rules thm) of + SOME (thm', _) => Goal.norm_result ctxt thm' + | NONE => raise THM ("failed to discharge premise", 1, [thm])) + + fun discharge rules outer_ctxt (p, (inner_ctxt, _)) = + thm_of p + |> singleton (Proof_Context.export inner_ctxt outer_ctxt) + |> discharge_assms outer_ctxt (make_discharge_rules rules) +in + +fun reconstruct outer_ctxt recon output = + let + val {context=ctxt, typs, terms, rewrite_rules, assms} = recon + val (asserted, steps, vars, ctxt1) = + Old_Z3_Proof_Parser.parse ctxt typs terms output + + val simpset = + Old_Z3_Proof_Tools.make_simpset ctxt1 (Named_Theorems.get ctxt1 @{named_theorems old_z3_simp}) + + val ((is, rules), cxp as (ctxt2, _)) = + add_asserted outer_ctxt rewrite_rules assms asserted ctxt1 + in + if Config.get ctxt2 Old_SMT_Config.filter_only_facts then (is, @{thm TrueI}) + else + (Thm @{thm TrueI}, cxp) + |> fold (prove simpset vars) steps + |> discharge rules outer_ctxt + |> pair [] + end + +end + +end diff --git a/src/main/Old_SMT/old_z3_proof_tools.ML b/src/main/Old_SMT/old_z3_proof_tools.ML new file mode 100644 index 0000000..8fc65ba --- /dev/null +++ 
b/src/main/Old_SMT/old_z3_proof_tools.ML @@ -0,0 +1,374 @@ +(* Title: HOL/Library/Old_SMT/old_z3_proof_tools.ML + Author: Sascha Boehme, TU Muenchen + +Helper functions required for Z3 proof reconstruction. +*) + +signature OLD_Z3_PROOF_TOOLS = +sig + (*modifying terms*) + val as_meta_eq: cterm -> cterm + + (*theorem nets*) + val thm_net_of: ('a -> thm) -> 'a list -> 'a Net.net + val net_instances: (int * thm) Net.net -> cterm -> (int * thm) list + val net_instance: thm Net.net -> cterm -> thm option + + (*proof combinators*) + val under_assumption: (thm -> thm) -> cterm -> thm + val with_conv: conv -> (cterm -> thm) -> cterm -> thm + val discharge: thm -> thm -> thm + val varify: string list -> thm -> thm + val unfold_eqs: Proof.context -> thm list -> conv + val match_instantiate: (cterm -> cterm) -> cterm -> thm -> thm + val by_tac: Proof.context -> (int -> tactic) -> cterm -> thm + val make_hyp_def: thm -> Proof.context -> thm * Proof.context + val by_abstraction: int -> bool * bool -> Proof.context -> thm list -> + (Proof.context -> cterm -> thm) -> cterm -> thm + + (*a faster COMP*) + type compose_data = cterm list * (cterm -> cterm list) * thm + val precompose: (cterm -> cterm list) -> thm -> compose_data + val precompose2: (cterm -> cterm * cterm) -> thm -> compose_data + val compose: compose_data -> thm -> thm + + (*unfolding of 'distinct'*) + val unfold_distinct_conv: conv + + (*simpset*) + val add_simproc: Simplifier.simproc -> Context.generic -> Context.generic + val make_simpset: Proof.context -> thm list -> simpset +end + +structure Old_Z3_Proof_Tools: OLD_Z3_PROOF_TOOLS = +struct + + + +(* modifying terms *) + +fun as_meta_eq ct = + uncurry Old_SMT_Utils.mk_cequals (Thm.dest_binop (Old_SMT_Utils.dest_cprop ct)) + + + +(* theorem nets *) + +fun thm_net_of f xthms = + let fun insert xthm = Net.insert_term (K false) (Thm.prop_of (f xthm), xthm) + in fold insert xthms Net.empty end + +fun maybe_instantiate ct thm = + try Thm.first_order_match 
(Thm.cprop_of thm, ct) + |> Option.map (fn inst => Thm.instantiate inst thm) + +local + fun instances_from_net match f net ct = + let + val lookup = if match then Net.match_term else Net.unify_term + val xthms = lookup net (Thm.term_of ct) + fun select ct = map_filter (f (maybe_instantiate ct)) xthms + fun select' ct = + let val thm = Thm.trivial ct + in map_filter (f (try (fn rule => rule COMP thm))) xthms end + in (case select ct of [] => select' ct | xthms' => xthms') end +in + +fun net_instances net = + instances_from_net false (fn f => fn (i, thm) => Option.map (pair i) (f thm)) + net + +fun net_instance net = try hd o instances_from_net true I net + +end + + + +(* proof combinators *) + +fun under_assumption f ct = + let val ct' = Old_SMT_Utils.mk_cprop ct + in Thm.implies_intr ct' (f (Thm.assume ct')) end + +fun with_conv conv prove ct = + let val eq = Thm.symmetric (conv ct) + in Thm.equal_elim eq (prove (Thm.lhs_of eq)) end + +fun discharge p pq = Thm.implies_elim pq p + +fun varify vars = Drule.generalize ([], vars) + +fun unfold_eqs _ [] = Conv.all_conv + | unfold_eqs ctxt eqs = + Conv.top_sweep_conv (K (Conv.rewrs_conv eqs)) ctxt + +fun match_instantiate f ct thm = + Thm.instantiate (Thm.match (f (Thm.cprop_of thm), ct)) thm + +fun by_tac ctxt tac ct = Goal.norm_result ctxt (Goal.prove_internal ctxt [] ct (K (tac 1))) + +(* + |- c x == t x ==> P (c x) + --------------------------- + c == t |- P (c x) +*) +fun make_hyp_def thm ctxt = + let + val (lhs, rhs) = Thm.dest_binop (Thm.cprem_of thm 1) + val (cf, cvs) = Drule.strip_comb lhs + val eq = Old_SMT_Utils.mk_cequals cf (fold_rev Thm.lambda cvs rhs) + fun apply cv th = + Thm.combination th (Thm.reflexive cv) + |> Conv.fconv_rule (Conv.arg_conv (Thm.beta_conversion false)) + in + yield_singleton Assumption.add_assumes eq ctxt + |>> Thm.implies_elim thm o fold apply cvs + end + + + +(* abstraction *) + +local + +fun abs_context ctxt = (ctxt, Termtab.empty, 1, false) + +fun context_of (ctxt, _, _, _) = ctxt 
+ +fun replace (_, (cv, ct)) = Thm.forall_elim ct o Thm.forall_intr cv + +fun abs_instantiate (_, tab, _, beta_norm) = + fold replace (Termtab.dest tab) #> + beta_norm ? Conv.fconv_rule (Thm.beta_conversion true) + +fun lambda_abstract cvs t = + let + val frees = map Free (Term.add_frees t []) + val cvs' = filter (fn cv => member (op aconv) frees (Thm.term_of cv)) cvs + val vs = map (Term.dest_Free o Thm.term_of) cvs' + in (fold_rev absfree vs t, cvs') end + +fun fresh_abstraction (_, cvs) ct (cx as (ctxt, tab, idx, beta_norm)) = + let val (t, cvs') = lambda_abstract cvs (Thm.term_of ct) + in + (case Termtab.lookup tab t of + SOME (cv, _) => (Drule.list_comb (cv, cvs'), cx) + | NONE => + let + val (n, ctxt') = yield_singleton Variable.variant_fixes "x" ctxt + val cv = Thm.cterm_of ctxt' + (Free (n, map Thm.typ_of_cterm cvs' ---> Thm.typ_of_cterm ct)) + val cu = Drule.list_comb (cv, cvs') + val e = (t, (cv, fold_rev Thm.lambda cvs' ct)) + val beta_norm' = beta_norm orelse not (null cvs') + in (cu, (ctxt', Termtab.update e tab, idx + 1, beta_norm')) end) + end + +fun abs_comb f g dcvs ct = + let val (cf, cu) = Thm.dest_comb ct + in f dcvs cf ##>> g dcvs cu #>> uncurry Thm.apply end + +fun abs_arg f = abs_comb (K pair) f + +fun abs_args f dcvs ct = + (case Thm.term_of ct of + _ $ _ => abs_comb (abs_args f) f dcvs ct + | _ => pair ct) + +fun abs_list f g dcvs ct = + (case Thm.term_of ct of + Const (@{const_name Nil}, _) => pair ct + | Const (@{const_name Cons}, _) $ _ $ _ => + abs_comb (abs_arg f) (abs_list f g) dcvs ct + | _ => g dcvs ct) + +fun abs_abs f (depth, cvs) ct = + let val (cv, cu) = Thm.dest_abs NONE ct + in f (depth, cv :: cvs) cu #>> Thm.lambda cv end + +val is_atomic = + (fn Free _ => true | Var _ => true | Bound _ => true | _ => false) + +fun abstract depth (ext_logic, with_theories) = + let + fun abstr1 cvs ct = abs_arg abstr cvs ct + and abstr2 cvs ct = abs_comb abstr1 abstr cvs ct + and abstr3 cvs ct = abs_comb abstr2 abstr cvs ct + and abstr_abs cvs 
ct = abs_arg (abs_abs abstr) cvs ct + + and abstr (dcvs as (d, cvs)) ct = + (case Thm.term_of ct of + @{const Trueprop} $ _ => abstr1 dcvs ct + | @{const Pure.imp} $ _ $ _ => abstr2 dcvs ct + | @{const True} => pair ct + | @{const False} => pair ct + | @{const Not} $ _ => abstr1 dcvs ct + | @{const HOL.conj} $ _ $ _ => abstr2 dcvs ct + | @{const HOL.disj} $ _ $ _ => abstr2 dcvs ct + | @{const HOL.implies} $ _ $ _ => abstr2 dcvs ct + | Const (@{const_name HOL.eq}, _) $ _ $ _ => abstr2 dcvs ct + | Const (@{const_name distinct}, _) $ _ => + if ext_logic then abs_arg (abs_list abstr fresh_abstraction) dcvs ct + else fresh_abstraction dcvs ct + | Const (@{const_name If}, _) $ _ $ _ $ _ => + if ext_logic then abstr3 dcvs ct else fresh_abstraction dcvs ct + | Const (@{const_name All}, _) $ _ => + if ext_logic then abstr_abs dcvs ct else fresh_abstraction dcvs ct + | Const (@{const_name Ex}, _) $ _ => + if ext_logic then abstr_abs dcvs ct else fresh_abstraction dcvs ct + | t => (fn cx => + if is_atomic t orelse can HOLogic.dest_number t then (ct, cx) + else if with_theories andalso + Old_Z3_Interface.is_builtin_theory_term (context_of cx) t + then abs_args abstr dcvs ct cx + else if d = 0 then fresh_abstraction dcvs ct cx + else + (case Term.strip_comb t of + (Const _, _) => abs_args abstr (d-1, cvs) ct cx + | (Free _, _) => abs_args abstr (d-1, cvs) ct cx + | _ => fresh_abstraction dcvs ct cx))) + in abstr (depth, []) end + +val cimp = Thm.cterm_of @{context} @{const Pure.imp} + +fun deepen depth f x = + if depth = 0 then f depth x + else (case try (f depth) x of SOME y => y | NONE => deepen (depth - 1) f x) + +fun with_prems depth thms f ct = + fold_rev (Thm.mk_binop cimp o Thm.cprop_of) thms ct + |> deepen depth f + |> fold (fn prem => fn th => Thm.implies_elim th prem) thms + +in + +fun by_abstraction depth mode ctxt thms prove = + with_prems depth thms (fn d => fn ct => + let val (cu, cx) = abstract d mode ct (abs_context ctxt) + in abs_instantiate cx (prove 
(context_of cx) cu) end) + +end + + + +(* a faster COMP *) + +type compose_data = cterm list * (cterm -> cterm list) * thm + +fun list2 (x, y) = [x, y] + +fun precompose f rule : compose_data = (f (Thm.cprem_of rule 1), f, rule) +fun precompose2 f rule : compose_data = precompose (list2 o f) rule + +fun compose (cvs, f, rule) thm = + discharge thm (Thm.instantiate ([], map (dest_Var o Thm.term_of) cvs ~~ f (Thm.cprop_of thm)) rule) + + + +(* unfolding of 'distinct' *) + +local + val set1 = @{lemma "x ~: set [] == ~False" by simp} + val set2 = @{lemma "x ~: set [x] == False" by simp} + val set3 = @{lemma "x ~: set [y] == x ~= y" by simp} + val set4 = @{lemma "x ~: set (x # ys) == False" by simp} + val set5 = @{lemma "x ~: set (y # ys) == x ~= y & x ~: set ys" by simp} + + fun set_conv ct = + (Conv.rewrs_conv [set1, set2, set3, set4] else_conv + (Conv.rewr_conv set5 then_conv Conv.arg_conv set_conv)) ct + + val dist1 = @{lemma "distinct [] == ~False" by (simp add: distinct_def)} + val dist2 = @{lemma "distinct [x] == ~False" by (simp add: distinct_def)} + val dist3 = @{lemma "distinct (x # xs) == x ~: set xs & distinct xs" + by (simp add: distinct_def)} + + fun binop_conv cv1 cv2 = Conv.combination_conv (Conv.arg_conv cv1) cv2 +in +fun unfold_distinct_conv ct = + (Conv.rewrs_conv [dist1, dist2] else_conv + (Conv.rewr_conv dist3 then_conv binop_conv set_conv unfold_distinct_conv)) ct +end + + + +(* simpset *) + +local + val antisym_le1 = mk_meta_eq @{thm order_class.antisym_conv} + val antisym_le2 = mk_meta_eq @{thm linorder_class.antisym_conv2} + val antisym_less1 = mk_meta_eq @{thm linorder_class.antisym_conv1} + val antisym_less2 = mk_meta_eq @{thm linorder_class.antisym_conv3} + + fun eq_prop t thm = HOLogic.mk_Trueprop t aconv Thm.prop_of thm + fun dest_binop ((c as Const _) $ t $ u) = (c, t, u) + | dest_binop t = raise TERM ("dest_binop", [t]) + + fun prove_antisym_le ctxt ct = + let + val (le, r, s) = dest_binop (Thm.term_of ct) + val less = Const (@{const_name 
less}, Term.fastype_of le) + val prems = Simplifier.prems_of ctxt + in + (case find_first (eq_prop (le $ s $ r)) prems of + NONE => + find_first (eq_prop (HOLogic.mk_not (less $ r $ s))) prems + |> Option.map (fn thm => thm RS antisym_less1) + | SOME thm => SOME (thm RS antisym_le1)) + end + handle THM _ => NONE + + fun prove_antisym_less ctxt ct = + let + val (less, r, s) = dest_binop (HOLogic.dest_not (Thm.term_of ct)) + val le = Const (@{const_name less_eq}, Term.fastype_of less) + val prems = Simplifier.prems_of ctxt + in + (case find_first (eq_prop (le $ r $ s)) prems of + NONE => + find_first (eq_prop (HOLogic.mk_not (less $ s $ r))) prems + |> Option.map (fn thm => thm RS antisym_less2) + | SOME thm => SOME (thm RS antisym_le2)) + end + handle THM _ => NONE + + val basic_simpset = + simpset_of (put_simpset HOL_ss @{context} + addsimps @{thms field_simps} + addsimps [@{thm times_divide_eq_right}, @{thm times_divide_eq_left}] + addsimps @{thms arith_special} addsimps @{thms arith_simps} + addsimps @{thms rel_simps} + addsimps @{thms array_rules} + addsimps @{thms term_true_def} addsimps @{thms term_false_def} + addsimps @{thms z3div_def} addsimps @{thms z3mod_def} + addsimprocs [@{simproc numeral_divmod}] + addsimprocs [ + Simplifier.make_simproc @{context} "fast_int_arith" + {lhss = [@{term "(m::int) < n"}, @{term "(m::int) \ n"}, @{term "(m::int) = n"}], + proc = K Lin_Arith.simproc}, + Simplifier.make_simproc @{context} "antisym_le" + {lhss = [@{term "(x::'a::order) \ y"}], + proc = K prove_antisym_le}, + Simplifier.make_simproc @{context} "antisym_less" + {lhss = [@{term "\ (x::'a::linorder) < y"}], + proc = K prove_antisym_less}]) + + structure Simpset = Generic_Data + ( + type T = simpset + val empty = basic_simpset + val extend = I + val merge = Simplifier.merge_ss + ) +in + +fun add_simproc simproc context = + Simpset.map (simpset_map (Context.proof_of context) + (fn ctxt => ctxt addsimprocs [simproc])) context + +fun make_simpset ctxt rules = + 
simpset_of (put_simpset (Simpset.get (Context.Proof ctxt)) ctxt addsimps rules) + +end + +end diff --git a/src/QuickCheckBackend.thy b/src/main/QuickCheckBackend.thy similarity index 99% rename from src/QuickCheckBackend.thy rename to src/main/QuickCheckBackend.thy index c17034b..4fb6091 100644 --- a/src/QuickCheckBackend.thy +++ b/src/main/QuickCheckBackend.thy @@ -45,9 +45,9 @@ chapter {* The QuickCheck backend *} theory QuickCheckBackend imports - HOL - Int - List + HOL.HOL + HOL.Int + HOL.List TestEnv BackendUtils diff --git a/src/RandomBackend.thy b/src/main/RandomBackend.thy similarity index 99% rename from src/RandomBackend.thy rename to src/main/RandomBackend.thy index c1b8a1f..6cb0f4f 100644 --- a/src/RandomBackend.thy +++ b/src/main/RandomBackend.thy @@ -45,9 +45,9 @@ chapter {* The random solver *} theory RandomBackend imports - HOL - Int - List + HOL.HOL + HOL.Int + HOL.List TestEnv BackendUtils diff --git a/src/main/SMT/conj_disj_perm.ML b/src/main/SMT/conj_disj_perm.ML new file mode 100644 index 0000000..30b85d6 --- /dev/null +++ b/src/main/SMT/conj_disj_perm.ML @@ -0,0 +1,127 @@ +(* Title: HOL/Tools/SMT/conj_disj_perm.ML + Author: Sascha Boehme, TU Muenchen + +Tactic to prove equivalence of permutations of conjunctions and disjunctions. 
+*) + +signature CONJ_DISJ_PERM = +sig + val conj_disj_perm_tac: Proof.context -> int -> tactic +end + +structure Conj_Disj_Perm: CONJ_DISJ_PERM = +struct + +fun with_assumption ct f = + let val ct' = Thm.apply @{cterm HOL.Trueprop} ct + in Thm.implies_intr ct' (f (Thm.assume ct')) end + +fun eq_from_impls thm1 thm2 = thm2 INCR_COMP (thm1 INCR_COMP @{thm iffI}) + +fun add_lit thm = Termtab.update (HOLogic.dest_Trueprop (Thm.prop_of thm), thm) + +val ndisj1_rule = @{lemma "~(P | Q) ==> ~P" by auto} +val ndisj2_rule = @{lemma "~(P | Q) ==> ~Q" by auto} + +fun explode_thm thm = + (case HOLogic.dest_Trueprop (Thm.prop_of thm) of + @{const HOL.conj} $ _ $ _ => explode_conj_thm @{thm conjunct1} @{thm conjunct2} thm + | @{const HOL.Not} $ (@{const HOL.disj} $ _ $ _) => explode_conj_thm ndisj1_rule ndisj2_rule thm + | @{const HOL.Not} $ (@{const HOL.Not} $ _) => explode_thm (thm RS @{thm notnotD}) + | _ => add_lit thm) + +and explode_conj_thm rule1 rule2 thm lits = + explode_thm (thm RS rule1) (explode_thm (thm RS rule2) (add_lit thm lits)) + +val not_false_rule = @{lemma "~False" by auto} +fun explode thm = explode_thm thm (add_lit not_false_rule (add_lit @{thm TrueI} Termtab.empty)) + +fun find_dual_lit lits (@{const HOL.Not} $ t, thm) = Termtab.lookup lits t |> Option.map (pair thm) + | find_dual_lit _ _ = NONE + +fun find_dual_lits lits = Termtab.get_first (find_dual_lit lits) lits + +val not_not_rule = @{lemma "P ==> ~~P" by auto} +val ndisj_rule = @{lemma "~P ==> ~Q ==> ~(P | Q)" by auto} + +fun join lits t = + (case Termtab.lookup lits t of + SOME thm => thm + | NONE => join_term lits t) + +and join_term lits (@{const HOL.conj} $ t $ u) = @{thm conjI} OF (map (join lits) [t, u]) + | join_term lits (@{const HOL.Not} $ (@{const HOL.disj} $ t $ u)) = + ndisj_rule OF (map (join lits o HOLogic.mk_not) [t, u]) + | join_term lits (@{const HOL.Not} $ (@{const HOL.Not} $ t)) = join lits t RS not_not_rule + | join_term _ t = raise TERM ("join_term", [t]) + +fun 
prove_conj_disj_imp ct cu = with_assumption ct (fn thm => join (explode thm) (Thm.term_of cu)) + +fun prove_conj_disj_eq (clhs, crhs) = + let + val thm1 = prove_conj_disj_imp clhs crhs + val thm2 = prove_conj_disj_imp crhs clhs + in eq_from_impls thm1 thm2 end + +val not_not_false_rule = @{lemma "~~False ==> P" by auto} +val not_true_rule = @{lemma "~True ==> P" by auto} + +fun prove_any_imp ct = + (case Thm.term_of ct of + @{const HOL.False} => @{thm FalseE} + | @{const HOL.Not} $ (@{const HOL.Not} $ @{const HOL.False}) => not_not_false_rule + | @{const HOL.Not} $ @{const HOL.True} => not_true_rule + | _ => raise CTERM ("prove_any_imp", [ct])) + +fun prove_contradiction_imp ct = + with_assumption ct (fn thm => + let val lits = explode thm + in + (case Termtab.lookup lits @{const HOL.False} of + SOME thm' => thm' RS @{thm FalseE} + | NONE => + (case Termtab.lookup lits (@{const HOL.Not} $ @{const HOL.True}) of + SOME thm' => thm' RS not_true_rule + | NONE => + (case find_dual_lits lits of + SOME (not_lit_thm, lit_thm) => @{thm notE} OF [not_lit_thm, lit_thm] + | NONE => raise CTERM ("prove_contradiction", [ct])))) + end) + +fun prove_contradiction_eq to_right (clhs, crhs) = + let + val thm1 = if to_right then prove_contradiction_imp clhs else prove_any_imp clhs + val thm2 = if to_right then prove_any_imp crhs else prove_contradiction_imp crhs + in eq_from_impls thm1 thm2 end + +val contrapos_rule = @{lemma "(~P) = (~Q) ==> P = Q" by auto} +fun contrapos prove cp = contrapos_rule OF [prove (apply2 (Thm.apply @{cterm HOL.Not}) cp)] + +datatype kind = True | False | Conj | Disj | Other + +fun choose t _ _ _ @{const HOL.True} = t + | choose _ f _ _ @{const HOL.False} = f + | choose _ _ c _ (@{const HOL.conj} $ _ $ _) = c + | choose _ _ _ d (@{const HOL.disj} $ _ $ _) = d + | choose _ _ _ _ _ = Other + +fun kind_of (@{const HOL.Not} $ t) = choose False True Disj Conj t + | kind_of t = choose True False Conj Disj t + +fun prove_conj_disj_perm ct cp = + (case apply2 
(kind_of o Thm.term_of) cp of + (Conj, Conj) => prove_conj_disj_eq cp + | (Disj, Disj) => contrapos prove_conj_disj_eq cp + | (Conj, False) => prove_contradiction_eq true cp + | (False, Conj) => prove_contradiction_eq false cp + | (Disj, True) => contrapos (prove_contradiction_eq true) cp + | (True, Disj) => contrapos (prove_contradiction_eq false) cp + | _ => raise CTERM ("prove_conj_disj_perm", [ct])) + +fun conj_disj_perm_tac ctxt = CSUBGOAL (fn (ct, i) => + (case Thm.term_of ct of + @{const HOL.Trueprop} $ (@{const HOL.eq(bool)} $ _ $ _) => + resolve_tac ctxt [prove_conj_disj_perm ct (Thm.dest_binop (Thm.dest_arg ct))] i + | _ => no_tac)) + +end diff --git a/src/main/SMT/cvc4_interface.ML b/src/main/SMT/cvc4_interface.ML new file mode 100644 index 0000000..68cad31 --- /dev/null +++ b/src/main/SMT/cvc4_interface.ML @@ -0,0 +1,31 @@ +(* Title: HOL/Tools/SMT/cvc4_interface.ML + Author: Jasmin Blanchette, TU Muenchen + +Interface to CVC4 based on an extended version of SMT-LIB. +*) + +signature CVC4_INTERFACE = +sig + val smtlib_cvc4C: SMT_Util.class +end; + +structure CVC4_Interface: CVC4_INTERFACE = +struct + +val smtlib_cvc4C = SMTLIB_Interface.smtlibC @ ["cvc4"] + + +(* interface *) + +local + fun translate_config ctxt = + {logic = K "(set-logic ALL_SUPPORTED)\n", fp_kinds = [BNF_Util.Least_FP, BNF_Util.Greatest_FP], + serialize = #serialize (SMTLIB_Interface.translate_config ctxt)} +in + +val _ = + Theory.setup (Context.theory_map (SMT_Translate.add_config (smtlib_cvc4C, translate_config))) + +end + +end; diff --git a/src/main/SMT/cvc4_proof_parse.ML b/src/main/SMT/cvc4_proof_parse.ML new file mode 100644 index 0000000..2807164 --- /dev/null +++ b/src/main/SMT/cvc4_proof_parse.ML @@ -0,0 +1,46 @@ +(* Title: HOL/Tools/SMT/cvc4_proof_parse.ML + Author: Jasmin Blanchette, TU Muenchen + +CVC4 proof (actually, unsat core) parsing. 
+*) + +signature CVC4_PROOF_PARSE = +sig + val parse_proof: SMT_Translate.replay_data -> + ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> + SMT_Solver.parsed_proof +end; + +structure CVC4_Proof_Parse: CVC4_PROOF_PARSE = +struct + +fun parse_proof ({ll_defs, assms, ...} : SMT_Translate.replay_data) xfacts prems _ output = + if exists (String.isPrefix "(error \"This build of CVC4 doesn't have proof support") output then + {outcome = NONE, fact_ids = NONE, atp_proof = K []} + else + let + val num_ll_defs = length ll_defs + + val id_of_index = Integer.add num_ll_defs + val index_of_id = Integer.add (~ num_ll_defs) + + val used_assert_ids = map_filter (try SMTLIB_Interface.assert_index_of_name) output + val used_assm_js = + map_filter (fn id => let val i = index_of_id id in if i >= 0 then SOME i else NONE end) + used_assert_ids + + val conjecture_i = 0 + val prems_i = conjecture_i + 1 + val num_prems = length prems + val facts_i = prems_i + num_prems + + val fact_ids' = + map_filter (fn j => + let val (i, _) = nth assms j in + try (apsnd (nth xfacts)) (id_of_index j, i - facts_i) + end) used_assm_js + in + {outcome = NONE, fact_ids = SOME fact_ids', atp_proof = K []} + end + +end; diff --git a/src/main/SMT/smt_builtin.ML b/src/main/SMT/smt_builtin.ML new file mode 100644 index 0000000..a5955c7 --- /dev/null +++ b/src/main/SMT/smt_builtin.ML @@ -0,0 +1,222 @@ +(* Title: HOL/Tools/SMT/smt_builtin.ML + Author: Sascha Boehme, TU Muenchen + +Tables of types and terms directly supported by SMT solvers. 
+*) + +signature SMT_BUILTIN = +sig + (*for experiments*) + val filter_builtins: (typ -> bool) -> Proof.context -> Proof.context + + (*built-in types*) + val add_builtin_typ: SMT_Util.class -> + typ * (typ -> string option) * (typ -> int -> string option) -> Context.generic -> + Context.generic + val add_builtin_typ_ext: typ * (typ -> bool) -> Context.generic -> + Context.generic + val dest_builtin_typ: Proof.context -> typ -> string option + val is_builtin_typ_ext: Proof.context -> typ -> bool + + (*built-in numbers*) + val dest_builtin_num: Proof.context -> term -> (string * typ) option + val is_builtin_num: Proof.context -> term -> bool + val is_builtin_num_ext: Proof.context -> term -> bool + + (*built-in functions*) + type 'a bfun = Proof.context -> typ -> term list -> 'a + type bfunr = string * int * term list * (term list -> term) + val add_builtin_fun: SMT_Util.class -> (string * typ) * bfunr option bfun -> Context.generic -> + Context.generic + val add_builtin_fun': SMT_Util.class -> term * string -> Context.generic -> Context.generic + val add_builtin_fun_ext: (string * typ) * term list bfun -> Context.generic -> Context.generic + val add_builtin_fun_ext': string * typ -> Context.generic -> Context.generic + val add_builtin_fun_ext'': string -> Context.generic -> Context.generic + val dest_builtin_fun: Proof.context -> string * typ -> term list -> bfunr option + val dest_builtin_eq: Proof.context -> term -> term -> bfunr option + val dest_builtin_pred: Proof.context -> string * typ -> term list -> bfunr option + val dest_builtin_conn: Proof.context -> string * typ -> term list -> bfunr option + val dest_builtin: Proof.context -> string * typ -> term list -> bfunr option + val dest_builtin_ext: Proof.context -> string * typ -> term list -> term list option + val is_builtin_fun: Proof.context -> string * typ -> term list -> bool + val is_builtin_fun_ext: Proof.context -> string * typ -> term list -> bool +end; + +structure SMT_Builtin: SMT_BUILTIN = +struct 
+ + +(* built-in tables *) + +datatype ('a, 'b) kind = Ext of 'a | Int of 'b + +type ('a, 'b) ttab = ((typ * ('a, 'b) kind) Ord_List.T) SMT_Util.dict + +fun typ_ord ((T, _), (U, _)) = + let + fun tord (TVar _, Type _) = GREATER + | tord (Type _, TVar _) = LESS + | tord (Type (n, Ts), Type (m, Us)) = + if n = m then list_ord tord (Ts, Us) + else Term_Ord.typ_ord (T, U) + | tord TU = Term_Ord.typ_ord TU + in tord (T, U) end + +fun insert_ttab cs T f = + SMT_Util.dict_map_default (cs, []) + (Ord_List.insert typ_ord (perhaps (try Logic.varifyT_global) T, f)) + +fun merge_ttab ttabp = SMT_Util.dict_merge (Ord_List.merge typ_ord) ttabp + +fun lookup_ttab ctxt ttab T = + let fun match (U, _) = Sign.typ_instance (Proof_Context.theory_of ctxt) (T, U) + in + get_first (find_first match) (SMT_Util.dict_lookup ttab (SMT_Config.solver_class_of ctxt)) + end + +type ('a, 'b) btab = ('a, 'b) ttab Symtab.table + +fun insert_btab cs n T f = + Symtab.map_default (n, []) (insert_ttab cs T f) + +fun merge_btab btabp = Symtab.join (K merge_ttab) btabp + +fun lookup_btab ctxt btab (n, T) = + (case Symtab.lookup btab n of + NONE => NONE + | SOME ttab => lookup_ttab ctxt ttab T) + +type 'a bfun = Proof.context -> typ -> term list -> 'a + +type bfunr = string * int * term list * (term list -> term) + +structure Builtins = Generic_Data +( + type T = + (typ -> bool, (typ -> string option) * (typ -> int -> string option)) ttab * + (term list bfun, bfunr option bfun) btab + val empty = ([], Symtab.empty) + val extend = I + fun merge ((t1, b1), (t2, b2)) = (merge_ttab (t1, t2), merge_btab (b1, b2)) +) + +fun filter_ttab keep_T = map (apsnd (filter (keep_T o fst))) + +fun filter_builtins keep_T = + Context.proof_map (Builtins.map (fn (ttab, btab) => + (filter_ttab keep_T ttab, Symtab.map (K (filter_ttab keep_T)) btab))) + + +(* built-in types *) + +fun add_builtin_typ cs (T, f, g) = + Builtins.map (apfst (insert_ttab cs T (Int (f, g)))) + +fun add_builtin_typ_ext (T, f) = Builtins.map (apfst 
(insert_ttab SMT_Util.basicC T (Ext f))) + +fun lookup_builtin_typ ctxt = + lookup_ttab ctxt (fst (Builtins.get (Context.Proof ctxt))) + +fun dest_builtin_typ ctxt T = + (case lookup_builtin_typ ctxt T of + SOME (_, Int (f, _)) => f T + | _ => NONE) + +fun is_builtin_typ_ext ctxt T = + (case lookup_builtin_typ ctxt T of + SOME (_, Int (f, _)) => is_some (f T) + | SOME (_, Ext f) => f T + | NONE => false) + + +(* built-in numbers *) + +fun dest_builtin_num ctxt t = + (case try HOLogic.dest_number t of + NONE => NONE + | SOME (T, i) => + if i < 0 then NONE else + (case lookup_builtin_typ ctxt T of + SOME (_, Int (_, g)) => g T i |> Option.map (rpair T) + | _ => NONE)) + +val is_builtin_num = is_some oo dest_builtin_num + +fun is_builtin_num_ext ctxt t = + (case try HOLogic.dest_number t of + NONE => false + | SOME (T, _) => is_builtin_typ_ext ctxt T) + + +(* built-in functions *) + +fun add_builtin_fun cs ((n, T), f) = + Builtins.map (apsnd (insert_btab cs n T (Int f))) + +fun add_builtin_fun' cs (t, n) = + let + val c as (m, T) = Term.dest_Const t + fun app U ts = Term.list_comb (Const (m, U), ts) + fun bfun _ U ts = SOME (n, length (Term.binder_types T), ts, app U) + in add_builtin_fun cs (c, bfun) end + +fun add_builtin_fun_ext ((n, T), f) = + Builtins.map (apsnd (insert_btab SMT_Util.basicC n T (Ext f))) + +fun add_builtin_fun_ext' c = add_builtin_fun_ext (c, fn _ => fn _ => I) + +fun add_builtin_fun_ext'' n context = + let val thy = Context.theory_of context + in add_builtin_fun_ext' (n, Sign.the_const_type thy n) context end + +fun lookup_builtin_fun ctxt = + lookup_btab ctxt (snd (Builtins.get (Context.Proof ctxt))) + +fun dest_builtin_fun ctxt (c as (_, T)) ts = + (case lookup_builtin_fun ctxt c of + SOME (_, Int f) => f ctxt T ts + | _ => NONE) + +fun dest_builtin_eq ctxt t u = + let + val aT = TFree (Name.aT, @{sort type}) + val c = (@{const_name HOL.eq}, aT --> aT --> @{typ bool}) + fun mk ts = Term.list_comb (HOLogic.eq_const (Term.fastype_of (hd ts)), 
ts) + in + dest_builtin_fun ctxt c [] + |> Option.map (fn (n, i, _, _) => (n, i, [t, u], mk)) + end + +fun special_builtin_fun pred ctxt (c as (_, T)) ts = + if pred (Term.body_type T, Term.binder_types T) then + dest_builtin_fun ctxt c ts + else NONE + +fun dest_builtin_pred ctxt = special_builtin_fun (equal @{typ bool} o fst) ctxt + +fun dest_builtin_conn ctxt = + special_builtin_fun (forall (equal @{typ bool}) o (op ::)) ctxt + +fun dest_builtin ctxt c ts = + let val t = Term.list_comb (Const c, ts) + in + (case dest_builtin_num ctxt t of + SOME (n, _) => SOME (n, 0, [], K t) + | NONE => dest_builtin_fun ctxt c ts) + end + +fun dest_builtin_fun_ext ctxt (c as (_, T)) ts = + (case lookup_builtin_fun ctxt c of + SOME (_, Int f) => f ctxt T ts |> Option.map (fn (_, _, us, _) => us) + | SOME (_, Ext f) => SOME (f ctxt T ts) + | NONE => NONE) + +fun dest_builtin_ext ctxt c ts = + if is_builtin_num_ext ctxt (Term.list_comb (Const c, ts)) then SOME [] + else dest_builtin_fun_ext ctxt c ts + +fun is_builtin_fun ctxt c ts = is_some (dest_builtin_fun ctxt c ts) + +fun is_builtin_fun_ext ctxt c ts = is_some (dest_builtin_fun_ext ctxt c ts) + +end; diff --git a/src/main/SMT/smt_config.ML b/src/main/SMT/smt_config.ML new file mode 100644 index 0000000..8b8d029 --- /dev/null +++ b/src/main/SMT/smt_config.ML @@ -0,0 +1,265 @@ +(* Title: HOL/Tools/SMT/smt_config.ML + Author: Sascha Boehme, TU Muenchen + +Configuration options and diagnostic tools for SMT. 
+*) + +signature SMT_CONFIG = +sig + (*solver*) + type solver_info = { + name: string, + class: Proof.context -> SMT_Util.class, + avail: unit -> bool, + options: Proof.context -> string list } + val add_solver: solver_info -> Context.generic -> Context.generic + val set_solver_options: string * string -> Context.generic -> Context.generic + val is_available: Proof.context -> string -> bool + val available_solvers_of: Proof.context -> string list + val select_solver: string -> Context.generic -> Context.generic + val solver_of: Proof.context -> string + val solver_class_of: Proof.context -> SMT_Util.class + val solver_options_of: Proof.context -> string list + + (*options*) + val oracle: bool Config.T + val timeout: real Config.T + val reconstruction_step_timeout: real Config.T + val random_seed: int Config.T + val read_only_certificates: bool Config.T + val verbose: bool Config.T + val trace: bool Config.T + val statistics: bool Config.T + val monomorph_limit: int Config.T + val monomorph_instances: int Config.T + val infer_triggers: bool Config.T + val debug_files: string Config.T + val sat_solver: string Config.T + + (*tools*) + val with_time_limit: Proof.context -> real Config.T -> ('a -> 'b) -> 'a -> 'b + val with_timeout: Proof.context -> ('a -> 'b) -> 'a -> 'b + + (*diagnostics*) + val trace_msg: Proof.context -> ('a -> string) -> 'a -> unit + val verbose_msg: Proof.context -> ('a -> string) -> 'a -> unit + val statistics_msg: Proof.context -> ('a -> string) -> 'a -> unit + + (*certificates*) + val select_certificates: string -> Context.generic -> Context.generic + val certificates_of: Proof.context -> Cache_IO.cache option + + (*setup*) + val print_setup: Proof.context -> unit +end; + +structure SMT_Config: SMT_CONFIG = +struct + +(* solver *) + +type solver_info = { + name: string, + class: Proof.context -> SMT_Util.class, + avail: unit -> bool, + options: Proof.context -> string list} + +type data = { + solvers: (solver_info * string list) Symtab.table, + 
solver: string option, + certs: Cache_IO.cache option} + +fun mk_data solvers solver certs: data = {solvers=solvers, solver=solver, certs=certs} + +val empty_data = mk_data Symtab.empty NONE NONE + +fun solvers_of ({solvers, ...}: data) = solvers +fun solver_of ({solver, ...}: data) = solver +fun certs_of ({certs, ...}: data) = certs + +fun map_solvers f ({solvers, solver, certs}: data) = mk_data (f solvers) solver certs +fun map_solver f ({solvers, solver, certs}: data) = mk_data solvers (f solver) certs +fun put_certs c ({solvers, solver, ...}: data) = mk_data solvers solver c + +fun merge_data ({solvers=ss1,solver=s1,certs=c1}: data, {solvers=ss2,solver=s2,certs=c2}: data) = + mk_data (Symtab.merge (K true) (ss1, ss2)) (merge_options (s1, s2)) (merge_options (c1, c2)) + +structure Data = Generic_Data +( + type T = data + val empty = empty_data + val extend = I + val merge = merge_data +) + +fun set_solver_options (name, options) = + let val opts = String.tokens (Symbol.is_ascii_blank o str) options + in Data.map (map_solvers (Symtab.map_entry name (apsnd (K opts)))) end + +fun add_solver (info as {name, ...} : solver_info) context = + if Symtab.defined (solvers_of (Data.get context)) name then + error ("Solver already registered: " ^ quote name) + else + context + |> Data.map (map_solvers (Symtab.update (name, (info, [])))) + |> Context.map_theory (Attrib.setup (Binding.name (name ^ "_options")) + (Scan.lift (@{keyword "="} |-- Args.name) >> + (Thm.declaration_attribute o K o set_solver_options o pair name)) + ("additional command line options for SMT solver " ^ quote name)) + +fun all_solvers_of ctxt = Symtab.keys (solvers_of (Data.get (Context.Proof ctxt))) + +fun solver_name_of ctxt = solver_of (Data.get (Context.Proof ctxt)) + +fun is_available ctxt name = + (case Symtab.lookup (solvers_of (Data.get (Context.Proof ctxt))) name of + SOME ({avail, ...}, _) => avail () + | NONE => false) + +fun available_solvers_of ctxt = + filter (is_available ctxt) 
(all_solvers_of ctxt) + +fun warn_solver (Context.Proof ctxt) name = + if Context_Position.is_visible ctxt then + warning ("The SMT solver " ^ quote name ^ " is not installed") + else () + | warn_solver _ _ = () + +fun select_solver name context = + let + val ctxt = Context.proof_of context + val upd = Data.map (map_solver (K (SOME name))) + in + if not (member (op =) (all_solvers_of ctxt) name) then + error ("Trying to select unknown solver: " ^ quote name) + else if not (is_available ctxt name) then + (warn_solver context name; upd context) + else upd context + end + +fun no_solver_err () = error "No SMT solver selected" + +fun solver_of ctxt = + (case solver_name_of ctxt of + SOME name => name + | NONE => no_solver_err ()) + +fun solver_info_of default select ctxt = + (case solver_name_of ctxt of + NONE => default () + | SOME name => select (Symtab.lookup (solvers_of (Data.get (Context.Proof ctxt))) name)) + +fun solver_class_of ctxt = + let fun class_of ({class, ...}: solver_info, _) = class ctxt + in solver_info_of no_solver_err (class_of o the) ctxt end + +fun solver_options_of ctxt = + let + fun all_options NONE = [] + | all_options (SOME ({options, ...} : solver_info, opts)) = + opts @ options ctxt + in solver_info_of (K []) all_options ctxt end + +val setup_solver = + Attrib.setup @{binding smt_solver} + (Scan.lift (@{keyword "="} |-- Args.name) >> + (Thm.declaration_attribute o K o select_solver)) + "SMT solver configuration" + + +(* options *) + +val oracle = Attrib.setup_config_bool @{binding smt_oracle} (K true) +val timeout = Attrib.setup_config_real @{binding smt_timeout} (K 30.0) +val reconstruction_step_timeout = Attrib.setup_config_real @{binding smt_reconstruction_step_timeout} (K 10.0) +val random_seed = Attrib.setup_config_int @{binding smt_random_seed} (K 1) +val read_only_certificates = Attrib.setup_config_bool @{binding smt_read_only_certificates} (K false) +val verbose = Attrib.setup_config_bool @{binding smt_verbose} (K true) +val trace = 
Attrib.setup_config_bool @{binding smt_trace} (K false) +val statistics = Attrib.setup_config_bool @{binding smt_statistics} (K false) +val monomorph_limit = Attrib.setup_config_int @{binding smt_monomorph_limit} (K 10) +val monomorph_instances = Attrib.setup_config_int @{binding smt_monomorph_instances} (K 500) +val infer_triggers = Attrib.setup_config_bool @{binding smt_infer_triggers} (K false) +val debug_files = Attrib.setup_config_string @{binding smt_debug_files} (K "") +val sat_solver = Attrib.setup_config_string @{binding smt_sat_solver} (K "cdclite") + + +(* diagnostics *) + +fun cond_trace flag f x = if flag then tracing ("SMT: " ^ f x) else () + +fun verbose_msg ctxt = cond_trace (Config.get ctxt verbose) +fun trace_msg ctxt = cond_trace (Config.get ctxt trace) +fun statistics_msg ctxt = cond_trace (Config.get ctxt statistics) + + +(* tools *) + +fun with_time_limit ctxt timeout_config f x = + Timeout.apply (seconds (Config.get ctxt timeout_config)) f x + handle Timeout.TIMEOUT _ => raise SMT_Failure.SMT SMT_Failure.Time_Out + +fun with_timeout ctxt = with_time_limit ctxt timeout + + +(* certificates *) + +val certificates_of = certs_of o Data.get o Context.Proof + +val get_certificates_path = Option.map (Cache_IO.cache_path_of) o certificates_of + +fun select_certificates name context = context |> Data.map (put_certs ( + if name = "" then NONE + else + Path.explode name + |> Path.append (Resources.master_directory (Context.theory_of context)) + |> SOME o Cache_IO.unsynchronized_init)) + +val setup_certificates = + Attrib.setup @{binding smt_certificates} + (Scan.lift (@{keyword "="} |-- Args.name) >> + (Thm.declaration_attribute o K o select_certificates)) + "SMT certificates configuration" + + +(* setup *) + +val _ = Theory.setup ( + setup_solver #> + setup_certificates) + +fun print_setup ctxt = + let + fun string_of_bool b = if b then "true" else "false" + + val names = available_solvers_of ctxt + val ns = if null names then ["(none)"] else 
sort_strings names + val n = the_default "(none)" (solver_name_of ctxt) + val opts = solver_options_of ctxt + + val t = string_of_real (Config.get ctxt timeout) + + val certs_filename = + (case get_certificates_path ctxt of + SOME path => Path.print path + | NONE => "(disabled)") + in + Pretty.writeln (Pretty.big_list "SMT setup:" [ + Pretty.str ("Current SMT solver: " ^ n), + Pretty.str ("Current SMT solver options: " ^ space_implode " " opts), + Pretty.str_list "Available SMT solvers: " "" ns, + Pretty.str ("Current timeout: " ^ t ^ " seconds"), + Pretty.str ("With proofs: " ^ + string_of_bool (not (Config.get ctxt oracle))), + Pretty.str ("Certificates cache: " ^ certs_filename), + Pretty.str ("Fixed certificates: " ^ + string_of_bool (Config.get ctxt read_only_certificates))]) + end + +val _ = + Outer_Syntax.command @{command_keyword smt_status} + "show the available SMT solvers, the currently selected SMT solver, \ + \and the values of SMT configuration options" + (Scan.succeed (Toplevel.keep (print_setup o Toplevel.context_of))) + +end; diff --git a/src/main/SMT/smt_datatypes.ML b/src/main/SMT/smt_datatypes.ML new file mode 100644 index 0000000..2467cab --- /dev/null +++ b/src/main/SMT/smt_datatypes.ML @@ -0,0 +1,152 @@ +(* Title: HOL/Tools/SMT/smt_datatypes.ML + Author: Sascha Boehme, TU Muenchen + +Collector functions for common type declarations and their representation +as (co)algebraic datatypes. 
+*) + +signature SMT_DATATYPES = +sig + val add_decls: BNF_Util.fp_kind list -> typ -> + (BNF_Util.fp_kind * (typ * (term * term list) list)) list list * Proof.context -> + (BNF_Util.fp_kind * (typ * (term * term list) list)) list list * Proof.context +end; + +structure SMT_Datatypes: SMT_DATATYPES = +struct + +fun mk_selectors T Ts sels = + if null sels then + Variable.variant_fixes (replicate (length Ts) "select") + #>> map2 (fn U => fn n => Free (n, T --> U)) Ts + else + pair sels + + +(* free constructor type declarations *) + +fun get_ctr_sugar_decl ({ctrs = ctrs0, selss = selss0, ...} : Ctr_Sugar.ctr_sugar) T Ts ctxt = + let + val selss = map (map (Ctr_Sugar.mk_disc_or_sel Ts)) selss0 + val ctrs = map (Ctr_Sugar.mk_ctr Ts) ctrs0 + + fun mk_constr ctr sels = + mk_selectors T (binder_types (fastype_of ctr)) sels #>> pair ctr + + val selss' = + (if has_duplicates (op aconv) (flat selss) orelse + exists (exists (can (dest_funT o range_type o fastype_of))) selss then + [] + else + selss) + |> Ctr_Sugar_Util.pad_list [] (length ctrs) + in + @{fold_map 2} mk_constr ctrs selss' ctxt + |>> (pair T #> single) + end + + +(* typedef declarations *) + +fun get_typedef_decl (({Abs_name, Rep_name, abs_type, rep_type, ...}, {Abs_inverse, ...}) + : Typedef.info) T Ts = + if can (curry (op RS) @{thm UNIV_I}) Abs_inverse then + let + val env = snd (Term.dest_Type abs_type) ~~ Ts + val instT = Term.map_atyps (perhaps (AList.lookup (op =) env)) + + val constr = Const (Abs_name, instT (rep_type --> abs_type)) + val select = Const (Rep_name, instT (abs_type --> rep_type)) + in [(T, [(constr, [select])])] end + else + [] + + +(* collection of declarations *) + +val extN = "_ext" (* cf. 
"HOL/Tools/typedef.ML" *) + +fun get_decls fps T n Ts ctxt = + let + fun maybe_typedef () = + (case Typedef.get_info ctxt n of + [] => ([], ctxt) + | info :: _ => (map (pair (hd fps)) (get_typedef_decl info T Ts), ctxt)) + in + (case BNF_FP_Def_Sugar.fp_sugar_of ctxt n of + SOME {fp, fp_res = {Ts = fp_Ts, ...}, fp_ctr_sugar = {ctr_sugar, ...}, ...} => + if member (op =) fps fp then + let + val ns = map (fst o dest_Type) fp_Ts + val mutual_fp_sugars = map_filter (BNF_FP_Def_Sugar.fp_sugar_of ctxt) ns + val Xs = map #X mutual_fp_sugars + val ctrXs_Tsss = map (#ctrXs_Tss o #fp_ctr_sugar) mutual_fp_sugars + + (* Datatypes nested through datatypes and codatatypes nested through codatatypes are + allowed. So are mutually (co)recursive (co)datatypes. *) + fun is_same_fp s = + (case BNF_FP_Def_Sugar.fp_sugar_of ctxt s of + SOME {fp = fp', ...} => fp' = fp + | NONE => false) + fun is_homogenously_nested_co_recursive (Type (s, Ts)) = + forall (if is_same_fp s then is_homogenously_nested_co_recursive + else not o BNF_FP_Rec_Sugar_Util.exists_subtype_in Xs) Ts + | is_homogenously_nested_co_recursive _ = true + + val Type (_, As) :: _ = fp_Ts + val substAs = Term.typ_subst_atomic (As ~~ Ts); + in + (* TODO/FIXME: The "bool" check is there to work around a CVC4 bug + (http://church.cims.nyu.edu/bugzilla3/show_bug.cgi?id=597). It should be removed once + the bug is fixed. 
*) + if forall (forall (forall (is_homogenously_nested_co_recursive))) ctrXs_Tsss andalso + forall (forall (forall (curry (op <>) @{typ bool}))) + (map (map (map substAs)) ctrXs_Tsss) then + get_ctr_sugar_decl ctr_sugar T Ts ctxt |>> map (pair fp) + else + maybe_typedef () + end + else + ([], ctxt) + | NONE => + if String.isSuffix extN n then + (* for records (FIXME: hack) *) + (case Ctr_Sugar.ctr_sugar_of ctxt n of + SOME ctr_sugar => + get_ctr_sugar_decl ctr_sugar T Ts ctxt |>> map (pair (hd fps)) + | NONE => maybe_typedef ()) + else + maybe_typedef ()) + end + +fun add_decls fps T (declss, ctxt) = + let + fun declared T = exists (exists (equal T o fst o snd)) + fun declared' T = exists (exists (equal T o fst o snd) o snd) + fun depends ds = exists (member (op =) (map (fst o snd) ds)) + + fun add (TFree _) = I + | add (TVar _) = I + | add (T as Type (@{type_name fun}, _)) = + fold add (Term.body_type T :: Term.binder_types T) + | add @{typ bool} = I + | add (T as Type (n, Ts)) = (fn (dss, ctxt1) => + if declared T declss orelse declared' T dss then + (dss, ctxt1) + else if SMT_Builtin.is_builtin_typ_ext ctxt1 T then + (dss, ctxt1) + else + (case get_decls fps T n Ts ctxt1 of + ([], _) => (dss, ctxt1) + | (ds, ctxt2) => + let + val constrTs = maps (map (snd o Term.dest_Const o fst) o snd o snd) ds + val Us = fold (union (op =) o Term.binder_types) constrTs [] + + fun ins [] = [(Us, ds)] + | ins ((Uds as (Us', _)) :: Udss) = + if depends ds Us' then (Us, ds) :: Uds :: Udss else Uds :: ins Udss + in fold add Us (ins dss, ctxt2) end)) + in add T ([], ctxt) |>> append declss o map snd end + +end; diff --git a/src/main/SMT/smt_failure.ML b/src/main/SMT/smt_failure.ML new file mode 100644 index 0000000..ba892ae --- /dev/null +++ b/src/main/SMT/smt_failure.ML @@ -0,0 +1,40 @@ +(* Title: HOL/Tools/SMT/smt_failure.ML + Author: Sascha Boehme, TU Muenchen + +Failures and exception of SMT. 
+*) + +signature SMT_FAILURE = +sig + datatype failure = + Counterexample of bool | + Time_Out | + Out_Of_Memory | + Abnormal_Termination of int | + Other_Failure of string + val string_of_failure: failure -> string + exception SMT of failure +end; + +structure SMT_Failure: SMT_FAILURE = +struct + +datatype failure = + Counterexample of bool | + Time_Out | + Out_Of_Memory | + Abnormal_Termination of int | + Other_Failure of string + +fun string_of_failure (Counterexample genuine) = + if genuine then "Counterexample found (possibly spurious)" + else "Potential counterexample found" + | string_of_failure Time_Out = "Timed out" + | string_of_failure Out_Of_Memory = "Ran out of memory" + | string_of_failure (Abnormal_Termination err) = + "Solver terminated abnormally with error code " ^ string_of_int err + | string_of_failure (Other_Failure msg) = msg + +exception SMT of failure + +end; diff --git a/src/main/SMT/smt_normalize.ML b/src/main/SMT/smt_normalize.ML new file mode 100644 index 0000000..98e820b --- /dev/null +++ b/src/main/SMT/smt_normalize.ML @@ -0,0 +1,444 @@ +(* Title: HOL/Tools/SMT/smt_normalize.ML + Author: Sascha Boehme, TU Muenchen + +Normalization steps on theorems required by SMT solvers. 
+*) + +signature SMT_NORMALIZE = +sig + val drop_fact_warning: Proof.context -> thm -> unit + val atomize_conv: Proof.context -> conv + + val special_quant_table: (string * thm) list + val case_bool_entry: string * thm + val abs_min_max_table: (string * thm) list + + type extra_norm = Proof.context -> thm list * thm list -> thm list * thm list + val add_extra_norm: SMT_Util.class * extra_norm -> Context.generic -> Context.generic + val normalize: Proof.context -> thm list -> (int * thm) list +end; + +structure SMT_Normalize: SMT_NORMALIZE = +struct + +fun drop_fact_warning ctxt = + SMT_Config.verbose_msg ctxt (prefix "Warning: dropping assumption: " o + Thm.string_of_thm ctxt) + + +(* general theorem normalizations *) + +(** instantiate elimination rules **) + +local + val (cpfalse, cfalse) = `SMT_Util.mk_cprop (Thm.cterm_of @{context} @{const False}) + + fun inst f ct thm = + let val cv = f (Drule.strip_imp_concl (Thm.cprop_of thm)) + in Thm.instantiate ([], [(dest_Var (Thm.term_of cv), ct)]) thm end +in + +fun instantiate_elim thm = + (case Thm.concl_of thm of + @{const Trueprop} $ Var (_, @{typ bool}) => inst Thm.dest_arg cfalse thm + | Var _ => inst I cpfalse thm + | _ => thm) + +end + + +(** normalize definitions **) + +fun norm_def thm = + (case Thm.prop_of thm of + @{const Trueprop} $ (Const (@{const_name HOL.eq}, _) $ _ $ Abs _) => + norm_def (thm RS @{thm fun_cong}) + | Const (@{const_name Pure.eq}, _) $ _ $ Abs _ => norm_def (thm RS @{thm meta_eq_to_obj_eq}) + | _ => thm) + + +(** atomization **) + +fun atomize_conv ctxt ct = + (case Thm.term_of ct of + @{const Pure.imp} $ _ $ _ => + Conv.binop_conv (atomize_conv ctxt) then_conv Conv.rewr_conv @{thm atomize_imp} + | Const (@{const_name Pure.eq}, _) $ _ $ _ => + Conv.binop_conv (atomize_conv ctxt) then_conv Conv.rewr_conv @{thm atomize_eq} + | Const (@{const_name Pure.all}, _) $ Abs _ => + Conv.binder_conv (atomize_conv o snd) ctxt then_conv Conv.rewr_conv @{thm atomize_all} + | _ => Conv.all_conv) ct + 
handle CTERM _ => Conv.all_conv ct + +val setup_atomize = + fold SMT_Builtin.add_builtin_fun_ext'' [@{const_name Pure.imp}, @{const_name Pure.eq}, + @{const_name Pure.all}, @{const_name Trueprop}] + + +(** unfold special quantifiers **) + +val special_quant_table = [ + (@{const_name Ex1}, @{thm Ex1_def_raw}), + (@{const_name Ball}, @{thm Ball_def_raw}), + (@{const_name Bex}, @{thm Bex_def_raw})] + +local + fun special_quant (Const (n, _)) = AList.lookup (op =) special_quant_table n + | special_quant _ = NONE + + fun special_quant_conv _ ct = + (case special_quant (Thm.term_of ct) of + SOME thm => Conv.rewr_conv thm + | NONE => Conv.all_conv) ct +in + +fun unfold_special_quants_conv ctxt = + SMT_Util.if_exists_conv (is_some o special_quant) (Conv.top_conv special_quant_conv ctxt) + +val setup_unfolded_quants = fold (SMT_Builtin.add_builtin_fun_ext'' o fst) special_quant_table + +end + + +(** trigger inference **) + +local + (*** check trigger syntax ***) + + fun dest_trigger (Const (@{const_name pat}, _) $ _) = SOME true + | dest_trigger (Const (@{const_name nopat}, _) $ _) = SOME false + | dest_trigger _ = NONE + + fun eq_list [] = false + | eq_list (b :: bs) = forall (equal b) bs + + fun proper_trigger t = + t + |> these o try SMT_Util.dest_symb_list + |> map (map_filter dest_trigger o these o try SMT_Util.dest_symb_list) + |> (fn [] => false | bss => forall eq_list bss) + + fun proper_quant inside f t = + (case t of + Const (@{const_name All}, _) $ Abs (_, _, u) => proper_quant true f u + | Const (@{const_name Ex}, _) $ Abs (_, _, u) => proper_quant true f u + | @{const trigger} $ p $ u => + (if inside then f p else false) andalso proper_quant false f u + | Abs (_, _, u) => proper_quant false f u + | u1 $ u2 => proper_quant false f u1 andalso proper_quant false f u2 + | _ => true) + + fun check_trigger_error ctxt t = + error ("SMT triggers must only occur under quantifier and multipatterns " ^ + "must have the same kind: " ^ Syntax.string_of_term ctxt t) + + fun 
check_trigger_conv ctxt ct = + if proper_quant false proper_trigger (SMT_Util.term_of ct) then Conv.all_conv ct + else check_trigger_error ctxt (Thm.term_of ct) + + + (*** infer simple triggers ***) + + fun dest_cond_eq ct = + (case Thm.term_of ct of + Const (@{const_name HOL.eq}, _) $ _ $ _ => Thm.dest_binop ct + | @{const HOL.implies} $ _ $ _ => dest_cond_eq (Thm.dest_arg ct) + | _ => raise CTERM ("no equation", [ct])) + + fun get_constrs thy (Type (n, _)) = these (BNF_LFP_Compat.get_constrs thy n) + | get_constrs _ _ = [] + + fun is_constr thy (n, T) = + let fun match (m, U) = m = n andalso Sign.typ_instance thy (T, U) + in can (the o find_first match o get_constrs thy o Term.body_type) T end + + fun is_constr_pat thy t = + (case Term.strip_comb t of + (Free _, []) => true + | (Const c, ts) => is_constr thy c andalso forall (is_constr_pat thy) ts + | _ => false) + + fun is_simp_lhs ctxt t = + (case Term.strip_comb t of + (Const c, ts as _ :: _) => + not (SMT_Builtin.is_builtin_fun_ext ctxt c ts) andalso + forall (is_constr_pat (Proof_Context.theory_of ctxt)) ts + | _ => false) + + fun has_all_vars vs t = + subset (op aconv) (vs, map Free (Term.add_frees t [])) + + fun minimal_pats vs ct = + if has_all_vars vs (Thm.term_of ct) then + (case Thm.term_of ct of + _ $ _ => + (case apply2 (minimal_pats vs) (Thm.dest_comb ct) of + ([], []) => [[ct]] + | (ctss, ctss') => union (eq_set (op aconvc)) ctss ctss') + | _ => []) + else [] + + fun proper_mpat _ _ _ [] = false + | proper_mpat thy gen u cts = + let + val tps = (op ~~) (`gen (map Thm.term_of cts)) + fun some_match u = tps |> exists (fn (t', t) => + Pattern.matches thy (t', u) andalso not (t aconv u)) + in not (Term.exists_subterm some_match u) end + + val pat = SMT_Util.mk_const_pat @{theory} @{const_name pat} SMT_Util.destT1 + fun mk_pat ct = Thm.apply (SMT_Util.instT' ct pat) ct + + fun mk_clist T = + apply2 (Thm.cterm_of @{context}) (SMT_Util.symb_cons_const T, SMT_Util.symb_nil_const T) + fun mk_list (ccons, 
cnil) f cts = fold_rev (Thm.mk_binop ccons o f) cts cnil + val mk_pat_list = mk_list (mk_clist @{typ pattern}) + val mk_mpat_list = mk_list (mk_clist @{typ "pattern symb_list"}) + fun mk_trigger ctss = mk_mpat_list (mk_pat_list mk_pat) ctss + + val trigger_eq = mk_meta_eq @{lemma "p = trigger t p" by (simp add: trigger_def)} + + fun insert_trigger_conv [] ct = Conv.all_conv ct + | insert_trigger_conv ctss ct = + let + val (ctr, cp) = Thm.dest_binop (Thm.rhs_of trigger_eq) ||> rpair ct + val inst = map (apfst (dest_Var o Thm.term_of)) [cp, (ctr, mk_trigger ctss)] + in Thm.instantiate ([], inst) trigger_eq end + + fun infer_trigger_eq_conv outer_ctxt (ctxt, cvs) ct = + let + val (lhs, rhs) = dest_cond_eq ct + + val vs = map Thm.term_of cvs + val thy = Proof_Context.theory_of ctxt + + fun get_mpats ct = + if is_simp_lhs ctxt (Thm.term_of ct) then minimal_pats vs ct + else [] + val gen = Variable.export_terms ctxt outer_ctxt + val filter_mpats = filter (proper_mpat thy gen (Thm.term_of rhs)) + + in insert_trigger_conv (filter_mpats (get_mpats lhs)) ct end + + fun has_trigger (@{const trigger} $ _ $ _) = true + | has_trigger _ = false + + fun try_trigger_conv cv ct = + if SMT_Util.under_quant has_trigger (SMT_Util.term_of ct) then Conv.all_conv ct + else Conv.try_conv cv ct + + fun infer_trigger_conv ctxt = + if Config.get ctxt SMT_Config.infer_triggers then + try_trigger_conv (SMT_Util.under_quant_conv (infer_trigger_eq_conv ctxt) ctxt) + else Conv.all_conv +in + +fun trigger_conv ctxt = + SMT_Util.prop_conv (check_trigger_conv ctxt then_conv infer_trigger_conv ctxt) + +val setup_trigger = + fold SMT_Builtin.add_builtin_fun_ext'' + [@{const_name pat}, @{const_name nopat}, @{const_name trigger}] + +end + + +(** combined general normalizations **) + +fun gen_normalize1_conv ctxt = + atomize_conv ctxt then_conv + unfold_special_quants_conv ctxt then_conv + Thm.beta_conversion true then_conv + trigger_conv ctxt + +fun gen_normalize1 ctxt = + instantiate_elim #> + norm_def 
#> + Conv.fconv_rule (Thm.beta_conversion true then_conv Thm.eta_conversion) #> + Drule.forall_intr_vars #> + Conv.fconv_rule (gen_normalize1_conv ctxt) #> + (* Z3 4.3.1 silently normalizes "P --> Q --> R" to "P & Q --> R" *) + Raw_Simplifier.rewrite_rule ctxt @{thms HOL.imp_conjL[symmetric, THEN eq_reflection]} + +fun gen_norm1_safe ctxt (i, thm) = + (case try (gen_normalize1 ctxt) thm of + SOME thm' => SOME (i, thm') + | NONE => (drop_fact_warning ctxt thm; NONE)) + +fun gen_normalize ctxt iwthms = map_filter (gen_norm1_safe ctxt) iwthms + + +(* unfolding of definitions and theory-specific rewritings *) + +fun expand_head_conv cv ct = + (case Thm.term_of ct of + _ $ _ => + Conv.fun_conv (expand_head_conv cv) then_conv + Conv.try_conv (Thm.beta_conversion false) + | _ => cv) ct + + +(** rewrite bool case expressions as if expressions **) + +val case_bool_entry = (@{const_name "bool.case_bool"}, @{thm case_bool_if}) + +local + fun is_case_bool (Const (@{const_name "bool.case_bool"}, _)) = true + | is_case_bool _ = false + + fun unfold_conv _ = + SMT_Util.if_true_conv (is_case_bool o Term.head_of) + (expand_head_conv (Conv.rewr_conv @{thm case_bool_if})) +in + +fun rewrite_case_bool_conv ctxt = + SMT_Util.if_exists_conv is_case_bool (Conv.top_conv unfold_conv ctxt) + +val setup_case_bool = SMT_Builtin.add_builtin_fun_ext'' @{const_name "bool.case_bool"} + +end + + +(** unfold abs, min and max **) + +val abs_min_max_table = [ + (@{const_name min}, @{thm min_def_raw}), + (@{const_name max}, @{thm max_def_raw}), + (@{const_name abs}, @{thm abs_if_raw})] + +local + fun abs_min_max ctxt (Const (n, Type (@{type_name fun}, [T, _]))) = + (case AList.lookup (op =) abs_min_max_table n of + NONE => NONE + | SOME thm => if SMT_Builtin.is_builtin_typ_ext ctxt T then SOME thm else NONE) + | abs_min_max _ _ = NONE + + fun unfold_amm_conv ctxt ct = + (case abs_min_max ctxt (Term.head_of (Thm.term_of ct)) of + SOME thm => expand_head_conv (Conv.rewr_conv thm) + | NONE => 
Conv.all_conv) ct +in + +fun unfold_abs_min_max_conv ctxt = + SMT_Util.if_exists_conv (is_some o abs_min_max ctxt) (Conv.top_conv unfold_amm_conv ctxt) + +val setup_abs_min_max = fold (SMT_Builtin.add_builtin_fun_ext'' o fst) abs_min_max_table + +end + + +(** normalize numerals **) + +local + (* + rewrite Numeral1 into 1 + rewrite - 0 into 0 + *) + + fun is_irregular_number (Const (@{const_name numeral}, _) $ Const (@{const_name num.One}, _)) = + true + | is_irregular_number (Const (@{const_name uminus}, _) $ Const (@{const_name Groups.zero}, _)) = + true + | is_irregular_number _ = false + + fun is_strange_number ctxt t = is_irregular_number t andalso SMT_Builtin.is_builtin_num ctxt t + + val proper_num_ss = + simpset_of (put_simpset HOL_ss @{context} addsimps @{thms Num.numeral_One minus_zero}) + + fun norm_num_conv ctxt = + SMT_Util.if_conv (is_strange_number ctxt) (Simplifier.rewrite (put_simpset proper_num_ss ctxt)) + Conv.no_conv +in + +fun normalize_numerals_conv ctxt = + SMT_Util.if_exists_conv (is_strange_number ctxt) (Conv.top_sweep_conv norm_num_conv ctxt) + +end + + +(** combined unfoldings and rewritings **) + +fun unfold_conv ctxt = + rewrite_case_bool_conv ctxt then_conv + unfold_abs_min_max_conv ctxt then_conv + Thm.beta_conversion true + +fun unfold_polymorph ctxt = map (apsnd (Conv.fconv_rule (unfold_conv ctxt))) +fun unfold_monomorph ctxt = map (apsnd (Conv.fconv_rule (normalize_numerals_conv ctxt))) + + +(* overall normalization *) + +fun burrow_ids f ithms = + let + val (is, thms) = split_list ithms + val (thms', extra_thms) = f thms + in (is ~~ thms') @ map (pair ~1) extra_thms end + +type extra_norm = Proof.context -> thm list * thm list -> thm list * thm list + +structure Extra_Norms = Generic_Data +( + type T = extra_norm SMT_Util.dict + val empty = [] + val extend = I + fun merge data = SMT_Util.dict_merge fst data +) + +fun add_extra_norm (cs, norm) = Extra_Norms.map (SMT_Util.dict_update (cs, norm)) + +fun apply_extra_norms ctxt ithms = 
+ let + val cs = SMT_Config.solver_class_of ctxt + val es = SMT_Util.dict_lookup (Extra_Norms.get (Context.Proof ctxt)) cs + in burrow_ids (fold (fn e => e ctxt) es o rpair []) ithms end + +local + val ignored = member (op =) [@{const_name All}, @{const_name Ex}, + @{const_name Let}, @{const_name If}, @{const_name HOL.eq}] + + val schematic_consts_of = + let + fun collect (@{const trigger} $ p $ t) = collect_trigger p #> collect t + | collect (t $ u) = collect t #> collect u + | collect (Abs (_, _, t)) = collect t + | collect (t as Const (n, _)) = + if not (ignored n) then Monomorph.add_schematic_consts_of t else I + | collect _ = I + and collect_trigger t = + let val dest = these o try SMT_Util.dest_symb_list + in fold (fold collect_pat o dest) (dest t) end + and collect_pat (Const (@{const_name pat}, _) $ t) = collect t + | collect_pat (Const (@{const_name nopat}, _) $ t) = collect t + | collect_pat _ = I + in (fn t => collect t Symtab.empty) end +in + +fun monomorph ctxt xthms = + let val (xs, thms) = split_list xthms + in + map (pair 1) thms + |> Monomorph.monomorph schematic_consts_of ctxt + |> maps (uncurry (map o pair)) o map2 pair xs o map (map snd) + end + +end + +fun normalize ctxt wthms = + wthms + |> map_index I + |> gen_normalize ctxt + |> unfold_polymorph ctxt + |> monomorph ctxt + |> unfold_monomorph ctxt + |> apply_extra_norms ctxt + +val _ = Theory.setup (Context.theory_map ( + setup_atomize #> + setup_unfolded_quants #> + setup_trigger #> + setup_case_bool #> + setup_abs_min_max)) + +end; diff --git a/src/main/SMT/smt_real.ML b/src/main/SMT/smt_real.ML new file mode 100644 index 0000000..8e08d4c --- /dev/null +++ b/src/main/SMT/smt_real.ML @@ -0,0 +1,115 @@ +(* Title: HOL/Tools/SMT/smt_real.ML + Author: Sascha Boehme, TU Muenchen + +SMT setup for reals. 
+*) + +structure SMT_Real: sig end = +struct + + +(* SMT-LIB logic *) + +fun smtlib_logic ts = + if exists (Term.exists_type (Term.exists_subtype (equal @{typ real}))) ts + then SOME "AUFLIRA" + else NONE + + +(* SMT-LIB and Z3 built-ins *) + +local + fun real_num _ i = SOME (string_of_int i ^ ".0") + + fun is_linear [t] = SMT_Util.is_number t + | is_linear [t, u] = SMT_Util.is_number t orelse SMT_Util.is_number u + | is_linear _ = false + + fun mk_times ts = Term.list_comb (@{const times (real)}, ts) + + fun times _ _ ts = if is_linear ts then SOME ("*", 2, ts, mk_times) else NONE +in + +val setup_builtins = + SMT_Builtin.add_builtin_typ SMTLIB_Interface.smtlibC + (@{typ real}, K (SOME "Real"), real_num) #> + fold (SMT_Builtin.add_builtin_fun' SMTLIB_Interface.smtlibC) [ + (@{const less (real)}, "<"), + (@{const less_eq (real)}, "<="), + (@{const uminus (real)}, "-"), + (@{const plus (real)}, "+"), + (@{const minus (real)}, "-") ] #> + SMT_Builtin.add_builtin_fun SMTLIB_Interface.smtlibC + (Term.dest_Const @{const times (real)}, times) #> + SMT_Builtin.add_builtin_fun' Z3_Interface.smtlib_z3C + (@{const times (real)}, "*") #> + SMT_Builtin.add_builtin_fun' Z3_Interface.smtlib_z3C + (@{const divide (real)}, "/") + +end + + +(* Z3 constructors *) + +local + fun z3_mk_builtin_typ (Z3_Interface.Sym ("Real", _)) = SOME @{typ real} + | z3_mk_builtin_typ (Z3_Interface.Sym ("real", _)) = SOME @{typ real} + (*FIXME: delete*) + | z3_mk_builtin_typ _ = NONE + + fun z3_mk_builtin_num _ i T = + if T = @{typ real} then SOME (Numeral.mk_cnumber @{ctyp real} i) + else NONE + + fun mk_nary _ cu [] = cu + | mk_nary ct _ cts = uncurry (fold_rev (Thm.mk_binop ct)) (split_last cts) + + val mk_uminus = Thm.apply (Thm.cterm_of @{context} @{const uminus (real)}) + val add = Thm.cterm_of @{context} @{const plus (real)} + val real0 = Numeral.mk_cnumber @{ctyp real} 0 + val mk_sub = Thm.mk_binop (Thm.cterm_of @{context} @{const minus (real)}) + val mk_mul = Thm.mk_binop (Thm.cterm_of 
@{context} @{const times (real)}) + val mk_div = Thm.mk_binop (Thm.cterm_of @{context} @{const divide (real)}) + val mk_lt = Thm.mk_binop (Thm.cterm_of @{context} @{const less (real)}) + val mk_le = Thm.mk_binop (Thm.cterm_of @{context} @{const less_eq (real)}) + + fun z3_mk_builtin_fun (Z3_Interface.Sym ("-", _)) [ct] = SOME (mk_uminus ct) + | z3_mk_builtin_fun (Z3_Interface.Sym ("+", _)) cts = SOME (mk_nary add real0 cts) + | z3_mk_builtin_fun (Z3_Interface.Sym ("-", _)) [ct, cu] = SOME (mk_sub ct cu) + | z3_mk_builtin_fun (Z3_Interface.Sym ("*", _)) [ct, cu] = SOME (mk_mul ct cu) + | z3_mk_builtin_fun (Z3_Interface.Sym ("/", _)) [ct, cu] = SOME (mk_div ct cu) + | z3_mk_builtin_fun (Z3_Interface.Sym ("<", _)) [ct, cu] = SOME (mk_lt ct cu) + | z3_mk_builtin_fun (Z3_Interface.Sym ("<=", _)) [ct, cu] = SOME (mk_le ct cu) + | z3_mk_builtin_fun (Z3_Interface.Sym (">", _)) [ct, cu] = SOME (mk_lt cu ct) + | z3_mk_builtin_fun (Z3_Interface.Sym (">=", _)) [ct, cu] = SOME (mk_le cu ct) + | z3_mk_builtin_fun _ _ = NONE +in + +val z3_mk_builtins = { + mk_builtin_typ = z3_mk_builtin_typ, + mk_builtin_num = z3_mk_builtin_num, + mk_builtin_fun = (fn _ => fn sym => fn cts => + (case try (Thm.typ_of_cterm o hd) cts of + SOME @{typ real} => z3_mk_builtin_fun sym cts + | _ => NONE)) } + +end + + +(* Z3 proof replay *) + +val real_linarith_proc = + Simplifier.make_simproc @{context} "fast_real_arith" + {lhss = [@{term "(m::real) < n"}, @{term "(m::real) \ n"}, @{term "(m::real) = n"}], + proc = K Lin_Arith.simproc} + + +(* setup *) + +val _ = Theory.setup (Context.theory_map ( + SMTLIB_Interface.add_logic (10, smtlib_logic) #> + setup_builtins #> + Z3_Interface.add_mk_builtins z3_mk_builtins #> + Z3_Replay_Util.add_simproc real_linarith_proc)) + +end; diff --git a/src/main/SMT/smt_solver.ML b/src/main/SMT/smt_solver.ML new file mode 100644 index 0000000..7ff85dd --- /dev/null +++ b/src/main/SMT/smt_solver.ML @@ -0,0 +1,307 @@ +(* Title: HOL/Tools/SMT/smt_solver.ML + Author: Sascha 
Boehme, TU Muenchen + +SMT solvers registry and SMT tactic. +*) + +signature SMT_SOLVER = +sig + (*configuration*) + datatype outcome = Unsat | Sat | Unknown + + type parsed_proof = + {outcome: SMT_Failure.failure option, + fact_ids: (int * ((string * ATP_Problem_Generate.stature) * thm)) list option, + atp_proof: unit -> (term, string) ATP_Proof.atp_step list} + + type solver_config = + {name: string, + class: Proof.context -> SMT_Util.class, + avail: unit -> bool, + command: unit -> string list, + options: Proof.context -> string list, + smt_options: (string * string) list, + default_max_relevant: int, + outcome: string -> string list -> outcome * string list, + parse_proof: (Proof.context -> SMT_Translate.replay_data -> + ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> + parsed_proof) option, + replay: (Proof.context -> SMT_Translate.replay_data -> string list -> thm) option} + + (*registry*) + val add_solver: solver_config -> theory -> theory + val default_max_relevant: Proof.context -> string -> int + + (*filter*) + val smt_filter: Proof.context -> thm -> ((string * ATP_Problem_Generate.stature) * thm) list -> + int -> Time.time -> parsed_proof + + (*tactic*) + val smt_tac: Proof.context -> thm list -> int -> tactic + val smt_tac': Proof.context -> thm list -> int -> tactic +end; + +structure SMT_Solver: SMT_SOLVER = +struct + +(* interface to external solvers *) + +local + +fun make_command command options problem_path proof_path = + "(exec 2>&1;" :: map Bash.string (command () @ options) @ + [File.bash_path problem_path, ")", ">", File.bash_path proof_path] + |> space_implode " " + +fun with_trace ctxt msg f x = + let val _ = SMT_Config.trace_msg ctxt (fn () => msg) () + in f x end + +fun run ctxt name mk_cmd input = + (case SMT_Config.certificates_of ctxt of + NONE => + if not (SMT_Config.is_available ctxt name) then + error ("The SMT solver " ^ quote name ^ " is not installed") + else if Config.get ctxt 
SMT_Config.debug_files = "" then + with_trace ctxt ("Invoking SMT solver " ^ quote name ^ " ...") (Cache_IO.run mk_cmd) input + else + let + val base_path = Path.explode (Config.get ctxt SMT_Config.debug_files) + val in_path = Path.ext "smt_in" base_path + val out_path = Path.ext "smt_out" base_path + in Cache_IO.raw_run mk_cmd input in_path out_path end + | SOME certs => + (case Cache_IO.lookup certs input of + (NONE, key) => + if Config.get ctxt SMT_Config.read_only_certificates then + error ("Bad certificate cache: missing certificate") + else + Cache_IO.run_and_cache certs key mk_cmd input + | (SOME output, _) => + with_trace ctxt ("Using cached certificate from " ^ + Path.print (Cache_IO.cache_path_of certs) ^ " ...") I output)) + +(* Z3 returns 1 if "get-proof" or "get-model" fails. veriT returns 255. *) +val normal_return_codes = [0, 1, 255] + +fun run_solver ctxt name mk_cmd input = + let + fun pretty tag lines = Pretty.string_of (Pretty.big_list tag (map Pretty.str lines)) + + val _ = SMT_Config.trace_msg ctxt (pretty "Problem:" o split_lines) input + + val {redirected_output = res, output = err, return_code} = + SMT_Config.with_timeout ctxt (run ctxt name mk_cmd) input + val _ = SMT_Config.trace_msg ctxt (pretty "Solver:") err + + val output = fst (take_suffix (equal "") res) + val _ = SMT_Config.trace_msg ctxt (pretty "Result:") output + + val _ = member (op =) normal_return_codes return_code orelse + raise SMT_Failure.SMT (SMT_Failure.Abnormal_Termination return_code) + in output end + +fun trace_assms ctxt = + SMT_Config.trace_msg ctxt (Pretty.string_of o + Pretty.big_list "Assertions:" o map (Thm.pretty_thm ctxt o snd)) + +fun trace_replay_data ({context = ctxt, typs, terms, ...} : SMT_Translate.replay_data) = + let + fun pretty_eq n p = Pretty.block [Pretty.str n, Pretty.str " = ", p] + fun p_typ (n, T) = pretty_eq n (Syntax.pretty_typ ctxt T) + fun p_term (n, t) = pretty_eq n (Syntax.pretty_term ctxt t) + in + SMT_Config.trace_msg ctxt (fn () => + 
Pretty.string_of (Pretty.big_list "Names:" [ + Pretty.big_list "sorts:" (map p_typ (Symtab.dest typs)), + Pretty.big_list "functions:" (map p_term (Symtab.dest terms))])) () + end + +in + +fun invoke name command smt_options ithms ctxt = + let + val options = SMT_Config.solver_options_of ctxt + val comments = [space_implode " " options] + + val (str, replay_data as {context = ctxt', ...}) = + ithms + |> tap (trace_assms ctxt) + |> SMT_Translate.translate ctxt smt_options comments + ||> tap trace_replay_data + in (run_solver ctxt' name (make_command command options) str, replay_data) end + +end + + +(* configuration *) + +datatype outcome = Unsat | Sat | Unknown + +type parsed_proof = + {outcome: SMT_Failure.failure option, + fact_ids: (int * ((string * ATP_Problem_Generate.stature) * thm)) list option, + atp_proof: unit -> (term, string) ATP_Proof.atp_step list} + +type solver_config = + {name: string, + class: Proof.context -> SMT_Util.class, + avail: unit -> bool, + command: unit -> string list, + options: Proof.context -> string list, + smt_options: (string * string) list, + default_max_relevant: int, + outcome: string -> string list -> outcome * string list, + parse_proof: (Proof.context -> SMT_Translate.replay_data -> + ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> + parsed_proof) option, + replay: (Proof.context -> SMT_Translate.replay_data -> string list -> thm) option} + + +(* check well-sortedness *) + +val has_topsort = Term.exists_type (Term.exists_subtype (fn + TFree (_, []) => true + | TVar (_, []) => true + | _ => false)) + +(* top sorts cause problems with atomization *) +fun check_topsort ctxt thm = + if has_topsort (Thm.prop_of thm) then (SMT_Normalize.drop_fact_warning ctxt thm; TrueI) else thm + + +(* registry *) + +type solver_info = { + command: unit -> string list, + smt_options: (string * string) list, + default_max_relevant: int, + parse_proof: Proof.context -> SMT_Translate.replay_data -> + 
((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> + parsed_proof, + replay: Proof.context -> SMT_Translate.replay_data -> string list -> thm} + +structure Solvers = Generic_Data +( + type T = solver_info Symtab.table + val empty = Symtab.empty + val extend = I + fun merge data = Symtab.merge (K true) data +) + +local + fun parse_proof outcome parse_proof0 outer_ctxt replay_data xfacts prems concl output = + (case outcome output of + (Unsat, lines) => + (case parse_proof0 of + SOME pp => pp outer_ctxt replay_data xfacts prems concl lines + | NONE => {outcome = NONE, fact_ids = NONE, atp_proof = K []}) + | (result, _) => raise SMT_Failure.SMT (SMT_Failure.Counterexample (result = Sat))) + + fun replay outcome replay0 oracle outer_ctxt + (replay_data as {context = ctxt, ...} : SMT_Translate.replay_data) output = + (case outcome output of + (Unsat, lines) => + if not (Config.get ctxt SMT_Config.oracle) andalso is_some replay0 + then the replay0 outer_ctxt replay_data lines + else oracle () + | (result, _) => raise SMT_Failure.SMT (SMT_Failure.Counterexample (result = Sat))) + + val cfalse = Thm.cterm_of @{context} @{prop False} +in + +fun add_solver ({name, class, avail, command, options, smt_options, default_max_relevant, outcome, + parse_proof = parse_proof0, replay = replay0} : solver_config) = + let + fun solver oracle = { + command = command, + smt_options = smt_options, + default_max_relevant = default_max_relevant, + parse_proof = parse_proof (outcome name) parse_proof0, + replay = replay (outcome name) replay0 oracle} + + val info = {name = name, class = class, avail = avail, options = options} + in + Thm.add_oracle (Binding.name name, K cfalse) #-> (fn (_, oracle) => + Context.theory_map (Solvers.map (Symtab.update_new (name, solver oracle)))) #> + Context.theory_map (SMT_Config.add_solver info) + end + +end + +fun get_info ctxt name = the (Symtab.lookup (Solvers.get (Context.Proof ctxt)) name) + +fun name_and_info_of 
ctxt = + let val name = SMT_Config.solver_of ctxt + in (name, get_info ctxt name) end + +val default_max_relevant = #default_max_relevant oo get_info + +fun apply_solver_and_replay ctxt thms0 = + let + val thms = map (check_topsort ctxt) thms0 + val (name, {command, smt_options, replay, ...}) = name_and_info_of ctxt + val (output, replay_data) = + invoke name command smt_options (SMT_Normalize.normalize ctxt thms) ctxt + in replay ctxt replay_data output end + + +(* filter *) + +fun smt_filter ctxt0 goal xfacts i time_limit = + let + val ctxt = ctxt0 |> Config.put SMT_Config.timeout (Time.toReal time_limit) + + val ({context = ctxt, prems, concl, ...}, _) = Subgoal.focus ctxt i NONE goal + fun negate ct = Thm.dest_comb ct ||> Thm.apply @{cterm Not} |-> Thm.apply + val cprop = + (case try negate (Thm.rhs_of (SMT_Normalize.atomize_conv ctxt concl)) of + SOME ct => ct + | NONE => raise SMT_Failure.SMT (SMT_Failure.Other_Failure "cannot atomize goal")) + + val conjecture = Thm.assume cprop + val facts = map snd xfacts + val thms = conjecture :: prems @ facts + val thms' = map (check_topsort ctxt) thms + + val (name, {command, smt_options, parse_proof, ...}) = name_and_info_of ctxt + val (output, replay_data) = + invoke name command smt_options (SMT_Normalize.normalize ctxt thms') ctxt + in + parse_proof ctxt replay_data xfacts (map Thm.prop_of prems) (Thm.term_of concl) output + end + handle SMT_Failure.SMT fail => {outcome = SOME fail, fact_ids = NONE, atp_proof = K []} + + +(* SMT tactic *) + +local + fun str_of ctxt fail = + "Solver " ^ SMT_Config.solver_of ctxt ^ ": " ^ SMT_Failure.string_of_failure fail + + fun safe_solve ctxt facts = SOME (apply_solver_and_replay ctxt facts) + handle + SMT_Failure.SMT (fail as SMT_Failure.Counterexample _) => + (SMT_Config.verbose_msg ctxt (str_of ctxt) fail; NONE) + | SMT_Failure.SMT (fail as SMT_Failure.Time_Out) => + error ("SMT: Solver " ^ quote (SMT_Config.solver_of ctxt) ^ ": " ^ + SMT_Failure.string_of_failure fail ^ " 
(setting the " ^ + "configuration option " ^ quote (Config.name_of SMT_Config.timeout) ^ " might help)") + | SMT_Failure.SMT fail => error (str_of ctxt fail) + + fun resolve ctxt (SOME thm) = resolve_tac ctxt [thm] 1 + | resolve _ NONE = no_tac + + fun tac prove ctxt rules = + CONVERSION (SMT_Normalize.atomize_conv ctxt) + THEN' resolve_tac ctxt @{thms ccontr} + THEN' SUBPROOF (fn {context = ctxt', prems, ...} => + resolve ctxt' (prove ctxt' (rules @ prems))) ctxt +in + +val smt_tac = tac safe_solve +val smt_tac' = tac (SOME oo apply_solver_and_replay) + +end + +end; diff --git a/src/main/SMT/smt_systems.ML b/src/main/SMT/smt_systems.ML new file mode 100644 index 0000000..b7581cb --- /dev/null +++ b/src/main/SMT/smt_systems.ML @@ -0,0 +1,154 @@ +(* Title: HOL/Tools/SMT/smt_systems.ML + Author: Sascha Boehme, TU Muenchen + +Setup SMT solvers. +*) + +signature SMT_SYSTEMS = +sig + val cvc4_extensions: bool Config.T + val z3_extensions: bool Config.T +end; + +structure SMT_Systems: SMT_SYSTEMS = +struct + +(* helper functions *) + +fun make_avail name () = getenv (name ^ "_SOLVER") <> "" + +fun make_command name () = [getenv (name ^ "_SOLVER")] + +fun outcome_of unsat sat unknown solver_name line = + if String.isPrefix unsat line then SMT_Solver.Unsat + else if String.isPrefix sat line then SMT_Solver.Sat + else if String.isPrefix unknown line then SMT_Solver.Unknown + else raise SMT_Failure.SMT (SMT_Failure.Other_Failure ("Solver " ^ quote solver_name ^ + " failed -- enable tracing using the " ^ quote (Config.name_of SMT_Config.trace) ^ + " option for details")) + +fun is_blank_or_error_line "" = true + | is_blank_or_error_line s = String.isPrefix "(error " s + +fun on_first_line test_outcome solver_name lines = + let + val split_first = (fn [] => ("", []) | l :: ls => (l, ls)) + val (l, ls) = split_first (snd (take_prefix is_blank_or_error_line lines)) + in (test_outcome solver_name l, ls) end + +fun on_first_non_unsupported_line test_outcome solver_name lines = + 
on_first_line test_outcome solver_name (filter (curry (op <>) "unsupported") lines) + +(* CVC3 *) + +local + fun cvc3_options ctxt = [ + "-seed", string_of_int (Config.get ctxt SMT_Config.random_seed), + "-lang", "smt2", + "-timeout", string_of_int (Real.ceil (Config.get ctxt SMT_Config.timeout))] +in + +val cvc3: SMT_Solver.solver_config = { + name = "cvc3", + class = K SMTLIB_Interface.smtlibC, + avail = make_avail "CVC3", + command = make_command "CVC3", + options = cvc3_options, + smt_options = [], + default_max_relevant = 400 (* FUDGE *), + outcome = on_first_line (outcome_of "unsat" "sat" "unknown"), + parse_proof = NONE, + replay = NONE } + +end + +(* CVC4 *) + +val cvc4_extensions = Attrib.setup_config_bool @{binding cvc4_extensions} (K false) + +local + fun cvc4_options ctxt = [ + "--no-statistics", + "--random-seed=" ^ string_of_int (Config.get ctxt SMT_Config.random_seed), + "--lang=smt2", + "--continued-execution", + "--tlimit", string_of_int (Real.ceil (1000.0 * Config.get ctxt SMT_Config.timeout))] + + fun select_class ctxt = + if Config.get ctxt cvc4_extensions then CVC4_Interface.smtlib_cvc4C + else SMTLIB_Interface.smtlibC +in + +val cvc4: SMT_Solver.solver_config = { + name = "cvc4", + class = select_class, + avail = make_avail "CVC4", + command = make_command "CVC4", + options = cvc4_options, + smt_options = [(":produce-unsat-cores", "true")], + default_max_relevant = 400 (* FUDGE *), + outcome = on_first_line (outcome_of "unsat" "sat" "unknown"), + parse_proof = SOME (K CVC4_Proof_Parse.parse_proof), + replay = NONE } + +end + +(* veriT *) + +val veriT: SMT_Solver.solver_config = { + name = "verit", + class = K SMTLIB_Interface.smtlibC, + avail = make_avail "VERIT", + command = make_command "VERIT", + options = (fn ctxt => [ + "--proof-version=1", + "--proof-prune", + "--proof-merge", + "--disable-print-success", + "--disable-banner", + "--max-time=" ^ string_of_int (Real.ceil (Config.get ctxt SMT_Config.timeout))]), + smt_options = 
[(":produce-proofs", "true")], + default_max_relevant = 200 (* FUDGE *), + outcome = on_first_non_unsupported_line (outcome_of "unsat" "sat" "unknown"), + parse_proof = SOME (K VeriT_Proof_Parse.parse_proof), + replay = NONE } + +(* Z3 *) + +val z3_extensions = Attrib.setup_config_bool @{binding z3_extensions} (K false) + +local + fun z3_options ctxt = + ["smt.random_seed=" ^ string_of_int (Config.get ctxt SMT_Config.random_seed), + "smt.refine_inj_axioms=false", + "-T:" ^ string_of_int (Real.ceil (Config.get ctxt SMT_Config.timeout)), + "-smt2"] + + fun select_class ctxt = + if Config.get ctxt z3_extensions then Z3_Interface.smtlib_z3C else SMTLIB_Interface.smtlibC +in + +val z3: SMT_Solver.solver_config = { + name = "z3", + class = select_class, + avail = make_avail "Z3", + command = make_command "Z3", + options = z3_options, + smt_options = [(":produce-proofs", "true")], + default_max_relevant = 350 (* FUDGE *), + outcome = on_first_line (outcome_of "unsat" "sat" "unknown"), + parse_proof = SOME Z3_Replay.parse_proof, + replay = SOME Z3_Replay.replay } + +end + + +(* overall setup *) + +val _ = Theory.setup ( + SMT_Solver.add_solver cvc3 #> + SMT_Solver.add_solver cvc4 #> + SMT_Solver.add_solver veriT #> + SMT_Solver.add_solver z3) + +end; diff --git a/src/main/SMT/smt_translate.ML b/src/main/SMT/smt_translate.ML new file mode 100644 index 0000000..9e9bb6a --- /dev/null +++ b/src/main/SMT/smt_translate.ML @@ -0,0 +1,527 @@ +(* Title: HOL/Tools/SMT/smt_translate.ML + Author: Sascha Boehme, TU Muenchen + +Translate theorems into an SMT intermediate format and serialize them. 
+*) + +signature SMT_TRANSLATE = +sig + (*intermediate term structure*) + datatype squant = SForall | SExists + datatype 'a spattern = SPat of 'a list | SNoPat of 'a list + datatype sterm = + SVar of int | + SApp of string * sterm list | + SLet of string * sterm * sterm | + SQua of squant * string list * sterm spattern list * sterm + + (*translation configuration*) + type sign = { + logic: string, + sorts: string list, + dtyps: (BNF_Util.fp_kind * (string * (string * (string * string) list) list)) list, + funcs: (string * (string list * string)) list } + type config = { + logic: term list -> string, + fp_kinds: BNF_Util.fp_kind list, + serialize: (string * string) list -> string list -> sign -> sterm list -> string } + type replay_data = { + context: Proof.context, + typs: typ Symtab.table, + terms: term Symtab.table, + ll_defs: term list, + rewrite_rules: thm list, + assms: (int * thm) list } + + (*translation*) + val add_config: SMT_Util.class * (Proof.context -> config) -> Context.generic -> Context.generic + val translate: Proof.context -> (string * string) list -> string list -> (int * thm) list -> + string * replay_data +end; + +structure SMT_Translate: SMT_TRANSLATE = +struct + + +(* intermediate term structure *) + +datatype squant = SForall | SExists + +datatype 'a spattern = SPat of 'a list | SNoPat of 'a list + +datatype sterm = + SVar of int | + SApp of string * sterm list | + SLet of string * sterm * sterm | + SQua of squant * string list * sterm spattern list * sterm + + +(* translation configuration *) + +type sign = { + logic: string, + sorts: string list, + dtyps: (BNF_Util.fp_kind * (string * (string * (string * string) list) list)) list, + funcs: (string * (string list * string)) list } + +type config = { + logic: term list -> string, + fp_kinds: BNF_Util.fp_kind list, + serialize: (string * string) list -> string list -> sign -> sterm list -> string } + +type replay_data = { + context: Proof.context, + typs: typ Symtab.table, + terms: term 
Symtab.table, + ll_defs: term list, + rewrite_rules: thm list, + assms: (int * thm) list } + + +(* translation context *) + +fun add_components_of_typ (Type (s, Ts)) = + cons (Long_Name.base_name s) #> fold_rev add_components_of_typ Ts + | add_components_of_typ (TFree (s, _)) = cons (perhaps (try (unprefix "'")) s) + | add_components_of_typ _ = I; + +fun suggested_name_of_typ T = space_implode "_" (add_components_of_typ T []); + +fun suggested_name_of_term (Const (s, _)) = Long_Name.base_name s + | suggested_name_of_term (Free (s, _)) = s + | suggested_name_of_term _ = Name.uu + +val empty_tr_context = (Name.context, Typtab.empty, Termtab.empty) +val safe_suffix = "$" + +fun add_typ T proper (cx as (names, typs, terms)) = + (case Typtab.lookup typs T of + SOME (name, _) => (name, cx) + | NONE => + let + val sugg = Name.desymbolize (SOME true) (suggested_name_of_typ T) ^ safe_suffix + val (name, names') = Name.variant sugg names + val typs' = Typtab.update (T, (name, proper)) typs + in (name, (names', typs', terms)) end) + +fun add_fun t sort (cx as (names, typs, terms)) = + (case Termtab.lookup terms t of + SOME (name, _) => (name, cx) + | NONE => + let + val sugg = Name.desymbolize (SOME false) (suggested_name_of_term t) ^ safe_suffix + val (name, names') = Name.variant sugg names + val terms' = Termtab.update (t, (name, sort)) terms + in (name, (names', typs, terms')) end) + +fun sign_of logic dtyps (_, typs, terms) = { + logic = logic, + sorts = Typtab.fold (fn (_, (n, true)) => cons n | _ => I) typs [], + dtyps = dtyps, + funcs = Termtab.fold (fn (_, (n, SOME ss)) => cons (n,ss) | _ => I) terms []} + +fun replay_data_of ctxt ll_defs rules assms (_, typs, terms) = + let + fun add_typ (T, (n, _)) = Symtab.update (n, T) + val typs' = Typtab.fold add_typ typs Symtab.empty + + fun add_fun (t, (n, _)) = Symtab.update (n, t) + val terms' = Termtab.fold add_fun terms Symtab.empty + in + {context = ctxt, typs = typs', terms = terms', ll_defs = ll_defs, rewrite_rules = 
rules, + assms = assms} + end + + +(* preprocessing *) + +(** (co)datatype declarations **) + +fun collect_co_datatypes fp_kinds (tr_context, ctxt) ts = + let + val (fp_decls, ctxt') = + ([], ctxt) + |> fold (Term.fold_types (SMT_Datatypes.add_decls fp_kinds)) ts + |>> flat + + fun is_decl_typ T = exists (equal T o fst o snd) fp_decls + + fun add_typ' T proper = + (case SMT_Builtin.dest_builtin_typ ctxt' T of + SOME n => pair n + | NONE => add_typ T proper) + + fun tr_select sel = + let val T = Term.range_type (Term.fastype_of sel) + in add_fun sel NONE ##>> add_typ' T (not (is_decl_typ T)) end + fun tr_constr (constr, selects) = + add_fun constr NONE ##>> fold_map tr_select selects + fun tr_typ (fp, (T, cases)) = + add_typ' T false ##>> fold_map tr_constr cases #>> pair fp + + val (fp_decls', tr_context') = fold_map tr_typ fp_decls tr_context + + fun add (constr, selects) = + Termtab.update (constr, length selects) #> + fold (Termtab.update o rpair 1) selects + + val funcs = fold (fold add o snd o snd) fp_decls Termtab.empty + + in ((funcs, fp_decls', tr_context', ctxt'), ts) end + (* FIXME: also return necessary (co)datatype theorems *) + + +(** eta-expand quantifiers, let expressions and built-ins *) + +local + fun eta f T t = Abs (Name.uu, T, f (Term.incr_boundvars 1 t $ Bound 0)) + + fun exp f T = eta f (Term.domain_type (Term.domain_type T)) + + fun exp2 T q = + let val U = Term.domain_type T + in Abs (Name.uu, U, q $ eta I (Term.domain_type U) (Bound 0)) end + + fun expf k i T t = + let val Ts = drop i (fst (SMT_Util.dest_funT k T)) + in + Term.incr_boundvars (length Ts) t + |> fold_rev (fn i => fn u => u $ Bound i) (0 upto length Ts - 1) + |> fold_rev (fn T => fn u => Abs (Name.uu, T, u)) Ts + end +in + +fun eta_expand ctxt funcs = + let + fun exp_func t T ts = + (case Termtab.lookup funcs t of + SOME k => Term.list_comb (t, ts) |> k <> length ts ? 
expf k (length ts) T + | NONE => Term.list_comb (t, ts)) + + fun expand ((q as Const (@{const_name All}, _)) $ Abs a) = q $ abs_expand a + | expand ((q as Const (@{const_name All}, T)) $ t) = q $ exp expand T t + | expand (q as Const (@{const_name All}, T)) = exp2 T q + | expand ((q as Const (@{const_name Ex}, _)) $ Abs a) = q $ abs_expand a + | expand ((q as Const (@{const_name Ex}, T)) $ t) = q $ exp expand T t + | expand (q as Const (@{const_name Ex}, T)) = exp2 T q + | expand (Const (@{const_name Let}, _) $ t $ u) = expand (Term.betapply (u, t)) + | expand (Const (@{const_name Let}, T) $ t) = + let val U = Term.domain_type (Term.range_type T) + in Abs (Name.uu, U, Bound 0 $ Term.incr_boundvars 1 t) end + | expand (Const (@{const_name Let}, T)) = + let val U = Term.domain_type (Term.range_type T) + in Abs (Name.uu, Term.domain_type T, Abs (Name.uu, U, Bound 0 $ Bound 1)) end + | expand t = + (case Term.strip_comb t of + (u as Const (c as (_, T)), ts) => + (case SMT_Builtin.dest_builtin ctxt c ts of + SOME (_, k, us, mk) => + if k = length us then mk (map expand us) + else if k < length us then chop k (map expand us) |>> mk |> Term.list_comb + else expf k (length ts) T (mk (map expand us)) + | NONE => exp_func u T (map expand ts)) + | (u as Free (_, T), ts) => exp_func u T (map expand ts) + | (Abs a, ts) => Term.list_comb (abs_expand a, map expand ts) + | (u, ts) => Term.list_comb (u, map expand ts)) + + and abs_expand (n, T, t) = Abs (n, T, expand t) + + in map expand end + +end + + +(** introduce explicit applications **) + +local + (* + Make application explicit for functions with varying number of arguments. + *) + + fun add t i = apfst (Termtab.map_default (t, i) (Integer.min i)) + fun add_type T = apsnd (Typtab.update (T, ())) + + fun min_arities t = + (case Term.strip_comb t of + (u as Const _, ts) => add u (length ts) #> fold min_arities ts + | (u as Free _, ts) => add u (length ts) #> fold min_arities ts + | (Abs (_, T, u), ts) => (can dest_funT T ? 
add_type T) #> min_arities u #> fold min_arities ts + | (_, ts) => fold min_arities ts) + + fun minimize types t i = + let + fun find_min j [] _ = j + | find_min j (U :: Us) T = + if Typtab.defined types T then j else find_min (j + 1) Us (U --> T) + + val (Ts, T) = Term.strip_type (Term.type_of t) + in find_min 0 (take i (rev Ts)) T end + + fun app u (t, T) = (Const (@{const_name fun_app}, T --> T) $ t $ u, Term.range_type T) + + fun apply i t T ts = + let + val (ts1, ts2) = chop i ts + val (_, U) = SMT_Util.dest_funT i T + in fst (fold app ts2 (Term.list_comb (t, ts1), U)) end +in + +fun intro_explicit_application ctxt funcs ts = + let + val (arities, types) = fold min_arities ts (Termtab.empty, Typtab.empty) + val arities' = Termtab.map (minimize types) arities (* FIXME: highly suspicious *) + + fun app_func t T ts = + if is_some (Termtab.lookup funcs t) then Term.list_comb (t, ts) + else apply (the (Termtab.lookup arities' t)) t T ts + + fun in_list T f t = SMT_Util.mk_symb_list T (map f (SMT_Util.dest_symb_list t)) + + fun traverse Ts t = + (case Term.strip_comb t of + (q as Const (@{const_name All}, _), [Abs (x, T, u)]) => + q $ Abs (x, T, in_trigger (T :: Ts) u) + | (q as Const (@{const_name Ex}, _), [Abs (x, T, u)]) => + q $ Abs (x, T, in_trigger (T :: Ts) u) + | (q as Const (@{const_name Let}, _), [u1, u2 as Abs _]) => + q $ traverse Ts u1 $ traverse Ts u2 + | (u as Const (c as (_, T)), ts) => + (case SMT_Builtin.dest_builtin ctxt c ts of + SOME (_, k, us, mk) => + let + val (ts1, ts2) = chop k (map (traverse Ts) us) + val U = Term.strip_type T |>> snd o chop k |> (op --->) + in apply 0 (mk ts1) U ts2 end + | NONE => app_func u T (map (traverse Ts) ts)) + | (u as Free (_, T), ts) => app_func u T (map (traverse Ts) ts) + | (u as Bound i, ts) => apply 0 u (nth Ts i) (map (traverse Ts) ts) + | (Abs (n, T, u), ts) => traverses Ts (Abs (n, T, traverse (T::Ts) u)) ts + | (u, ts) => traverses Ts u ts) + and in_trigger Ts ((c as @{const trigger}) $ p $ t) = c $ 
in_pats Ts p $ traverse Ts t + | in_trigger Ts t = traverse Ts t + and in_pats Ts ps = + in_list @{typ "pattern symb_list"} (in_list @{typ pattern} (in_pat Ts)) ps + and in_pat Ts ((p as Const (@{const_name pat}, _)) $ t) = p $ traverse Ts t + | in_pat Ts ((p as Const (@{const_name nopat}, _)) $ t) = p $ traverse Ts t + | in_pat _ t = raise TERM ("bad pattern", [t]) + and traverses Ts t ts = Term.list_comb (t, map (traverse Ts) ts) + in map (traverse []) ts end + +val fun_app_eq = mk_meta_eq @{thm fun_app_def} + +end + + +(** map HOL formulas to FOL formulas (i.e., separate formulas froms terms) **) + +local + val is_quant = member (op =) [@{const_name All}, @{const_name Ex}] + + val fol_rules = [ + Let_def, + @{lemma "P = True == P" by (rule eq_reflection) simp}] + + exception BAD_PATTERN of unit + + fun is_builtin_conn_or_pred ctxt c ts = + is_some (SMT_Builtin.dest_builtin_conn ctxt c ts) orelse + is_some (SMT_Builtin.dest_builtin_pred ctxt c ts) +in + +fun folify ctxt = + let + fun in_list T f t = SMT_Util.mk_symb_list T (map_filter f (SMT_Util.dest_symb_list t)) + + fun in_term pat t = + (case Term.strip_comb t of + (@{const True}, []) => t + | (@{const False}, []) => t + | (u as Const (@{const_name If}, _), [t1, t2, t3]) => + if pat then raise BAD_PATTERN () else u $ in_form t1 $ in_term pat t2 $ in_term pat t3 + | (Const (c as (n, _)), ts) => + if is_builtin_conn_or_pred ctxt c ts orelse is_quant n then + if pat then raise BAD_PATTERN () else in_form t + else + Term.list_comb (Const c, map (in_term pat) ts) + | (Free c, ts) => Term.list_comb (Free c, map (in_term pat) ts) + | _ => t) + + and in_pat ((p as Const (@{const_name pat}, _)) $ t) = + p $ in_term true t + | in_pat ((p as Const (@{const_name nopat}, _)) $ t) = + p $ in_term true t + | in_pat t = raise TERM ("bad pattern", [t]) + + and in_pats ps = + in_list @{typ "pattern symb_list"} (SOME o in_list @{typ pattern} (try in_pat)) ps + + and in_trigger ((c as @{const trigger}) $ p $ t) = c $ in_pats p $ 
in_form t + | in_trigger t = in_form t + + and in_form t = + (case Term.strip_comb t of + (q as Const (qn, _), [Abs (n, T, u)]) => + if is_quant qn then q $ Abs (n, T, in_trigger u) + else in_term false t + | (Const c, ts) => + (case SMT_Builtin.dest_builtin_conn ctxt c ts of + SOME (_, _, us, mk) => mk (map in_form us) + | NONE => + (case SMT_Builtin.dest_builtin_pred ctxt c ts of + SOME (_, _, us, mk) => mk (map (in_term false) us) + | NONE => in_term false t)) + | _ => in_term false t) + in + map in_form #> + pair (fol_rules, I) + end + +end + + +(* translation into intermediate format *) + +(** utility functions **) + +val quantifier = (fn + @{const_name All} => SOME SForall + | @{const_name Ex} => SOME SExists + | _ => NONE) + +fun group_quant qname Ts (t as Const (q, _) $ Abs (_, T, u)) = + if q = qname then group_quant qname (T :: Ts) u else (Ts, t) + | group_quant _ Ts t = (Ts, t) + +fun dest_pat (Const (@{const_name pat}, _) $ t) = (t, true) + | dest_pat (Const (@{const_name nopat}, _) $ t) = (t, false) + | dest_pat t = raise TERM ("bad pattern", [t]) + +fun dest_pats [] = I + | dest_pats ts = + (case map dest_pat ts |> split_list ||> distinct (op =) of + (ps, [true]) => cons (SPat ps) + | (ps, [false]) => cons (SNoPat ps) + | _ => raise TERM ("bad multi-pattern", ts)) + +fun dest_trigger (@{const trigger} $ tl $ t) = + (rev (fold (dest_pats o SMT_Util.dest_symb_list) (SMT_Util.dest_symb_list tl) []), t) + | dest_trigger t = ([], t) + +fun dest_quant qn T t = quantifier qn |> Option.map (fn q => + let + val (Ts, u) = group_quant qn [T] t + val (ps, p) = dest_trigger u + in (q, rev Ts, ps, p) end) + +fun fold_map_pat f (SPat ts) = fold_map f ts #>> SPat + | fold_map_pat f (SNoPat ts) = fold_map f ts #>> SNoPat + + +(** translation from Isabelle terms into SMT intermediate terms **) + +fun intermediate logic dtyps builtin ctxt ts trx = + let + fun transT (T as TFree _) = add_typ T true + | transT (T as TVar _) = (fn _ => raise TYPE ("bad SMT type", [T], [])) 
+ | transT (T as Type _) = + (case SMT_Builtin.dest_builtin_typ ctxt T of + SOME n => pair n + | NONE => add_typ T true) + + fun app n ts = SApp (n, ts) + + fun trans t = + (case Term.strip_comb t of + (Const (qn, _), [Abs (_, T, t1)]) => + (case dest_quant qn T t1 of + SOME (q, Ts, ps, b) => + fold_map transT Ts ##>> fold_map (fold_map_pat trans) ps ##>> + trans b #>> (fn ((Ts', ps'), b') => SQua (q, Ts', ps', b')) + | NONE => raise TERM ("unsupported quantifier", [t])) + | (Const (@{const_name Let}, _), [t1, Abs (_, T, t2)]) => + transT T ##>> trans t1 ##>> trans t2 #>> (fn ((U, u1), u2) => SLet (U, u1, u2)) + | (u as Const (c as (_, T)), ts) => + (case builtin ctxt c ts of + SOME (n, _, us, _) => fold_map trans us #>> app n + | NONE => transs u T ts) + | (u as Free (_, T), ts) => transs u T ts + | (Bound i, []) => pair (SVar i) + | _ => raise TERM ("bad SMT term", [t])) + + and transs t T ts = + let val (Us, U) = SMT_Util.dest_funT (length ts) T + in + fold_map transT Us ##>> transT U #-> (fn Up => + add_fun t (SOME Up) ##>> fold_map trans ts #>> SApp) + end + + val (us, trx') = fold_map trans ts trx + in ((sign_of (logic ts) dtyps trx', us), trx') end + + +(* translation *) + +structure Configs = Generic_Data +( + type T = (Proof.context -> config) SMT_Util.dict + val empty = [] + val extend = I + fun merge data = SMT_Util.dict_merge fst data +) + +fun add_config (cs, cfg) = Configs.map (SMT_Util.dict_update (cs, cfg)) + +fun get_config ctxt = + let val cs = SMT_Config.solver_class_of ctxt + in + (case SMT_Util.dict_get (Configs.get (Context.Proof ctxt)) cs of + SOME cfg => cfg ctxt + | NONE => error ("SMT: no translation configuration found " ^ + "for solver class " ^ quote (SMT_Util.string_of_class cs))) + end + +fun translate ctxt smt_options comments ithms = + let + val {logic, fp_kinds, serialize} = get_config ctxt + + fun no_dtyps (tr_context, ctxt) ts = + ((Termtab.empty, [], tr_context, ctxt), ts) + + val ts1 = map (Envir.beta_eta_contract o 
SMT_Util.prop_of o snd) ithms + + val ((funcs, dtyps, tr_context, ctxt1), ts2) = + ((empty_tr_context, ctxt), ts1) + |-> (if null fp_kinds then no_dtyps else collect_co_datatypes fp_kinds) + + fun is_binder (Const (@{const_name Let}, _) $ _) = true + | is_binder t = Lambda_Lifting.is_quantifier t + + fun mk_trigger ((q as Const (@{const_name All}, _)) $ Abs (n, T, t)) = + q $ Abs (n, T, mk_trigger t) + | mk_trigger (eq as (Const (@{const_name HOL.eq}, T) $ lhs $ _)) = + Term.domain_type T --> @{typ pattern} + |> (fn T => Const (@{const_name pat}, T) $ lhs) + |> SMT_Util.mk_symb_list @{typ pattern} o single + |> SMT_Util.mk_symb_list @{typ "pattern symb_list"} o single + |> (fn t => @{const trigger} $ t $ eq) + | mk_trigger t = t + + val (ctxt2, (ts3, ll_defs)) = + ts2 + |> eta_expand ctxt1 funcs + |> rpair ctxt1 + |-> Lambda_Lifting.lift_lambdas NONE is_binder + |-> (fn (ts', ll_defs) => fn ctxt' => + (ctxt', (intro_explicit_application ctxt' funcs (map mk_trigger ll_defs @ ts'), ll_defs))) + + val ((rewrite_rules, builtin), ts4) = folify ctxt2 ts3 + |>> apfst (cons fun_app_eq) +val _ = dtyps : (BNF_Util.fp_kind * (string * (string * (string * string) list) list)) list (*###*) + in + (ts4, tr_context) + |-> intermediate logic dtyps (builtin SMT_Builtin.dest_builtin) ctxt2 + |>> uncurry (serialize smt_options comments) + ||> replay_data_of ctxt2 ll_defs rewrite_rules ithms + end + +end; diff --git a/src/main/SMT/smt_util.ML b/src/main/SMT/smt_util.ML new file mode 100644 index 0000000..387c204 --- /dev/null +++ b/src/main/SMT/smt_util.ML @@ -0,0 +1,240 @@ +(* Title: HOL/Tools/SMT/smt_util.ML + Author: Sascha Boehme, TU Muenchen + +General utility functions. 
+*) + +signature SMT_UTIL = +sig + (*basic combinators*) + val repeat: ('a -> 'a option) -> 'a -> 'a + val repeat_yield: ('a -> 'b -> ('a * 'b) option) -> 'a -> 'b -> 'a * 'b + + (*class dictionaries*) + type class = string list + val basicC: class + val string_of_class: class -> string + type 'a dict = (class * 'a) Ord_List.T + val dict_map_default: class * 'a -> ('a -> 'a) -> 'a dict -> 'a dict + val dict_update: class * 'a -> 'a dict -> 'a dict + val dict_merge: ('a * 'a -> 'a) -> 'a dict * 'a dict -> 'a dict + val dict_lookup: 'a dict -> class -> 'a list + val dict_get: 'a dict -> class -> 'a option + + (*types*) + val dest_funT: int -> typ -> typ list * typ + + (*terms*) + val dest_conj: term -> term * term + val dest_disj: term -> term * term + val under_quant: (term -> 'a) -> term -> 'a + val is_number: term -> bool + + (*symbolic lists*) + val symb_nil_const: typ -> term + val symb_cons_const: typ -> term + val mk_symb_list: typ -> term list -> term + val dest_symb_list: term -> term list + + (*patterns and instantiations*) + val mk_const_pat: theory -> string -> (ctyp -> 'a) -> 'a * cterm + val destT1: ctyp -> ctyp + val destT2: ctyp -> ctyp + val instTs: ctyp list -> ctyp list * cterm -> cterm + val instT: ctyp -> ctyp * cterm -> cterm + val instT': cterm -> ctyp * cterm -> cterm + + (*certified terms*) + val dest_cabs: cterm -> Proof.context -> cterm * Proof.context + val dest_all_cabs: cterm -> Proof.context -> cterm * Proof.context + val dest_cbinder: cterm -> Proof.context -> cterm * Proof.context + val dest_all_cbinders: cterm -> Proof.context -> cterm * Proof.context + val mk_cprop: cterm -> cterm + val dest_cprop: cterm -> cterm + val mk_cequals: cterm -> cterm -> cterm + val term_of: cterm -> term + val prop_of: thm -> term + + (*conversions*) + val if_conv: (term -> bool) -> conv -> conv -> conv + val if_true_conv: (term -> bool) -> conv -> conv + val if_exists_conv: (term -> bool) -> conv -> conv + val binders_conv: (Proof.context -> conv) -> 
Proof.context -> conv + val under_quant_conv: (Proof.context * cterm list -> conv) -> + Proof.context -> conv + val prop_conv: conv -> conv +end; + +structure SMT_Util: SMT_UTIL = +struct + +(* basic combinators *) + +fun repeat f = + let fun rep x = (case f x of SOME y => rep y | NONE => x) + in rep end + +fun repeat_yield f = + let fun rep x y = (case f x y of SOME (x', y') => rep x' y' | NONE => (x, y)) + in rep end + + +(* class dictionaries *) + +type class = string list + +val basicC = [] + +fun string_of_class [] = "basic" + | string_of_class cs = "basic." ^ space_implode "." cs + +type 'a dict = (class * 'a) Ord_List.T + +fun class_ord ((cs1, _), (cs2, _)) = + rev_order (list_ord fast_string_ord (cs1, cs2)) + +fun dict_insert (cs, x) d = + if AList.defined (op =) d cs then d + else Ord_List.insert class_ord (cs, x) d + +fun dict_map_default (cs, x) f = + dict_insert (cs, x) #> AList.map_entry (op =) cs f + +fun dict_update (e as (_, x)) = dict_map_default e (K x) + +fun dict_merge val_merge = sort class_ord o AList.join (op =) (K val_merge) + +fun dict_lookup d cs = + let fun match (cs', x) = if is_prefix (op =) cs' cs then SOME x else NONE + in map_filter match d end + +fun dict_get d cs = + (case AList.lookup (op =) d cs of + NONE => (case cs of [] => NONE | _ => dict_get d (take (length cs - 1) cs)) + | SOME x => SOME x) + + +(* types *) + +val dest_funT = + let + fun dest Ts 0 T = (rev Ts, T) + | dest Ts i (Type ("fun", [T, U])) = dest (T::Ts) (i-1) U + | dest _ _ T = raise TYPE ("not a function type", [T], []) + in dest [] end + + +(* terms *) + +fun dest_conj (@{const HOL.conj} $ t $ u) = (t, u) + | dest_conj t = raise TERM ("not a conjunction", [t]) + +fun dest_disj (@{const HOL.disj} $ t $ u) = (t, u) + | dest_disj t = raise TERM ("not a disjunction", [t]) + +fun under_quant f t = + (case t of + Const (@{const_name All}, _) $ Abs (_, _, u) => under_quant f u + | Const (@{const_name Ex}, _) $ Abs (_, _, u) => under_quant f u + | _ => f t) + +val 
is_number = + let + fun is_num env (Const (@{const_name Let}, _) $ t $ Abs (_, _, u)) = is_num (t :: env) u + | is_num env (Bound i) = i < length env andalso is_num env (nth env i) + | is_num _ t = can HOLogic.dest_number t + in is_num [] end + + +(* symbolic lists *) + +fun symb_listT T = Type (@{type_name symb_list}, [T]) + +fun symb_nil_const T = Const (@{const_name Symb_Nil}, symb_listT T) + +fun symb_cons_const T = + let val listT = symb_listT T in Const (@{const_name Symb_Cons}, T --> listT --> listT) end + +fun mk_symb_list T ts = + fold_rev (fn t => fn u => symb_cons_const T $ t $ u) ts (symb_nil_const T) + +fun dest_symb_list (Const (@{const_name Symb_Nil}, _)) = [] + | dest_symb_list (Const (@{const_name Symb_Cons}, _) $ t $ u) = t :: dest_symb_list u + + +(* patterns and instantiations *) + +fun mk_const_pat thy name destT = + let val cpat = Thm.global_cterm_of thy (Const (name, Sign.the_const_type thy name)) + in (destT (Thm.ctyp_of_cterm cpat), cpat) end + +val destT1 = hd o Thm.dest_ctyp +val destT2 = hd o tl o Thm.dest_ctyp + +fun instTs cUs (cTs, ct) = Thm.instantiate_cterm (map (dest_TVar o Thm.typ_of) cTs ~~ cUs, []) ct +fun instT cU (cT, ct) = instTs [cU] ([cT], ct) +fun instT' ct = instT (Thm.ctyp_of_cterm ct) + + +(* certified terms *) + +fun dest_cabs ct ctxt = + (case Thm.term_of ct of + Abs _ => + let val (n, ctxt') = yield_singleton Variable.variant_fixes Name.uu ctxt + in (snd (Thm.dest_abs (SOME n) ct), ctxt') end + | _ => raise CTERM ("no abstraction", [ct])) + +val dest_all_cabs = repeat_yield (try o dest_cabs) + +fun dest_cbinder ct ctxt = + (case Thm.term_of ct of + Const _ $ Abs _ => dest_cabs (Thm.dest_arg ct) ctxt + | _ => raise CTERM ("not a binder", [ct])) + +val dest_all_cbinders = repeat_yield (try o dest_cbinder) + +val mk_cprop = Thm.apply (Thm.cterm_of @{context} @{const Trueprop}) + +fun dest_cprop ct = + (case Thm.term_of ct of + @{const Trueprop} $ _ => Thm.dest_arg ct + | _ => raise CTERM ("not a property", [ct])) + +val 
equals = mk_const_pat @{theory} @{const_name Pure.eq} destT1 +fun mk_cequals ct cu = Thm.mk_binop (instT' ct equals) ct cu + +val dest_prop = (fn @{const Trueprop} $ t => t | t => t) +fun term_of ct = dest_prop (Thm.term_of ct) +fun prop_of thm = dest_prop (Thm.prop_of thm) + + +(* conversions *) + +fun if_conv pred cv1 cv2 ct = if pred (Thm.term_of ct) then cv1 ct else cv2 ct + +fun if_true_conv pred cv = if_conv pred cv Conv.all_conv + +fun if_exists_conv pred = if_true_conv (Term.exists_subterm pred) + +fun binders_conv cv ctxt = + Conv.binder_conv (binders_conv cv o snd) ctxt else_conv cv ctxt + +fun under_quant_conv cv ctxt = + let + fun quant_conv inside ctxt cvs ct = + (case Thm.term_of ct of + Const (@{const_name All}, _) $ Abs _ => + Conv.binder_conv (under_conv cvs) ctxt + | Const (@{const_name Ex}, _) $ Abs _ => + Conv.binder_conv (under_conv cvs) ctxt + | _ => if inside then cv (ctxt, cvs) else Conv.all_conv) ct + and under_conv cvs (cv, ctxt) = quant_conv true ctxt (cv :: cvs) + in quant_conv false ctxt [] end + +fun prop_conv cv ct = + (case Thm.term_of ct of + @{const Trueprop} $ _ => Conv.arg_conv cv ct + | _ => raise CTERM ("not a property", [ct])) + +end; diff --git a/src/main/SMT/smtlib.ML b/src/main/SMT/smtlib.ML new file mode 100644 index 0000000..e20b0ba --- /dev/null +++ b/src/main/SMT/smtlib.ML @@ -0,0 +1,191 @@ +(* Title: HOL/Tools/SMT/smtlib.ML + Author: Sascha Boehme, TU Muenchen + +Parsing and generating SMT-LIB 2. 
+*) + +signature SMTLIB = +sig + exception PARSE of int * string + datatype tree = + Num of int | + Dec of int * int | + Str of string | + Sym of string | + Key of string | + S of tree list + val parse: string list -> tree + val pretty_tree: tree -> Pretty.T + val str_of: tree -> string +end; + +structure SMTLIB: SMTLIB = +struct + +(* data structures *) + +exception PARSE of int * string + +datatype tree = + Num of int | + Dec of int * int | + Str of string | + Sym of string | + Key of string | + S of tree list + +datatype unfinished = None | String of string | Symbol of string + + +(* utilities *) + +fun read_raw pred l cs = + (case take_prefix pred cs of + ([], []) => raise PARSE (l, "empty token") + | ([], c :: _) => raise PARSE (l, "unexpected character " ^ quote c) + | x => x) + + +(* numerals and decimals *) + +fun int_of cs = fst (read_int cs) + +fun read_num l cs = + (case read_raw Symbol.is_ascii_digit l cs of + (cs1, "." :: cs') => + let val (cs2, cs'') = read_raw Symbol.is_ascii_digit l cs' + in (Dec (int_of cs1, int_of cs2), cs'') end + | (cs1, cs2) => (Num (int_of cs1), cs2)) + + +(* binary numbers *) + +fun is_bin c = (c = "0" orelse c = "1") + +fun read_bin l cs = read_raw is_bin l cs |>> Num o fst o read_radix_int 2 + + +(* hex numbers *) + +val is_hex = member (op =) (raw_explode "0123456789abcdefABCDEF") + +fun within c1 c2 c = (ord c1 <= ord c andalso ord c <= ord c2) + +fun unhex i [] = i + | unhex i (c :: cs) = + if within "0" "9" c then unhex (i * 16 + (ord c - ord "0")) cs + else if within "a" "f" c then unhex (i * 16 + (ord c - ord "a" + 10)) cs + else if within "A" "F" c then unhex (i * 16 + (ord c - ord "A" + 10)) cs + else raise Fail ("bad hex character " ^ quote c) + +fun read_hex l cs = read_raw is_hex l cs |>> Num o unhex 0 + + +(* symbols *) + +val symbol_chars = raw_explode "~!@$%^&*_+=<>.?/-" + +fun is_sym c = + Symbol.is_ascii_letter c orelse + Symbol.is_ascii_digit c orelse + member (op =) symbol_chars c + +fun read_sym f l cs = 
read_raw is_sym l cs |>> f o implode + + +(* quoted tokens *) + +fun read_quoted stop (escape, replacement) cs = + let + fun read _ [] = NONE + | read rs (cs as (c :: cs')) = + if is_prefix (op =) stop cs then + SOME (implode (rev rs), drop (length stop) cs) + else if not (null escape) andalso is_prefix (op =) escape cs then + read (replacement :: rs) (drop (length escape) cs) + else read (c :: rs) cs' + in read [] cs end + +fun read_string cs = read_quoted ["\\", "\""] (["\\", "\\"], "\\") cs +fun read_symbol cs = read_quoted ["|"] ([], "") cs + + +(* core parser *) + +fun read _ [] rest tss = (rest, tss) + | read l ("(" :: cs) None tss = read l cs None ([] :: tss) + | read l (")" :: cs) None (ts1 :: ts2 :: tss) = + read l cs None ((S (rev ts1) :: ts2) :: tss) + | read l ("#" :: "x" :: cs) None (ts :: tss) = + token read_hex l cs ts tss + | read l ("#" :: "b" :: cs) None (ts :: tss) = + token read_bin l cs ts tss + | read l (":" :: cs) None (ts :: tss) = + token (read_sym Key) l cs ts tss + | read l ("\"" :: cs) None (ts :: tss) = + quoted read_string String Str l "" cs ts tss + | read l ("|" :: cs) None (ts :: tss) = + quoted read_symbol Symbol Sym l "" cs ts tss + | read l ((c as "!") :: cs) None (ts :: tss) = + token (fn _ => pair (Sym c)) l cs ts tss + | read l (c :: cs) None (ts :: tss) = + if Symbol.is_ascii_blank c then read l cs None (ts :: tss) + else if Symbol.is_digit c then token read_num l (c :: cs) ts tss + else token (read_sym Sym) l (c :: cs) ts tss + | read l cs (String s) (ts :: tss) = + quoted read_string String Str l s cs ts tss + | read l cs (Symbol s) (ts :: tss) = + quoted read_symbol Symbol Sym l s cs ts tss + | read l _ _ [] = raise PARSE (l, "bad parser state") + +and token f l cs ts tss = + let val (t, cs') = f l cs + in read l cs' None ((t :: ts) :: tss) end + +and quoted r f g l s cs ts tss = + (case r cs of + NONE => (f (s ^ implode cs), ts :: tss) + | SOME (s', cs') => read l cs' None ((g (s ^ s') :: ts) :: tss)) + + + +(* overall 
parser *) + +fun read_line l line = read l (raw_explode line) + +fun add_line line (l, (None, tss)) = + if size line = 0 orelse nth_string line 0 = ";" then (l + 1, (None, tss)) + else (l + 1, read_line l line None tss) + | add_line line (l, (unfinished, tss)) = + (l + 1, read_line l line unfinished tss) + +fun finish (_, (None, [[t]])) = t + | finish (l, _) = raise PARSE (l, "bad nesting") + +fun parse lines = finish (fold add_line lines (1, (None, [[]]))) + + +(* pretty printer *) + +fun pretty_tree (Num i) = Pretty.str (string_of_int i) + | pretty_tree (Dec (i, j)) = + Pretty.str (string_of_int i ^ "." ^ string_of_int j) + | pretty_tree (Str s) = + raw_explode s + |> maps (fn "\"" => ["\\", "\""] | "\\" => ["\\", "\\"] | c => [c]) + |> implode + |> enclose "\"" "\"" + |> Pretty.str + | pretty_tree (Sym s) = + if String.isPrefix "(" s (* for bit vector functions *) orelse + forall is_sym (raw_explode s) then + Pretty.str s + else + Pretty.str ("|" ^ s ^ "|") + | pretty_tree (Key s) = Pretty.str (":" ^ s) + | pretty_tree (S trees) = + Pretty.enclose "(" ")" (Pretty.separate "" (map pretty_tree trees)) + +val str_of = Pretty.unformatted_string_of o pretty_tree + +end; diff --git a/src/main/SMT/smtlib_interface.ML b/src/main/SMT/smtlib_interface.ML new file mode 100644 index 0000000..37ffb50 --- /dev/null +++ b/src/main/SMT/smtlib_interface.ML @@ -0,0 +1,171 @@ +(* Title: HOL/Tools/SMT/smtlib_interface.ML + Author: Sascha Boehme, TU Muenchen + Author: Jasmin Blanchette, TU Muenchen + +Interface to SMT solvers based on the SMT-LIB 2 format. 
+*) + +signature SMTLIB_INTERFACE = +sig + val smtlibC: SMT_Util.class + val add_logic: int * (term list -> string option) -> Context.generic -> Context.generic + val translate_config: Proof.context -> SMT_Translate.config + val assert_name_of_index: int -> string + val assert_index_of_name: string -> int + val assert_prefix : string +end; + +structure SMTLIB_Interface: SMTLIB_INTERFACE = +struct + +val smtlibC = ["smtlib"] + + +(* builtins *) + +local + fun int_num _ i = SOME (string_of_int i) + + fun is_linear [t] = SMT_Util.is_number t + | is_linear [t, u] = SMT_Util.is_number t orelse SMT_Util.is_number u + | is_linear _ = false + + fun times _ _ ts = + let val mk = Term.list_comb o pair @{const times (int)} + in if is_linear ts then SOME ("*", 2, ts, mk) else NONE end +in + +val setup_builtins = + fold (SMT_Builtin.add_builtin_typ smtlibC) [ + (@{typ bool}, K (SOME "Bool"), K (K NONE)), + (@{typ int}, K (SOME "Int"), int_num)] #> + fold (SMT_Builtin.add_builtin_fun' smtlibC) [ + (@{const True}, "true"), + (@{const False}, "false"), + (@{const Not}, "not"), + (@{const HOL.conj}, "and"), + (@{const HOL.disj}, "or"), + (@{const HOL.implies}, "=>"), + (@{const HOL.eq ('a)}, "="), + (@{const If ('a)}, "ite"), + (@{const less (int)}, "<"), + (@{const less_eq (int)}, "<="), + (@{const uminus (int)}, "-"), + (@{const plus (int)}, "+"), + (@{const minus (int)}, "-")] #> + SMT_Builtin.add_builtin_fun smtlibC + (Term.dest_Const @{const times (int)}, times) + +end + + +(* serialization *) + +(** logic **) + +fun fst_int_ord ((i1, _), (i2, _)) = int_ord (i1, i2) + +structure Logics = Generic_Data +( + type T = (int * (term list -> string option)) list + val empty = [] + val extend = I + fun merge data = Ord_List.merge fst_int_ord data +) + +fun add_logic pf = Logics.map (Ord_List.insert fst_int_ord pf) + +fun choose_logic ctxt ts = + let + fun choose [] = "AUFLIA" + | choose ((_, f) :: fs) = (case f ts of SOME s => s | NONE => choose fs) + in + (case choose (Logics.get 
(Context.Proof ctxt)) of + "" => "" (* for default Z3 logic, a subset of everything *) + | logic => "(set-logic " ^ logic ^ ")\n") + end + + +(** serialization **) + +fun var i = "?v" ^ string_of_int i + +fun tree_of_sterm l (SMT_Translate.SVar i) = SMTLIB.Sym (var (l - i - 1)) + | tree_of_sterm _ (SMT_Translate.SApp (n, [])) = SMTLIB.Sym n + | tree_of_sterm l (SMT_Translate.SApp (n, ts)) = + SMTLIB.S (SMTLIB.Sym n :: map (tree_of_sterm l) ts) + | tree_of_sterm _ (SMT_Translate.SLet _) = + raise Fail "SMT-LIB: unsupported let expression" + | tree_of_sterm l (SMT_Translate.SQua (q, ss, pats, t)) = + let + val l' = l + length ss + + fun quant_name SMT_Translate.SForall = "forall" + | quant_name SMT_Translate.SExists = "exists" + + fun gen_trees_of_pat keyword ps = + [SMTLIB.Key keyword, SMTLIB.S (map (tree_of_sterm l') ps)] + fun trees_of_pat (SMT_Translate.SPat ps) = gen_trees_of_pat "pattern" ps + | trees_of_pat (SMT_Translate.SNoPat ps) = gen_trees_of_pat "no-pattern" ps + fun tree_of_pats [] t = t + | tree_of_pats pats t = SMTLIB.S (SMTLIB.Sym "!" 
:: t :: maps trees_of_pat pats) + + val vs = map_index (fn (i, ty) => + SMTLIB.S [SMTLIB.Sym (var (l + i)), SMTLIB.Sym ty]) ss + + val body = t + |> tree_of_sterm l' + |> tree_of_pats pats + in + SMTLIB.S [SMTLIB.Sym (quant_name q), SMTLIB.S vs, body] + end + + +fun sctrarg (sel, typ) = "(" ^ sel ^ " " ^ typ ^ ")" +fun sctr (name, args) = enclose "(" ")" (space_implode " " (name :: map sctrarg args)) +fun sdatatype (name, ctrs) = enclose "(" ")" (space_implode " " (name :: map sctr ctrs)) + +fun string_of_fun (f, (ss, s)) = f ^ " (" ^ space_implode " " ss ^ ") " ^ s + +fun named_sterm s t = SMTLIB.S [SMTLIB.Sym "!", t, SMTLIB.Key "named", SMTLIB.Sym s] + +val assert_prefix = "a" + +fun assert_name_of_index i = assert_prefix ^ string_of_int i +fun assert_index_of_name s = the_default ~1 (Int.fromString (unprefix assert_prefix s)) + +fun sdtyp (fp, dtyps) = + Buffer.add (enclose ("(declare-" ^ BNF_FP_Util.co_prefix fp ^ "datatypes () (") "))\n" + (space_implode "\n " (map sdatatype dtyps))) + +fun serialize smt_options comments {logic, sorts, dtyps, funcs} ts = + let + val unsat_core = member (op =) smt_options (":produce-unsat-cores", "true") + in + Buffer.empty + |> fold (Buffer.add o enclose "; " "\n") comments + |> fold (fn (k, v) => Buffer.add ("(set-option " ^ k ^ " " ^ v ^ ")\n")) smt_options + |> Buffer.add logic + |> fold (Buffer.add o enclose "(declare-sort " " 0)\n") (sort fast_string_ord sorts) + |> fold sdtyp (AList.coalesce (op =) dtyps) + |> fold (Buffer.add o enclose "(declare-fun " ")\n" o string_of_fun) + (sort (fast_string_ord o apply2 fst) funcs) + |> fold (fn (i, t) => Buffer.add (enclose "(assert " ")\n" + (SMTLIB.str_of (named_sterm (assert_name_of_index i) (tree_of_sterm 0 t))))) (map_index I ts) + |> Buffer.add "(check-sat)\n" + |> Buffer.add (if unsat_core then "(get-unsat-core)\n" else "(get-proof)\n") + |> Buffer.content + end + +(* interface *) + +fun translate_config ctxt = { + logic = choose_logic ctxt, + fp_kinds = [], + serialize = 
serialize} + +val _ = Theory.setup (Context.theory_map + (setup_builtins #> + SMT_Translate.add_config (smtlibC, translate_config))) + +end; diff --git a/src/main/SMT/smtlib_isar.ML b/src/main/SMT/smtlib_isar.ML new file mode 100644 index 0000000..2f7ae04 --- /dev/null +++ b/src/main/SMT/smtlib_isar.ML @@ -0,0 +1,75 @@ +(* Title: HOL/Tools/SMT/smtlib_isar.ML + Author: Jasmin Blanchette, TU Muenchen + Author: Mathias Fleury, ENS Rennes + +General tools for Isar proof reconstruction. +*) + +signature SMTLIB_ISAR = +sig + val unlift_term: term list -> term -> term + val postprocess_step_conclusion: Proof.context -> thm list -> term list -> term -> term + val normalizing_prems : Proof.context -> term -> (string * string list) list + val distinguish_conjecture_and_hypothesis : ''a list -> ''b -> ''b -> ''b list -> + (''a * term) list -> term list -> term -> (ATP_Problem.atp_formula_role * term) option + val unskolemize_names: Proof.context -> term -> term +end; + +structure SMTLIB_Isar: SMTLIB_ISAR = +struct + +open ATP_Util +open ATP_Problem +open ATP_Proof_Reconstruct + +fun unlift_term ll_defs = + let + val lifted = map (ATP_Util.extract_lambda_def dest_Free o ATP_Util.hol_open_form I) ll_defs + + fun un_free (t as Free (s, _)) = + (case AList.lookup (op =) lifted s of + SOME t => un_term t + | NONE => t) + | un_free t = t + and un_term t = map_aterms un_free t + in un_term end + +(* Remove the "__" suffix for newly introduced variables (Skolems). It is not clear why "__" is + generated also for abstraction variables, but this is repaired here. *) +fun unskolemize_names ctxt = + Term.map_abs_vars (perhaps (try Name.dest_skolem)) + #> Term.map_aterms (perhaps (try (fn Free (s, T) => + Free (s |> not (Variable.is_fixed ctxt s) ? 
Name.dest_skolem, T)))) + +fun postprocess_step_conclusion ctxt rewrite_rules ll_defs = + let val thy = Proof_Context.theory_of ctxt in + Raw_Simplifier.rewrite_term thy rewrite_rules [] + #> Object_Logic.atomize_term ctxt + #> not (null ll_defs) ? unlift_term ll_defs + #> simplify_bool + #> unskolemize_names ctxt + #> HOLogic.mk_Trueprop + end + +fun normalizing_prems ctxt concl0 = + SMT_Normalize.case_bool_entry :: SMT_Normalize.special_quant_table @ + SMT_Normalize.abs_min_max_table + |> map_filter (fn (c, th) => + if exists_Const (curry (op =) c o fst) concl0 then + let val s = short_thm_name ctxt th in SOME (s, [s]) end + else + NONE) + +fun distinguish_conjecture_and_hypothesis ss id conjecture_id prem_ids fact_helper_ts hyp_ts + concl_t = + (case ss of + [s] => SOME (Axiom, the (AList.lookup (op =) fact_helper_ts s)) + | _ => + if id = conjecture_id then + SOME (Conjecture, concl_t) + else + (case find_index (curry (op =) id) prem_ids of + ~1 => NONE (* lambda-lifting definition *) + | i => SOME (Hypothesis, close_form (nth hyp_ts i)))) + +end; diff --git a/src/main/SMT/smtlib_proof.ML b/src/main/SMT/smtlib_proof.ML new file mode 100644 index 0000000..909b7a5 --- /dev/null +++ b/src/main/SMT/smtlib_proof.ML @@ -0,0 +1,298 @@ +(* Title: HOL/Tools/SMT/smtlib_proof.ML + Author: Sascha Boehme, TU Muenchen + Author: Mathias Fleury, ENS Rennes + Author: Jasmin Blanchette, TU Muenchen + +SMT-LIB-2-style proofs: parsing and abstract syntax tree. 
(*  Title:      HOL/Tools/SMT/smtlib_proof.ML
    Author:     Sascha Boehme, TU Muenchen
    Author:     Mathias Fleury, ENS Rennes
    Author:     Jasmin Blanchette, TU Muenchen

SMT-LIB-2-style proofs: parsing and abstract syntax tree.
*)

signature SMTLIB_PROOF =
sig
  datatype 'b shared = Tree of SMTLIB.tree | Term of term | Proof of 'b | None
  type ('a, 'b) context

  val mk_context: Proof.context -> int -> 'b shared Symtab.table -> typ Symtab.table ->
    term Symtab.table -> 'a -> ('a, 'b) context
  val empty_context: Proof.context -> typ Symtab.table -> term Symtab.table ->
    ('a list, 'b) context
  val ctxt_of: ('a, 'b) context -> Proof.context
  val lookup_binding: ('a, 'b) context -> string -> 'b shared
  val update_binding: string * 'b shared -> ('a, 'b) context -> ('a, 'b) context
  val with_bindings: (string * 'b shared) list -> (('a, 'b) context -> 'c * ('d, 'b) context) ->
    ('a, 'b) context -> 'c * ('d, 'b) context
  val next_id: ('a, 'b) context -> int * ('a, 'b) context
  val with_fresh_names: (('a list, 'b) context ->
    term * ((string * (string * typ)) list, 'b) context) -> ('c, 'b) context -> (term * string list)

  (*type and term parsers*)
  type type_parser = SMTLIB.tree * typ list -> typ option
  type term_parser = SMTLIB.tree * term list -> term option
  val add_type_parser: type_parser -> Context.generic -> Context.generic
  val add_term_parser: term_parser -> Context.generic -> Context.generic

  exception SMTLIB_PARSE of string * SMTLIB.tree

  val declare_fun: string -> typ -> ((string * typ) list, 'a) context ->
    ((string * typ) list, 'a) context
  val dest_binding: SMTLIB.tree -> string * 'a shared
  val type_of: ('a, 'b) context -> SMTLIB.tree -> typ
  val term_of: SMTLIB.tree -> ((string * (string * typ)) list, 'a) context ->
    term * ((string * (string * typ)) list, 'a) context
end;

structure SMTLIB_Proof: SMTLIB_PROOF =
struct

(* proof parser context *)

datatype 'b shared = Tree of SMTLIB.tree | Term of term | Proof of 'b | None

(* Parsing state: the Isabelle context, a counter for fresh step ids,
   shared bindings (syms), declared types and functions, and client data. *)
type ('a, 'b) context = {
  ctxt: Proof.context,
  id: int,
  syms: 'b shared Symtab.table,
  typs: typ Symtab.table,
  funs: term Symtab.table,
  extra: 'a}

fun mk_context ctxt id syms typs funs extra: ('a, 'b) context =
  {ctxt = ctxt, id = id, syms = syms, typs = typs, funs = funs, extra = extra}

fun empty_context ctxt typs funs = mk_context ctxt 1 Symtab.empty typs funs []

fun ctxt_of ({ctxt, ...}: ('a, 'b) context) = ctxt

fun lookup_binding ({syms, ...}: ('a, 'b) context) =
  the_default None o Symtab.lookup syms

fun map_syms f ({ctxt, id, syms, typs, funs, extra}: ('a, 'b) context) =
  mk_context ctxt id (f syms) typs funs extra

fun update_binding b = map_syms (Symtab.update b)

(* Run f with the bindings bs installed, restoring the previous values of
   those names afterwards. *)
fun with_bindings bs f cx =
  let val bs' = map (lookup_binding cx o fst) bs
  in
    cx
    |> fold update_binding bs
    |> f
    ||> fold2 (fn (name, _) => update_binding o pair name) bs bs'
  end

fun next_id ({ctxt, id, syms, typs, funs, extra}: ('a, 'b) context) =
  (id, mk_context ctxt (id + 1) syms typs funs extra)

(* Run f with an empty list of fresh names, then universally close the
   resulting proposition over every name f declared, inferring types that
   are still dummy or schematic.  Returns the closed term and the names. *)
fun with_fresh_names f ({ctxt, id, syms, typs, funs, ...}: ('a, 'b) context) =
  let
    fun bind (_, v as (_, T)) t = Logic.all_const T $ Term.absfree v t

    val needs_inferT = equal Term.dummyT orf Term.is_TVar
    val needs_infer = Term.exists_type (Term.exists_subtype needs_inferT)
    fun infer_types ctxt =
      singleton (Type_Infer_Context.infer_types ctxt) #>
      singleton (Proof_Context.standard_term_check_finish ctxt)
    fun infer ctxt t = if needs_infer t then infer_types ctxt t else t

    val (t, {ctxt = ctxt', extra = names, ...}: ((string * (string * typ)) list, 'b) context) =
      f (mk_context ctxt id syms typs funs [])
    val t' = infer ctxt' (fold_rev bind names (HOLogic.mk_Trueprop t))
  in
    (t', map fst names)
  end

fun lookup_typ ({typs, ...}: ('a, 'b) context) = Symtab.lookup typs
fun lookup_fun ({funs, ...}: ('a, 'b) context) = Symtab.lookup funs


(* core type and term parser *)

fun core_type_parser (SMTLIB.Sym "Bool", []) = SOME @{typ HOL.bool}
  | core_type_parser (SMTLIB.Sym "Int", []) = SOME @{typ Int.int}
  | core_type_parser _ = NONE

fun mk_unary n t =
  let val T = fastype_of t
  in Const (n, T --> T) $ t end

fun mk_binary' n T U t1 t2 = Const (n, [T, T] ---> U) $ t1 $ t2

fun mk_binary n t1 t2 =
  let val T = fastype_of t1
  in mk_binary' n T T t1 t2 end

(* Fold a right-associative resp. left-associative chain of applications. *)
fun mk_rassoc f t ts =
  let val us = rev (t :: ts)
  in fold f (tl us) (hd us) end

fun mk_lassoc f t ts = fold (fn u1 => fn u2 => f u2 u1) ts t

fun mk_lassoc' n = mk_lassoc (mk_binary n)

(* Build a binary predicate, picking up the argument type if known and
   otherwise inventing a schematic type variable of sort S. *)
fun mk_binary_pred n S t1 t2 =
  let
    val T1 = fastype_of t1
    val T2 = fastype_of t2
    val T =
      if T1 <> Term.dummyT then T1
      else if T2 <> Term.dummyT then T2
      else TVar (("?a", serial ()), S)
  in mk_binary' n T @{typ HOL.bool} t1 t2 end

fun mk_less t1 t2 = mk_binary_pred @{const_name ord_class.less} @{sort linorder} t1 t2
fun mk_less_eq t1 t2 = mk_binary_pred @{const_name ord_class.less_eq} @{sort linorder} t1 t2

(* Translation of the SMT-LIB core and integer-arithmetic symbols. *)
fun core_term_parser (SMTLIB.Sym "true", _) = SOME @{const HOL.True}
  | core_term_parser (SMTLIB.Sym "false", _) = SOME @{const HOL.False}
  | core_term_parser (SMTLIB.Sym "not", [t]) = SOME (HOLogic.mk_not t)
  | core_term_parser (SMTLIB.Sym "and", t :: ts) = SOME (mk_rassoc (curry HOLogic.mk_conj) t ts)
  | core_term_parser (SMTLIB.Sym "or", t :: ts) = SOME (mk_rassoc (curry HOLogic.mk_disj) t ts)
  | core_term_parser (SMTLIB.Sym "=>", [t1, t2]) = SOME (HOLogic.mk_imp (t1, t2))
  | core_term_parser (SMTLIB.Sym "implies", [t1, t2]) = SOME (HOLogic.mk_imp (t1, t2))
  | core_term_parser (SMTLIB.Sym "=", [t1, t2]) = SOME (HOLogic.mk_eq (t1, t2))
  | core_term_parser (SMTLIB.Sym "~", [t1, t2]) = SOME (HOLogic.mk_eq (t1, t2))
  | core_term_parser (SMTLIB.Sym "ite", [t1, t2, t3]) =
      let
        val T = fastype_of t2
        val c = Const (@{const_name HOL.If}, [@{typ HOL.bool}, T, T] ---> T)
      in SOME (c $ t1 $ t2 $ t3) end
  | core_term_parser (SMTLIB.Num i, []) = SOME (HOLogic.mk_number @{typ Int.int} i)
  | core_term_parser (SMTLIB.Sym "-", [t]) = SOME (mk_unary @{const_name uminus_class.uminus} t)
  | core_term_parser (SMTLIB.Sym "~", [t]) = SOME (mk_unary @{const_name uminus_class.uminus} t)
  | core_term_parser (SMTLIB.Sym "+", t :: ts) =
      SOME (mk_lassoc' @{const_name plus_class.plus} t ts)
  | core_term_parser (SMTLIB.Sym "-", t :: ts) =
      SOME (mk_lassoc' @{const_name minus_class.minus} t ts)
  | core_term_parser (SMTLIB.Sym "*", t :: ts) =
      SOME (mk_lassoc' @{const_name times_class.times} t ts)
  | core_term_parser (SMTLIB.Sym "div", [t1, t2]) = SOME (mk_binary @{const_name z3div} t1 t2)
  | core_term_parser (SMTLIB.Sym "mod", [t1, t2]) = SOME (mk_binary @{const_name z3mod} t1 t2)
  | core_term_parser (SMTLIB.Sym "<", [t1, t2]) = SOME (mk_less t1 t2)
  | core_term_parser (SMTLIB.Sym ">", [t1, t2]) = SOME (mk_less t2 t1)
  | core_term_parser (SMTLIB.Sym "<=", [t1, t2]) = SOME (mk_less_eq t1 t2)
  | core_term_parser (SMTLIB.Sym ">=", [t1, t2]) = SOME (mk_less_eq t2 t1)
  | core_term_parser _ = NONE


(* custom type and term parsers *)

type type_parser = SMTLIB.tree * typ list -> typ option

type term_parser = SMTLIB.tree * term list -> term option

fun id_ord ((id1, _), (id2, _)) = int_ord (id1, id2)

(* Registered parsers, kept as serial-numbered ordered lists so that merges
   of contexts are stable. *)
structure Parsers = Generic_Data
(
  type T = (int * type_parser) list * (int * term_parser) list
  val empty : T = ([(serial (), core_type_parser)], [(serial (), core_term_parser)])
  val extend = I
  fun merge ((tys1, ts1), (tys2, ts2)) =
    (Ord_List.merge id_ord (tys1, tys2), Ord_List.merge id_ord (ts1, ts2))
)

fun add_type_parser type_parser =
  Parsers.map (apfst (Ord_List.insert id_ord (serial (), type_parser)))

fun add_term_parser term_parser =
  Parsers.map (apsnd (Ord_List.insert id_ord (serial (), term_parser)))

fun get_type_parsers ctxt = map snd (fst (Parsers.get (Context.Proof ctxt)))
fun get_term_parsers ctxt = map snd (snd (Parsers.get (Context.Proof ctxt)))

(* Try each parser in registration order; first SOME wins. *)
fun apply_parsers parsers x =
  let
    fun apply [] = NONE
      | apply (parser :: parsers) =
          (case parser x of
            SOME y => SOME y
          | NONE => apply parsers)
  in apply parsers end


(* type and term parsing *)

exception SMTLIB_PARSE of string * SMTLIB.tree

val desymbolize = Name.desymbolize (SOME false) o perhaps (try (unprefix "?"))

(* Invent a fresh Isabelle free variable for an SMT name, record it in the
   function table, and pass it to add for the client's bookkeeping. *)
fun fresh_fun add name n T ({ctxt, id, syms, typs, funs, extra}: ('a, 'b) context) =
  let
    val (n', ctxt') = yield_singleton Variable.variant_fixes n ctxt
    val t = Free (n', T)
    val funs' = Symtab.update (name, t) funs
  in (t, mk_context ctxt' id syms typs funs' (add (n', T) extra)) end

fun declare_fun name = snd oo fresh_fun cons name (desymbolize name)
fun declare_free name = fresh_fun (cons o pair name) name (desymbolize name)

fun parse_type cx ty Ts =
  (case apply_parsers (get_type_parsers (ctxt_of cx)) (ty, Ts) of
    SOME T => T
  | NONE =>
      (case ty of
        SMTLIB.Sym name =>
          (case lookup_typ cx name of
            SOME T => T
          | NONE => raise SMTLIB_PARSE ("unknown SMT type", ty))
      | _ => raise SMTLIB_PARSE ("bad SMT type format", ty)))

(* Parse an application; unknown nullary symbols become fresh free
   variables with dummy type, to be fixed by type inference later. *)
fun parse_term t ts cx =
  (case apply_parsers (get_term_parsers (ctxt_of cx)) (t, ts) of
    SOME u => (u, cx)
  | NONE =>
      (case t of
        SMTLIB.Sym name =>
          (case lookup_fun cx name of
            SOME u => (Term.list_comb (u, ts), cx)
          | NONE =>
              if null ts then declare_free name Term.dummyT cx
              else raise SMTLIB_PARSE ("bad SMT term", t))
      | _ => raise SMTLIB_PARSE ("bad SMT term format", t)))

fun type_of cx ty =
  (case try (parse_type cx ty) [] of
    SOME T => T
  | NONE =>
      (case ty of
        SMTLIB.S (ty' :: tys) => parse_type cx ty' (map (type_of cx) tys)
      | _ => raise SMTLIB_PARSE ("bad SMT type", ty)))

fun dest_var cx (SMTLIB.S [SMTLIB.Sym name, ty]) = (name, (desymbolize name, type_of cx ty))
  | dest_var _ v = raise SMTLIB_PARSE ("bad SMT quantifier variable format", v)

(* Strip "!"-annotations wrapped around a quantifier body. *)
fun dest_body (SMTLIB.S (SMTLIB.Sym "!" :: body :: _)) = dest_body body
  | dest_body body = body

fun dest_binding (SMTLIB.S [SMTLIB.Sym name, t]) = (name, Tree t)
  | dest_binding b = raise SMTLIB_PARSE ("bad SMT let binding format", b)

(* Translate an SMT-LIB tree into an Isabelle term, caching let-bindings in
   the context the first time they are expanded. *)
fun term_of t cx =
  (case t of
    SMTLIB.S [SMTLIB.Sym "forall", SMTLIB.S vars, body] => quant HOLogic.mk_all vars body cx
  | SMTLIB.S [SMTLIB.Sym "exists", SMTLIB.S vars, body] => quant HOLogic.mk_exists vars body cx
  | SMTLIB.S [SMTLIB.Sym "let", SMTLIB.S bindings, body] =>
      with_bindings (map dest_binding bindings) (term_of body) cx
  | SMTLIB.S (SMTLIB.Sym "!" :: t :: _) => term_of t cx
  | SMTLIB.S (f :: args) =>
      cx
      |> fold_map term_of args
      |-> parse_term f
  | SMTLIB.Sym name =>
      (case lookup_binding cx name of
        Tree u =>
          cx
          |> term_of u
          |-> (fn u' => pair u' o update_binding (name, Term u'))
      | Term u => (u, cx)
      | None => parse_term t [] cx
      | _ => raise SMTLIB_PARSE ("bad SMT term format", t))
  | _ => parse_term t [] cx)

and quant q vars body cx =
  let val vs = map (dest_var cx) vars
  in
    cx
    |> with_bindings (map (apsnd (Term o Free)) vs) (term_of (dest_body body))
    |>> fold_rev (fn (_, (n, T)) => fn t => q (n, T, t)) vs
  end

end;
(*  Title:      HOL/Tools/SMT/verit_isar.ML
    Author:     Mathias Fleury, TU Muenchen
    Author:     Jasmin Blanchette, TU Muenchen

VeriT proofs as generic ATP proofs for Isar proof reconstruction.
*)

signature VERIT_ISAR =
sig
  type ('a, 'b) atp_step = ('a, 'b) ATP_Proof.atp_step
  val atp_proof_of_veriT_proof: Proof.context -> term list -> thm list -> term list -> term ->
    (string * term) list -> int list -> int -> (int * string) list -> VeriT_Proof.veriT_step list ->
    (term, string) ATP_Proof.atp_step list
end;

structure VeriT_Isar: VERIT_ISAR =
struct

open ATP_Util
open ATP_Problem
open ATP_Proof
open ATP_Proof_Reconstruct
open SMTLIB_Interface
open SMTLIB_Isar
open VeriT_Proof

(* Convert veriT proof steps into generic ATP steps.  Input steps are split
   into the original statement plus a normalization step; all other steps
   are passed through with postprocessed conclusions. *)
fun atp_proof_of_veriT_proof ctxt ll_defs rewrite_rules hyp_ts concl_t fact_helper_ts prem_ids
    conjecture_id fact_helper_ids =
  let
    fun steps_of (VeriT_Proof.VeriT_Step {id, rule, prems, concl, ...}) =
      let
        val concl' = postprocess_step_conclusion ctxt rewrite_rules ll_defs concl
        fun standard_step role = ((id, []), role, concl', rule, map (rpair []) prems)
      in
        if rule = veriT_input_rule then
          let
            (* Input steps are named "a<N>"; recover N to look up the fact. *)
            val id_num = the (Int.fromString (unprefix assert_prefix id))
            val ss = the_list (AList.lookup (op =) fact_helper_ids id_num)
          in
            (case distinguish_conjecture_and_hypothesis ss id_num conjecture_id prem_ids
                fact_helper_ts hyp_ts concl_t of
              NONE => []
            | SOME (role0, concl00) =>
                let
                  val name0 = (id ^ "a", ss)
                  val concl0 = unskolemize_names ctxt concl00
                in
                  [(name0, role0, concl0, rule, []),
                   ((id, []), Plain, concl', veriT_rewrite_rule,
                    name0 :: normalizing_prems ctxt concl0)]
                end)
          end
        else
          [standard_step (if null prems then Lemma else Plain)]
      end
  in
    maps steps_of
  end

end;
(*  Title:      HOL/Tools/SMT/verit_proof.ML
    Author:     Mathias Fleury, ENS Rennes
    Author:     Sascha Boehme, TU Muenchen

VeriT proofs: parsing and abstract syntax tree.
*)

signature VERIT_PROOF =
sig
  (*proofs*)
  datatype veriT_step = VeriT_Step of {
    id: string,
    rule: string,
    prems: string list,
    concl: term,
    fixes: string list}

  (*proof parser*)
  val parse: typ Symtab.table -> term Symtab.table -> string list ->
    Proof.context -> veriT_step list * Proof.context

  val veriT_step_prefix : string
  val veriT_input_rule: string
  val veriT_la_generic_rule : string
  val veriT_rewrite_rule : string
  val veriT_simp_arith_rule : string
  val veriT_tmp_ite_elim_rule : string
  val veriT_tmp_skolemize_rule : string
end;

structure VeriT_Proof: VERIT_PROOF =
struct

open SMTLIB_Proof

(* Internal node: like an exported step, but "bounds" records the variables
   that veriT introduced and that still have to be repaired. *)
datatype veriT_node = VeriT_Node of {
  id: string,
  rule: string,
  prems: string list,
  concl: term,
  bounds: string list}

fun mk_node id rule prems concl bounds =
  VeriT_Node {id = id, rule = rule, prems = prems, concl = concl, bounds = bounds}

datatype veriT_step = VeriT_Step of {
  id: string,
  rule: string,
  prems: string list,
  concl: term,
  fixes: string list}

fun mk_step id rule prems concl fixes =
  VeriT_Step {id = id, rule = rule, prems = prems, concl = concl, fixes = fixes}

val veriT_step_prefix = ".c"
val veriT_input_rule = "input"
val veriT_la_generic_rule = "la_generic"
val veriT_rewrite_rule = "__rewrite" (* arbitrary *)
val veriT_simp_arith_rule = "simp_arith"
val veriT_tmp_alphaconv_rule = "tmp_alphaconv"
val veriT_tmp_ite_elim_rule = "tmp_ite_elim"
val veriT_tmp_skolemize_rule = "tmp_skolemize"

(* proof parser *)

fun node_of p cx =
  ([], cx)
  ||>> `(with_fresh_names (term_of p))
  |>> snd

(*in order to get Z3-style quantification*)
fun repair_quantification (SMTLIB.S (SMTLIB.Sym "forall" :: l)) =
      let val (quantified_vars, t) = split_last (map repair_quantification l)
      in
        SMTLIB.S (SMTLIB.Sym "forall" :: SMTLIB.S quantified_vars :: t :: [])
      end
  | repair_quantification (SMTLIB.S (SMTLIB.Sym "exists" :: l)) =
      let val (quantified_vars, t) = split_last (map repair_quantification l)
      in
        SMTLIB.S (SMTLIB.Sym "exists" :: SMTLIB.S quantified_vars :: t :: [])
      end
  | repair_quantification (SMTLIB.S l) = SMTLIB.S (map repair_quantification l)
  | repair_quantification x = x

(* Substitute free variables for the bound variables listed in free_var,
   dropping the corresponding binders. *)
fun replace_bound_var_by_free_var (q $ Abs (var, ty, u)) free_var =
      (case List.find (fn v => String.isPrefix v var) free_var of
        NONE => q $ Abs (var, ty, replace_bound_var_by_free_var u free_var)
      | SOME _ => replace_bound_var_by_free_var (Term.subst_bound (Free (var, ty), u)) free_var)
  | replace_bound_var_by_free_var (u $ v) free_vars =
      replace_bound_var_by_free_var u free_vars $ replace_bound_var_by_free_var v free_vars
  | replace_bound_var_by_free_var u _ = u

(* Find the type of the first abstraction variable with the given name
   prefix, searching the formula left-to-right. *)
fun find_type_in_formula (Abs (v, T, u)) var_name =
      if String.isPrefix var_name v then SOME T else find_type_in_formula u var_name
  | find_type_in_formula (u $ v) var_name =
      (case find_type_in_formula u var_name of
        NONE => find_type_in_formula v var_name
      | some_T => some_T)
  | find_type_in_formula _ _ = NONE

fun add_bound_variables_to_ctxt concl =
  fold (update_binding o
    (fn s => (s, Term (Free (s, the_default dummyT (find_type_in_formula concl s))))))

(* Repair steps that introduce variables: record them as bindings and, for
   skolemization steps, replace the binders by free variables. *)
fun update_step_and_cx (node as VeriT_Node {id, rule, prems, concl, bounds}) cx =
  if rule = veriT_tmp_ite_elim_rule then
    (mk_node id rule prems concl bounds, add_bound_variables_to_ctxt concl bounds cx)
  else if rule = veriT_tmp_skolemize_rule then
    let val concl' = replace_bound_var_by_free_var concl bounds in
      (mk_node id rule prems concl' [], add_bound_variables_to_ctxt concl bounds cx)
    end
  else
    (node, cx)

(* Flatten a subproof: prefix its step ids with the id of the enclosing
   step and inline its "input" assumptions as implications. *)
fun fix_subproof_steps ((((id_of_father_step, rule), prems), subproof), ((step_concl, bounds),
    cx)) =
  let
    fun mk_prop_of_term concl =
      concl |> fastype_of concl = @{typ bool} ? curry (op $) @{term Trueprop}
    fun update_prems assumption_id prems =
      map (fn prem => id_of_father_step ^ prem)
        (filter_out (curry (op =) assumption_id) prems)
    fun inline_assumption assumption assumption_id
        (VeriT_Node {id, rule, prems, concl, bounds}) =
      mk_node id rule (update_prems assumption_id prems)
        (@{const Pure.imp} $ mk_prop_of_term assumption $ mk_prop_of_term concl) bounds
    fun find_input_steps_and_inline [] last_step = ([], last_step)
      | find_input_steps_and_inline (VeriT_Node {id = id', rule, prems, concl, bounds} :: steps)
          last_step =
        if rule = veriT_input_rule then
          find_input_steps_and_inline (map (inline_assumption concl id') steps) last_step
        else
          apfst (cons (mk_node (id_of_father_step ^ id') rule prems concl bounds))
            (find_input_steps_and_inline steps (id_of_father_step ^ id'))
    val (subproof', last_step_id) = find_input_steps_and_inline subproof ""
    val prems' =
      if last_step_id = "" then
        prems
      else
        (case prems of
          NONE => SOME [last_step_id]
        | SOME l => SOME (last_step_id :: l))
  in
    (subproof', (((((id_of_father_step, rule), prems'), step_concl), bounds), cx))
  end

(*
(set id rule :clauses(...) :args(..) :conclusion (...)).
or
(set id subproof (set ...) :conclusion (...)).
*)

fun parse_proof_step cx =
  let
    fun rotate_pair (a, (b, c)) = ((a, b), c)
    fun get_id (SMTLIB.S [SMTLIB.Sym "set", SMTLIB.Sym id, SMTLIB.S l]) = (id, l)
      | get_id t = raise Fail ("unrecognized VeriT proof " ^ @{make_string} t)
    fun parse_rule (SMTLIB.Sym rule :: l) = (rule, l)
    fun parse_source (SMTLIB.Key "clauses" :: SMTLIB.S source :: l) =
          (SOME (map (fn (SMTLIB.Sym id) => id) source), l)
      | parse_source l = (NONE, l)
    fun parse_subproof cx id_of_father_step
          ((subproof_step as SMTLIB.S (SMTLIB.Sym "set" :: _)) :: l) =
          let val (subproof_steps, cx') = parse_proof_step cx subproof_step in
            apfst (apfst (curry (op @) subproof_steps)) (parse_subproof cx' id_of_father_step l)
          end
      | parse_subproof cx _ l = (([], cx), l)
    fun skip_args (SMTLIB.Key "args" :: SMTLIB.S _ :: l) = l
      | skip_args l = l
    fun parse_conclusion (SMTLIB.Key "conclusion" :: SMTLIB.S concl :: []) = concl
    (* A clausified conclusion arrives as several formulas: disjoin them. *)
    fun make_or_from_clausification l =
      foldl1 (fn ((concl1, bounds1), (concl2, bounds2)) =>
        (HOLogic.mk_disj (perhaps (try HOLogic.dest_Trueprop) concl1,
           perhaps (try HOLogic.dest_Trueprop) concl2), bounds1 @ bounds2)) l
    fun to_node (((((id, rule), prems), concl), bounds), cx) =
      (mk_node id rule (the_default [] prems) concl bounds, cx)
  in
    get_id
    ##> parse_rule
    #> rotate_pair
    ##> parse_source
    #> rotate_pair
    ##> skip_args
    #> (fn (((id, rule), prems), sub) => (((id, rule), prems), parse_subproof cx id sub))
    #> rotate_pair
    ##> parse_conclusion
    ##> map repair_quantification
    #> (fn ((((id, rule), prems), (subproof, cx)), terms) =>
         (((((id, rule), prems), subproof), fold_map (fn t => fn cx => node_of t cx) terms cx)))
    ##> apfst (fn [] => (@{const False}, []) | concls => make_or_from_clausification concls)
    #> fix_subproof_steps
    ##> to_node
    #> (fn (subproof, (step, cx)) => (subproof @ [step], cx))
    #-> fold_map update_step_and_cx
  end

(*subproofs are written on multiple lines: SMTLIB cannot parse them, because parentheses are
unbalanced on each line*)
fun separate_into_steps lines =
  let
    fun count ("(" :: l) n = count l (n + 1)
      | count (")" :: l) n = count l (n - 1)
      | count (_ :: l) n = count l n
      | count [] n = n
    fun separate (line :: l) actual_lines m =
          let val n = count (raw_explode line) 0 in
            if m + n = 0 then
              [actual_lines ^ line] :: separate l "" 0
            else
              separate l (actual_lines ^ line) (m + n)
          end
      | separate [] _ 0 = []
  in
    separate lines "" 0
  end

(* VeriT adds "@" before every variable. *)
fun remove_all_at (SMTLIB.Sym v :: l) =
      SMTLIB.Sym (perhaps (try (unprefix "@")) v) :: remove_all_at l
  | remove_all_at (SMTLIB.S l :: l') = SMTLIB.S (remove_all_at l) :: remove_all_at l'
  | remove_all_at (SMTLIB.Key v :: l) = SMTLIB.Key v :: remove_all_at l
  | remove_all_at (v :: l) = v :: remove_all_at l
  | remove_all_at [] = []

fun find_in_which_step_defined var (VeriT_Node {id, bounds, ...} :: l) =
      (case List.find (fn v => String.isPrefix v var) bounds of
        NONE => find_in_which_step_defined var l
      | SOME _ => id)
  | find_in_which_step_defined var _ = raise Fail ("undefined " ^ var)

(*Yes, every case is possible: the introduced var is not on a specific side of the equality sign.*)
fun find_ite_var_in_term (Const (@{const_name If}, _) $ _ $
      (Const (@{const_name HOL.eq}, _) $ Free (var1, _) $ Free (var2, _)) $
      (Const (@{const_name HOL.eq}, _) $ Free (var3, _) $ Free (var4, _))) =
      let
        fun get_number_of_ite_transformed_var var =
          perhaps (try (unprefix "ite")) var
          |> Int.fromString
        fun is_equal_and_has_correct_substring var var' var'' =
          if var = var' andalso String.isPrefix "ite" var then SOME var'
          else if var = var'' andalso String.isPrefix "ite" var then SOME var'' else NONE
        val var1_introduced_var = is_equal_and_has_correct_substring var1 var3 var4
        val var2_introduced_var = is_equal_and_has_correct_substring var3 var1 var2
      in
        (case (var1_introduced_var, var2_introduced_var) of
          (SOME a, SOME b) =>
            (*ill-generated case, might be possible when applying the rule to "max a a". Only if
              the variable has been introduced before. Probably an impossible edge case.*)
            (case (get_number_of_ite_transformed_var a, get_number_of_ite_transformed_var b) of
              (SOME a, SOME b) => if a < b then var2_introduced_var else var1_introduced_var
              (*Otherwise, there is a name clash between a parameter name and the introduced
                variable, or the naming convention has been changed.*)
            | (NONE, SOME _) => var2_introduced_var
            | (SOME _, NONE) => var2_introduced_var
              (*BUG FIX: this arm was missing, making the match nonexhaustive — a non-numeric
                "ite…" suffix on both sides raised Match. Conservatively report no match.*)
            | (NONE, NONE) => NONE)
        | (_, SOME _) => var2_introduced_var
        | (SOME _, _) => var1_introduced_var)
      end
  | find_ite_var_in_term (Const (@{const_name If}, _) $ _ $
      (Const (@{const_name HOL.eq}, _) $ Free (var, _) $ _) $
      (Const (@{const_name HOL.eq}, _) $ Free (var', _) $ _)) =
      if var = var' then SOME var else NONE
  | find_ite_var_in_term (Const (@{const_name If}, _) $ _ $
      (Const (@{const_name HOL.eq}, _) $ _ $ Free (var, _)) $
      (Const (@{const_name HOL.eq}, _) $ _ $ Free (var', _))) =
      if var = var' then SOME var else NONE
  | find_ite_var_in_term (p $ q) =
      (case find_ite_var_in_term p of
        NONE => find_ite_var_in_term q
      | x => x)
  | find_ite_var_in_term (Abs (_, _, body)) = find_ite_var_in_term body
  | find_ite_var_in_term _ = NONE

(* Repair tmp_ite_elim steps: either add a dependency on the step that
   introduced the ite variable, or free the newly created variables. *)
fun correct_veriT_step steps (node as VeriT_Node {id, rule, prems, concl, bounds}) =
  if rule = veriT_tmp_ite_elim_rule then
    if bounds = [] then
      (*if the introduced var has already been defined, adding the definition as a dependency*)
      let
        val new_prems = prems
          |> (case find_ite_var_in_term concl of
               NONE => I
             | SOME var => cons (find_in_which_step_defined var steps))
      in
        VeriT_Node {id = id, rule = rule, prems = new_prems, concl = concl, bounds = bounds}
      end
    else
      (*some new variables are created*)
      let val concl' = replace_bound_var_by_free_var concl bounds in
        mk_node id rule prems concl' []
      end
  else
    node

(* Drop tmp_alphaconv steps, redirecting references to them to their
   (transitively resolved) predecessor. *)
fun remove_alpha_conversion _ [] = []
  | remove_alpha_conversion replace_table (VeriT_Node {id, rule, prems, concl, bounds} :: steps) =
      let
        val correct_dependency = map (perhaps (Symtab.lookup replace_table))
        val find_predecessor = perhaps (Symtab.lookup replace_table)
      in
        if rule = veriT_tmp_alphaconv_rule then
          remove_alpha_conversion (Symtab.update (id, find_predecessor (hd prems))
            replace_table) steps
        else
          VeriT_Node {id = id, rule = rule, prems = correct_dependency prems,
            concl = concl, bounds = bounds} :: remove_alpha_conversion replace_table steps
      end

fun correct_veriT_steps steps =
  steps
  |> map (correct_veriT_step steps)
  |> remove_alpha_conversion Symtab.empty

(* Entry point: split the raw output into balanced steps, strip "@"
   prefixes, parse and repair, and export the nodes as steps. *)
fun parse typs funs lines ctxt =
  let
    val smtlib_lines_without_at = remove_all_at (map SMTLIB.parse (separate_into_steps lines))
    val (u, env) = apfst flat (fold_map (fn l => fn cx => parse_proof_step cx l)
      smtlib_lines_without_at (empty_context ctxt typs funs))
    val t = correct_veriT_steps u
    fun node_to_step (VeriT_Node {id, rule, prems, concl, bounds, ...}) =
      mk_step id rule prems concl bounds
  in
    (map node_to_step t, ctxt_of env)
  end

end;
(*  Title:      HOL/Tools/SMT/verit_proof_parse.ML
    Author:     Mathias Fleury, TU Muenchen
    Author:     Jasmin Blanchette, TU Muenchen

VeriT proof parsing.
*)

signature VERIT_PROOF_PARSE =
sig
  type ('a, 'b) atp_step = ('a, 'b) ATP_Proof.atp_step
  val parse_proof: SMT_Translate.replay_data ->
    ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list ->
    SMT_Solver.parsed_proof
end;

structure VeriT_Proof_Parse: VERIT_PROOF_PARSE =
struct

open ATP_Util
open ATP_Problem
open ATP_Proof
open ATP_Proof_Reconstruct
open VeriT_Isar
open VeriT_Proof

(* Collect the indices of the asserted facts a step depends on. *)
fun add_used_asserts_in_step (VeriT_Proof.VeriT_Step {prems, ...}) =
  union (op =) (map_filter (try SMTLIB_Interface.assert_index_of_name) prems)

(* Parse veriT's output into a parsed_proof: reconstruct input steps for
   the used assumptions, then map everything to a generic ATP proof. *)
fun parse_proof
    ({context = ctxt, typs, terms, ll_defs, rewrite_rules, assms} : SMT_Translate.replay_data)
    xfacts prems concl output =
  let
    val num_ll_defs = length ll_defs

    (* Assertion ids are shifted by the number of lambda-lifting defs. *)
    val id_of_index = Integer.add num_ll_defs
    val index_of_id = Integer.add (~ num_ll_defs)

    fun step_of_assume j (_, th) =
      VeriT_Proof.VeriT_Step {id = SMTLIB_Interface.assert_name_of_index (id_of_index j),
        rule = veriT_input_rule, prems = [], concl = Thm.prop_of th, fixes = []}

    val (actual_steps, _) = VeriT_Proof.parse typs terms output ctxt
    val used_assert_ids = fold add_used_asserts_in_step actual_steps []
    val used_assm_js =
      map_filter (fn id => let val i = index_of_id id in if i >= 0 then SOME i else NONE end)
        used_assert_ids
    val used_assms = map (nth assms) used_assm_js
    val assm_steps = map2 step_of_assume used_assm_js used_assms
    val steps = assm_steps @ actual_steps

    (* Layout of the assertion indices: conjecture, premises, facts, helpers. *)
    val conjecture_i = 0
    val prems_i = conjecture_i + 1
    val num_prems = length prems
    val facts_i = prems_i + num_prems
    val num_facts = length xfacts
    val helpers_i = facts_i + num_facts

    val conjecture_id = id_of_index conjecture_i
    val prem_ids = map id_of_index (prems_i upto prems_i + num_prems - 1)
    val fact_ids' =
      map_filter (fn j =>
        let val (i, _) = nth assms j in
          try (apsnd (nth xfacts)) (id_of_index j, i - facts_i)
        end) used_assm_js
    val helper_ids' = filter (fn (i, _) => i >= helpers_i) used_assms

    val fact_helper_ts =
      map (fn (_, th) => (ATP_Util.short_thm_name ctxt th, Thm.prop_of th)) helper_ids' @
      map (fn (_, ((s, _), th)) => (s, Thm.prop_of th)) fact_ids'
    val fact_helper_ids' =
      map (apsnd (ATP_Util.short_thm_name ctxt)) helper_ids' @ map (apsnd (fst o fst)) fact_ids'
  in
    {outcome = NONE, fact_ids = SOME fact_ids',
     atp_proof = fn () => atp_proof_of_veriT_proof ctxt ll_defs rewrite_rules prems concl
       fact_helper_ts prem_ids conjecture_id fact_helper_ids' steps}
  end

end;

(*  Title:      HOL/Tools/SMT/z3_interface.ML
    Author:     Sascha Boehme, TU Muenchen

Interface to Z3 based on a relaxed version of SMT-LIB.
*)

signature Z3_INTERFACE =
sig
  val smtlib_z3C: SMT_Util.class

  datatype sym = Sym of string * sym list
  type mk_builtins = {
    mk_builtin_typ: sym -> typ option,
    mk_builtin_num: theory -> int -> typ -> cterm option,
    mk_builtin_fun: theory -> sym -> cterm list -> cterm option }
  val add_mk_builtins: mk_builtins -> Context.generic -> Context.generic
  val mk_builtin_typ: Proof.context -> sym -> typ option
  val mk_builtin_num: Proof.context -> int -> typ -> cterm option
  val mk_builtin_fun: Proof.context -> sym -> cterm list -> cterm option

  val is_builtin_theory_term: Proof.context -> term -> bool
end;

structure Z3_Interface: Z3_INTERFACE =
struct

val smtlib_z3C = SMTLIB_Interface.smtlibC @ ["z3"]


(* interface *)

local
  fun translate_config ctxt =
    {logic = K "", fp_kinds = [BNF_Util.Least_FP],
     serialize = #serialize (SMTLIB_Interface.translate_config ctxt)}

  fun is_div_mod @{const divide (int)} = true
    | is_div_mod @{const modulo (int)} = true
    | is_div_mod _ = false

  val have_int_div_mod = exists (Term.exists_subterm is_div_mod o Thm.prop_of)

  fun add_div_mod _ (thms, extra_thms)
= + if have_int_div_mod thms orelse have_int_div_mod extra_thms then + (thms, @{thms div_as_z3div mod_as_z3mod} @ extra_thms) + else (thms, extra_thms) + + val setup_builtins = + SMT_Builtin.add_builtin_fun' smtlib_z3C (@{const times (int)}, "*") #> + SMT_Builtin.add_builtin_fun' smtlib_z3C (@{const z3div}, "div") #> + SMT_Builtin.add_builtin_fun' smtlib_z3C (@{const z3mod}, "mod") +in + +val _ = Theory.setup (Context.theory_map ( + setup_builtins #> + SMT_Normalize.add_extra_norm (smtlib_z3C, add_div_mod) #> + SMT_Translate.add_config (smtlib_z3C, translate_config))) + +end + + +(* constructors *) + +datatype sym = Sym of string * sym list + + +(** additional constructors **) + +type mk_builtins = { + mk_builtin_typ: sym -> typ option, + mk_builtin_num: theory -> int -> typ -> cterm option, + mk_builtin_fun: theory -> sym -> cterm list -> cterm option } + +fun chained _ [] = NONE + | chained f (b :: bs) = (case f b of SOME y => SOME y | NONE => chained f bs) + +fun chained_mk_builtin_typ bs sym = + chained (fn {mk_builtin_typ=mk, ...} : mk_builtins => mk sym) bs + +fun chained_mk_builtin_num ctxt bs i T = + let val thy = Proof_Context.theory_of ctxt + in chained (fn {mk_builtin_num=mk, ...} : mk_builtins => mk thy i T) bs end + +fun chained_mk_builtin_fun ctxt bs s cts = + let val thy = Proof_Context.theory_of ctxt + in chained (fn {mk_builtin_fun=mk, ...} : mk_builtins => mk thy s cts) bs end + +fun fst_int_ord ((i1, _), (i2, _)) = int_ord (i1, i2) + +structure Mk_Builtins = Generic_Data +( + type T = (int * mk_builtins) list + val empty = [] + val extend = I + fun merge data = Ord_List.merge fst_int_ord data +) + +fun add_mk_builtins mk = Mk_Builtins.map (Ord_List.insert fst_int_ord (serial (), mk)) + +fun get_mk_builtins ctxt = map snd (Mk_Builtins.get (Context.Proof ctxt)) + + +(** basic and additional constructors **) + +fun mk_builtin_typ _ (Sym ("Bool", _)) = SOME @{typ bool} + | mk_builtin_typ _ (Sym ("Int", _)) = SOME @{typ int} + | mk_builtin_typ _ (Sym 
("bool", _)) = SOME @{typ bool} (*FIXME: legacy*) + | mk_builtin_typ _ (Sym ("int", _)) = SOME @{typ int} (*FIXME: legacy*) + | mk_builtin_typ ctxt sym = chained_mk_builtin_typ (get_mk_builtins ctxt) sym + +fun mk_builtin_num _ i @{typ int} = SOME (Numeral.mk_cnumber @{ctyp int} i) + | mk_builtin_num ctxt i T = + chained_mk_builtin_num ctxt (get_mk_builtins ctxt) i T + +val mk_true = Thm.cterm_of @{context} (@{const Not} $ @{const False}) +val mk_false = Thm.cterm_of @{context} @{const False} +val mk_not = Thm.apply (Thm.cterm_of @{context} @{const Not}) +val mk_implies = Thm.mk_binop (Thm.cterm_of @{context} @{const HOL.implies}) +val mk_iff = Thm.mk_binop (Thm.cterm_of @{context} @{const HOL.eq (bool)}) +val conj = Thm.cterm_of @{context} @{const HOL.conj} +val disj = Thm.cterm_of @{context} @{const HOL.disj} + +fun mk_nary _ cu [] = cu + | mk_nary ct _ cts = uncurry (fold_rev (Thm.mk_binop ct)) (split_last cts) + +val eq = SMT_Util.mk_const_pat @{theory} @{const_name HOL.eq} SMT_Util.destT1 +fun mk_eq ct cu = Thm.mk_binop (SMT_Util.instT' ct eq) ct cu + +val if_term = + SMT_Util.mk_const_pat @{theory} @{const_name If} (SMT_Util.destT1 o SMT_Util.destT2) +fun mk_if cc ct = Thm.mk_binop (Thm.apply (SMT_Util.instT' ct if_term) cc) ct + +val access = SMT_Util.mk_const_pat @{theory} @{const_name fun_app} SMT_Util.destT1 +fun mk_access array = Thm.apply (SMT_Util.instT' array access) array + +val update = + SMT_Util.mk_const_pat @{theory} @{const_name fun_upd} (Thm.dest_ctyp o SMT_Util.destT1) +fun mk_update array index value = + let val cTs = Thm.dest_ctyp (Thm.ctyp_of_cterm array) + in Thm.apply (Thm.mk_binop (SMT_Util.instTs cTs update) array index) value end + +val mk_uminus = Thm.apply (Thm.cterm_of @{context} @{const uminus (int)}) +val add = Thm.cterm_of @{context} @{const plus (int)} +val int0 = Numeral.mk_cnumber @{ctyp int} 0 +val mk_sub = Thm.mk_binop (Thm.cterm_of @{context} @{const minus (int)}) +val mk_mul = Thm.mk_binop (Thm.cterm_of @{context} @{const 
times (int)}) +val mk_div = Thm.mk_binop (Thm.cterm_of @{context} @{const z3div}) +val mk_mod = Thm.mk_binop (Thm.cterm_of @{context} @{const z3mod}) +val mk_lt = Thm.mk_binop (Thm.cterm_of @{context} @{const less (int)}) +val mk_le = Thm.mk_binop (Thm.cterm_of @{context} @{const less_eq (int)}) + +fun mk_builtin_fun ctxt sym cts = + (case (sym, cts) of + (Sym ("true", _), []) => SOME mk_true + | (Sym ("false", _), []) => SOME mk_false + | (Sym ("not", _), [ct]) => SOME (mk_not ct) + | (Sym ("and", _), _) => SOME (mk_nary conj mk_true cts) + | (Sym ("or", _), _) => SOME (mk_nary disj mk_false cts) + | (Sym ("implies", _), [ct, cu]) => SOME (mk_implies ct cu) + | (Sym ("iff", _), [ct, cu]) => SOME (mk_iff ct cu) + | (Sym ("~", _), [ct, cu]) => SOME (mk_iff ct cu) + | (Sym ("xor", _), [ct, cu]) => SOME (mk_not (mk_iff ct cu)) + | (Sym ("if", _), [ct1, ct2, ct3]) => SOME (mk_if ct1 ct2 ct3) + | (Sym ("ite", _), [ct1, ct2, ct3]) => SOME (mk_if ct1 ct2 ct3) (* FIXME: remove *) + | (Sym ("=", _), [ct, cu]) => SOME (mk_eq ct cu) + | (Sym ("select", _), [ca, ck]) => SOME (Thm.apply (mk_access ca) ck) + | (Sym ("store", _), [ca, ck, cv]) => SOME (mk_update ca ck cv) + | _ => + (case (sym, try (Thm.typ_of_cterm o hd) cts, cts) of + (Sym ("+", _), SOME @{typ int}, _) => SOME (mk_nary add int0 cts) + | (Sym ("-", _), SOME @{typ int}, [ct]) => SOME (mk_uminus ct) + | (Sym ("-", _), SOME @{typ int}, [ct, cu]) => SOME (mk_sub ct cu) + | (Sym ("*", _), SOME @{typ int}, [ct, cu]) => SOME (mk_mul ct cu) + | (Sym ("div", _), SOME @{typ int}, [ct, cu]) => SOME (mk_div ct cu) + | (Sym ("mod", _), SOME @{typ int}, [ct, cu]) => SOME (mk_mod ct cu) + | (Sym ("<", _), SOME @{typ int}, [ct, cu]) => SOME (mk_lt ct cu) + | (Sym ("<=", _), SOME @{typ int}, [ct, cu]) => SOME (mk_le ct cu) + | (Sym (">", _), SOME @{typ int}, [ct, cu]) => SOME (mk_lt cu ct) + | (Sym (">=", _), SOME @{typ int}, [ct, cu]) => SOME (mk_le cu ct) + | _ => chained_mk_builtin_fun ctxt (get_mk_builtins ctxt) sym cts)) + 
+ +(* abstraction *) + +fun is_builtin_theory_term ctxt t = + if SMT_Builtin.is_builtin_num ctxt t then true + else + (case Term.strip_comb t of + (Const c, ts) => SMT_Builtin.is_builtin_fun ctxt c ts + | _ => false) + +end; diff --git a/src/main/SMT/z3_isar.ML b/src/main/SMT/z3_isar.ML new file mode 100644 index 0000000..5b73931 --- /dev/null +++ b/src/main/SMT/z3_isar.ML @@ -0,0 +1,120 @@ +(* Title: HOL/Tools/SMT/z3_isar.ML + Author: Jasmin Blanchette, TU Muenchen + +Z3 proofs as generic ATP proofs for Isar proof reconstruction. +*) + +signature Z3_ISAR = +sig + val atp_proof_of_z3_proof: Proof.context -> term list -> thm list -> term list -> term -> + (string * term) list -> int list -> int -> (int * string) list -> Z3_Proof.z3_step list -> + (term, string) ATP_Proof.atp_step list +end; + +structure Z3_Isar: Z3_ISAR = +struct + +open ATP_Util +open ATP_Problem +open ATP_Proof +open ATP_Proof_Reconstruct +open SMTLIB_Isar + +val z3_apply_def_rule = Z3_Proof.string_of_rule Z3_Proof.Apply_Def +val z3_hypothesis_rule = Z3_Proof.string_of_rule Z3_Proof.Hypothesis +val z3_intro_def_rule = Z3_Proof.string_of_rule Z3_Proof.Intro_Def +val z3_lemma_rule = Z3_Proof.string_of_rule Z3_Proof.Lemma + +fun inline_z3_defs _ [] = [] + | inline_z3_defs defs ((name, role, t, rule, deps) :: lines) = + if rule = z3_intro_def_rule then + let val def = t |> HOLogic.dest_Trueprop |> HOLogic.dest_eq |> swap in + inline_z3_defs (insert (op =) def defs) + (map (replace_dependencies_in_line (name, [])) lines) + end + else if rule = z3_apply_def_rule then + inline_z3_defs defs (map (replace_dependencies_in_line (name, [])) lines) + else + (name, role, Term.subst_atomic defs t, rule, deps) :: inline_z3_defs defs lines + +fun add_z3_hypotheses [] = I + | add_z3_hypotheses hyps = + HOLogic.dest_Trueprop + #> curry s_imp (Library.foldr1 s_conj (map HOLogic.dest_Trueprop hyps)) + #> HOLogic.mk_Trueprop + +fun inline_z3_hypotheses _ _ [] = [] + | inline_z3_hypotheses hyp_names hyps ((name, role, 
t, rule, deps) :: lines) = + if rule = z3_hypothesis_rule then + inline_z3_hypotheses (name :: hyp_names) (AList.map_default (op =) (t, []) (cons name) hyps) + lines + else + let val deps' = subtract (op =) hyp_names deps in + if rule = z3_lemma_rule then + (name, role, t, rule, deps') :: inline_z3_hypotheses hyp_names hyps lines + else + let + val add_hyps = filter_out (null o inter (op =) deps o snd) hyps + val t' = add_z3_hypotheses (map fst add_hyps) t + val hyps' = fold (AList.update (op =) o apsnd (insert (op =) name)) add_hyps hyps + in + (name, role, t', rule, deps') :: inline_z3_hypotheses hyp_names hyps' lines + end + end + +fun dest_alls (Const (@{const_name Pure.all}, _) $ Abs (abs as (_, T, _))) = + let val (s', t') = Term.dest_abs abs in + dest_alls t' |>> cons (s', T) + end + | dest_alls t = ([], t) + +val reorder_foralls = + dest_alls + #>> sort_by fst + #-> fold_rev (Logic.all o Free); + +fun atp_proof_of_z3_proof ctxt ll_defs rewrite_rules hyp_ts concl_t fact_helper_ts prem_ids + conjecture_id fact_helper_ids proof = + let + fun steps_of (Z3_Proof.Z3_Step {id, rule, prems, concl, ...}) = + let + val sid = string_of_int id + + val concl' = concl + |> reorder_foralls (* crucial for skolemization steps *) + |> postprocess_step_conclusion ctxt rewrite_rules ll_defs + fun standard_step role = + ((sid, []), role, concl', Z3_Proof.string_of_rule rule, + map (fn id => (string_of_int id, [])) prems) + in + (case rule of + Z3_Proof.Asserted => + let val ss = the_list (AList.lookup (op =) fact_helper_ids id) in + (case distinguish_conjecture_and_hypothesis ss id conjecture_id prem_ids fact_helper_ts + hyp_ts concl_t of + NONE => [] + | SOME (role0, concl00) => + let + val name0 = (sid ^ "a", ss) + val concl0 = unskolemize_names ctxt concl00 + in + (if role0 = Axiom then [] + else [(name0, role0, concl0, Z3_Proof.string_of_rule rule, [])]) @ + [((sid, []), Plain, concl', Z3_Proof.string_of_rule Z3_Proof.Rewrite, + name0 :: normalizing_prems ctxt concl0)] + 
end) + end + | Z3_Proof.Rewrite => [standard_step Lemma] + | Z3_Proof.Rewrite_Star => [standard_step Lemma] + | Z3_Proof.Skolemize => [standard_step Lemma] + | Z3_Proof.Th_Lemma _ => [standard_step Lemma] + | _ => [standard_step Plain]) + end + in + proof + |> maps steps_of + |> inline_z3_defs [] + |> inline_z3_hypotheses [] [] + end + +end; diff --git a/src/main/SMT/z3_proof.ML b/src/main/SMT/z3_proof.ML new file mode 100644 index 0000000..2c3ab4e --- /dev/null +++ b/src/main/SMT/z3_proof.ML @@ -0,0 +1,303 @@ +(* Title: HOL/Tools/SMT/z3_proof.ML + Author: Sascha Boehme, TU Muenchen + +Z3 proofs: parsing and abstract syntax tree. +*) + +signature Z3_PROOF = +sig + (*proof rules*) + datatype z3_rule = + True_Axiom | Asserted | Goal | Modus_Ponens | Reflexivity | Symmetry | Transitivity | + Transitivity_Star | Monotonicity | Quant_Intro | Distributivity | And_Elim | Not_Or_Elim | + Rewrite | Rewrite_Star | Pull_Quant | Pull_Quant_Star | Push_Quant | Elim_Unused_Vars | + Dest_Eq_Res | Quant_Inst | Hypothesis | Lemma | Unit_Resolution | Iff_True | Iff_False | + Commutativity | Def_Axiom | Intro_Def | Apply_Def | Iff_Oeq | Nnf_Pos | Nnf_Neg | Nnf_Star | + Cnf_Star | Skolemize | Modus_Ponens_Oeq | Th_Lemma of string + + val is_assumption: z3_rule -> bool + val string_of_rule: z3_rule -> string + + (*proofs*) + datatype z3_step = Z3_Step of { + id: int, + rule: z3_rule, + prems: int list, + concl: term, + fixes: string list, + is_fix_step: bool} + + (*proof parser*) + val parse: typ Symtab.table -> term Symtab.table -> string list -> + Proof.context -> z3_step list * Proof.context +end; + +structure Z3_Proof: Z3_PROOF = +struct + +open SMTLIB_Proof + + +(* proof rules *) + +datatype z3_rule = + True_Axiom | Asserted | Goal | Modus_Ponens | Reflexivity | Symmetry | Transitivity | + Transitivity_Star | Monotonicity | Quant_Intro | Distributivity | And_Elim | Not_Or_Elim | + Rewrite | Rewrite_Star | Pull_Quant | Pull_Quant_Star | Push_Quant | Elim_Unused_Vars | + Dest_Eq_Res 
| Quant_Inst | Hypothesis | Lemma | Unit_Resolution | Iff_True | Iff_False | + Commutativity | Def_Axiom | Intro_Def | Apply_Def | Iff_Oeq | Nnf_Pos | Nnf_Neg | Nnf_Star | + Cnf_Star | Skolemize | Modus_Ponens_Oeq | Th_Lemma of string + (* some proof rules include further information that is currently dropped by the parser *) + +val rule_names = Symtab.make [ + ("true-axiom", True_Axiom), + ("asserted", Asserted), + ("goal", Goal), + ("mp", Modus_Ponens), + ("refl", Reflexivity), + ("symm", Symmetry), + ("trans", Transitivity), + ("trans*", Transitivity_Star), + ("monotonicity", Monotonicity), + ("quant-intro", Quant_Intro), + ("distributivity", Distributivity), + ("and-elim", And_Elim), + ("not-or-elim", Not_Or_Elim), + ("rewrite", Rewrite), + ("rewrite*", Rewrite_Star), + ("pull-quant", Pull_Quant), + ("pull-quant*", Pull_Quant_Star), + ("push-quant", Push_Quant), + ("elim-unused", Elim_Unused_Vars), + ("der", Dest_Eq_Res), + ("quant-inst", Quant_Inst), + ("hypothesis", Hypothesis), + ("lemma", Lemma), + ("unit-resolution", Unit_Resolution), + ("iff-true", Iff_True), + ("iff-false", Iff_False), + ("commutativity", Commutativity), + ("def-axiom", Def_Axiom), + ("intro-def", Intro_Def), + ("apply-def", Apply_Def), + ("iff~", Iff_Oeq), + ("nnf-pos", Nnf_Pos), + ("nnf-neg", Nnf_Neg), + ("nnf*", Nnf_Star), + ("cnf*", Cnf_Star), + ("sk", Skolemize), + ("mp~", Modus_Ponens_Oeq)] + +fun is_assumption Asserted = true + | is_assumption Goal = true + | is_assumption Hypothesis = true + | is_assumption Intro_Def = true + | is_assumption Skolemize = true + | is_assumption _ = false + +fun rule_of_string name = + (case Symtab.lookup rule_names name of + SOME rule => rule + | NONE => error ("unknown Z3 proof rule " ^ quote name)) + +fun string_of_rule (Th_Lemma kind) = "th-lemma" ^ (if kind = "" then "" else " " ^ kind) + | string_of_rule r = + let fun eq_rule (s, r') = if r = r' then SOME s else NONE + in the (Symtab.get_first eq_rule rule_names) end + + +(* proofs *) + 
+datatype z3_node = Z3_Node of { + id: int, + rule: z3_rule, + prems: z3_node list, + concl: term, + bounds: string list} + +fun mk_node id rule prems concl bounds = + Z3_Node {id = id, rule = rule, prems = prems, concl = concl, bounds = bounds} + +datatype z3_step = Z3_Step of { + id: int, + rule: z3_rule, + prems: int list, + concl: term, + fixes: string list, + is_fix_step: bool} + +fun mk_step id rule prems concl fixes is_fix_step = + Z3_Step {id = id, rule = rule, prems = prems, concl = concl, fixes = fixes, + is_fix_step = is_fix_step} + + +(* proof parser *) + +fun rule_of (SMTLIB.Sym name) = rule_of_string name + | rule_of (SMTLIB.S (SMTLIB.Sym "_" :: SMTLIB.Sym name :: args)) = + (case (name, args) of + ("th-lemma", SMTLIB.Sym kind :: _) => Th_Lemma kind + | _ => rule_of_string name) + | rule_of r = raise SMTLIB_PARSE ("bad Z3 proof rule format", r) + +fun node_of p cx = + (case p of + SMTLIB.Sym name => + (case lookup_binding cx name of + Proof node => (node, cx) + | Tree p' => + cx + |> node_of p' + |-> (fn node => pair node o update_binding (name, Proof node)) + | _ => raise SMTLIB_PARSE ("bad Z3 proof format", p)) + | SMTLIB.S [SMTLIB.Sym "let", SMTLIB.S bindings, p] => + with_bindings (map dest_binding bindings) (node_of p) cx + | SMTLIB.S (name :: parts) => + let + val (ps, p) = split_last parts + val r = rule_of name + in + cx + |> fold_map node_of ps + ||>> `(with_fresh_names (term_of p)) + ||>> next_id + |>> (fn ((prems, (t, ns)), id) => mk_node id r prems t ns) + end + | _ => raise SMTLIB_PARSE ("bad Z3 proof format", p)) + +fun dest_name (SMTLIB.Sym name) = name + | dest_name t = raise SMTLIB_PARSE ("bad name", t) + +fun dest_seq (SMTLIB.S ts) = ts + | dest_seq t = raise SMTLIB_PARSE ("bad Z3 proof format", t) + +fun parse' (SMTLIB.S (SMTLIB.Sym "set-logic" :: _) :: ts) cx = parse' ts cx + | parse' (SMTLIB.S [SMTLIB.Sym "declare-fun", n, tys, ty] :: ts) cx = + let + val name = dest_name n + val Ts = map (type_of cx) (dest_seq tys) + val T = 
type_of cx ty + in parse' ts (declare_fun name (Ts ---> T) cx) end + | parse' (SMTLIB.S [SMTLIB.Sym "proof", p] :: _) cx = node_of p cx + | parse' ts _ = raise SMTLIB_PARSE ("bad Z3 proof declarations", SMTLIB.S ts) + +fun parse_proof typs funs lines ctxt = + let + val ts = dest_seq (SMTLIB.parse lines) + val (node, cx) = parse' ts (empty_context ctxt typs funs) + in (node, ctxt_of cx) end + handle SMTLIB.PARSE (l, msg) => error ("parsing error at line " ^ string_of_int l ^ ": " ^ msg) + | SMTLIB_PARSE (msg, t) => error (msg ^ ": " ^ SMTLIB.str_of t) + + +(* handling of bound variables *) + +fun subst_of tyenv = + let fun add (ix, (S, T)) = cons (TVar (ix, S), T) + in Vartab.fold add tyenv [] end + +fun substTs_same subst = + let val applyT = Same.function (AList.lookup (op =) subst) + in Term_Subst.map_atypsT_same applyT end + +fun subst_types ctxt env bounds t = + let + val match = Sign.typ_match (Proof_Context.theory_of ctxt) + + val t' = singleton (Variable.polymorphic ctxt) t + val patTs = map snd (Term.strip_qnt_vars @{const_name Pure.all} t') + val objTs = map (the o Symtab.lookup env) bounds + val subst = subst_of (fold match (patTs ~~ objTs) Vartab.empty) + in Same.commit (Term_Subst.map_types_same (substTs_same subst)) t' end + +fun eq_quant (@{const_name HOL.All}, _) (@{const_name HOL.All}, _) = true + | eq_quant (@{const_name HOL.Ex}, _) (@{const_name HOL.Ex}, _) = true + | eq_quant _ _ = false + +fun opp_quant (@{const_name HOL.All}, _) (@{const_name HOL.Ex}, _) = true + | opp_quant (@{const_name HOL.Ex}, _) (@{const_name HOL.All}, _) = true + | opp_quant _ _ = false + +fun with_quant pred i (Const q1 $ Abs (_, T1, t1), Const q2 $ Abs (_, T2, t2)) = + if pred q1 q2 andalso T1 = T2 then + let val t = Var (("", i), T1) + in SOME (apply2 Term.subst_bound ((t, t1), (t, t2))) end + else NONE + | with_quant _ _ _ = NONE + +fun dest_quant_pair i (@{term HOL.Not} $ t1, t2) = + Option.map (apfst HOLogic.mk_not) (with_quant opp_quant i (t1, t2)) + | 
dest_quant_pair i (t1, t2) = with_quant eq_quant i (t1, t2) + +fun dest_quant i t = + (case dest_quant_pair i (HOLogic.dest_eq (HOLogic.dest_Trueprop t)) of + SOME (t1, t2) => HOLogic.mk_Trueprop (HOLogic.mk_eq (t1, t2)) + | NONE => raise TERM ("lift_quant", [t])) + +fun match_types ctxt pat obj = + (Vartab.empty, Vartab.empty) + |> Pattern.first_order_match (Proof_Context.theory_of ctxt) (pat, obj) + +fun strip_match ctxt pat i obj = + (case try (match_types ctxt pat) obj of + SOME (tyenv, _) => subst_of tyenv + | NONE => strip_match ctxt pat (i + 1) (dest_quant i obj)) + +fun dest_all i (Const (@{const_name Pure.all}, _) $ (a as Abs (_, T, _))) = + dest_all (i + 1) (Term.betapply (a, Var (("", i), T))) + | dest_all i t = (i, t) + +fun dest_alls t = dest_all (Term.maxidx_of_term t + 1) t + +fun match_rule ctxt env (Z3_Node {bounds = bs', concl = t', ...}) bs t = + let + val t'' = singleton (Variable.polymorphic ctxt) t' + val (i, obj) = dest_alls (subst_types ctxt env bs t) + in + (case try (strip_match ctxt (snd (dest_alls t'')) i) obj of + NONE => NONE + | SOME subst => + let + val applyT = Same.commit (substTs_same subst) + val patTs = map snd (Term.strip_qnt_vars @{const_name Pure.all} t'') + in SOME (Symtab.make (bs' ~~ map applyT patTs)) end) + end + + +(* linearizing proofs and resolving types of bound variables *) + +fun has_step (tab, _) = Inttab.defined tab + +fun add_step id rule bounds concl is_fix_step ids (tab, sts) = + let val step = mk_step id rule ids concl bounds is_fix_step + in (id, (Inttab.update (id, ()) tab, step :: sts)) end + +fun is_fix_rule rule prems = + member (op =) [Quant_Intro, Nnf_Pos, Nnf_Neg] rule andalso length prems = 1 + +fun lin_proof ctxt env (Z3_Node {id, rule, prems, concl, bounds}) steps = + if has_step steps id then (id, steps) + else + let + val t = subst_types ctxt env bounds concl + val add = add_step id rule bounds t + fun rec_apply e b = fold_map (lin_proof ctxt e) prems #-> add b + in + if is_fix_rule rule prems 
then + (case match_rule ctxt env (hd prems) bounds t of + NONE => rec_apply env false steps + | SOME env' => rec_apply env' true steps) + else rec_apply env false steps + end + +fun linearize ctxt node = + rev (snd (snd (lin_proof ctxt Symtab.empty node (Inttab.empty, [])))) + + +(* overall proof parser *) + +fun parse typs funs lines ctxt = + let val (node, ctxt') = parse_proof typs funs lines ctxt + in (linearize ctxt' node, ctxt') end + +end; diff --git a/src/main/SMT/z3_real.ML b/src/main/SMT/z3_real.ML new file mode 100644 index 0000000..15ef469 --- /dev/null +++ b/src/main/SMT/z3_real.ML @@ -0,0 +1,32 @@ +(* Title: HOL/Tools/SMT/z3_real.ML + Author: Sascha Boehme, TU Muenchen + +Z3 setup for reals. +*) + +structure Z3_Real: sig end = +struct + +fun real_type_parser (SMTLIB.Sym "Real", []) = SOME @{typ Real.real} + | real_type_parser _ = NONE + +fun real_term_parser (SMTLIB.Dec (i, 0), []) = SOME (HOLogic.mk_number @{typ Real.real} i) + | real_term_parser (SMTLIB.Sym "/", [t1, t2]) = + SOME (@{term "Rings.divide :: real => _"} $ t1 $ t2) + | real_term_parser (SMTLIB.Sym "to_real", [t]) = SOME (@{term "Int.of_int :: int => _"} $ t) + | real_term_parser _ = NONE + +fun abstract abs t = + (case t of + (c as @{term "Rings.divide :: real => _"}) $ t1 $ t2 => + abs t1 ##>> abs t2 #>> (fn (u1, u2) => SOME (c $ u1 $ u2)) + | (c as @{term "Int.of_int :: int => _"}) $ t => + abs t #>> (fn u => SOME (c $ u)) + | _ => pair NONE) + +val _ = Theory.setup (Context.theory_map ( + SMTLIB_Proof.add_type_parser real_type_parser #> + SMTLIB_Proof.add_term_parser real_term_parser #> + Z3_Replay_Methods.add_arith_abstracter abstract)) + +end; diff --git a/src/main/SMT/z3_replay.ML b/src/main/SMT/z3_replay.ML new file mode 100644 index 0000000..b9ecce8 --- /dev/null +++ b/src/main/SMT/z3_replay.ML @@ -0,0 +1,262 @@ +(* Title: HOL/Tools/SMT/z3_replay.ML + Author: Sascha Boehme, TU Muenchen + Author: Jasmin Blanchette, TU Muenchen + +Z3 proof parsing and replay. 
+*) + +signature Z3_REPLAY = +sig + val parse_proof: Proof.context -> SMT_Translate.replay_data -> + ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> + SMT_Solver.parsed_proof + val replay: Proof.context -> SMT_Translate.replay_data -> string list -> thm +end; + +structure Z3_Replay: Z3_REPLAY = +struct + +fun params_of t = Term.strip_qnt_vars @{const_name Pure.all} t + +fun varify ctxt thm = + let + val maxidx = Thm.maxidx_of thm + 1 + val vs = params_of (Thm.prop_of thm) + val vars = map_index (fn (i, (n, T)) => Var ((n, i + maxidx), T)) vs + in Drule.forall_elim_list (map (Thm.cterm_of ctxt) vars) thm end + +fun add_paramTs names t = + fold2 (fn n => fn (_, T) => AList.update (op =) (n, T)) names (params_of t) + +fun new_fixes ctxt nTs = + let + val (ns, ctxt') = Variable.variant_fixes (replicate (length nTs) "") ctxt + fun mk (n, T) n' = (n, Thm.cterm_of ctxt' (Free (n', T))) + in (ctxt', Symtab.make (map2 mk nTs ns)) end + +fun forall_elim_term ct (Const (@{const_name Pure.all}, _) $ (a as Abs _)) = + Term.betapply (a, Thm.term_of ct) + | forall_elim_term _ qt = raise TERM ("forall_elim'", [qt]) + +fun apply_fixes elim env = fold (elim o the o Symtab.lookup env) + +val apply_fixes_prem = uncurry o apply_fixes Thm.forall_elim +val apply_fixes_concl = apply_fixes forall_elim_term + +fun export_fixes env names = Drule.forall_intr_list (map (the o Symtab.lookup env) names) + +fun under_fixes f ctxt (prems, nthms) names concl = + let + val thms1 = map (varify ctxt) prems + val (ctxt', env) = + add_paramTs names concl [] + |> fold (uncurry add_paramTs o apsnd Thm.prop_of) nthms + |> new_fixes ctxt + val thms2 = map (apply_fixes_prem env) nthms + val t = apply_fixes_concl env names concl + in export_fixes env names (f ctxt' (thms1 @ thms2) t) end + +fun replay_thm ctxt assumed nthms (Z3_Proof.Z3_Step {id, rule, concl, fixes, is_fix_step, ...}) = + if Z3_Proof.is_assumption rule then + (case Inttab.lookup assumed id of + SOME 
(_, thm) => thm + | NONE => Thm.assume (Thm.cterm_of ctxt concl)) + else + under_fixes (Z3_Replay_Methods.method_for rule) ctxt + (if is_fix_step then (map snd nthms, []) else ([], nthms)) fixes concl + +fun replay_step ctxt assumed (step as Z3_Proof.Z3_Step {id, rule, prems, fixes, ...}) state = + let + val (proofs, stats) = state + val nthms = map (the o Inttab.lookup proofs) prems + val replay = Timing.timing (replay_thm ctxt assumed nthms) + val ({elapsed, ...}, thm) = + SMT_Config.with_time_limit ctxt SMT_Config.reconstruction_step_timeout replay step + handle Timeout.TIMEOUT _ => raise SMT_Failure.SMT SMT_Failure.Time_Out + val stats' = Symtab.cons_list (Z3_Proof.string_of_rule rule, Time.toMilliseconds elapsed) stats + in (Inttab.update (id, (fixes, thm)) proofs, stats') end + +local + val remove_trigger = mk_meta_eq @{thm trigger_def} + val remove_fun_app = mk_meta_eq @{thm fun_app_def} + + fun rewrite_conv _ [] = Conv.all_conv + | rewrite_conv ctxt eqs = Simplifier.full_rewrite (empty_simpset ctxt addsimps eqs) + + val rewrite_true_rule = @{lemma "True == ~ False" by simp} + val prep_rules = [@{thm Let_def}, remove_trigger, remove_fun_app, rewrite_true_rule] + + fun rewrite _ [] = I + | rewrite ctxt eqs = Conv.fconv_rule (rewrite_conv ctxt eqs) + + fun lookup_assm assms_net ct = + Z3_Replay_Util.net_instances assms_net ct + |> map (fn ithm as (_, thm) => (ithm, Thm.cprop_of thm aconvc ct)) +in + +fun add_asserted outer_ctxt rewrite_rules assms steps ctxt = + let + val eqs = map (rewrite ctxt [rewrite_true_rule]) rewrite_rules + val eqs' = union Thm.eq_thm eqs prep_rules + + val assms_net = + assms + |> map (apsnd (rewrite ctxt eqs')) + |> map (apsnd (Conv.fconv_rule Thm.eta_conversion)) + |> Z3_Replay_Util.thm_net_of snd + + fun revert_conv ctxt = rewrite_conv ctxt eqs' then_conv Thm.eta_conversion + + fun assume thm ctxt = + let + val ct = Thm.cprem_of thm 1 + val (thm', ctxt') = yield_singleton Assumption.add_assumes ct ctxt + in (thm' RS thm, ctxt') end 
+ + fun add1 id fixes thm1 ((i, th), exact) ((iidths, thms), (ctxt, ptab)) = + let + val (thm, ctxt') = if exact then (Thm.implies_elim thm1 th, ctxt) else assume thm1 ctxt + val thms' = if exact then thms else th :: thms + in (((i, (id, th)) :: iidths, thms'), (ctxt', Inttab.update (id, (fixes, thm)) ptab)) end + + fun add (Z3_Proof.Z3_Step {id, rule, concl, fixes, ...}) + (cx as ((iidths, thms), (ctxt, ptab))) = + if Z3_Proof.is_assumption rule andalso rule <> Z3_Proof.Hypothesis then + let + val ct = Thm.cterm_of ctxt concl + val thm1 = Thm.trivial ct |> Conv.fconv_rule (Conv.arg1_conv (revert_conv outer_ctxt)) + val thm2 = singleton (Variable.export ctxt outer_ctxt) thm1 + in + (case lookup_assm assms_net (Thm.cprem_of thm2 1) of + [] => + let val (thm, ctxt') = assume thm1 ctxt + in ((iidths, thms), (ctxt', Inttab.update (id, (fixes, thm)) ptab)) end + | ithms => fold (add1 id fixes thm1) ithms cx) + end + else + cx + in fold add steps (([], []), (ctxt, Inttab.empty)) end + +end + +(* |- (EX x. P x) = P c |- ~ (ALL x. P x) = ~ P c *) +local + val sk_rules = @{lemma + "c = (SOME x. P x) ==> (EX x. P x) = P c" + "c = (SOME x. ~ P x) ==> (~ (ALL x. 
P x)) = (~ P c)" + by (metis someI_ex)+} +in + +fun discharge_sk_tac ctxt i st = + (resolve_tac ctxt @{thms trans} i + THEN resolve_tac ctxt sk_rules i + THEN (resolve_tac ctxt @{thms refl} ORELSE' discharge_sk_tac ctxt) (i+1) + THEN resolve_tac ctxt @{thms refl} i) st + +end + +val true_thm = @{lemma "~False" by simp} +fun make_discharge_rules rules = rules @ [@{thm allI}, @{thm refl}, @{thm reflexive}, true_thm] + +val intro_def_rules = @{lemma + "(~ P | P) & (P | ~ P)" + "(P | ~ P) & (~ P | P)" + by fast+} + +fun discharge_assms_tac ctxt rules = + REPEAT + (HEADGOAL (resolve_tac ctxt (intro_def_rules @ rules) ORELSE' + SOLVED' (discharge_sk_tac ctxt))) + +fun discharge_assms ctxt rules thm = + (if Thm.nprems_of thm = 0 then + thm + else + (case Seq.pull (discharge_assms_tac ctxt rules thm) of + SOME (thm', _) => thm' + | NONE => raise THM ("failed to discharge premise", 1, [thm]))) + |> Goal.norm_result ctxt + +fun discharge rules outer_ctxt inner_ctxt = + singleton (Proof_Context.export inner_ctxt outer_ctxt) + #> discharge_assms outer_ctxt (make_discharge_rules rules) + +fun parse_proof outer_ctxt + ({context = ctxt, typs, terms, ll_defs, rewrite_rules, assms} : SMT_Translate.replay_data) + xfacts prems concl output = + let + val (steps, ctxt2) = Z3_Proof.parse typs terms output ctxt + val ((iidths, _), _) = add_asserted outer_ctxt rewrite_rules assms steps ctxt2 + + fun id_of_index i = the_default ~1 (Option.map fst (AList.lookup (op =) iidths i)) + + val conjecture_i = 0 + val prems_i = 1 + val facts_i = prems_i + length prems + + val conjecture_id = id_of_index conjecture_i + val prem_ids = map id_of_index (prems_i upto facts_i - 1) + val fact_ids' = + map_filter (fn (i, (id, _)) => try (apsnd (nth xfacts)) (id, i - facts_i)) iidths + val helper_ids' = map_filter (try (fn (~1, idth) => idth)) iidths + + val fact_helper_ts = + map (fn (_, th) => (ATP_Util.short_thm_name ctxt th, Thm.prop_of th)) helper_ids' @ + map (fn (_, ((s, _), th)) => (s, Thm.prop_of 
th)) fact_ids' + val fact_helper_ids' = + map (apsnd (ATP_Util.short_thm_name ctxt)) helper_ids' @ map (apsnd (fst o fst)) fact_ids' + in + {outcome = NONE, fact_ids = SOME fact_ids', + atp_proof = fn () => Z3_Isar.atp_proof_of_z3_proof ctxt ll_defs rewrite_rules prems concl + fact_helper_ts prem_ids conjecture_id fact_helper_ids' steps} + end + +fun intermediate_statistics ctxt start total = + SMT_Config.statistics_msg ctxt (fn current => + "Reconstructed " ^ string_of_int current ^ " of " ^ string_of_int total ^ " steps in " ^ + string_of_int (Time.toMilliseconds (#elapsed (Timing.result start))) ^ " ms") + +fun pretty_statistics total stats = + let + fun mean_of is = + let + val len = length is + val mid = len div 2 + in if len mod 2 = 0 then (nth is (mid - 1) + nth is mid) div 2 else nth is mid end + fun pretty_item name p = Pretty.item (Pretty.separate ":" [Pretty.str name, p]) + fun pretty (name, milliseconds) = pretty_item name (Pretty.block (Pretty.separate "," [ + Pretty.str (string_of_int (length milliseconds) ^ " occurrences") , + Pretty.str (string_of_int (mean_of milliseconds) ^ " ms mean time"), + Pretty.str (string_of_int (fold Integer.max milliseconds 0) ^ " ms maximum time"), + Pretty.str (string_of_int (fold Integer.add milliseconds 0) ^ " ms total time")])) + in + Pretty.big_list "Z3 proof reconstruction statistics:" ( + pretty_item "total time" (Pretty.str (string_of_int total ^ " ms")) :: + map pretty (Symtab.dest stats)) + end + +fun replay outer_ctxt + ({context = ctxt, typs, terms, rewrite_rules, assms, ...} : SMT_Translate.replay_data) output = + let + val (steps, ctxt2) = Z3_Proof.parse typs terms output ctxt + val ((_, rules), (ctxt3, assumed)) = add_asserted outer_ctxt rewrite_rules assms steps ctxt2 + val ctxt4 = + ctxt3 + |> put_simpset (Z3_Replay_Util.make_simpset ctxt3 []) + |> Config.put SAT.solver (Config.get ctxt3 SMT_Config.sat_solver) + val len = length steps + val start = Timing.start () + val print_runtime_statistics = 
intermediate_statistics ctxt4 start len + fun blockwise f (i, x) y = + (if i > 0 andalso i mod 100 = 0 then print_runtime_statistics i else (); f x y) + val (proofs, stats) = + fold_index (blockwise (replay_step ctxt4 assumed)) steps (assumed, Symtab.empty) + val _ = print_runtime_statistics len + val total = Time.toMilliseconds (#elapsed (Timing.result start)) + val (_, Z3_Proof.Z3_Step {id, ...}) = split_last steps + val _ = SMT_Config.statistics_msg ctxt4 (Pretty.string_of o pretty_statistics total) stats + in + Inttab.lookup proofs id |> the |> snd |> discharge rules outer_ctxt ctxt4 + end + +end; diff --git a/src/main/SMT/z3_replay_methods.ML b/src/main/SMT/z3_replay_methods.ML new file mode 100644 index 0000000..e4fbb73 --- /dev/null +++ b/src/main/SMT/z3_replay_methods.ML @@ -0,0 +1,685 @@ +(* Title: HOL/Tools/SMT/z3_replay_methods.ML + Author: Sascha Boehme, TU Muenchen + Author: Jasmin Blanchette, TU Muenchen + +Proof methods for replaying Z3 proofs. +*) + +signature Z3_REPLAY_METHODS = +sig + (*abstraction*) + type abs_context = int * term Termtab.table + type 'a abstracter = term -> abs_context -> 'a * abs_context + val add_arith_abstracter: (term abstracter -> term option abstracter) -> + Context.generic -> Context.generic + + (*theory lemma methods*) + type th_lemma_method = Proof.context -> thm list -> term -> thm + val add_th_lemma_method: string * th_lemma_method -> Context.generic -> + Context.generic + + (*methods for Z3 proof rules*) + type z3_method = Proof.context -> thm list -> term -> thm + val true_axiom: z3_method + val mp: z3_method + val refl: z3_method + val symm: z3_method + val trans: z3_method + val cong: z3_method + val quant_intro: z3_method + val distrib: z3_method + val and_elim: z3_method + val not_or_elim: z3_method + val rewrite: z3_method + val rewrite_star: z3_method + val pull_quant: z3_method + val push_quant: z3_method + val elim_unused: z3_method + val dest_eq_res: z3_method + val quant_inst: z3_method + val lemma: 
z3_method + val unit_res: z3_method + val iff_true: z3_method + val iff_false: z3_method + val comm: z3_method + val def_axiom: z3_method + val apply_def: z3_method + val iff_oeq: z3_method + val nnf_pos: z3_method + val nnf_neg: z3_method + val mp_oeq: z3_method + val th_lemma: string -> z3_method + val method_for: Z3_Proof.z3_rule -> z3_method +end; + +structure Z3_Replay_Methods: Z3_REPLAY_METHODS = +struct + +type z3_method = Proof.context -> thm list -> term -> thm + + +(* utility functions *) + +fun trace ctxt f = SMT_Config.trace_msg ctxt f () + +fun pretty_thm ctxt thm = Syntax.pretty_term ctxt (Thm.concl_of thm) + +fun pretty_goal ctxt msg rule thms t = + let + val full_msg = msg ^ ": " ^ quote (Z3_Proof.string_of_rule rule) + val assms = + if null thms then [] + else [Pretty.big_list "assumptions:" (map (pretty_thm ctxt) thms)] + val concl = Pretty.big_list "proposition:" [Syntax.pretty_term ctxt t] + in Pretty.big_list full_msg (assms @ [concl]) end + +fun replay_error ctxt msg rule thms t = error (Pretty.string_of (pretty_goal ctxt msg rule thms t)) + +fun replay_rule_error ctxt = replay_error ctxt "Failed to replay Z3 proof step" + +fun trace_goal ctxt rule thms t = + trace ctxt (fn () => Pretty.string_of (pretty_goal ctxt "Goal" rule thms t)) + +fun as_prop (t as Const (@{const_name Trueprop}, _) $ _) = t + | as_prop t = HOLogic.mk_Trueprop t + +fun dest_prop (Const (@{const_name Trueprop}, _) $ t) = t + | dest_prop t = t + +fun dest_thm thm = dest_prop (Thm.concl_of thm) + +fun certify_prop ctxt t = Thm.cterm_of ctxt (as_prop t) + +fun try_provers ctxt rule [] thms t = replay_rule_error ctxt rule thms t + | try_provers ctxt rule ((name, prover) :: named_provers) thms t = + (case (trace ctxt (K ("Trying prover " ^ quote name)); try prover t) of + SOME thm => thm + | NONE => try_provers ctxt rule named_provers thms t) + +fun match ctxt pat t = + (Vartab.empty, Vartab.empty) + |> Pattern.first_order_match (Proof_Context.theory_of ctxt) (pat, t) + +fun 
gen_certify_inst sel cert ctxt thm t = + let + val inst = match ctxt (dest_thm thm) (dest_prop t) + fun cert_inst (ix, (a, b)) = ((ix, a), cert b) + in Vartab.fold (cons o cert_inst) (sel inst) [] end + +fun match_instantiateT ctxt t thm = + if Term.exists_type (Term.exists_subtype Term.is_TVar) (dest_thm thm) then + Thm.instantiate (gen_certify_inst fst (Thm.ctyp_of ctxt) ctxt thm t, []) thm + else thm + +fun match_instantiate ctxt t thm = + let val thm' = match_instantiateT ctxt t thm in + Thm.instantiate ([], gen_certify_inst snd (Thm.cterm_of ctxt) ctxt thm' t) thm' + end + +fun apply_rule ctxt t = + (case Z3_Replay_Rules.apply ctxt (certify_prop ctxt t) of + SOME thm => thm + | NONE => raise Fail "apply_rule") + +fun discharge _ [] thm = thm + | discharge i (rule :: rules) thm = discharge (i + Thm.nprems_of rule) rules (rule RSN (i, thm)) + +fun by_tac ctxt thms ns ts t tac = + Goal.prove ctxt [] (map as_prop ts) (as_prop t) + (fn {context, prems} => HEADGOAL (tac context prems)) + |> Drule.generalize ([], ns) + |> discharge 1 thms + +fun prove ctxt t tac = by_tac ctxt [] [] [] t (K o tac) + +fun prop_tac ctxt prems = + Method.insert_tac ctxt prems + THEN' SUBGOAL (fn (prop, i) => + if Term.size_of_term prop > 100 then SAT.satx_tac ctxt i + else (Classical.fast_tac ctxt ORELSE' Clasimp.force_tac ctxt) i) + +fun quant_tac ctxt = Blast.blast_tac ctxt + + +(* plug-ins *) + +type abs_context = int * term Termtab.table + +type 'a abstracter = term -> abs_context -> 'a * abs_context + +type th_lemma_method = Proof.context -> thm list -> term -> thm + +fun id_ord ((id1, _), (id2, _)) = int_ord (id1, id2) + +structure Plugins = Generic_Data +( + type T = + (int * (term abstracter -> term option abstracter)) list * + th_lemma_method Symtab.table + val empty = ([], Symtab.empty) + val extend = I + fun merge ((abss1, ths1), (abss2, ths2)) = ( + Ord_List.merge id_ord (abss1, abss2), + Symtab.merge (K true) (ths1, ths2)) +) + +fun add_arith_abstracter abs = Plugins.map 
(apfst (Ord_List.insert id_ord (serial (), abs))) +fun get_arith_abstracters ctxt = map snd (fst (Plugins.get (Context.Proof ctxt))) + +fun add_th_lemma_method method = Plugins.map (apsnd (Symtab.update_new method)) +fun get_th_lemma_method ctxt = snd (Plugins.get (Context.Proof ctxt)) + + +(* abstraction *) + +fun prove_abstract ctxt thms t tac f = + let + val ((prems, concl), (_, ts)) = f (1, Termtab.empty) + val ns = Termtab.fold (fn (_, v) => cons (fst (Term.dest_Free v))) ts [] + in + by_tac ctxt [] ns prems concl tac + |> match_instantiate ctxt t + |> discharge 1 thms + end + +fun prove_abstract' ctxt t tac f = + prove_abstract ctxt [] t tac (f #>> pair []) + +fun lookup_term (_, terms) t = Termtab.lookup terms t + +fun abstract_sub t f cx = + (case lookup_term cx t of + SOME v => (v, cx) + | NONE => f cx) + +fun mk_fresh_free t (i, terms) = + let val v = Free ("t" ^ string_of_int i, fastype_of t) + in (v, (i + 1, Termtab.update (t, v) terms)) end + +fun apply_abstracters _ [] _ cx = (NONE, cx) + | apply_abstracters abs (abstracter :: abstracters) t cx = + (case abstracter abs t cx of + (NONE, _) => apply_abstracters abs abstracters t cx + | x as (SOME _, _) => x) + +fun abstract_term (t as _ $ _) = abstract_sub t (mk_fresh_free t) + | abstract_term (t as Abs _) = abstract_sub t (mk_fresh_free t) + | abstract_term t = pair t + +fun abstract_bin abs f t t1 t2 = abstract_sub t (abs t1 ##>> abs t2 #>> f) + +fun abstract_ter abs f t t1 t2 t3 = + abstract_sub t (abs t1 ##>> abs t2 ##>> abs t3 #>> (Scan.triple1 #> f)) + +fun abstract_lit (@{const HOL.Not} $ t) = abstract_term t #>> HOLogic.mk_not + | abstract_lit t = abstract_term t + +fun abstract_not abs (t as @{const HOL.Not} $ t1) = + abstract_sub t (abs t1 #>> HOLogic.mk_not) + | abstract_not _ t = abstract_lit t + +fun abstract_conj (t as @{const HOL.conj} $ t1 $ t2) = + abstract_bin abstract_conj HOLogic.mk_conj t t1 t2 + | abstract_conj t = abstract_lit t + +fun abstract_disj (t as @{const HOL.disj} $ t1 $ 
t2) = + abstract_bin abstract_disj HOLogic.mk_disj t t1 t2 + | abstract_disj t = abstract_lit t + +fun abstract_prop (t as (c as @{const If (bool)}) $ t1 $ t2 $ t3) = + abstract_ter abstract_prop (fn (t1, t2, t3) => c $ t1 $ t2 $ t3) t t1 t2 t3 + | abstract_prop (t as @{const HOL.disj} $ t1 $ t2) = + abstract_bin abstract_prop HOLogic.mk_disj t t1 t2 + | abstract_prop (t as @{const HOL.conj} $ t1 $ t2) = + abstract_bin abstract_prop HOLogic.mk_conj t t1 t2 + | abstract_prop (t as @{const HOL.implies} $ t1 $ t2) = + abstract_bin abstract_prop HOLogic.mk_imp t t1 t2 + | abstract_prop (t as @{term "HOL.eq :: bool => _"} $ t1 $ t2) = + abstract_bin abstract_prop HOLogic.mk_eq t t1 t2 + | abstract_prop t = abstract_not abstract_prop t + +fun abstract_arith ctxt u = + let + fun abs (t as (c as Const _) $ Abs (s, T, t')) = + abstract_sub t (abs t' #>> (fn u' => c $ Abs (s, T, u'))) + | abs (t as (c as Const (@{const_name If}, _)) $ t1 $ t2 $ t3) = + abstract_ter abs (fn (t1, t2, t3) => c $ t1 $ t2 $ t3) t t1 t2 t3 + | abs (t as @{const HOL.Not} $ t1) = abstract_sub t (abs t1 #>> HOLogic.mk_not) + | abs (t as @{const HOL.disj} $ t1 $ t2) = + abstract_sub t (abs t1 ##>> abs t2 #>> HOLogic.mk_disj) + | abs (t as (c as Const (@{const_name uminus_class.uminus}, _)) $ t1) = + abstract_sub t (abs t1 #>> (fn u => c $ u)) + | abs (t as (c as Const (@{const_name plus_class.plus}, _)) $ t1 $ t2) = + abstract_sub t (abs t1 ##>> abs t2 #>> (fn (u1, u2) => c $ u1 $ u2)) + | abs (t as (c as Const (@{const_name minus_class.minus}, _)) $ t1 $ t2) = + abstract_sub t (abs t1 ##>> abs t2 #>> (fn (u1, u2) => c $ u1 $ u2)) + | abs (t as (c as Const (@{const_name times_class.times}, _)) $ t1 $ t2) = + abstract_sub t (abs t1 ##>> abs t2 #>> (fn (u1, u2) => c $ u1 $ u2)) + | abs (t as (c as Const (@{const_name z3div}, _)) $ t1 $ t2) = + abstract_sub t (abs t1 ##>> abs t2 #>> (fn (u1, u2) => c $ u1 $ u2)) + | abs (t as (c as Const (@{const_name z3mod}, _)) $ t1 $ t2) = + abstract_sub t (abs t1 
##>> abs t2 #>> (fn (u1, u2) => c $ u1 $ u2)) + | abs (t as (c as Const (@{const_name HOL.eq}, _)) $ t1 $ t2) = + abstract_sub t (abs t1 ##>> abs t2 #>> (fn (u1, u2) => c $ u1 $ u2)) + | abs (t as (c as Const (@{const_name ord_class.less}, _)) $ t1 $ t2) = + abstract_sub t (abs t1 ##>> abs t2 #>> (fn (u1, u2) => c $ u1 $ u2)) + | abs (t as (c as Const (@{const_name ord_class.less_eq}, _)) $ t1 $ t2) = + abstract_sub t (abs t1 ##>> abs t2 #>> (fn (u1, u2) => c $ u1 $ u2)) + | abs t = abstract_sub t (fn cx => + if can HOLogic.dest_number t then (t, cx) + else + (case apply_abstracters abs (get_arith_abstracters ctxt) t cx of + (SOME u, cx') => (u, cx') + | (NONE, _) => abstract_term t cx)) + in abs u end + + +(* truth axiom *) + +fun true_axiom _ _ _ = @{thm TrueI} + + +(* modus ponens *) + +fun mp _ [p, p_eq_q] _ = discharge 1 [p_eq_q, p] iffD1 + | mp ctxt thms t = replay_rule_error ctxt Z3_Proof.Modus_Ponens thms t + +val mp_oeq = mp + + +(* reflexivity *) + +fun refl ctxt _ t = match_instantiate ctxt t @{thm refl} + + +(* symmetry *) + +fun symm _ [thm] _ = thm RS @{thm sym} + | symm ctxt thms t = replay_rule_error ctxt Z3_Proof.Reflexivity thms t + + +(* transitivity *) + +fun trans _ [thm1, thm2] _ = thm1 RSN (1, thm2 RSN (2, @{thm trans})) + | trans ctxt thms t = replay_rule_error ctxt Z3_Proof.Transitivity thms t + + +(* congruence *) + +fun ctac ctxt prems i st = st |> ( + resolve_tac ctxt (@{thm refl} :: prems) i + ORELSE (cong_tac ctxt i THEN ctac ctxt prems (i + 1) THEN ctac ctxt prems i)) + +fun cong_basic ctxt thms t = + let val st = Thm.trivial (certify_prop ctxt t) + in + (case Seq.pull (ctac ctxt thms 1 st) of + SOME (thm, _) => thm + | NONE => raise THM ("cong", 0, thms @ [st])) + end + +val cong_dest_rules = @{lemma + "(~ P | Q) & (P | ~ Q) ==> P = Q" + "(P | ~ Q) & (~ P | Q) ==> P = Q" + by fast+} + +fun cong_full ctxt thms t = prove ctxt t (fn ctxt' => + Method.insert_tac ctxt thms + THEN' (Classical.fast_tac ctxt' + ORELSE' dresolve_tac ctxt 
cong_dest_rules + THEN' Classical.fast_tac ctxt')) + +fun cong ctxt thms = try_provers ctxt Z3_Proof.Monotonicity [ + ("basic", cong_basic ctxt thms), + ("full", cong_full ctxt thms)] thms + + +(* quantifier introduction *) + +val quant_intro_rules = @{lemma + "(!!x. P x = Q x) ==> (ALL x. P x) = (ALL x. Q x)" + "(!!x. P x = Q x) ==> (EX x. P x) = (EX x. Q x)" + "(!!x. (~ P x) = Q x) ==> (~ (EX x. P x)) = (ALL x. Q x)" + "(!!x. (~ P x) = Q x) ==> (~ (ALL x. P x)) = (EX x. Q x)" + by fast+} + +fun quant_intro ctxt [thm] t = + prove ctxt t (K (REPEAT_ALL_NEW (resolve_tac ctxt (thm :: quant_intro_rules)))) + | quant_intro ctxt thms t = replay_rule_error ctxt Z3_Proof.Quant_Intro thms t + + +(* distributivity of conjunctions and disjunctions *) + +(* TODO: there are no tests with this proof rule *) +fun distrib ctxt _ t = + prove_abstract' ctxt t prop_tac (abstract_prop (dest_prop t)) + + +(* elimination of conjunctions *) + +fun and_elim ctxt [thm] t = + prove_abstract ctxt [thm] t prop_tac ( + abstract_lit (dest_prop t) ##>> + abstract_conj (dest_thm thm) #>> + apfst single o swap) + | and_elim ctxt thms t = replay_rule_error ctxt Z3_Proof.And_Elim thms t + + +(* elimination of negated disjunctions *) + +fun not_or_elim ctxt [thm] t = + prove_abstract ctxt [thm] t prop_tac ( + abstract_lit (dest_prop t) ##>> + abstract_not abstract_disj (dest_thm thm) #>> + apfst single o swap) + | not_or_elim ctxt thms t = + replay_rule_error ctxt Z3_Proof.Not_Or_Elim thms t + + +(* rewriting *) + +local + +fun dest_all (Const (@{const_name HOL.All}, _) $ Abs (_, T, t)) nctxt = + let + val (n, nctxt') = Name.variant "" nctxt + val f = Free (n, T) + val t' = Term.subst_bound (f, t) + in dest_all t' nctxt' |>> cons f end + | dest_all t _ = ([], t) + +fun dest_alls t = + let + val nctxt = Name.make_context (Term.add_free_names t []) + val (lhs, rhs) = HOLogic.dest_eq (dest_prop t) + val (ls, lhs') = dest_all lhs nctxt + val (rs, rhs') = dest_all rhs nctxt + in + if eq_list (op aconv) 
(ls, rs) then SOME (ls, (HOLogic.mk_eq (lhs', rhs'))) + else NONE + end + +fun forall_intr ctxt t thm = + let val ct = Thm.cterm_of ctxt t + in Thm.forall_intr ct thm COMP_INCR @{thm iff_allI} end + +in + +fun focus_eq f ctxt t = + (case dest_alls t of + NONE => f ctxt t + | SOME (vs, t') => fold (forall_intr ctxt) vs (f ctxt t')) + +end + +fun abstract_eq f (Const (@{const_name HOL.eq}, _) $ t1 $ t2) = + f t1 ##>> f t2 #>> HOLogic.mk_eq + | abstract_eq _ t = abstract_term t + +fun prove_prop_rewrite ctxt t = + prove_abstract' ctxt t prop_tac ( + abstract_eq abstract_prop (dest_prop t)) + +fun arith_rewrite_tac ctxt _ = + TRY o Simplifier.simp_tac ctxt + THEN_ALL_NEW (Arith_Data.arith_tac ctxt ORELSE' Clasimp.force_tac ctxt) + +fun prove_arith_rewrite ctxt t = + prove_abstract' ctxt t arith_rewrite_tac ( + abstract_eq (abstract_arith ctxt) (dest_prop t)) + +val lift_ite_thm = @{thm HOL.if_distrib} RS @{thm eq_reflection} + +fun ternary_conv cv = Conv.combination_conv (Conv.binop_conv cv) cv + +fun if_context_conv ctxt ct = + (case Thm.term_of ct of + Const (@{const_name HOL.If}, _) $ _ $ _ $ _ => + ternary_conv (if_context_conv ctxt) + | _ $ (Const (@{const_name HOL.If}, _) $ _ $ _ $ _) => + Conv.rewr_conv lift_ite_thm then_conv ternary_conv (if_context_conv ctxt) + | _ => Conv.sub_conv (Conv.top_sweep_conv if_context_conv) ctxt) ct + +fun lift_ite_rewrite ctxt t = + prove ctxt t (fn ctxt => + CONVERSION (HOLogic.Trueprop_conv (Conv.binop_conv (if_context_conv ctxt))) + THEN' resolve_tac ctxt @{thms refl}) + +fun prove_conj_disj_perm ctxt t = prove ctxt t Conj_Disj_Perm.conj_disj_perm_tac + +fun rewrite ctxt _ = try_provers ctxt Z3_Proof.Rewrite [ + ("rules", apply_rule ctxt), + ("conj_disj_perm", prove_conj_disj_perm ctxt), + ("prop_rewrite", prove_prop_rewrite ctxt), + ("arith_rewrite", focus_eq prove_arith_rewrite ctxt), + ("if_rewrite", lift_ite_rewrite ctxt)] [] + +fun rewrite_star ctxt = rewrite ctxt + + +(* pulling quantifiers *) + +fun pull_quant ctxt _ t = 
prove ctxt t quant_tac + + +(* pushing quantifiers *) + +fun push_quant _ _ _ = raise Fail "unsupported" (* FIXME *) + + +(* elimination of unused bound variables *) + +val elim_all = @{lemma "P = Q ==> (ALL x. P) = Q" by fast} +val elim_ex = @{lemma "P = Q ==> (EX x. P) = Q" by fast} + +fun elim_unused_tac ctxt i st = ( + match_tac ctxt [@{thm refl}] + ORELSE' (match_tac ctxt [elim_all, elim_ex] THEN' elim_unused_tac ctxt) + ORELSE' ( + match_tac ctxt [@{thm iff_allI}, @{thm iff_exI}] + THEN' elim_unused_tac ctxt)) i st + +fun elim_unused ctxt _ t = prove ctxt t elim_unused_tac + + +(* destructive equality resolution *) + +fun dest_eq_res _ _ _ = raise Fail "dest_eq_res" (* FIXME *) + + +(* quantifier instantiation *) + +val quant_inst_rule = @{lemma "~P x | Q ==> ~(ALL x. P x) | Q" by fast} + +fun quant_inst ctxt _ t = prove ctxt t (fn _ => + REPEAT_ALL_NEW (resolve_tac ctxt [quant_inst_rule]) + THEN' resolve_tac ctxt @{thms excluded_middle}) + + +(* propositional lemma *) + +exception LEMMA of unit + +val intro_hyp_rule1 = @{lemma "(~P ==> Q) ==> P | Q" by fast} +val intro_hyp_rule2 = @{lemma "(P ==> Q) ==> ~P | Q" by fast} + +fun norm_lemma thm = + (thm COMP_INCR intro_hyp_rule1) + handle THM _ => thm COMP_INCR intro_hyp_rule2 + +fun negated_prop (@{const HOL.Not} $ t) = HOLogic.mk_Trueprop t + | negated_prop t = HOLogic.mk_Trueprop (HOLogic.mk_not t) + +fun intro_hyps tab (t as @{const HOL.disj} $ t1 $ t2) cx = + lookup_intro_hyps tab t (fold (intro_hyps tab) [t1, t2]) cx + | intro_hyps tab t cx = + lookup_intro_hyps tab t (fn _ => raise LEMMA ()) cx + +and lookup_intro_hyps tab t f (cx as (thm, terms)) = + (case Termtab.lookup tab (negated_prop t) of + NONE => f cx + | SOME hyp => (norm_lemma (Thm.implies_intr hyp thm), t :: terms)) + +fun lemma ctxt (thms as [thm]) t = + (let + val tab = Termtab.make (map (`Thm.term_of) (Thm.chyps_of thm)) + val (thm', terms) = intro_hyps tab (dest_prop t) (thm, []) + in + prove_abstract ctxt [thm'] t prop_tac ( + fold (snd 
oo abstract_lit) terms #> + abstract_disj (dest_thm thm') #>> single ##>> + abstract_disj (dest_prop t)) + end + handle LEMMA () => replay_error ctxt "Bad proof state" Z3_Proof.Lemma thms t) + | lemma ctxt thms t = replay_rule_error ctxt Z3_Proof.Lemma thms t + + +(* unit resolution *) + +fun abstract_unit (t as (@{const HOL.Not} $ (@{const HOL.disj} $ t1 $ t2))) = + abstract_sub t (abstract_unit t1 ##>> abstract_unit t2 #>> + HOLogic.mk_not o HOLogic.mk_disj) + | abstract_unit (t as (@{const HOL.disj} $ t1 $ t2)) = + abstract_sub t (abstract_unit t1 ##>> abstract_unit t2 #>> + HOLogic.mk_disj) + | abstract_unit t = abstract_lit t + +fun unit_res ctxt thms t = + prove_abstract ctxt thms t prop_tac ( + fold_map (abstract_unit o dest_thm) thms ##>> + abstract_unit (dest_prop t) #>> + (fn (prems, concl) => (prems, concl))) + + +(* iff-true *) + +val iff_true_rule = @{lemma "P ==> P = True" by fast} + +fun iff_true _ [thm] _ = thm RS iff_true_rule + | iff_true ctxt thms t = replay_rule_error ctxt Z3_Proof.Iff_True thms t + + +(* iff-false *) + +val iff_false_rule = @{lemma "~P ==> P = False" by fast} + +fun iff_false _ [thm] _ = thm RS iff_false_rule + | iff_false ctxt thms t = replay_rule_error ctxt Z3_Proof.Iff_False thms t + + +(* commutativity *) + +fun comm ctxt _ t = match_instantiate ctxt t @{thm eq_commute} + + +(* definitional axioms *) + +fun def_axiom_disj ctxt t = + (case dest_prop t of + @{const HOL.disj} $ u1 $ u2 => + prove_abstract' ctxt t prop_tac ( + abstract_prop u2 ##>> abstract_prop u1 #>> HOLogic.mk_disj o swap) + | u => prove_abstract' ctxt t prop_tac (abstract_prop u)) + +fun def_axiom ctxt _ = try_provers ctxt Z3_Proof.Def_Axiom [ + ("rules", apply_rule ctxt), + ("disj", def_axiom_disj ctxt)] [] + + +(* application of definitions *) + +fun apply_def _ [thm] _ = thm (* TODO: cover also the missing cases *) + | apply_def ctxt thms t = replay_rule_error ctxt Z3_Proof.Apply_Def thms t + + +(* iff-oeq *) + +fun iff_oeq _ _ _ = raise Fail "iff_oeq" 
(* FIXME *) + + +(* negation normal form *) + +fun nnf_prop ctxt thms t = + prove_abstract ctxt thms t prop_tac ( + fold_map (abstract_prop o dest_thm) thms ##>> + abstract_prop (dest_prop t)) + +fun nnf ctxt rule thms = try_provers ctxt rule [ + ("prop", nnf_prop ctxt thms), + ("quant", quant_intro ctxt [hd thms])] thms + +fun nnf_pos ctxt = nnf ctxt Z3_Proof.Nnf_Pos +fun nnf_neg ctxt = nnf ctxt Z3_Proof.Nnf_Neg + + +(* theory lemmas *) + +fun arith_th_lemma_tac ctxt prems = + Method.insert_tac ctxt prems + THEN' SELECT_GOAL (Local_Defs.unfold0_tac ctxt @{thms z3div_def z3mod_def}) + THEN' Arith_Data.arith_tac ctxt + +fun arith_th_lemma ctxt thms t = + prove_abstract ctxt thms t arith_th_lemma_tac ( + fold_map (abstract_arith ctxt o dest_thm) thms ##>> + abstract_arith ctxt (dest_prop t)) + +val _ = Theory.setup (Context.theory_map (add_th_lemma_method ("arith", arith_th_lemma))) + +fun th_lemma name ctxt thms = + (case Symtab.lookup (get_th_lemma_method ctxt) name of + SOME method => method ctxt thms + | NONE => replay_error ctxt "Bad theory" (Z3_Proof.Th_Lemma name) thms) + + +(* mapping of rules to methods *) + +fun unsupported rule ctxt = replay_error ctxt "Unsupported" rule +fun assumed rule ctxt = replay_error ctxt "Assumed" rule + +fun choose Z3_Proof.True_Axiom = true_axiom + | choose (r as Z3_Proof.Asserted) = assumed r + | choose (r as Z3_Proof.Goal) = assumed r + | choose Z3_Proof.Modus_Ponens = mp + | choose Z3_Proof.Reflexivity = refl + | choose Z3_Proof.Symmetry = symm + | choose Z3_Proof.Transitivity = trans + | choose (r as Z3_Proof.Transitivity_Star) = unsupported r + | choose Z3_Proof.Monotonicity = cong + | choose Z3_Proof.Quant_Intro = quant_intro + | choose Z3_Proof.Distributivity = distrib + | choose Z3_Proof.And_Elim = and_elim + | choose Z3_Proof.Not_Or_Elim = not_or_elim + | choose Z3_Proof.Rewrite = rewrite + | choose Z3_Proof.Rewrite_Star = rewrite_star + | choose Z3_Proof.Pull_Quant = pull_quant + | choose (r as 
Z3_Proof.Pull_Quant_Star) = unsupported r + | choose Z3_Proof.Push_Quant = push_quant + | choose Z3_Proof.Elim_Unused_Vars = elim_unused + | choose Z3_Proof.Dest_Eq_Res = dest_eq_res + | choose Z3_Proof.Quant_Inst = quant_inst + | choose (r as Z3_Proof.Hypothesis) = assumed r + | choose Z3_Proof.Lemma = lemma + | choose Z3_Proof.Unit_Resolution = unit_res + | choose Z3_Proof.Iff_True = iff_true + | choose Z3_Proof.Iff_False = iff_false + | choose Z3_Proof.Commutativity = comm + | choose Z3_Proof.Def_Axiom = def_axiom + | choose (r as Z3_Proof.Intro_Def) = assumed r + | choose Z3_Proof.Apply_Def = apply_def + | choose Z3_Proof.Iff_Oeq = iff_oeq + | choose Z3_Proof.Nnf_Pos = nnf_pos + | choose Z3_Proof.Nnf_Neg = nnf_neg + | choose (r as Z3_Proof.Nnf_Star) = unsupported r + | choose (r as Z3_Proof.Cnf_Star) = unsupported r + | choose (r as Z3_Proof.Skolemize) = assumed r + | choose Z3_Proof.Modus_Ponens_Oeq = mp_oeq + | choose (Z3_Proof.Th_Lemma name) = th_lemma name + +fun with_tracing rule method ctxt thms t = + let val _ = trace_goal ctxt rule thms t + in method ctxt thms t end + +fun method_for rule = with_tracing rule (choose rule) + +end; diff --git a/src/main/SMT/z3_replay_rules.ML b/src/main/SMT/z3_replay_rules.ML new file mode 100644 index 0000000..966100b --- /dev/null +++ b/src/main/SMT/z3_replay_rules.ML @@ -0,0 +1,54 @@ +(* Title: HOL/Tools/SMT/z3_replay_rules.ML + Author: Sascha Boehme, TU Muenchen + +Custom rules for Z3 proof replay. 
+*) + +signature Z3_REPLAY_RULES = +sig + val apply: Proof.context -> cterm -> thm option +end; + +structure Z3_Replay_Rules: Z3_REPLAY_RULES = +struct + +structure Data = Generic_Data +( + type T = thm Net.net + val empty = Net.empty + val extend = I + val merge = Net.merge Thm.eq_thm +) + +fun maybe_instantiate ct thm = + try Thm.first_order_match (Thm.cprop_of thm, ct) + |> Option.map (fn inst => Thm.instantiate inst thm) + +fun apply ctxt ct = + let + val net = Data.get (Context.Proof ctxt) + val xthms = Net.match_term net (Thm.term_of ct) + + fun select ct = map_filter (maybe_instantiate ct) xthms + fun select' ct = + let val thm = Thm.trivial ct + in map_filter (try (fn rule => rule COMP thm)) xthms end + + in try hd (case select ct of [] => select' ct | xthms' => xthms') end + +val prep = `Thm.prop_of + +fun ins thm net = Net.insert_term Thm.eq_thm (prep thm) net handle Net.INSERT => net +fun del thm net = Net.delete_term Thm.eq_thm (prep thm) net handle Net.DELETE => net + +val add = Thm.declaration_attribute (Data.map o ins) +val del = Thm.declaration_attribute (Data.map o del) + +val name = Binding.name "z3_rule" + +val description = "declaration of Z3 proof rules" + +val _ = Theory.setup (Attrib.setup name (Attrib.add_del add del) description #> + Global_Theory.add_thms_dynamic (name, Net.content o Data.get)) + +end; diff --git a/src/main/SMT/z3_replay_util.ML b/src/main/SMT/z3_replay_util.ML new file mode 100644 index 0000000..34419ec --- /dev/null +++ b/src/main/SMT/z3_replay_util.ML @@ -0,0 +1,155 @@ +(* Title: HOL/Tools/SMT/z3_replay_util.ML + Author: Sascha Boehme, TU Muenchen + +Helper functions required for Z3 proof replay. 
+*) + +signature Z3_REPLAY_UTIL = +sig + (*theorem nets*) + val thm_net_of: ('a -> thm) -> 'a list -> 'a Net.net + val net_instances: (int * thm) Net.net -> cterm -> (int * thm) list + + (*proof combinators*) + val under_assumption: (thm -> thm) -> cterm -> thm + val discharge: thm -> thm -> thm + + (*a faster COMP*) + type compose_data = cterm list * (cterm -> cterm list) * thm + val precompose: (cterm -> cterm list) -> thm -> compose_data + val precompose2: (cterm -> cterm * cterm) -> thm -> compose_data + val compose: compose_data -> thm -> thm + + (*simpset*) + val add_simproc: Simplifier.simproc -> Context.generic -> Context.generic + val make_simpset: Proof.context -> thm list -> simpset +end; + +structure Z3_Replay_Util: Z3_REPLAY_UTIL = +struct + +(* theorem nets *) + +fun thm_net_of f xthms = + let fun insert xthm = Net.insert_term (K false) (Thm.prop_of (f xthm), xthm) + in fold insert xthms Net.empty end + +fun maybe_instantiate ct thm = + try Thm.first_order_match (Thm.cprop_of thm, ct) + |> Option.map (fn inst => Thm.instantiate inst thm) + +local + fun instances_from_net match f net ct = + let + val lookup = if match then Net.match_term else Net.unify_term + val xthms = lookup net (Thm.term_of ct) + fun select ct = map_filter (f (maybe_instantiate ct)) xthms + fun select' ct = + let val thm = Thm.trivial ct + in map_filter (f (try (fn rule => rule COMP thm))) xthms end + in (case select ct of [] => select' ct | xthms' => xthms') end +in + +fun net_instances net = + instances_from_net false (fn f => fn (i, thm) => Option.map (pair i) (f thm)) + net + +end + + +(* proof combinators *) + +fun under_assumption f ct = + let val ct' = SMT_Util.mk_cprop ct in Thm.implies_intr ct' (f (Thm.assume ct')) end + +fun discharge p pq = Thm.implies_elim pq p + + +(* a faster COMP *) + +type compose_data = cterm list * (cterm -> cterm list) * thm + +fun list2 (x, y) = [x, y] + +fun precompose f rule : compose_data = (f (Thm.cprem_of rule 1), f, rule) +fun precompose2 
f rule : compose_data = precompose (list2 o f) rule + +fun compose (cvs, f, rule) thm = + discharge thm + (Thm.instantiate ([], map (dest_Var o Thm.term_of) cvs ~~ f (Thm.cprop_of thm)) rule) + + +(* simpset *) + +local + val antisym_le1 = mk_meta_eq @{thm order_class.antisym_conv} + val antisym_le2 = mk_meta_eq @{thm linorder_class.antisym_conv2} + val antisym_less1 = mk_meta_eq @{thm linorder_class.antisym_conv1} + val antisym_less2 = mk_meta_eq @{thm linorder_class.antisym_conv3} + + fun eq_prop t thm = HOLogic.mk_Trueprop t aconv Thm.prop_of thm + fun dest_binop ((c as Const _) $ t $ u) = (c, t, u) + | dest_binop t = raise TERM ("dest_binop", [t]) + + fun prove_antisym_le ctxt ct = + let + val (le, r, s) = dest_binop (Thm.term_of ct) + val less = Const (@{const_name less}, Term.fastype_of le) + val prems = Simplifier.prems_of ctxt + in + (case find_first (eq_prop (le $ s $ r)) prems of + NONE => + find_first (eq_prop (HOLogic.mk_not (less $ r $ s))) prems + |> Option.map (fn thm => thm RS antisym_less1) + | SOME thm => SOME (thm RS antisym_le1)) + end + handle THM _ => NONE + + fun prove_antisym_less ctxt ct = + let + val (less, r, s) = dest_binop (HOLogic.dest_not (Thm.term_of ct)) + val le = Const (@{const_name less_eq}, Term.fastype_of less) + val prems = Simplifier.prems_of ctxt + in + (case find_first (eq_prop (le $ r $ s)) prems of + NONE => + find_first (eq_prop (HOLogic.mk_not (less $ s $ r))) prems + |> Option.map (fn thm => thm RS antisym_less2) + | SOME thm => SOME (thm RS antisym_le2)) + end + handle THM _ => NONE + + val basic_simpset = + simpset_of (put_simpset HOL_ss @{context} + addsimps @{thms field_simps times_divide_eq_right times_divide_eq_left arith_special + arith_simps rel_simps array_rules z3div_def z3mod_def NO_MATCH_def} + addsimprocs [@{simproc numeral_divmod}, + Simplifier.make_simproc @{context} "fast_int_arith" + {lhss = [@{term "(m::int) < n"}, @{term "(m::int) \ n"}, @{term "(m::int) = n"}], + proc = K Lin_Arith.simproc}, + 
Simplifier.make_simproc @{context} "antisym_le" + {lhss = [@{term "(x::'a::order) \ y"}], + proc = K prove_antisym_le}, + Simplifier.make_simproc @{context} "antisym_less" + {lhss = [@{term "\ (x::'a::linorder) < y"}], + proc = K prove_antisym_less}]) + + structure Simpset = Generic_Data + ( + type T = simpset + val empty = basic_simpset + val extend = I + val merge = Simplifier.merge_ss + ) +in + +fun add_simproc simproc context = + Simpset.map (simpset_map (Context.proof_of context) + (fn ctxt => ctxt addsimprocs [simproc])) context + +fun make_simpset ctxt rules = + simpset_of (put_simpset (Simpset.get (Context.Proof ctxt)) ctxt addsimps rules) + +end + +end; diff --git a/src/SMTBackend.thy b/src/main/SMTBackend.thy similarity index 100% rename from src/SMTBackend.thy rename to src/main/SMTBackend.thy diff --git a/src/SharedMemory.thy b/src/main/SharedMemory.thy similarity index 100% rename from src/SharedMemory.thy rename to src/main/SharedMemory.thy diff --git a/src/Term_Tactics.thy b/src/main/Term_Tactics.thy similarity index 100% rename from src/Term_Tactics.thy rename to src/main/Term_Tactics.thy diff --git a/src/TestEnv.ML b/src/main/TestEnv.ML similarity index 100% rename from src/TestEnv.ML rename to src/main/TestEnv.ML diff --git a/src/TestEnv.thy b/src/main/TestEnv.thy similarity index 100% rename from src/TestEnv.thy rename to src/main/TestEnv.thy diff --git a/src/main/TestGen.thy b/src/main/TestGen.thy new file mode 100644 index 0000000..8d6d18e --- /dev/null +++ b/src/main/TestGen.thy @@ -0,0 +1,1704 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * TestGen.thy --- the core of the HOL-TestGen System. + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2010 ETH Zurich, Switzerland + * 2008-2015 Achim D. 
Brucker, Germany + * 2009-2017 Universite Paris-Sud, France + * 2015-2017 The University of Sheffield, UK + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ ******************************************************************************) +(* $Id:$ *) + +chapter {* The TestGen Core*} + +theory TestGen +imports + Complex_Main + Term_Tactics (*ML*) + clocks (*ML*) + log (* ML *) + TestEnv (* ML *) + BackendUtils + RandomBackend + QuickCheckBackend + (*SMTBackend*) + + +keywords "mk_test_suite" :: "qed_global" + and "test_spec" :: "thy_goal" + and "gen_test_data" :: "thy_decl_block" (*"thy_script"*) (* really ? *) + and "print_testenv" :: diag + and "print_conc_tests" :: diag + and "print_abs_tests" :: diag + and "print_thyps" :: diag + and "print_upos" :: diag + and "set_pre_safe_tac" :: diag + and "set_pre_normalize_TNF_tac" :: diag + and "set_pre_minimize_TNF_tac" :: diag +begin + + +section{*General Framework for Unit-Test*} + + +definition PO :: "bool \ bool" + where "PO x = x" + +lemma PO_norm1 [simp]: "PO(PO A) = PO A" by(simp add: PO_def) +lemma PO_norm2 [simp]: "(\ PO A) = (PO(\ A))" by(simp add: PO_def) +lemma PO_norm3 [simp]: "(A \ PO B) = (PO(A \ B))" by(simp add: PO_def) +lemma PO_norm4 [simp]: "(PO A \ B) = (PO(A \ B))" by(simp add: PO_def) +lemma PO_norm5 [simp]: "(A \ PO B) = (PO(A \ B))" by(simp add: PO_def) +lemma PO_norm6 [simp]: "(PO A \ B) = (PO(A \ B))" by(simp add: PO_def) +lemma PO_norm7 [simp]: "(A \ (PO B)) = (PO(A \ B))" by(simp add: PO_def) +lemma PO_norm8 [simp]: "((PO A) \ B) = (PO(A \ B))" by(simp add: PO_def) +lemma PO_norm9 [simp]: "(A = (PO B)) = (PO(A = B))" by(simp add: PO_def) +lemma PO_norm10 [simp]: "((PO A) = B) = (PO(A = B))" by(simp add: PO_def) +lemma PO_norm11 [simp]: "(\x. (PO (A x))) = (PO(\x. A x))" by(simp add: PO_def) +lemma PO_norm12 [simp]: "(\x. (PO (A x))) = (PO(\x. 
A x))" by(simp add: PO_def) + +lemma PO_I : "(PO P) \ Q \ P \ Q" by(simp add: PO_def) (* internal use only *) +lemma PO_rev[elim!]: "\ PO False \ PO False \ False" by auto +lemma PO_grow : "PO(P \ Q) \ R \ (PO P) \ (Q \ R)" by(simp add: PO_def) (* internal use only *) + + +definition THYP :: "bool \ bool" + where "THYP x = x" + +lemma THYP_triv [simp]: "THYP True = True" by(simp add: THYP_def) + +lemma THYP_I : "A \ THYP A" by(simp add: THYP_def) + +lemma THYP_D : "THYP A \ A" by(simp add: THYP_def) + +lemma THYP_E : "\ THYP A; (A \ R) \ \ R" by(simp add: THYP_def) + +lemma THYP_spec : "THYP(\x. P x) \ THYP(P x)" by(simp add: THYP_def) + +lemma THYP_app1 : "\ THYP(A \ B); A \ \ B" by(simp add: THYP_def) + (* to be used with etac in order to apply uniformity hyp *) + +lemma THYP_app1_rev : "\ A; THYP(A \ B) \ \ B" by(simp add: THYP_def) + (* to be used with etac in order to apply uniformity hyp *) + +lemma THYP_app2 : "\ THYP(A); A \ B \ \ B" by(simp add: THYP_def) + (* to be used with RS in order to apply regularity hyp *) + + +lemma THYP_E_reg : "\ size x \ k \ B; THYP((k < size x) \ B)\ \ B" + by(simp add: THYP_def, auto) + (* to be used with rtac in order to introduce regularity hyp *) + + +definition RSF :: "bool" + where "RSF = True" -- {* just to denote random solver failures *} + +lemma RSF_E : "\ RSF \ P \ \ P" + by(auto simp: RSF_def) + + +section {* Tool Setup *} + + +ML{* +infix 1 |$> + +infix 6 DOWNTO + +infix 0 COLLECT_SS + +signature TESTGEN = +sig + val mt_testenv : TestEnv.testenv + + val get_test_thm : theory -> Symtab.key -> thm list + + val trace : bool Config.T + (* switches on debug information *) + + val no_uniformity : bool Config.T + (* switches off uniformity generation *) + + val profiling : bool Config.T + (* switches on timer profiler *) + + val no_finalize : bool Config.T + (* don't generate constraints (POs) *) + + val is_neither_thyp_nor_PO : term -> bool + + val |$> : thm * tactic -> thm + val TRY' : (int -> tactic) -> int -> tactic 
+ val COND' : (int -> thm -> bool) -> (int -> tactic) -> (int -> tactic) -> int -> tactic + val DOWNTO : int * int -> (int -> tactic) -> tactic + val DEPTH_SPLIT : (int -> tactic) -> int -> tactic + val REPEAT' : ('a -> tactic) -> 'a -> tactic + val ALLCASES : (int -> tactic) -> tactic + val to_TNF : Proof.context -> string list -> int -> thm -> thm Seq.seq + val uniformityI_tac : Proof.context -> string list -> int -> tactic + val CLOSURE : (int -> tactic) -> int -> thm -> thm Seq.seq + val case_tac_typ : Proof.context -> string list -> int -> thm -> thm Seq.seq + + val ONLY_POS : (int -> tactic) -> int -> tactic + + val finalize_tac : Proof.context -> string list -> tactic + + val abs_data_tac : Proof.context -> thm list -> int -> tactic + + val mp_fy : Proof.context -> int -> tactic + val all_ify : Proof.context -> string list -> int -> tactic + val thyp_ify : Proof.context -> int -> tactic + + val var_hyp_subst_tac : Proof.context -> int -> tactic + + val gen_test_case_tac : Proof.context -> string list -> tactic + (* "gen_test_case_tac ctxt prognames" is a + tactic that converts all subgoals of current proof state into + test-normal form - i.e. a conjunctive normal form + (instead of DNF), where the conclusions have the + form "P(progname x1 .. xn)", where progname does + not occur in the premisses (the "constraints") + of the test. As a prerequisite, a data-separation + theorem of depth k is resolved for up to >steps< number + of free variables in the current proof-state. + + NOTE: At present, the generation of data-separation + lemmas is limited to "real" free variables and will + not work for parameters of a test specification. Further, + the proof-state is assumed to have only one goal. 
+ *) + + (* Included for complete backward compatibility *) + val gen_test_case_old_tac : Proof.context -> string list -> tactic + + val gen_test_data : string -> Context.generic -> Context.generic + (* "gen_test_data name context" constructs ground type instances of + test theorems associated to "name". In a further step, + "abstract" or local test data were resolved into the resulting test + theorems (if available in the testenv under "name"), remaining + meta variables were instantiated by a "random_solver" + that attempts to generate an instance and tries to solve + the constraints of a goal up to "iterations" times. + + The resulting simplified and "solved" test data theorems + were stored again in the test environment. + *) + + (* TODO: remove these three functions *) + val get_test_data : theory -> string -> thm list + (* get_test_data thy name extracts the test judgements + (i.e. the premisses of test data statement, that are not + test hypothesis or proof obligation.). *) + + val get_test_hyps : theory -> string -> thm list + (* get_test_hyps thy name extracts the test hypothesis + of a !!test data statement!!, not a test theorem. *) + + val get_pos : theory -> string -> thm list + (* extract remaining constraints (proof obligations) *) + + val animate : theory -> string -> unit + (* ground test data statements may be animated, if "P(progname x1 .. xn)" + is executable. "animate" presents test data statements by + unfolding "progname x1 .. xn" and simplifying on a stepwise + basis. 
(* for the moment, it's just asm_simp_tac with the + standard evaluation trace, in the future its more or less + the same except with slightly improved output *) + *) + val discharge_POs : string -> Proof.context -> tactic + + val setup: theory -> theory +end +*} + + +ML{* + +structure TestGen : TESTGEN = +struct + +open Thm; +open HOLogic; +open BNF_LFP_Compat ; +open Old_Datatype_Aux +(* open TestEnv; *) +(* open Clocks; *) +(* open BNF_LFP_Compat *) +open Int; +open List; +open Ctr_Sugar_Util ; (* rtac dtac etc. *) +open Rule_Insts; (* res_inst_tac etc *) + + + +val (trace, trace_setup) = Attrib.config_bool @{binding testgen_trace} (K false); +val (no_uniformity, no_uniformity_setup) = Attrib.config_bool @{binding no_uniformity} (K false); +val (profiling, profiling_setup) = Attrib.config_bool @{binding testgen_profiling} (K false); +val (no_finalize, no_finalize_setup) = Attrib.config_bool @{binding testgen_no_finalize} (K false); + + +(* ************************************************************************* *) +(* access to datatype package ... *) +(* ************************************************************************* *) + +fun isDataType thy (Type(tn,_)) = is_some (BNF_LFP_Compat.get_info thy [] tn) (* CHECK ME: empty preference list ok ? *) + |isDataType _ _ = false; + + +fun get_case_split_of_dt thy type_name = #exhaust (BNF_LFP_Compat.the_info thy [] type_name); + +fun get_nchotomy_of thy type_name = #nchotomy (BNF_LFP_Compat.the_info thy [] type_name); + + +(* ************************************************************************* *) +(* access to record package ... 
*) +(* ************************************************************************* *) + +fun isRecordType thy (Type(tn,_)) = is_some (Record.get_info thy tn) + |isRecordType _ _ = false; + +fun get_case_split_of_record thy type_name = #cases(the (Record.get_info thy type_name)); + + +(* ************************************************************************* *) +(* term computations ... *) +(* ************************************************************************* *) + +fun max_depth (Free h) = (0,[h]) + |max_depth (Const h)= (0,[]) + |max_depth (Abs(s,tt,t)) = max_depth t + |max_depth (s $ t) = let val (k,S) = max_depth (s) + val (k',S') = max_depth (t) + in if k > k'+1 then (k,S) + else if k = k'+1 then (k, S @ S') + else (k'+1, S') + end + |max_depth (_) = (0,[]); + +(* relies on definitions in Testing.thy ... *) +val is_thyp = exists_Const (fn (@{const_name THYP},_) => true | _ => false) + +fun is_po (Const (@{const_name Trueprop},_) $ (Const (@{const_name PO},_) $ _)) = true + |is_po _ = false; + +fun is_neither_thyp_nor_PO t = (not (is_thyp t)) andalso (not (is_po t)) + +fun is_test_data t = (is_neither_thyp_nor_PO t) andalso not (exists_subterm is_Var t) + +fun dest_TruepropAll (Const ("all", _) $ Abs(s,ty, t)) = + (Const (@{const_name "HOL.All"}, (ty --> HOLogic.boolT) --> HOLogic.boolT) + $ Abs(s,ty , dest_TruepropAll t)) + |dest_TruepropAll t = HOLogic.dest_Trueprop t; + + +fun exists_prog prog = Term.exists_subterm + (fn Free (x, _) => member (op =) prog x + | Const (c, _) => member (op =) prog (Long_Name.base_name c) (* FIXME proper internal name!? *) + | _ => false); + +(* complex type instantiations ... 
*) + +fun groundT_thm context bound candTs thm = + let val ctxt = Context.proof_of context + val thy = Proof_Context.theory_of ctxt + val thm = Thm.varifyT_global thm + val tvars = Term.add_tvars (list_comb(Thm.concl_of thm,Thm.prems_of thm)) [] + fun ctyp_of_sort x so t = if Sign.of_sort thy (t,so) + then (SOME((x,so), Thm.ctyp_of ctxt t) + handle _ => NONE) (* FIXME avoid handle _ *) + else NONE + fun prodS [] _ = [] + |prodS (a::R) B = (map(fn x => a::x) B) @ (prodS R B) + + fun compute_instsT [] = [[]] + |compute_instsT ((x,so)::R) = prodS (List.mapPartial (ctyp_of_sort x so) candTs) + (compute_instsT R) + in + map (fn x => Drule.instantiate_normalize (x,[]) thm) + (Library.take bound (compute_instsT tvars)) + end; + +(* test + + val thm' = groundT_thm thy + (#type_range_bound(rep_testenv te)) + (#type_candicates(rep_testenv te)) + thm + + *) + + + +(* *********************************************************************** *) +(* *) +(* SOME more universal tacticals -> tctical.ML *) +(* *) +(* *********************************************************************** *) + +fun thm |$> tac = + case Seq.pull (tac thm) of + SOME (m,_) => m + |_ => error"|$> : TACTIC FAILED" + +fun TRY' tac n = TRY(tac n) (* standard combinator, + not implemented so far in Isabelle *) + +fun REPEAT' tac n = REPEAT (tac n) + +fun COND' testfun thenf elsef x prf = + if testfun x prf then thenf x prf else elsef x prf; + + +fun (k DOWNTO (n:int)) = fn tac => (fn st => + let fun doall x = if x < n + then all_tac + else ((tac x) THEN doall(x-1)) + val m = Thm.nprems_of st + in if k <= 0 orelse k > m + then all_tac st + else doall k st + end) + + +fun DEPTH_SPLIT tac n st = +(* PRE tac should only exhausting - i.e. increase + the number of subgoals finitely many times + POST if tac never fails, DEPTH_SPLIT tac will not fail. 
+ *) + let val m = Thm.nprems_of st + val res = st |$> (tac n) + val m' = Thm.nprems_of res + in if m' > m + then ((n + m' - m) DOWNTO n)(DEPTH_SPLIT tac) (res) + else all_tac res + end + +fun SOLVE_ASMS ctxt tac n state = + let val goal = Logic.get_goal (Thm.prop_of state) n + val params = Logic.strip_params goal + val prems = map HOLogic.dest_Trueprop (rev(Logic.strip_imp_prems goal)) + val eqTrueI2 = @{lemma "[|P;P |] ==> True" by auto} + fun decomp thm n = let val mm = (thm RS conjunct1) + in (dtac ctxt (mm RS eqTrueI2) n) THEN + (etac ctxt @{thm TrueE} n) THEN + (decomp (thm RS conjunct2) n) end + handle THM _ => (dtac ctxt (thm RS eqTrueI2) n) THEN + (etac ctxt @{thm TrueE} n) + + in case prems of + [] => all_tac state + |a::r => let val prem = List.foldl HOLogic.mk_conj a r + val prem'= (Term.subst_bounds ((rev(map Free params)),prem)) + val thm = Goal.prove ctxt [] [] (HOLogic.mk_Trueprop prem') tac + in (decomp thm n) state end + handle _ => no_tac state (* could not solve asms ... 
*) (* FIXME avoid handle _ *) + end + + +fun auto_solver thms {prems: thm list, context: Proof.context} state = + let val thm' = state |$> (TRY(safe_tac (context delrules [notI]))) + val goals = Thm.nprems_of thm' + fun solve_thm thm = FIRSTGOAL (fn n => (rtac context thm n) THEN + (IF_UNSOLVED (SOLVE(auto_tac context)))) + fun solve_thms [] = no_tac + |solve_thms (a::R) = solve_thm a ORELSE solve_thms R + in if goals > 0 then (solve_thms thms) thm' else all_tac thm' + end + + +(* *********************************************************************** *) +(* *) +(* SOME Testing specific tacticals *) +(* *) +(* *********************************************************************** *) + +(* PRE n>0 and n <= nprems_of thm *) +fun is_THYP n thm = is_thyp(Logic.nth_prem(n, Thm.prop_of thm)) + +fun is_PO n thm = is_po(Logic.nth_prem(n, Thm.prop_of thm)) + +fun is_TEST n thm = is_neither_thyp_nor_PO(Logic.nth_prem(n, Thm.prop_of thm)) + +fun ONLY_CASES tac = COND' is_TEST tac (K all_tac) + +fun ALLCASES_before k tac = (k DOWNTO 1) (ONLY_CASES tac) + +fun ALLCASES tac thm = (ALLCASES_before (Thm.nprems_of thm) tac thm) + +fun ONLY_POS tac = COND' is_PO tac (K all_tac) + + +fun unfold ctxt rules = asm_full_simp_tac (put_simpset HOL_basic_ss ctxt addsimps rules) + +(* Optimization potential: safe_tac against something more + specialized should do ... *) +fun if_splitter ctxt cs = asm_full_simp_tac (fold Splitter.add_split @{thms if_splits} (put_simpset HOL_basic_ss ctxt)) + THEN' TRY' (fn n => SELECT_GOAL (safe_tac cs) n) + +(* *********************************************************************** *) +(* *) +(* basic tactics ... *) +(* *) +(* *********************************************************************** *) + +fun strip_tac ctxt i = REPEAT (resolve_tac ctxt [impI, allI] i); (* declared Legacy in 2013-2*) + +(* Pre : A1 ==> .. ==> An ==> An+1 + Post: A1 --> .. 
--> An --> An+1 *) +fun mp_fy ctxt n = (REPEAT(etac ctxt rev_mp n)); (* Version replaced by more efficient one *) +fun mp_fy ctxt n = (REPEAT(ematch_tac ctxt [rev_mp] n)); + +fun all_ify ctxt excpt n state = + let val subgoal = Logic.nth_prem(n, Thm.prop_of state) + val params = Logic.strip_params subgoal + val frees = Term.add_frees subgoal [] + fun sbs(n,t) = res_inst_tac ctxt [((("x",0), Position.none), n)] [] spec + val vars = filter_out (member (op =) excpt o fst) (params @ frees) + in (if null vars + then all_tac + else (List.foldr(fn(x,ta) => (sbs x n) THEN ta) (prune_params_tac ctxt) vars) + (* NEW PATCH rev 12197*) + THEN + all_ify ctxt excpt n (* sbs can fail to eliminate *all* variables + due to alpha-renaming of parameters in + res_inst_tac. This recursive call makes + all_ify exhaustive.*) + (* END NEW PATCH rev 12197*) + ) + (state) + end + + +(*FIXME Free's are actually local consts, and cannot be closed in general + without disrupting Isar locality*) +fun allify_frees_tac ctxt excpt = SUBGOAL (fn (subgoal, i) => + let val thy = Proof_Context.theory_of ctxt + val a = (("'a", 0), @{sort type}) + fun spec_tac (x, T) = + rtac ctxt (Thm.instantiate ([(a, Thm.ctyp_of ctxt T)], [((("x", 0), T), Thm.cterm_of ctxt (Free (x, T)))]) + (Thm.transfer thy @{thm spec})) i + val vars = filter_out (member (op =) excpt o fst) (Term.add_frees subgoal []) + in + if null vars then all_tac + else EVERY (map spec_tac vars) + end) + +fun thyp_ify ctxt n = + EVERY[mp_fy ctxt n, all_ify ctxt [] n, rtac ctxt (@{thm THYP_def} RS @{thm iffD1}) n] + +fun vars_to_frees t = let + val termvars = Term.add_vars t [] + val freenames = Term.add_free_names t [] + val varnames = map (fn ((s, a),_) => s^(Int.toString a)) termvars + val newfreenames = Name.variant_list freenames varnames + val insts = map (fn (x as (_, T), freename) => (Var x, Free (freename, T))) (termvars ~~ newfreenames) +in + Term.subst_atomic insts t +end + +fun convert_goals_to_metahyps (test:(term -> bool)) thm = + 
let val thy = Thm.theory_of_thm thm + in map ((fn prop => Thm.forall_elim_vars 0 (Thm.assume (Thm.global_cterm_of thy prop))) o vars_to_frees) + (filter test (Thm.prems_of thm)) + end; + + +fun instantiate_tac ctxt insts n thm = + let val goal = Thm.global_cterm_of (Thm.theory_of_thm thm) (Logic.nth_prem(n, Thm.prop_of thm)) + in (rtac ctxt (Drule.instantiate_normalize ([],insts)(Thm.trivial goal)) n +) thm end + +fun var_hyp_subst_tac ctxt no state = + let val prms = Logic.strip_imp_prems(Logic.nth_prem(no, Thm.prop_of state)); + fun is_eq (Const (@{const_name HOL.eq}, _) $ (Var _) $ _) = true + |is_eq (Const (@{const_name HOL.eq}, _) $ _ $ (Var _)) = true + |is_eq _ = false + val i = Library.find_index(fn (Const(@{const_name Trueprop},_)$X) => is_eq X) + (prms) + fun mks thm = if i >= 0 + then ((rotate_tac i no) THEN etac ctxt @{thm thin_refl} no) thm + else no_tac thm + in ((REPEAT mks) + THEN + (full_simp_tac ctxt no)) state + end; + + +(* Code seems to be superseeded by Goals.PARALLEL_GOALS in Isabelle2013-2 ... *) +(* parallel tactic application, without any sanity checks *) + +local exception FAILED of unit in + +fun PARALLEL tac st = + let + fun try_tac g = + (case SINGLE tac g of + NONE => raise FAILED () + | SOME g' => g'); + val goals = Drule.strip_imp_prems (Thm.cprop_of st); + val results = Par_List.map (try_tac o Goal.init) goals; + in all_tac st (* ALLGOALS (fn i => retrofit i 1 (nth results (i - 1))) st *) + end + handle FAILED () => Seq.empty; + +end; + + + +(* A specialized map-reduce pattern for constraint-solving in TestGen. The + PO's were extracted, attempts to their solution parallized, and the retrofit + of the results into the test-theorem is again sequential, since it instantiates + the meta-variables. 
*) +fun PARALLEL_TRYSOLVE_POs ctxt test_solve_tac st = + let + val pon = Config.get ctxt TestEnv.pon + fun filter_pos filter S = + let fun h _ _ [] = [] + | h f n (a::S) = if f a then (n,a)::(h f (n+1) S) else (h f (n+1) S) + fun h_pon _ _ _ [] = [] + | h_pon f n k (a::S) = if f a then (if k = pon then (n,a):: nil + else (h_pon f (n+1) (k+1) S)) + else (h_pon f (n+1) k S) + in if pon = 0 then h filter 1 S else h_pon filter 1 1 S end; + fun try_solve ct = ((Option.map (Goal.finish ctxt)) o + (SINGLE (test_solve_tac ctxt))) + (Goal.init ct); + val goals = Drule.strip_imp_prems (Thm.cprop_of st); + val po_trms = (filter_pos (is_po o Thm.term_of) goals); + val jdmts = Par_List.map (fn(n,ct) => (n, try_solve ct)) po_trms + + in Seq.succeed(foldr (fn ((k, SOME a),thm) => (a RSN (k, thm)) + |((_, NONE),thm) => thm) st jdmts) + end + + (* val PARALLEL = PARALLEL_GOALS *) (* Experiment. See above. bu *) + + +(* applies exhaustively tactic tac ctxt i on subgoal i and all its descendants. + tac can solve current goal i. + PRE: tac must eventually terminate, also in all descendants ( otherwise CLOSURE loops). + PRE: current goal i must be in {1 .. nprems_of st}. + PRE: ctxt must be the ProofContext of st. 
+*) +fun CLOSURE tac i st = + let fun closure i k st = if i > k then all_tac st + else let val m = Thm.nprems_of st + in case SINGLE (tac i) st of + NONE => closure (i+1) k st + | SOME st' => let val m' = Thm.nprems_of st' + in if m' = m-1 (* tac eliminated subgoal *) + then closure i (k-1) st' + else closure i (k+(m'-m)) st' + (* superfluous case distinct *) + end + end + in closure i i st end + + + +(* *********************************************************************** *) +(* *) +(* special tactics for TestGen *) +(* *) +(* *********************************************************************** *) + +fun cpat ctxt str = Thm.cterm_of ctxt (Proof_Context.read_term_pattern ctxt str); + +(*PRE : Must not be THYP or PO *) +(* Known Bug: does not work for arbirary mixtures between HOL and Pure Quantifications *) +fun uniformityI_tac ctxt prog = + (if Config.get ctxt no_uniformity + then (fn _ => all_tac) + else SELECT_GOAL + let + val cert = Thm.cterm_of ctxt; + + val core_tac = + SUBGOAL (fn (term, i) => + let fun conv_All (Const (@{const_name All}, ty) $ Abs(n, ty', tt)) = + (Const (@{const_name Ex}, ty) $ Abs (n, ty', conv_All tt)) + |conv_All (Const (@{const_name HOL.implies}, _) $ P $ Q) = + (Const (@{const_name HOL.conj}, [boolT,boolT]--->boolT) $ P $ conv_All Q) + | conv_All tt = tt; + (* val tt = dest_TruepropAll term; *) + val tt = dest_Trueprop term; + val thyp = mk_Trueprop (@{const THYP} $ (@{const HOL.implies} $ conv_All tt $ tt)); + in rtac ctxt (Thm.instantiate ([], [((("psi",0),propT), cert thyp)]) cut_rl) i end); + + val x = cpat ctxt "?x::?'a"; + val T = Thm.typ_of_cterm x; + val count = Unsynchronized.ref 0; + fun new_var () = cert (Var (("?X" ^ Int.toString (Unsynchronized.inc count) ^ "X", 0), T)); + fun exI_tac i st = rtac ctxt (Thm.instantiate ([], [((("x",0),T), new_var ())]) @{thm exI}) i st; + fun check_tac' (t,i) = case t of + @{term Trueprop} $ t' => check_tac' (t',i) + | @{term HOL.conj} $ u $ _ => if exists_prog prog u then + no_tac + 
else all_tac + + val check_tac = SUBGOAL check_tac' + val intro_PO = (rtac ctxt @{thm PO_I} 1) + THEN (check_tac 1) + THEN REPEAT_DETERM ((rtac ctxt @{thm PO_grow} 1) + THEN (check_tac 1)) + + val finish_tac = (intro_PO THEN (rtac ctxt @{thm "conjI"} 1) THEN (strip_tac ctxt 2)) ORELSE (strip_tac ctxt 1) + in + EVERY [mp_fy ctxt 1, all_ify ctxt prog 1, + core_tac 1, etac ctxt @{thm THYP_app1} 1, + (REPEAT_DETERM o exI_tac) 1, + finish_tac] + end) + + +(* ************************************************************************* *) +(* data separation lemma generation ... *) +(* ************************************************************************* *) + + +fun refine_data_separation_lemma changed ctxt bal prem_no cands thm = +(* - bal controls of the refinement is in balancing mode + (only parameters whose occurrence in the term is + has strictly less than maximum height) + or not. + - prem_no indicates the number of the premise + - cands is the global list of candidates. + In balancing mode, parameters in maximal depth + position were eliminated. + - PRE maxidx is 0 for all mvars + - POST maxidx is 0 for all mvars + + *) +let + val thy = Proof_Context.theory_of ctxt + val prem = Logic.nth_prem(prem_no, Thm.prop_of thm) + val params = Logic.strip_params(prem) + val cands' = if bal then + let val term = HOLogic.dest_Trueprop(hd + (Logic.strip_assums_hyp prem)) + val (k,mp) = max_depth(Term.subst_bounds + ((rev(map Free params)),term)) + in filter(fn p => forall(fn p'=> not(p=p')) (mp)) cands end + else cands; + + val var_no = find_index (fn(s,t)=> exists(fn (name,ty)=> + s = name andalso t = ty) cands') + params + val (name,ty) = if var_no > ~1 then List.nth(params,var_no) + else raise (ERROR("no param found")) + val Type(tn,_) = ty; + val m = get_case_split_of_dt thy tn; + val n = m RSN (prem_no, thm); + (* pre: all occurrences of ?indname t1 ... tn have the same + arguments and types and occur in a critical pattern : + ?indname t1 ... tn = C x1 ... xn + ... 
+ The Thm.adjust_maxidx_thm reflects the fact, that only one + MVar y should have been left - the others are erased due to + hypsubst internally + *) + fun find_mvar_pat indname goal_no thm = + let val goal = Logic.get_goal thm goal_no + val params = Logic.strip_params goal + fun cc(Const(@{const_name Pure.imp}, _) + $ (Const(@{const_name Trueprop}, _) + $ (Const(@{const_name HOL.eq},t) $ A $ B)) + $ C) = (case strip_comb(subst_bounds( + rev(map Free params), A)) of + (Var(s,_),S) =>(if s = indname + then (map dest_Free S, head_of B) + else cc(C)) + | (_, _) => cc C) + | cc _ = error("find_mvar_pat: No pattern found") + in cc (strip_all_body goal) end + fun lifter name S = + let fun ll k [] = if 0 <=k then Bound(k) + else error"find_mvar:impossible lifter" + |ll k ((s,t)::R) = + if s = name then Abs(s,t, ll 0 R) + else Abs(s,t, ll (k+1) R) + in ll (~(length S)) S end + fun typ_of_lifter (name:string) S = + let fun tll S [] = error"find_mvar:impossible type for lifter" + |tll S ((s,t)::R) = + if name = s + then ((rev S)@(t::(map snd R)), t) + else tll (t::S) R + in (op --->)(tll [] S) end + val (lifter_params, constr) = find_mvar_pat ("y",1) prem_no (Thm.prop_of n) + (* ("y",1) is a fact from the invariant maxidx=0 (see below) *) + val subst0 = lifter name lifter_params + val ty_lift = fastype_of subst0 + val ty_constr = fastype_of constr + val subst = Thm.cterm_of ctxt subst0 + val ty_lift_body = body_type ty_lift + val ty_lift_constr = body_type ty_constr + val tyenv = Sign.typ_match thy (ty_lift_constr,ty_lift_body) Vartab.empty + handle Type.TYPE_MATCH => Vartab.empty + (* Hack that works for cases, where non-recursive positions in a type + were GROUND types (like color in RBT_tree). 
*) + fun tysubs2ctyps(x,(s,t)) = ((x,s),Thm.ctyp_of ctxt t) (*(Thm.ctyp_of ctxt (TVar(x,s)),Thm.ctyp_of ctxt t) *) + val tsubst = map tysubs2ctyps (Vartab.dest tyenv) + val () = (changed:=true) + val var = (("y",1),ty_lift) + val result = (Drule.instantiate_normalize (tsubst, [(var,subst)]) n) + |$> (ALLGOALS(fn n => TRY(hyp_subst_tac_thin true ctxt n))) + |$> prune_params_tac ctxt +in Thm.adjust_maxidx_thm 0 result + (* establishes POST and makes this fact therefore invariant. *) + (* More safe, but also more expensive variant: standard result. *) +end + +(* This is the core procedure of the data_separation_lemma generation, and, + in a way, the core of the test case generator itself. + + It proceeds as follows: in a given theorem thm (usually an exhaustion lemma + itself), the premisses (having the form !!x1..xn. A x1 .. xn ==> B in + general) were selected and "refined" in a "sweep". This means that in a + sweep, for each premisse an xi is selected (which must have a data-type, + say ti) and the exhaustion theorem of ti is forward resolved into this + premisse; cleaning it up (by prems_hyp_subst_tac) also erases xi as + parameter of this premisse. + In a next sweep, another xj may be selected and so forth, up to the moment + were no parameters remained in the premisse that existed before the initial + sweep (this the list parameters is the "cand" (candidates) x1 .. xj for + all premisses which precomputed.) + + Then, the candidate list may be recomputed and the process repeated up to + n times. Since any group of sweeps erases one generation of parameters, + the parameter n corresponds to "depth+1" of the data term occuring in the + premisse increases, i.e. from: + + [| ?y = Leaf ==> ?P; + !!a tree1 tree2. ?y = Node a tree1 tree2 ==> ?P |] + ==> ?P + + we get: + + [| ?y = Leaf ==> ?P; !!a. ?y = Node a Leaf Leaf ==> ?P; + !!a aa tree1 tree2a. ?y = Node a Leaf (Node aa tree1 tree2a) ==> ?P; + !!a aa tree1a tree2a. 
?y = Node a (Node aa tree1a tree2a) Leaf ==> ?P; + !!a aa tree1a tree2a ab tree1 tree2b. + ?y = Node a (Node aa tree1a tree2a) (Node ab tree1 tree2b) ==> ?P + |] ==> ?P + + This picture is slightly complicated by the fact that Isabelle does not + change the parameters, but may reintroduce parameter erased by a previous + sweep during the forward resolution step. Thus, the procedure may loop. + + Instead of not erasing the parameters during lemma-refinement (this + leads to very large liftings of the meta-variables over these + parameter lists and is thus expensive), we chose to use + a particular test for "maximal" parameters in order to avoid this problem. + + *) +fun nrefine_data_separation_lemma changed ctxt 0 thm = thm + | nrefine_data_separation_lemma changed ctxt n thm = + let + val thy = Proof_Context.theory_of ctxt + val isaDataType = fn(_,ty)=> isDataType thy ty + val thm_params = maps Logic.strip_params (Thm.prems_of thm) + val cands = distinct (op=) (filter isaDataType thm_params) + fun rds b(prno,t) = refine_data_separation_lemma changed ctxt b prno cands t + handle _ => t (* FIXME avoid handle _ *) + val () = (changed:=false) + fun sweep b th = List.foldr (rds b) th (1 upto (length(Thm.prems_of th))) + val thm1 = sweep false thm (* debalancing phase *) + fun repeat_till_stable thm = let val () = (changed:=false) + val thm' = sweep true thm + in if !changed + then repeat_till_stable thm' + else thm' + end + in nrefine_data_separation_lemma changed ctxt + (n-1) + (if !changed + then thm1 (* debalancing phase had no effect *) + else repeat_till_stable thm1) (* balancing phase *) + end; + +(* NOTE: Currently, this core routine is restricted to + "real free" variables; parameters will not be + considered as a simple means to avoid looping ... 
*) +fun data_separation_tac ctxt var no state = + let + val thy = Proof_Context.theory_of ctxt + val depth = Config.get ctxt TestEnv.depth + val (na,Type(tn,_)) = var + fun ds_thm tn = nrefine_data_separation_lemma (Unsynchronized.ref false) + ctxt depth + (get_case_split_of_dt thy tn) + + val data_sep_thm = Drule.export_without_context (ds_thm tn) + val brchs = length(Thm.prems_of data_sep_thm) + in ((res_inst_tac ctxt [((("y",0),Position.none),na)] [] data_sep_thm no) + THEN + (res_inst_tac ctxt [((("x",0),Position.none),na), + ((("k",0),Position.none),Int.toString depth)] + [] + @{thm THYP_E_reg} + (no + brchs - 1)) + THEN (thin_tac ctxt (na^"= _") [] (no+brchs))) state + end; + +(* Copied from Isabelle2011 old_term.ML sources, for providing old variable order *) +(*Accumulates the Frees in the term, suppressing duplicates.*) +fun add_term_frees (t, frees: term list) = case t of + Free _ => Ord_List.insert Term_Ord.term_ord t frees + | Abs (_,_,body) => add_term_frees(body,frees) + | f$t => add_term_frees (f, add_term_frees(t, frees)) + | _ => frees; + +fun term_frees t = add_term_frees(t,[]); + +(* Included for complete backward compatibility *) +fun data_separation_old_tac ctxt depth no state = + let + val thy = Proof_Context.theory_of ctxt + val prem = Logic.nth_prem(no, Thm.prop_of state) + val params = Logic.strip_params(prem) + val term = HOLogic.dest_Trueprop(Logic.strip_assums_concl prem) + val frees = map dest_Free (term_frees term) +(* val cands = filter (fn (_,ty) => isDataType thy ty) (params@frees) *) + val cands = filter (fn (_,ty) => isDataType thy ty) (frees) + fun ds_thm tn = + nrefine_data_separation_lemma (Unsynchronized.ref false) ctxt depth (get_case_split_of_dt thy tn) + in (case cands of + [] => no_tac + | (na,Type(tn,_))::_ => + let val data_sep_thm = Drule.export_without_context (ds_thm tn) + val brchs = length(Thm.prems_of data_sep_thm) + in (res_inst_tac ctxt [((("y",0),Position.none),na)] [] data_sep_thm no) + THEN (res_inst_tac ctxt 
[((("x",0),Position.none),na), + ((("k",0),Position.none),Int.toString depth)] [] + @{thm THYP_E_reg} + (no + brchs - 1)) + THEN (thin_tac ctxt (na^"= _") [] (no+brchs)) + end) + (state) +end + + +(* "Small", non-recursive Version of gen_test_cases: + (TODO for future releases: Integration into the testgen-Kernel). + searches parameter in goal no with typename tname and performs case-split on this + parameter. Note that the type-name must be the full name. + PRE: current goal i must be in {1 .. nprems_of st}. + PRE: ctxt must be the ProofContext of st. *) + (* op member : ('a * 'b -> bool) -> 'b list -> 'a -> bool *) + + fun case_tac_typ ctxt tnames no thm = + let + val prem = Logic.nth_prem(no, Thm.prop_of thm) + val params = Logic.strip_params(prem) + val max = Thm.nprems_of thm + val params = params (* @ free variables TODO !!! *) + fun ty_search (_,Type(s,_)) = member (op =) (tnames) (s) + |ty_search (_,_) = false + fun REP tac st = let val max' = Thm.nprems_of st + in (max'-max+no DOWNTO no) tac st end + in case List.find ty_search params of + SOME (X,_) => EVERY[Induct_Tacs.case_tac ctxt X [] NONE no , + REP (hyp_subst_tac_thin true ctxt), + unfold_tac ctxt [@{thm triv_forall_equality}] ] thm + (* probably not the most efficient - try simp only *) + | NONE => no_tac thm + end; + + + +(* ************************************************************************* *) +(* normal form computation ... 
*) +(* *************************** ********************************************** *) + +fun to_TNF ctxt prog no state = + let + val thy = Proof_Context.theory_of ctxt; + val term = Thm.prop_of state + val prems = Logic.prems_of_goal term no + val concl = Logic.concl_of_goal term no + fun term_string_of_goal_no () = (Pretty.unformatted_string_of o (Syntax.pretty_term ctxt)) + (Logic.list_implies(prems,concl)) + val prems' = filter (exists_prog prog) prems; + val concl' = filter (exists_prog prog) [concl]; + fun subst t = (Thm.cterm_of ctxt (Var(("P",0),HOLogic.boolT)), + Thm.cterm_of ctxt t); + (* swap_tac : tactic * term -> tactic; takes one term containing + "prog" and swap it into the conclusion as disjoint *) + val swap_accumulate = @{lemma "[| P ; ~P | Q |] ==> Q" by auto} + fun swap_tac (tm,tc) = tc THEN + (Term_Tactics.eres_terminst_tac + ctxt + [] + [subst (HOLogic.dest_Trueprop tm)] + swap_accumulate + no) + (* rev_tac swaps term "a" into conclusion *) + fun rev_tac thy a = Term_Tactics.eres_terminst_tac + ctxt + [] + [subst (HOLogic.dest_Trueprop a)] + @{thm rev_notE} + no + in ((case (prems',concl') of + ([],[]) => error ("Subgoal " ^Int.toString(no) + ^" can not be converted into TNF: \n \n" ^ + term_string_of_goal_no()) + (* case that conclusion does not contain "prog", but several + assumptions do: pick one of them, revert first *) + |(a::R,[]) =>(rev_tac thy a + THEN + (List.foldl swap_tac all_tac R)) + |(R,_) => (List.foldl swap_tac all_tac R) + ) + THEN + (simp_tac (put_simpset HOL_ss ctxt) no)) state + end; + + + +fun normalize_TNF ctxt prog top = + List.foldl (fn (arg,t) => t THEN to_TNF ctxt prog arg) + all_tac + ((length (prems_of top)) downto 1) + top + + + +(* TODO: adb *) +fun minimize_TNF ctxt = distinct_subgoals_tac +(* This should do: + - sorting assumptions in all subgoals + - sorting parameters of subgoals + - sortieren der clauses - + weniger asms zuerst (defer_tac) + - eliminieren von assumptions subsumed in other clauses + [| P;R |] 
==> A; + [| P;Q;R |] ==> A + ------------------ + [| P;R |] ==> A; + [| P;R |] ==> A + durch thin_tac + - eliminieren von subsumed or's, + a particular weakness of our procedure so far. + [| P;R |] ==> A; + [| ~P;Q |] ==> B + [| R; Q|] ==> A|B; + ------------------ + [| P;R |] ==> A; + [| ~P;Q |] ==> B + - finally distinct_subgoals_tac +*) + + +(* orig version *) +(* +fun gen_test_case_tac thy clasimp depth steps prog = + EVERY[ALLGOALS (asm_full_simp_tac (snd clasimp)), + safe_tac ((global_claset_of thy) addss (global_simpset_of thy)), + normalize_TNF thy prog, + minimize_TNF, + ALLGOALS (fn n => TRY(uniformityI_tac prog n))]; +*) + + + +(* ************************************************************************* *) +(* test case generation ... *) +(* ************************************************************************* *) + +fun SORT ctxt rules n thm = let + val matches = maps (fn rule => rule ctxt n thm) rules + val sorted = map (fn (i,tac) => tac) + (sort (fn (x,y) => rev_order (int_ord (fst x, fst y))) matches) +in + (EVERY sorted) thm +end + + + +fun EXEC ctxt tac str = EVERY[if Config.get ctxt profiling + then Clocks.start_clock_tac str + else all_tac, + if Config.get ctxt trace + then print_tac ctxt ("\n:"^str^"\n") + else all_tac, + tac, + if Config.get ctxt profiling + then Clocks.stop_clock_tac str + else all_tac] + +fun EXEC' ctxt tac str n = EXEC ctxt (tac n) str + +fun PRINT_THM ctxt no = + (fn state => (if Config.get ctxt trace + then let val prem = Logic.nth_prem(no, prop_of state) + val str = (Pretty.string_of o (Syntax.pretty_term ctxt)) prem + in print_tac ctxt ("\n goal "^Int.toString(no)^"::"^str^"\n") end + else all_tac) state) + +fun finalize_tac ctxt prog = + EXEC ctxt (ALLCASES(TRY'(uniformityI_tac ctxt prog))) "main uniformity NF" + +fun gen_test_case_tac ctxt prog = let + val steps = Config.get ctxt TestEnv.steps + val testenv = TestEnv.get_testenv ctxt + val rules = TestEnv.get_test_derivation_rules testenv + + val 
begin_profiling_tac = if Config.get ctxt profiling + then Clocks.start_clock_tac "unnamed_test_thm" THEN Clocks.start_clock_tac "gen_test_cases" + else all_tac + + val end_profiling_tac = if Config.get ctxt profiling + then Clocks.stop_clock_tac "gen_test_cases" THEN Clocks.stop_clock_tac "unnamed_test_thm" + else all_tac + + fun pre_normalize_TNF_tac ctxt = TestEnv.get_pre_normalize_TNF_tac (TestEnv.get_testenv ctxt) ctxt + + val sort_tac = SELECT_GOAL (SORT ctxt rules 1) +in + EVERY[begin_profiling_tac, + EXEC ctxt (ALLCASES((asm_full_simp_tac ctxt))) "pre-simplification", + EXEC ctxt (REPEAT_DETERM_N steps (ALLCASES sort_tac)) "main completed", + EXEC ctxt (pre_normalize_TNF_tac ctxt) "pre_norm", + EXEC ctxt (normalize_TNF ctxt prog) "TNF", + if Config.get ctxt no_finalize + then all_tac + else finalize_tac ctxt prog, + end_profiling_tac] +end + +fun data_separation_rule ctxt no state = +let + val thy = Proof_Context.theory_of ctxt + val prem = Logic.nth_prem(no, prop_of state) + (*val params = Logic.strip_params(prem) *) + val term = HOLogic.dest_Trueprop(Logic.strip_assums_concl prem) + val frees = Term.add_frees term [] + (*val cands = filter (fn (_,ty) => isDataType thy ty) (params@frees) + : LOOPS AT PRESENT SINCE gen_test_cases active on frees *) + val cands = filter (fn (_,ty) => isDataType thy ty) (frees) + fun tac cand = TRY (CHANGED_PROP (ALLCASES (data_separation_tac ctxt cand)) + THEN (ALLCASES (TRY' (hyp_subst_tac_thin true ctxt)))) + THEN (if Config.get ctxt trace + then print_tac ctxt "After data separation" + else all_tac) +in + map (fn cand => (100, tac cand)) cands +end + +fun to_HCNF_rule ctxt no state = + [(50, EXEC ctxt (TestEnv.get_pre_safe_tac (TestEnv.get_testenv ctxt) ctxt) "Simp" + THEN EXEC ctxt (TRY (safe_tac ctxt)) "HCN")] + +fun minimize_rule ctxt no state = + [(10, EXEC ctxt (TestEnv.get_pre_minimize_TNF_tac (TestEnv.get_testenv ctxt) ctxt) "pre_minimize" + THEN EXEC ctxt (minimize_TNF ctxt) "MinimTNF" + THEN 
(ALLCASES(asm_full_simp_tac ctxt)) + THEN (TRY (safe_tac ctxt)))] + +(* Included for complete backward compatibility *) +fun gen_test_case_old_tac ctxt prog = + let val testenv = TestEnv.get_testenv ctxt + val depth = Config.get ctxt TestEnv.depth + val steps = Config.get ctxt TestEnv.steps + + val prep_HCNF = TestEnv.get_pre_safe_tac testenv + val to_HCNF = TRY (safe_tac ctxt) + val pre_normalize_TNF_tac = TestEnv.get_pre_normalize_TNF_tac testenv + val pre_minimize_TNF_tac = TestEnv.get_pre_minimize_TNF_tac testenv + fun trace_tac b str = (if Config.get ctxt trace + then print_tac ctxt ( "\n"^b^":"^str^"\n") + else all_tac) + fun exec phase b str = EVERY[if Config.get ctxt profiling + then Clocks.start_clock_tac str + else all_tac, + phase, trace_tac b str, + if Config.get ctxt profiling + then Clocks.stop_clock_tac str + else all_tac] + + fun standard k b = + EVERY[if Config.get ctxt trace then print_tac ctxt "Enter" else all_tac, + exec (prep_HCNF ctxt) b "Simp", + exec (to_HCNF) b "HCN", + exec (pre_normalize_TNF_tac ctxt) b "pre_norm", + exec (normalize_TNF ctxt prog) b "TNF", + exec (pre_minimize_TNF_tac ctxt) b "pre_minimize", + exec (minimize_TNF ctxt) b "MinimTNF"] + val dst = data_separation_old_tac ctxt depth + + fun pairself f (x, y) = (f x, f y); + + (*True if the two theorems have the same prop field, ignoring hyps, der, etc.*) + val eq_thm_prop = op aconv o pairself Thm.full_prop_of; + + fun main k st =(if k < steps (* not yet all frees per goal + less steps exhausted*) + then (ALLCASES(fn n => TRY(dst n) )) THEN + (* try exhaustion per goal *) + (COND(fn st' => not (eq_thm_prop(st, st'))) + (* something 's changed *) + ( (ALLCASES (fn n => TRY((hyp_subst_tac_thin true ctxt) n) ) ) (* REALLY ? 
*) + THEN (standard k "1") + THEN (main (k+1)) + ) + (standard k "2")) + else standard k "0" (* run out of steps *)) (st) + val begin_profiling_tac = if Config.get ctxt profiling then + Clocks.start_clock_tac "unnamed_test_thm" THEN Clocks.start_clock_tac "gen_test_cases" + else + all_tac + + val end_profiling_tac = if Config.get ctxt profiling then + Clocks.stop_clock_tac "gen_test_cases" THEN Clocks.stop_clock_tac "unnamed_test_thm" + else + all_tac + in EVERY[begin_profiling_tac, + exec (ALLCASES((asm_full_simp_tac ctxt))) "" "pre-simplification", + exec (main 0) "" "main completed", + exec (ALLCASES(TRY'(uniformityI_tac ctxt prog))) "" "main uniformity NF", + end_profiling_tac] + end; + + +(* ************************************************************************* *) +(* *) +(* testdata generation ... *) +(* *) +(* ************************************************************************* *) + + +fun abs_data_tac ctxt atdata = + let fun single_abs_data_tac thm = + Subgoal.FOCUS_PARAMS(fn {context, ...} => + SOLVE (Method.insert_tac context [thm] 1 THEN auto_tac context)) ctxt + in + FIRST' (map single_abs_data_tac atdata) + end + + +fun test_solve_tac ctxt atdata = + let val thy = Proof_Context.theory_of ctxt + val remove_po = EqSubst.eqsubst_tac ctxt [0] [@{thm PO_def}] + val total_iterations = Config.get ctxt TestEnv.iterations + val max_simple_iterations = 50 + val simple_iterations = Int.min(total_iterations, max_simple_iterations) + val further_iterations = Int.max(total_iterations - max_simple_iterations,0) + in + remove_po + THEN' PRINT_THM ctxt + THEN' (FIRST'[EXEC' ctxt (abs_data_tac ctxt atdata) "abs_data", + EXEC' ctxt (RandomBackend.random_inst_tac ctxt simple_iterations) "random_inst", + EXEC' ctxt (QuickCheckBackend.quickcheck_tac ctxt further_iterations) "quickcheck", + EXEC' ctxt (SMTBackend.testgen_smt_tac ctxt) "smt"]) + end + + + +fun is_solved n thm = + let fun is_unsolved_po i = not (null (inter (op =) (BackendUtils.premvars n thm) 
(BackendUtils.premvars i thm))) + andalso i <> n + in not (exists is_unsolved_po (1 upto (nprems_of thm))) + end + +(* Instantiate the remaining variables in test cases +with random terms *) + +fun finalize_test_tac ctxt n thm = let +in + (COND (is_solved n) (RandomBackend.single_rand_inst_tac ctxt (BackendUtils.premvars n thm)) all_tac) thm +end + + + +fun solve_all ctxt atdata state = + let (* adding a let expression.. in order to check if all free vars were instatiated by smt.. + if true then error code else solve_all*) + val PARALLEL_TRYSOLVE = PARALLEL_TRYSOLVE_POs ctxt (fn ctxt => test_solve_tac ctxt atdata 1) state + + val term = PARALLEL_TRYSOLVE |> Seq.list_of |> + (List.map prop_of) |> HOLogic.mk_list @{typ "term"} + val use_smt = Config.get ctxt TestEnv.use_smt; +in + if (use_smt andalso (Term.exists_subterm (Term.is_Var) term)) + then error("One or more free variables were not instantiated by the solver!") + + else state |$> PARALLEL_TRYSOLVE_POs ctxt (fn ctxt => test_solve_tac ctxt atdata 1) + |$> ALLCASES (finalize_test_tac ctxt) (* cannot parallelize here *) + +end + + + (* *********************************************************************** *) + (* Normalizer *) + (* *********************************************************************** *) + (* motivation: type instantiation may make + constraints of test-cases unsolvable. + Unsolvable cases should be removed before random-solving. + Since constraints with Mvars were not considered + by simptac and arith_tac, they were substituted + against arbitrary free vars aforehand. 
+ +fun norm_tac ctxt n thm = + let val thy = Proof_Context.theory_of ctxt + val m = Thm.nprems_of thm + val prem = Logic.nth_prem(n, prop_of thm) + val k = length (Logic.strip_imp_prems(prem)) + val goal = Thm.trivial(cterm_of thy prem) + val tvars = OldTerm.term_vars prem + val insts = map(fn(x as Var((s,a),t))=> + (cterm_of thy x, + cterm_of thy (Free("XXX"^s^(Int.toString a),t)))) + (tvars) + val cleanup = List.foldr (fn (_,tac) => (atac (n)) THEN tac) + all_tac (1 upto k) + in ( + (rtac (instantiate ([],insts) goal) n) + THEN + (full_simp_tac (simpset_of ctxt) n) (* may erase goal n. + --> test case unsolvable. *) + THEN + (fn thm' => if (nprems_of thm' < m+k) (* goal erased *) + then cleanup thm' + else no_tac thm )(* report failure *) + )(thm) + end *) + +fun gen_test_data name context = + let val ctxt = Context.proof_of context + val te = TestEnv.get_testenv ctxt + val bound = Config.get ctxt TestEnv.type_range_bound + val candTs= #type_candicates(TestEnv.rep_testenv te) + val type_grounds = groundT_thm context bound candTs + val testthm = (the(Symtab.lookup(TestEnv.get_test_thm_tab te)name) + handle Option.Option => error("No test theorem: "^name^" available!")) + val atdata = (the(Symtab.lookup(TestEnv.get_absdata_tab te)name) + handle Option.Option => []) + val jdmts = map (solve_all ctxt atdata) (type_grounds testthm) + val te1 = TestEnv.test_thm_inst_tab_update (Symtab.update(name,jdmts)(TestEnv.get_test_thm_inst_tab te)) te + val prems = List.concat (map Thm.prems_of jdmts) + val data = map (Thm.cterm_of ctxt) (filter is_test_data prems) + val te2 = TestEnv.set_test_data_tab (name,data) te1 + val hyps = map (Thm.cterm_of ctxt) (filter is_thyp prems) + val te3 = TestEnv.set_test_thyps_tab (name,hyps) te2 + val pos = map (Thm.cterm_of ctxt) (filter is_po prems) + val te4 = TestEnv.set_unsolved_PO_tab (name,pos) te3 + + val t = LogThy.get_td_delta () + val _ = writeln (String.concat ["Test theorem (gen_test_data) '",name,"': ", + Int.toString (List.length 
data)," test cases in ", + Time.toString t, " seconds"]) + val _ = if not (null pos) + then writeln (String.concat ["Warning: There were ", Int.toString (List.length pos) , " unsolved POs."]) + else () + val _ = LogThy.append (String.concat [ + Context.theory_name (Proof_Context.theory_of ctxt), ", " ,name, ", ", + "test data, " ,Int.toString (List.length data), + ", " ,Time.toString t,"\n"]) + + in TestEnv.map_data (K te4) context end; + +fun select_goals pred thy name = let + val te = TestEnv.get_testenv_global thy +in + maps (convert_goals_to_metahyps pred) + (the(Symtab.lookup(TestEnv.get_test_thm_inst_tab te)name) + handle Option.Option => error("No data statement: "^name^" available!")) +end + +val get_test_data = select_goals is_test_data + +val get_test_hyps = select_goals is_thyp + +val get_pos = select_goals is_po + +fun get_test_thm thy name = let + val te = TestEnv.get_testenv_global thy +in + (the(Symtab.lookup(TestEnv.get_test_thm_inst_tab te)name) + handle Option.Option => error("No data statement: "^name^" available!")) +end + +fun discharge_POs name context = + let fun use_test_instances_tac name ctxt = + let val te = TestEnv.get_testenv ctxt + val atdata = (the(Symtab.lookup(TestEnv.get_absdata_tab te)name) + handle Option.Option => []) + in (TRY' ( EqSubst.eqsubst_tac ctxt [0] [@{thm PO_def}] + THEN' abs_data_tac ctxt atdata)) + end + in ALLGOALS(ONLY_POS (use_test_instances_tac name context)) end + +(* *********************************************************************** *) +(* *) +(* Initial test environment *) +(* *) +(* *********************************************************************** *) + +val _ = Context.>> (TestEnv.map_data + ((TestEnv.pre_safe_tac_update (fn ctxt => ALLCASES (asm_full_simp_tac ctxt))) + #> (TestEnv.add_test_derivation_rule data_separation_rule) + #> (TestEnv.add_test_derivation_rule to_HCNF_rule) + #> (TestEnv.add_test_derivation_rule minimize_rule))); + +val mt_testenv = TestEnv.get_testenv 
(Context.the_local_context ()); + +val _ = + Outer_Syntax.command @{command_keyword "print_testenv"} "print test environment" + (Parse.name >> (fn name => Toplevel.keep ((TestEnv.print_testenv name) o Toplevel.context_of))); + +val options = Scan.optional (Parse.$$$ "(" |-- Parse.!!! (Scan.option Parse.nat--| Parse.$$$ ")")) (NONE); + +val _ = let fun print name opts ctxt = writeln (TestEnv.print_test_data ctxt name opts) in + Outer_Syntax.command @{command_keyword "print_conc_tests"} "print concrete tests" + (options -- Parse.name >> (fn (opts, name) => + Toplevel.keep ((print name opts) o Toplevel.context_of))) + end; + +val _ = let fun print name opts ctxt = writeln (TestEnv.print_test_case ctxt name opts) in + Outer_Syntax.command @{command_keyword "print_abs_tests"} "print abstract tests" + (options -- Parse.name >> (fn (opts, name) => + Toplevel.keep ((print name opts) o Toplevel.context_of))) + end; + +val _ = let fun print name opts ctxt = writeln (TestEnv.print_test_hyps ctxt name opts) in + Outer_Syntax.command @{command_keyword "print_thyps"} "print test hypothesis" + (options -- Parse.name >> (fn (opts, name) => + Toplevel.keep ((print name opts) o Toplevel.context_of))) + end; + +val _ = let fun print name opts ctxt = writeln (TestEnv.print_unsolved_pos ctxt name opts) in + Outer_Syntax.command @{command_keyword "print_upos"} "print unsolved proof obligations" + (options -- Parse.name >> (fn (opts, name) => + Toplevel.keep ((print name opts) o Toplevel.context_of))) + end; + + +(* ************************************************************************* *) +(* micellaneous functions ... 
*) +(* ************************************************************************* *) + + +fun animate thy name = (* HACK - very simplistic implementation *) + let val ctxt = Proof_Context.init_global thy |> Config.put simp_trace true + fun animate_data_statement thm = + (Output.writeln ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"; + Pretty.writeln (Thm.pretty_thm ctxt thm); + Output.writeln "============================================"; + (* asm_full_simplify (global_simpset_of(theory_of_thm(thm))) thm; *) + asm_full_simplify ctxt thm; + Output.writeln "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<"); + in + List.app animate_data_statement (get_test_data thy name) + end; + +(* module setup *) + +val setup = + trace_setup #> + no_uniformity_setup #> + profiling_setup #> + no_finalize_setup; + +end (*struct *); + +*} + +setup {* TestEnv.setup #> TestGen.setup *} +text{* A key concept of HOL/TestGen are \emph{test suites}. These can be seen as a kind of +container data-structure that relate to a test-specification a number of data, namely the +test theorem (the TNF), abstract and concrete test data, and specific configuration data. +Test-suites were created at the end of a test-specification statement and initial test-refinement. +Later on they were enriched and finally used to generate test drivers and test executions. + +So, a test suite with name "X" will be finally run at a test-execution executed outside HOL/TestGen +started by the shell-command "run_X". 
+*} + +ML{* + +fun mk_test_suite name = Toplevel.end_proof (fn _ => fn state => + let + val ctxt = Proof.context_of state; + val _ = if Config.get ctxt TestGen.profiling + then (Clocks.start_clock "unnamed_test_thm"; + Clocks.rename_clock "unnamed_test_thm" name; + Clocks.stop_clock name) + else () + + val _ = Proof.assert_backward state; + val {context = goal_ctxt, goal, ...} = Proof.goal state + fun up_thy thy = + let + fun add_to_dm name thm thy = + let + val thy = TestEnv.map_data_global (TestEnv.add_test_case (name,thm)) thy + val t = LogThy.get_tc_delta () + val num_of_tc = List.length + (List.filter (fn tc => TestGen.is_neither_thyp_nor_PO tc) (Thm.prems_of thm)) + val _ = writeln (String.concat ["Test theorem (gen_test_cases) '", + name,"': ", Int.toString num_of_tc, + " test cases in ",Time.toString t, + " seconds"]) + + val _ = LogThy.append (String.concat + [Context.theory_name thy, ", ", + name, ", " ,"test case, " ,Int.toString num_of_tc, + ", ", Time.toString t,"\n"]) + in + thy + end + in + thy |> Sign.add_path (space_implode "_" [name]) + |> (Global_Theory.add_thms [((@{binding test_thm}, goal), [])]) + |> snd + |> Sign.parent_path + |> add_to_dm (space_implode "_" [name]) goal + |> Clocks.flush_clocks + end + + in goal_ctxt |> Local_Theory.background_theory up_thy end); + + +val _ = Outer_Syntax.command + @{command_keyword mk_test_suite} + "store test state (theorem)" + (Parse.name >> mk_test_suite); + + + +(**********************) +fun gen_test_dataT name thy = + let val _ = LogThy.start_td_timer () + val _ = if Config.get_global thy TestGen.profiling + then (Clocks.init_clocks thy;Clocks.start_clock name;Clocks.start_clock "gen_test_data") + else () + + val thy = Context.theory_map (TestGen.gen_test_data name) thy (* call of the core-routine*) + + val t = LogThy.get_td_delta () + val thy = if Config.get_global thy TestGen.profiling + then (Clocks.stop_clock "gen_test_data"; Clocks.stop_clock name; Clocks.flush_clocks thy) + else thy + + val thy = 
Sign.add_path (space_implode "_" [name]) thy; + val thm = TestGen.get_test_thm thy name + val thy = snd(Global_Theory.add_thmss [((@{binding test_thm_inst},thm),[])] (thy)) + + val thy = Sign.parent_path thy; + in + thy + end + +val _ = Outer_Syntax.command + (*@{command_spec "gen_test_data"}*) + @{command_keyword gen_test_data} + "generate test data" + (Parse.name >> (Toplevel.theory o gen_test_dataT)); + +(**********************) + +val general_statement = + Parse_Spec.statement >> (fn x => ([], Element.Shows x)) || + Parse_Spec.long_statement; + +val _ = Outer_Syntax.local_theory_to_proof + @{command_keyword test_spec} + "define test specification" + (Scan.optional (Parse_Spec.opt_thm_name ":" --| + Scan.ahead (Parse_Spec.includes >> K "" || Parse_Spec.long_statement_keyword)) + Binding.empty_atts -- + Scan.optional Parse_Spec.includes [] -- + general_statement >> + (fn ((a, includes), (elems, concl)) => fn lthy => + let val _ = if Config.get lthy TestGen.profiling + then Clocks.init_clocks (Proof_Context.theory_of lthy) + else () + in + Specification.schematic_theorem_cmd true + "test_spec" NONE (K I) a includes elems concl false lthy + end)); + +*} +ML{* +fun set_wrapper str H src = + let val txt = Input.text_of src + val range = Input.range_of src + val txt' = str ^ "(" ^ txt ^ ")" + in H (Input.source true txt' range) end; + +(* BU : TO BE REVISED BY BU TO 2016. WAS SORT OF A HACK ANYWAY. 
+ Chantal: did revise it; please double check*) +val _ = Outer_Syntax.command + (*@{command_spec "set_pre_safe_tac"}*) + @{command_keyword set_pre_safe_tac} + "configure gen_test_gen: set pre_safe_tac" + (Parse.ML_source >> (Toplevel.theory o + (set_wrapper + "(TestEnv.map_data_global o TestEnv.pre_safe_tac_update)" + Isar_Cmd.setup))); + + +val _ = Outer_Syntax.command + (*@{command_spec "set_pre_normalize_TNF_tac"}*) + @{command_keyword set_pre_normalize_TNF_tac} + "configure gen_test_gen: pre_normalize_TNF_tac" + (Parse.ML_source >> (Toplevel.theory o + (set_wrapper + "(TestEnv.map_data_global o TestEnv.pre_normalize_TNF_tac_update)" + Isar_Cmd.setup))) + +val _ = Outer_Syntax.command + (*@{command_spec "set_pre_minimize_TNF_tac"}*) + @{command_keyword set_pre_minimize_TNF_tac} + "configure gen_test_gen: set pre_minimize_TNF_tac" + (Parse.ML_source >> (Toplevel.theory o + (set_wrapper + "(TestEnv.map_data_global o TestEnv.pre_minimize_TNF_tac_update)" + Isar_Cmd.setup))) + + +fun pretty_cterm_style_generic f ctxt (style, (name,pos:Position.T)) = + let val termS = case (Symtab.lookup (f (TestEnv.get_testenv ctxt)) name) of + SOME k => List.map (style o Thm.term_of) k (* (nth(k,0)) *) + | NONE => error "No such test suite" + in Pretty.big_list "\\\\" (List.map (Thy_Output.pretty_term ctxt) ( termS)) end + +val pretty_thyp_style = pretty_cterm_style_generic TestEnv.get_test_thyps_tab +val pretty_conctest_style = pretty_cterm_style_generic TestEnv.get_test_data_tab +val pretty_uPO_style = pretty_cterm_style_generic TestEnv.get_unsolved_PO_tab + +(* code copy from Thy_Output (2016) since this is not exported there ... 
*) + +fun basic_entities name scan pretty = + Thy_Output.antiquotation name scan (fn {source, context = ctxt, ...} => + Thy_Output.output ctxt o Thy_Output.maybe_pretty_source pretty ctxt source); + +fun basic_entity name scan = basic_entities name (scan >> single); + +(* end copy *) + +val _ = Theory.setup +( (basic_entity @{binding thyps} (Term_Style.parse -- (Scan.lift (Parse.position Args.name))) + pretty_thyp_style) #> + (basic_entity @{binding uPOs} (Term_Style.parse -- (Scan.lift (Parse.position Args.name))) + pretty_uPO_style) #> + (basic_entity @{binding conctests} (Term_Style.parse -- (Scan.lift (Parse.position Args.name))) + pretty_conctest_style)) + ; +*} + + +attribute_setup test_inst = + {* Scan.lift Args.name >> (fn name => + Thm.declaration_attribute (fn thm => + TestEnv.map_data (TestEnv.add_abstest_data (name, thm)))) *} + "declare theorems for test case generation" + +attribute_setup testgen_type_candidates = + {* Scan.repeat1 Args.typ >> + (fn Ts => + Thm.declaration_attribute (fn _ => + TestEnv.map_data (TestEnv.type_candidates_update Ts))) *} + "declare testgen type candidates" + + +method_setup discharge_POs = + {* Scan.lift (Scan.option(Parse.string) >> + (fn params => fn ctxt => + let val _ = LogThy.start (); + val name = (case params of + NONE => "" + | SOME (s) => s); + in Method.SIMPLE_METHOD (CHANGED_PROP + (TestGen.discharge_POs name ctxt)) + end)) *} + "eliminate POs by test instances" + +method_setup gen_test_cases = + {* Scan.lift (Scan.option (Parse.nat -- Parse.nat) + -- Scan.repeat1 (Scan.unless (Scan.first clasimp_modifiers) Args.name)) + --| Method.sections clasimp_modifiers >> + (fn (params, prog) => fn ctxt => + let val _ = LogThy.start (); + val ctxt' = (case params of + NONE => ctxt + | SOME (depth, steps) => ctxt + |> Config.put TestEnv.depth depth + |> Config.put TestEnv.steps steps); + in Method.SIMPLE_METHOD (CHANGED_PROP + (TestGen.gen_test_case_tac ctxt' prog)) + end) *} + "generate symbolic test cases" + 
+method_setup gen_test_cases_old = + {* Scan.lift (Scan.option (Parse.nat -- Parse.nat) -- + Scan.repeat1 (Scan.unless (Scan.first clasimp_modifiers) Args.name)) --| + Method.sections clasimp_modifiers >> + (fn (params, prog) => fn ctxt => + let val _ = LogThy.start (); + val ctxt' = (case params of + NONE => ctxt + | SOME (depth, steps) => ctxt + |> Config.put TestEnv.depth depth + |> Config.put TestEnv.steps steps); + in Method.SIMPLE_METHOD (CHANGED_PROP + (TestGen.gen_test_case_old_tac ctxt' prog)) + end) + *} + "old variant of gen_test_cases" + +ML{* fn ctxt => HEADGOAL o (K(TestGen.thyp_ify ctxt ))*} +ML{* fn ctxt => METHOD (HEADGOAL o (K(TestGen.thyp_ify ctxt )))*} + + + +ML{* +val _ = + Theory.setup + (Method.setup @{binding thyp_ify} + (Attrib.thms >> (fn _ => fn ctxt => METHOD (HEADGOAL o (K(TestGen.thyp_ify ctxt ))))) + "making testhyps explicit" #> + Method.setup @{binding mp_ify} + (Attrib.thms >> (fn _ => fn ctxt => METHOD (HEADGOAL o (K(TestGen.mp_fy ctxt )) ))) + "fast destruction matching" #> + Method.setup @{binding all_ify} + (Attrib.thms >> (fn _ => fn ctxt => METHOD (HEADGOAL o (K(TestGen.all_ify ctxt [])) ))) + "replace all free variables by quantifiers") +*} + +end diff --git a/src/TestLib.thy b/src/main/TestLib.thy similarity index 100% rename from src/TestLib.thy rename to src/main/TestLib.thy diff --git a/src/TestRefinements.thy b/src/main/TestRefinements.thy similarity index 100% rename from src/TestRefinements.thy rename to src/main/TestRefinements.thy diff --git a/src/TestScript.thy b/src/main/TestScript.thy similarity index 100% rename from src/TestScript.thy rename to src/main/TestScript.thy diff --git a/src/TestSequence.thy b/src/main/TestSequence.thy similarity index 100% rename from src/TestSequence.thy rename to src/main/TestSequence.thy diff --git a/src/Testing.thy b/src/main/Testing.thy similarity index 100% rename from src/Testing.thy rename to src/main/Testing.thy diff --git a/src/clocks.ML b/src/main/clocks.ML similarity 
index 100% rename from src/clocks.ML rename to src/main/clocks.ML diff --git a/src/clocks.thy b/src/main/clocks.thy similarity index 100% rename from src/clocks.thy rename to src/main/clocks.thy diff --git a/src/codegen_C_pthread/Code_C_pthread.thy b/src/main/codegen_C_pthread/Code_C_pthread.thy similarity index 100% rename from src/codegen_C_pthread/Code_C_pthread.thy rename to src/main/codegen_C_pthread/Code_C_pthread.thy diff --git a/src/codegen_fsharp/Code_Char_Fsharp.thy b/src/main/codegen_fsharp/Code_Char_Fsharp.thy similarity index 100% rename from src/codegen_fsharp/Code_Char_Fsharp.thy rename to src/main/codegen_fsharp/Code_Char_Fsharp.thy diff --git a/src/codegen_fsharp/Code_Char_chr_Fsharp.thy b/src/main/codegen_fsharp/Code_Char_chr_Fsharp.thy similarity index 100% rename from src/codegen_fsharp/Code_Char_chr_Fsharp.thy rename to src/main/codegen_fsharp/Code_Char_chr_Fsharp.thy diff --git a/src/codegen_fsharp/Code_Integer_Fsharp.thy b/src/main/codegen_fsharp/Code_Integer_Fsharp.thy similarity index 100% rename from src/codegen_fsharp/Code_Integer_Fsharp.thy rename to src/main/codegen_fsharp/Code_Integer_Fsharp.thy diff --git a/src/codegen_fsharp/Code_String_Fsharp.thy b/src/main/codegen_fsharp/Code_String_Fsharp.thy similarity index 100% rename from src/codegen_fsharp/Code_String_Fsharp.thy rename to src/main/codegen_fsharp/Code_String_Fsharp.thy diff --git a/src/codegen_fsharp/code_fsharp.ML b/src/main/codegen_fsharp/code_fsharp.ML similarity index 100% rename from src/codegen_fsharp/code_fsharp.ML rename to src/main/codegen_fsharp/code_fsharp.ML diff --git a/src/codegen_fsharp/code_fsharp.thy b/src/main/codegen_fsharp/code_fsharp.thy similarity index 100% rename from src/codegen_fsharp/code_fsharp.thy rename to src/main/codegen_fsharp/code_fsharp.thy diff --git a/src/codegen_fsharp/examples/AQ.thy b/src/main/codegen_fsharp/examples/AQ.thy old mode 100644 new mode 100755 similarity index 100% rename from src/codegen_fsharp/examples/AQ.thy rename to 
src/main/codegen_fsharp/examples/AQ.thy diff --git a/src/codegen_fsharp/examples/SemiG.thy b/src/main/codegen_fsharp/examples/SemiG.thy old mode 100644 new mode 100755 similarity index 100% rename from src/codegen_fsharp/examples/SemiG.thy rename to src/main/codegen_fsharp/examples/SemiG.thy diff --git a/src/codegen_fsharp/upstream/code_ml.ML b/src/main/codegen_fsharp/upstream/code_ml.ML similarity index 100% rename from src/codegen_fsharp/upstream/code_ml.ML rename to src/main/codegen_fsharp/upstream/code_ml.ML diff --git a/src/codegen_gdb/Code_gdb_script.thy b/src/main/codegen_gdb/Code_gdb_script.thy similarity index 100% rename from src/codegen_gdb/Code_gdb_script.thy rename to src/main/codegen_gdb/Code_gdb_script.thy diff --git a/src/config.sml b/src/main/config.sml similarity index 100% rename from src/config.sml rename to src/main/config.sml diff --git a/src/debug/profiling_begin.thy b/src/main/debug/profiling_begin.thy similarity index 100% rename from src/debug/profiling_begin.thy rename to src/main/debug/profiling_begin.thy diff --git a/src/debug/profiling_end.thy b/src/main/debug/profiling_end.thy similarity index 100% rename from src/debug/profiling_end.thy rename to src/main/debug/profiling_end.thy diff --git a/src/isar_setup.ML b/src/main/isar_setup.ML similarity index 100% rename from src/isar_setup.ML rename to src/main/isar_setup.ML diff --git a/src/log.thy b/src/main/log.thy similarity index 100% rename from src/log.thy rename to src/main/log.thy diff --git a/src/new_smt_patch/SMT_patch.thy b/src/main/new_smt_patch/SMT_patch.thy similarity index 99% rename from src/new_smt_patch/SMT_patch.thy rename to src/main/new_smt_patch/SMT_patch.thy index 300816b..55dba53 100644 --- a/src/new_smt_patch/SMT_patch.thy +++ b/src/main/new_smt_patch/SMT_patch.thy @@ -5,7 +5,7 @@ section \Bindings to Satisfiability Modulo Theories (SMT) solvers based on SMT-LIB 2\ theory SMT_patch -imports Divides +imports HOL.Divides keywords "smt_status_patch" :: diag begin @@ 
-166,7 +166,7 @@ ML_file "~~/src/HOL/Tools/SMT/verit_isar.ML" ML_file "~~/src/HOL/Tools/SMT/verit_proof_parse.ML" *) ML_file "~~/src/HOL/Tools/SMT/conj_disj_perm.ML" ML_file "~~/src/HOL/Tools/SMT/z3_interface.ML" -ML_file "~~/src/HOL/Tools/SMT/z3_replay_util.ML" +(*ML_file "../SMT/z3_replay_util.ML"*) ML_file "~~/src/HOL/Tools/SMT/z3_replay_rules.ML" ML_file "~~/src/HOL/Tools/SMT/z3_replay_methods.ML" ML_file "z3_replay_patch.ML" diff --git a/src/new_smt_patch/smt_config_patch.ML b/src/main/new_smt_patch/smt_config_patch.ML similarity index 100% rename from src/new_smt_patch/smt_config_patch.ML rename to src/main/new_smt_patch/smt_config_patch.ML diff --git a/src/new_smt_patch/smt_normalize_patch.ML b/src/main/new_smt_patch/smt_normalize_patch.ML similarity index 100% rename from src/new_smt_patch/smt_normalize_patch.ML rename to src/main/new_smt_patch/smt_normalize_patch.ML diff --git a/src/new_smt_patch/smt_solver_patch.ML b/src/main/new_smt_patch/smt_solver_patch.ML similarity index 100% rename from src/new_smt_patch/smt_solver_patch.ML rename to src/main/new_smt_patch/smt_solver_patch.ML diff --git a/src/new_smt_patch/smt_systems_patch.ML b/src/main/new_smt_patch/smt_systems_patch.ML similarity index 100% rename from src/new_smt_patch/smt_systems_patch.ML rename to src/main/new_smt_patch/smt_systems_patch.ML diff --git a/src/new_smt_patch/smtlib_interface_patch.ML b/src/main/new_smt_patch/smtlib_interface_patch.ML similarity index 100% rename from src/new_smt_patch/smtlib_interface_patch.ML rename to src/main/new_smt_patch/smtlib_interface_patch.ML diff --git a/src/new_smt_patch/smtlib_patch.ML b/src/main/new_smt_patch/smtlib_patch.ML similarity index 100% rename from src/new_smt_patch/smtlib_patch.ML rename to src/main/new_smt_patch/smtlib_patch.ML diff --git a/src/new_smt_patch/z3_model.ML b/src/main/new_smt_patch/z3_model.ML similarity index 100% rename from src/new_smt_patch/z3_model.ML rename to src/main/new_smt_patch/z3_model.ML diff --git 
a/src/new_smt_patch/z3_replay_patch.ML b/src/main/new_smt_patch/z3_replay_patch.ML similarity index 100% rename from src/new_smt_patch/z3_replay_patch.ML rename to src/main/new_smt_patch/z3_replay_patch.ML diff --git a/src/smt_patch/Old_SMT_patch.thy b/src/main/smt_patch/Old_SMT_patch.thy similarity index 96% rename from src/smt_patch/Old_SMT_patch.thy rename to src/main/smt_patch/Old_SMT_patch.thy index 1bd40c3..71c5061 100644 --- a/src/smt_patch/Old_SMT_patch.thy +++ b/src/main/smt_patch/Old_SMT_patch.thy @@ -5,11 +5,11 @@ section \Old Version of Bindings to Satisfiability Modulo Theories (SMT) solvers\ theory Old_SMT_patch -imports Real (* "~~/src/HOL/Word/Word" *) +imports HOL.Real (* "~~/src/HOL/Word/Word" *) keywords "old_smt_status" :: diag begin -ML_file "~~/src/HOL/Library/Old_SMT/old_smt_utils.ML" +ML_file "../Old_SMT/old_smt_utils.ML" ML_file "old_smt_failure_patch.ML" ML_file "old_smt_config_patch.ML" @@ -115,18 +115,18 @@ definition z3mod :: "int \ int \ int" where subsection \Setup\ -ML_file "~~/src/HOL/Library/Old_SMT/old_smt_builtin.ML" -ML_file "~~/src/HOL/Library/Old_SMT/old_smt_datatypes.ML" +ML_file "../Old_SMT/old_smt_builtin.ML" +ML_file "../Old_SMT/old_smt_datatypes.ML" ML_file "old_smt_normalize_patch.ML" ML_file "old_smt_translate_patch.ML" (* This is the file that is actually patched.*) ML_file "old_smt_solver_patch.ML" -ML_file "~~/src/HOL/Library/Old_SMT/old_smtlib_interface.ML" +ML_file "../Old_SMT/old_smtlib_interface.ML" ML_file "old_z3_interface_patch.ML" ML_file "old_z3_proof_parser_patch.ML" -ML_file "~~/src/HOL/Library/Old_SMT/old_z3_proof_tools.ML" -ML_file "~~/src/HOL/Library/Old_SMT/old_z3_proof_literals.ML" -ML_file "~~/src/HOL/Library/Old_SMT/old_z3_proof_methods.ML" +ML_file "../Old_SMT/old_z3_proof_tools.ML" +ML_file "../Old_SMT/old_z3_proof_literals.ML" +ML_file "../Old_SMT/old_z3_proof_methods.ML" named_theorems old_z3_simp "simplification rules for Z3 proof reconstruction" ML_file "old_z3_proof_reconstruction_patch.ML" 
ML_file "old_z3_model_patch.ML" @@ -421,7 +421,7 @@ lemma [old_z3_rule]: (* for def-axiom *) "(if P then Q else \R) \ P \ R" by auto -ML_file "~~/src/HOL/Library/Old_SMT/old_smt_real.ML" +ML_file "../Old_SMT/old_smt_real.ML" (* ML_file "~~/src/HOL/Library/Old_SMT/old_smt_word.ML" *) hide_type (open) pattern diff --git a/src/smt_patch/old_smt_config_patch.ML b/src/main/smt_patch/old_smt_config_patch.ML similarity index 100% rename from src/smt_patch/old_smt_config_patch.ML rename to src/main/smt_patch/old_smt_config_patch.ML diff --git a/src/smt_patch/old_smt_failure_patch.ML b/src/main/smt_patch/old_smt_failure_patch.ML similarity index 100% rename from src/smt_patch/old_smt_failure_patch.ML rename to src/main/smt_patch/old_smt_failure_patch.ML diff --git a/src/smt_patch/old_smt_normalize_patch.ML b/src/main/smt_patch/old_smt_normalize_patch.ML similarity index 100% rename from src/smt_patch/old_smt_normalize_patch.ML rename to src/main/smt_patch/old_smt_normalize_patch.ML diff --git a/src/smt_patch/old_smt_setup_solvers_patch.ML b/src/main/smt_patch/old_smt_setup_solvers_patch.ML similarity index 100% rename from src/smt_patch/old_smt_setup_solvers_patch.ML rename to src/main/smt_patch/old_smt_setup_solvers_patch.ML diff --git a/src/smt_patch/old_smt_solver_patch.ML b/src/main/smt_patch/old_smt_solver_patch.ML similarity index 100% rename from src/smt_patch/old_smt_solver_patch.ML rename to src/main/smt_patch/old_smt_solver_patch.ML diff --git a/src/smt_patch/old_smt_translate_patch.ML b/src/main/smt_patch/old_smt_translate_patch.ML similarity index 100% rename from src/smt_patch/old_smt_translate_patch.ML rename to src/main/smt_patch/old_smt_translate_patch.ML diff --git a/src/smt_patch/old_z3_interface_patch.ML b/src/main/smt_patch/old_z3_interface_patch.ML similarity index 100% rename from src/smt_patch/old_z3_interface_patch.ML rename to src/main/smt_patch/old_z3_interface_patch.ML diff --git a/src/smt_patch/old_z3_model_patch.ML 
b/src/main/smt_patch/old_z3_model_patch.ML similarity index 100% rename from src/smt_patch/old_z3_model_patch.ML rename to src/main/smt_patch/old_z3_model_patch.ML diff --git a/src/smt_patch/old_z3_proof_parser_patch.ML b/src/main/smt_patch/old_z3_proof_parser_patch.ML similarity index 100% rename from src/smt_patch/old_z3_proof_parser_patch.ML rename to src/main/smt_patch/old_z3_proof_parser_patch.ML diff --git a/src/smt_patch/old_z3_proof_reconstruction_patch.ML b/src/main/smt_patch/old_z3_proof_reconstruction_patch.ML similarity index 100% rename from src/smt_patch/old_z3_proof_reconstruction_patch.ML rename to src/main/smt_patch/old_z3_proof_reconstruction_patch.ML diff --git a/src/version.thy b/src/main/version.thy similarity index 100% rename from src/version.thy rename to src/main/version.thy diff --git a/src/test/Automata.thy b/src/test/Automata.thy new file mode 100644 index 0000000..f3018b8 --- /dev/null +++ b/src/test/Automata.thy @@ -0,0 +1,391 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * Automata.thy --- the base testing theory for automatas in sequence testing. + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2007 ETH Zurich, Switzerland + * 2009-2017 Univ. Paris-Sud, France + * 2009-2015 Achim D. Brucker, Germany + * 2015-2017 The University of Sheffield, UK + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id:$ *) + +chapter {* Basic Automaton Theory *} + +theory Automata imports Main +begin + +text{* Re-Definition of the following type synonyms from Monad-Theory - +apart from that, these theories are independent. 
*} +type_synonym ('o, '\) MON\<^sub>S\<^sub>E = "'\ \ ('o \ '\)" (* = '\ \ ('o \ '\)option *) +type_synonym ('o, '\) MON\<^sub>S\<^sub>B = "'\ \ ('o \ '\) set" +type_synonym ('o, '\) MON\<^sub>S\<^sub>B\<^sub>E = "'\ \ (('o \ '\) set) option" + + +subsection{* Deterministic I/O Automaton *} + + +class fin = + assumes finite: "finite(UNIV::'a set)" + + +record ('\, '\) DA = + init :: "'\" + step :: "'\ \ '\ \ '\" + accept :: "'\ set" + + +typedef ('\::fin, '\::fin) DFA = "{x::('\, '\) DA. True}" +proof - + show "\x. x \ {x. True}" by auto +qed + +record ('\, '\) pDA = + init :: "'\" + step :: "'\ \ '\ \ '\" + accept :: "'\ set" + +typ "'a option" +record ('\, '\) NDA = + init :: "'\" + step :: "('\ \ '\ \ '\)set" + accept :: "'\ set" + + +type_synonym ('\, 'o, '\) ioNDA = "('\ \ 'o, '\) NDA" + +record ('\, '\) EFSM = + init :: "'\" + step :: "('\ \ '\ \ '\)set" + accept :: "'\ set" + + +record ('\, 'o, '\) det_io_atm = + init :: "'\" + step :: "'\ \ ('o, '\) MON\<^sub>S\<^sub>E" + + +subsection{* Non-deterministic I/O automaton *} + +text{* + We will use two styles of non-deterministic automaton: Labelled Transition + Systems (LTS), which are intensively used in the literature, but tend to anihilate + the difference between input and output, and non-deterministic automaton, which + make this difference explicit and which have a closer connection to monads used + for the operational aspects of testing. +*} + +text{* + There we are: labelled transition systems. +*} + +record ('\, 'o, '\) lts = + init :: "'\ set" + step :: "('\ \ ('\ \ 'o) \ '\) set" (* false : this must be "('\ \ ('\ + 'o) \ '\) set" *) + +text{* And, equivalently; non-deterministic io automata. *} + +record ('\, 'o, '\) ndet_io_atm = + init :: "'\ set" + step :: "'\ \ ('o, '\) MON\<^sub>S\<^sub>B" + + +text{* + First, we will prove the fundamental equivalence of these two notions. + + We refrain from a formal definition of explicit conversion functions + and leave this internally in this proof (i.e. 
the existential witnesses). +*} + +definition det2ndet :: "('\, 'o, '\) det_io_atm \ ('\, 'o, '\) ndet_io_atm" +where "det2ndet A = \ndet_io_atm.init = {det_io_atm.init A}, + ndet_io_atm.step = + \ \ \. if \ \ dom(det_io_atm.step A \) + then {the(det_io_atm.step A \ \)} + else {} \" + +text{* + The following theorem establishes the fact that deterministic + automaton can be injectively embedded in non-deterministic ones. +*} +lemma det2ndet_injective : "inj det2ndet" + apply(auto simp: inj_on_def det2ndet_def) + apply(tactic {* Record.split_simp_tac @{context} [] (K ~1) 1*}, simp) + apply(simp (no_asm_simp) add: fun_eq_iff, auto) + apply(drule_tac x=x in fun_cong, drule_tac x=xa in fun_cong) + apply(case_tac "xa \ dom (step x)", simp_all) + apply(case_tac "xa \ dom (stepa x)", + simp_all add: fun_eq_iff[symmetric], auto) + apply(case_tac "xa \ dom (stepa x)", auto simp: fun_eq_iff[symmetric]) + apply(erule contrapos_np, simp) + apply(drule Product_Type.split_paired_All[THEN iffD2])+ + apply(simp only: Option.not_Some_eq) +done + + +text{* + We distinguish two forms of determinism - global determinism, where for each + state and input \emph{at most} one output-successor state + is assigned. +*} +definition deterministic :: "('\, 'o, '\) ndet_io_atm \ bool" +where "deterministic atm = (((\ x. ndet_io_atm.init atm = {x}) \ + (\ \ out. \ p1 \ step atm \ out. + \ p2 \ step atm \ out. + p1 = p2)))" + +text{* In contrast, transition relations *} +definition \deterministic :: "('\, 'o, '\) ndet_io_atm \ bool" +where "\deterministic atm = (\ x. ndet_io_atm.init atm = {x} \ + (\ \ out. + \ p1 \ step atm \ out. + \ p2 \ step atm \ out. 
+ fst p1 = fst p2 \ snd p1 = snd p2))" + + +lemma det2ndet_deterministic: "deterministic (det2ndet atm)" + by(auto simp:deterministic_def det2ndet_def) + +lemma det2ndet_\deterministic: "\deterministic (det2ndet atm)" + by(auto simp: \deterministic_def det2ndet_def) + + +text{* + The following theorem establishes the isomorphism of the two concepts + IO-automata and LTS. We will therefore concentrate in the sequel on IO-Automata, + which have a slightly more realistic operational behaviour: you give the program + under test an input and get a possible set of responses rather than "agreeing + with the program under test" on a set of input-output-pairs. +*} + +definition ndet2lts :: "('\, 'o, '\) ndet_io_atm \ ('\, 'o, '\) lts" +where "ndet2lts A = \lts.init = init A, + lts.step = {(s,io,s').(snd io,s') \ step A (fst io) s}\" + +definition lts2ndet :: " ('\,'o,'\) lts \ ('\, 'o, '\) ndet_io_atm" +where "lts2ndet A = \init = lts.init A, + step = \ i s. {(out,s'). (s, (i,out), s') + \ lts.step A}\" + +lemma ndet_io_atm_isomorph_lts : "bij ndet2lts" + apply (auto simp: bij_def inj_on_def surj_def ndet2lts_def) + apply (tactic {* Record.split_simp_tac @{context} [] (K ~1) 1*}, simp) + apply (rule ext, rule ext, simp add: set_eq_iff) + apply (rule_tac x = "lts2ndet y" in exI) + apply (simp add: lts2ndet_def) + done + + +(* Lemma missing: for deterministic ndet_atm's, det2ndet is even bijective, +i.e the definition above is indeed a characterization. *) + +text{* + The following well-formedness property is important: for every state, + there is a valid transition. Otherwise, some states may never be part of an + (infinite) trace. +*} + +definition is_enabled :: "['\ \ ('o, '\) MON\<^sub>S\<^sub>B, '\ ] \ bool" +where "is_enabled rel \ = (\ \. rel \ \ \ {})" + +definition is_enabled' :: "['\ \ ('o, '\) MON\<^sub>S\<^sub>E, '\ ] \ bool" +where "is_enabled' rel \ = (\ \. \ \ dom(rel \))" + + +definition live_wff:: "('\, 'o, '\) ndet_io_atm \ bool" +where "live_wff atm = (\ \. 
\ \. step atm \ \ \ {})" + +lemma life_wff_charn: "live_wff atm = (\ \. is_enabled (step atm) \)" + by(auto simp: live_wff_def is_enabled_def) + +text{* + There are essentially two approaches: either we disallow non-enabled transition + systems---via life\_wff\_charn---or we restrict our machinery for traces and prefixed closed + sets of runs over them +*} + +section{* Rich Traces and its Derivatives *} +text{* + The easiest way to define the concept of traces is on LTS. Via the injections described + above, we can define notions like deterministic automata rich trace, and i/o automata rich + trace. Moreover, we can easily project event traces or state traces from rich traces. *} + +type_synonym ('\, 'o, '\) trace = "nat \ ('\ \ ('\ \ 'o) \ '\)" +type_synonym ('\, 'o) etrace = "nat \ ('\ \ 'o)" (* event traces *) +type_synonym '\ \trace = "nat \ '\" +type_synonym '\ in_trace = "nat \ '\" +type_synonym 'o out_trace = "nat \ 'o" +type_synonym ('\, 'o, '\) run = "('\ \ ('\ \ 'o) \ '\) list" +type_synonym ('\, 'o) erun = "('\ \ 'o) list" +type_synonym '\ \run = "'\ list" +type_synonym '\ in_run = "'\ list" +type_synonym 'o out_run = "'o list" + +definition rtraces ::"('\, 'o, '\) ndet_io_atm \ ('\, 'o, '\) trace set" +where "rtraces atm = { t. fst(t 0) \ init atm \ + (\ n. fst(t (Suc n)) = snd(snd(t n))) \ + (\ n. if is_enabled (step atm) (fst(t n)) + then t n \ {(s,io,s'). (snd io,s') + \ step atm (fst io) s} + else t n = (fst(t n),undefined,fst(t n)))}" + +lemma init_rtraces[elim!]: "t \ rtraces atm \ fst(t 0) \ init atm" +by(auto simp: rtraces_def) + +lemma post_is_pre_state[elim!]: "t \ rtraces atm \ fst(t (Suc n)) = snd(snd(t n))" +by(auto simp: rtraces_def) + + +lemma enabled_transition[elim!]: +"\t \ rtraces atm; is_enabled (step atm) (fst(t n)) \ + \ t n \ {(s,io,s'). 
(snd io,s') \ step atm (fst io) s}" +apply(simp add: rtraces_def split_def, safe) +apply (erule allE)+ +apply (auto simp add: split: if_split_asm) +done + +lemma nonenabled_transition[elim!]: "\t \ rtraces atm; \ is_enabled (step atm) (fst(t n)) \ + \ t n = (fst(t n),undefined,fst(t n))" +by(simp add: rtraces_def split_def) + + +text{* + The latter definition solves the problem of inherently finite traces, i.e. those that reach + a state in which they are no longer enabled. They are represented by stuttering steps on + the same state. +*} + +definition fin_rtraces :: "('\, 'o, '\) ndet_io_atm \ ('\, 'o, '\) trace set" +where "fin_rtraces atm = { t . t \ rtraces atm \ + (\ n. \ is_enabled (step atm) (fst(t n)))}" + +lemma fin_rtraces_are_rtraces : "fin_rtraces atm \ rtraces atm" +by(auto simp: rtraces_def fin_rtraces_def) + + +definition \traces ::"('\, 'o, '\) ndet_io_atm \ '\ \trace set" +where "\traces atm = {t . \ rt \ rtraces atm. t = fst o rt }" + +definition etraces ::"('\, 'o, '\) ndet_io_atm \ ('\, 'o) etrace set" +where "etraces atm = {t . \ rt \ rtraces atm. t = fst o snd o rt }" + +definition in_trace :: "('\, 'o) etrace \ '\ in_trace" +where "in_trace rt = fst o rt" + +definition out_trace :: "('\, 'o) etrace \ 'o out_trace" +where "out_trace rt = snd o rt" + +definition prefixes :: "(nat \ '\) set \ '\ list set" +where "prefixes ts = {l. \ t \ ts. \ (n::int). l = map (t o nat) [0..n]}" + +definition rprefixes :: "['\ \ ('o, '\) MON\<^sub>S\<^sub>B, + ('\, 'o, '\) trace set] \ ('\, 'o, '\) run set" +where "rprefixes rel ts = {l. \ t \ ts. \ n. 
(is_enabled rel (fst(t (nat n))) \ + l = map (t o nat) [0..n])}" +definition eprefixes :: "['\ \ ('o, '\) MON\<^sub>S\<^sub>B, + ('\, 'o, '\) trace set] \ ('\, 'o) erun set" +where "eprefixes rel ts = (map (fst o snd)) ` (rprefixes rel ts)" + +definition \prefixes :: "['\ \ ('o, '\) MON\<^sub>S\<^sub>B, + ('\, 'o, '\) trace set] \ '\ \run set" +where "\prefixes rel ts = (map fst) ` (rprefixes rel ts)" + + + +section{* Extensions: Automata with Explicit Final States *} + + +text{* + We model a few widely used variants of automata as record extensions. In particular, we + define automata with final states and internal (output) actions. +*} + +record ('\, 'o, '\) det_io_atm' = "('\, 'o, '\) det_io_atm" + + final :: "'\ set" + +text{* + A natural well-formedness property to be required from this type of atm is as follows: whenever + an atm' is in a final state, the transition operation is undefined. +*} + +definition final_wff:: "('\, 'o, '\) det_io_atm' \ bool" +where "final_wff atm' = + (\\ \ final atm'. \\. \ \ dom (det_io_atm.step atm' \))" + +text{* + Another extension provides the concept of internal actions, which are considered as part of + the output alphabet here. If internal actions are also used for synchronization, further + extensions admitting internal input actions will be necessary, too, which we do not model here. +*} + +record ('\, 'o, '\) det_io_atm'' = "('\, 'o, '\) det_io_atm'" + + internal :: "'o set" + +text{* + A natural well-formedness property to be required from this type of atm is as follows: whenever + an atm' is in a final state, the transition operation is required to provide a state that is + again final and an output that is considered internal. +*} + +definition final_wff2:: "('\, 'o, '\) det_io_atm'' \ bool" +where "final_wff2 atm'' = (\\ \ final atm''. + \ \. 
\ \ dom (det_io_atm.step atm'' \) \ + (let (out, \') = the(det_io_atm.step atm'' \ \) + in out \ internal atm'' \ \' \ final atm''))" + +text{* + Of course, for this type of extended automata, it is also possible to impose the additional + requirement that the step function is total---undefined steps would then be represented as + steps leading to final states. + + The standard extensions on deterministic automata are also redefined for the non-deterministic + (specification) case. +*} + +record ('\, 'o, '\) ndet_io_atm' = "('\, 'o, '\) ndet_io_atm" + + final :: "'\ set" + +definition final_wff_ndet_io_atm2 :: "('\, 'o, '\) ndet_io_atm' \ bool" + where "final_wff_ndet_io_atm2 atm' = (\\ \ final atm'. \\. ndet_io_atm.step atm' \ \ = {})" + +record ('\, 'o, '\) ndet_io_atm'' = "('\, 'o, '\) ndet_io_atm'" + + internal :: "'o set" + +definition final_wff2_ndet_io_atm2 :: "('\, 'o, '\) ndet_io_atm'' \ bool" + where "final_wff2_ndet_io_atm2 atm'' = + (\\ \ final atm''. + \\. step atm'' \ \ \ {} \ step atm'' \ \ \ internal atm'' \ final atm'')" + +end diff --git a/src/test/EFSM_Toolkit.thy b/src/test/EFSM_Toolkit.thy new file mode 100644 index 0000000..67205c8 --- /dev/null +++ b/src/test/EFSM_Toolkit.thy @@ -0,0 +1,167 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * TestRefinements.thy --- for sequence testing. + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2007, ETH Zurich, Switzerland + * 2009 B. Wolff, Univ. Paris-Sud, France + * 2009 Achim D. Brucker, Germany + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id:$ *) + +chapter {* Test-Refinements (IOCO and Friends) *} + +theory EFSM_Toolkit imports Monads Automata +begin + + +definition impl :: "['\\'\\bool, '\ \ ('o,'\)MON\<^sub>S\<^sub>B] \ '\ \ ('o,'\)MON\<^sub>S\<^sub>E" +where "impl pre post \ = (\ \. if pre \ \ + then Some(SOME(out,\'). (out,\') \ post \ \) + else undefined)" + +definition strong_impl :: "['\\'\\bool, '\\('o,'\)MON\<^sub>S\<^sub>B] \ '\\('o, '\)MON\<^sub>S\<^sub>E" +where "strong_impl pre post \ = + (\ \. if pre \ \ + then Some(SOME(out,\'). 
(out,\') \ post \ \) + else None)" + +definition strong_impl' :: "['\\'\\bool, '\\('o,'\)MON\<^sub>S\<^sub>B] \ '\\('o, '\)MON\<^sub>S\<^sub>E" +where "strong_impl' pre post \ = + (\ \. if pre \ \ + then Some(SOME(out,\'). (out,\') \ post \ \) + else + (if post \ \ \ {} + then Some(SOME(out,\'). (out,\') \ post \ \) + else None))" + + +definition implementable :: "['\ \ '\ \ bool,'\ \ ('o,'\)MON\<^sub>S\<^sub>B] \ bool" +where "implementable pre post =(\ \ \. pre \ \ \(\ out \'. (out,\') \ post \ \ ))" + +definition is_strong_impl :: "['\ \ '\ \ bool, + '\ \ ('o,'\)MON\<^sub>S\<^sub>B, + '\ \ ('o, '\)MON\<^sub>S\<^sub>E] \ bool" +where "is_strong_impl pre post ioprog = + (\ \ \. (\pre \ \ \ ioprog \ \ = None) \ + (pre \ \ \ (\ x. ioprog \ \ = Some x)))" + +lemma is_strong_impl : + "is_strong_impl pre post (strong_impl pre post)" +by(simp add: is_strong_impl_def strong_impl_def) + + +text{* This following characterization of implementable +specifications has actually a quite complicated form due to the fact +that post expects its arguments in curried form - should +be improved \ldots *} +lemma implementable_charn: + "\implementable pre post; pre \ \ \ \ + (the(strong_impl pre post \ \)) \ post \ \" +apply(auto simp: implementable_def strong_impl_def) +apply(erule_tac x=\ in allE) +apply(erule_tac x=\ in allE) +apply(simp add: Eps_case_prod) +apply(rule someI_ex, auto) +done + + +locale efsm_det = + fixes pre :: "'\ \ '\ \ bool" + fixes post :: "'\ \ '\ \ ('o \ '\) set" + fixes efsm :: "'\ \ '\ \ ('o \ '\) option" + fixes in_ev:: "'\" + fixes out_ev:: "'\ \ 'o" + fixes upd :: "'\ \ '\" + fixes E :: "'\ \ bool" + assumes SPEC: "efsm \ (strong_impl pre post)" + assumes pre_red : "\ \. pre \ in_ev = E \" + assumes post_red : "\ \. pre \ in_ev \ (SOME(res,\'). (res,\') \ post in_ev \) = (out_ev \,upd \)" +begin + +lemma impl_def:"efsm in_ev = (\\. 
if E \ then Some(out_ev \, upd \) else None)" + by(rule ext, auto simp: SPEC strong_impl_def pre_red post_red) + +lemma exec_succes: + "(efsm in_ev \ = Some(b,\')) = (E \ \ (b=out_ev \) \ \'= upd \)" + by(auto simp: impl_def split:if_split_asm) + + +lemma exec_failure: + "(efsm in_ev \ = None) = (\ E \)" + by(auto simp: impl_def split:if_split_asm) + + + +lemma exec_mbindFSave_If[simp]: +"(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (in_ev # S) efsm; return (P s))) = + (if E \ + then ((upd \) \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S efsm; return (P (out_ev \ # s)))) + else (\ \ (return (P []))))" +by(auto simp: exec_mbindFSave impl_def) + +lemma exec_mbindFSave_E: +assumes A:"(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (in_ev # S) efsm; return (P s)))" + and B:"E \ \ ((upd \) \ (s\mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S efsm;return(P(out_ev \ # s)))) \ Q" + and C:"\ E \ \\ \ (return (P [])) \ Q" +shows "Q" +apply(insert A, subst (asm) exec_mbindFSave_If, case_tac "E \", simp_all only: if_True if_False) +by(auto elim: B C) + + + +lemma exec_mbindFStop[simp]: +"(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (in_ev#S) efsm; return (P s))) = + (E \ \ ((upd \) \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S efsm; return (P (out_ev \ # s)))))" +apply(rule iffI) +apply(erule Monads.exec_mbindFStop_E,simp add: exec_succes,auto) +apply(subst exec_bind_SE_success[OF exec_succes[THEN iffD2]], auto) +done + + +lemma exec_mbindFStop_E: +assumes A:"(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (in_ev # S) efsm; return (P s)))" + and B:"E \ \ ((upd \) \ (s\mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S efsm;return(P(out_ev \ # s)))) \ Q" +shows "Q" +by(insert A, rule B, simp_all del: mbind'_bind) + +(* +lemmas impl_1'' = 
exec_failure +lemmas impl_1' = exec_succes +*) + +end + +end diff --git a/src/test/Interleaving.thy b/src/test/Interleaving.thy new file mode 100644 index 0000000..f63c0ea --- /dev/null +++ b/src/test/Interleaving.thy @@ -0,0 +1,244 @@ +theory Interleaving +imports Main +begin + +(* +fun interleave :: "'a list => 'a list => 'a list set" +where "interleave [] [] = {[]}" + |"interleave (A) [] = {A}" + |"interleave [] B = {B}" + |"interleave (a#A)(b#B) = (let S = interleave A (b#B); + S' = interleave (a#A) (B) + in (\ x. a # x) ` S \ (\ x. b # x) ` S')" +*) + +fun interleave :: "'a list => 'a list => 'a list set" +where "interleave [] [] = {[]}" + | "interleave (A) [] = {A}" + | "interleave [] B = {B}" + |"interleave (a # A) (b # B) = + (\x. a # x) ` interleave A (b # B) \ + (\x. b # x) ` interleave (a # A) B" + +find_theorems interleave + +value"interleave [1::int,2,3] [11,12]" + + +fun interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t :: "'a list => 'a list => 'a list list" +where "interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t [] [] = [[]]" + |"interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t (A) [] = [A]" + |"interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t [] B = [B]" + |"interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t (a#A)(b#B) = (let S = interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t A (b#B); + S' = interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t (a#A) (B) + in map (\ x. a # x) S @ map (\ x. 
b # x) S')" + + +value "interleave' [1::int,2,1,2] [2,1]" +value "interleave [1::int,2] [1]" + +(* Junk --- should be associativ application of interleave *) +fun interleave2 :: "'a list => 'a list => 'a list => 'a list set" +where "interleave2 [] [] [] = {[]}" + |"interleave2 (A) [] [] = {A}" + |"interleave2 [] B [] = {B}" + |"interleave2 [] [] C = {C}" + |"interleave2 (A)(B) [] = interleave A B" + |"interleave2 (A)[] (C) = interleave A C" + |"interleave2 [] B (C) = interleave B C" + |"interleave2 (a#A)(b#B) (c#C) = (let S = interleave2 A (b#B) (c#C) ; + S' = interleave2 (a#A) (B) (c#C) ; + S'' = interleave2 (a#A) (b#B) (C) + in (\ x. a # x) ` ( S) \ (\ x. b # x) ` S' \ (\ x. c # x) ` S'')" + +lemma replace : "interleave2 A B C = ((\x\interleave A B. interleave X C))" +oops + +(* +fun interleave1 :: "'a list => 'a list => 'a list => 'a list => 'a list set" +where "interleave1 [] [] [] [] = {[]}" + |"interleave1 (A) [] [] [] = {A}" + |"interleave1 [] B [] []= {B}" + |"interleave1 [] [] C []= {C}" + |"interleave1 [] [] [] D= {D}" + |"interleave1 (A)(B) (C) []= interleave2 A B C" + |"interleave1 (A)(B) [] D = interleave2 A B D" + |"interleave1 (A)[] (C) D= interleave2 A C D" + |"interleave1 [] B (C) D= interleave2 B C D" + |"interleave1 (a#A)(b#B) (c#C) (d#D) = (let S = interleave1 A (b#B) (c#C) (d#D); + S' = interleave1 (a#A) (B) (c#C) (d#D); + S'' = interleave1 (a#A) (b#B) (C) (d#D); + S'''= interleave1 (a#A) (b#B) (c#C) (D) + in (\ x. a # x) ` S \ (\ x. b # x) ` S' \ (\ x. c # x) ` S'' \ (\ x. d # x) ` S''')" + +value "int(card(interleave1 [] [] [2::int,1,2,1] [1,2]))" +*) +fun bus_interleave :: " ('a::order) list \ 'a list \ 'a list set" +where "bus_interleave [] [] = {[]}" + |"bus_interleave (A) [] = {A}" + |"bus_interleave [] B = {B}" + |"bus_interleave (a#A)(b#B) = + (if a \ b then (\ x. a # x) ` bus_interleave A (b#B) + else if b \ a then (\ x. b # x) ` bus_interleave (a#A) (B) + else (let S = bus_interleave A (b#B); + S' = bus_interleave (a#A) (B) + in (\ x. 
a # x) ` S \ (\ x. b # x) ` S'))" + +fun sync_interleave :: "('a \ bool) \ ('a \ 'a \ bool) \ ('a \ bool) \'a list \ 'a list \ 'a list set" +where "sync_interleave L S R [] [] = {[]}" + |"sync_interleave L S R (A) [] = {A}" + |"sync_interleave L S R [] B = {B}" + |"sync_interleave L S R (a#A)(b#B) = + (if L a \ \ L b then sync_interleave L S R (A) (b#B) + else if \ L a \ L b then sync_interleave L S R (a#A) (B) + else + if L a \ L b \ S a b then (\ x. a # x) ` sync_interleave L S R (A) (B) + else (let IL = sync_interleave L S R A (b#B); + IL' = sync_interleave L S R (a#A) (B) + in (\ x. a # x) ` IL \ (\ x. b # x) ` IL'))" + +fun sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t :: "'a \'a list \ 'a list \ 'a list list" +where "sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n [] [] = [[]]" + |"sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n (A) [] = [A]" + |"sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n [] B = [B]" + |"sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n (a#A)(b#B) = + (if a = n \ b \ n then sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n (A) (b#B) + else if a \ n \ b = n then sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n (a#A) (B) + else + if a = n \ b= n then map (\ x. n # x) (sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n (A) (B) ) + else (let S = sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n A (b#B); + S' = sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n (a#A) (B) + in map (\ x. a # x) S @ map (\ x. 
b # x) S'))" + +value "((sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t 2 [1::int,3,2 ] [1,2]))" + + + +value "sync_interleave' 2 [1::int,3,2 ] [1,2]" + +fun sync_interleave1 :: "'a list \'a list \ 'a list \ 'a list set" +where + "sync_interleave1 [] [] [] = {[]}" + |"sync_interleave1 n [] [] = {[]}" + |"sync_interleave1 [] A [] = {A}" + |"sync_interleave1 [] [] B = {B}" + |"sync_interleave1 n (A) [] = {A}" + |"sync_interleave1 n [] B = {B}" + |"sync_interleave1 [] A B = interleave A B" + |"sync_interleave1 (n#N) (a#A)(b#B) = + (if a = n \ b \ n then sync_interleave1 (n#N) (A) (b#B) + else if a \ n \ b = n then sync_interleave1 (n#N) (a#A) (B) + else + if a = n \ b= n then (\ x. n # x) ` sync_interleave1 (n#N) (A) (B) + else + (let S = sync_interleave1 (n#N) A (b#B); + S' = sync_interleave1 (n#N) (a#A) (B) + in (\ x. a # x) ` S \ (\ x. b # x) ` S'))" + +value "((sync_interleave1 [1,3] [1::int,3,2 ] [1,2]))" +value "((sync_interleave1 [3,1] [1::int,3,2 ] [1,2]))" +value "(sync_interleave1 [3,1] [1::int,2 ] [1,3,2])" + +find_theorems " interleave" +value "int (card (interleave [1::int,2,3,4 ] [8,9,10,11]))" + +term" \ X" + + +lemma "x \ set A \ x \ \(set ` interleave A B)" +proof (induct A B rule: interleave.induct) + case 1 + then show ?case by simp +next + case (2 a A) + then show ?case by simp +next + case (3 b B) + then show ?case by simp +next + case (4 a A b B) + from 4 have "x \ \(set ` interleave (a # A) B)" by simp + then show ?case by auto +qed + + +lemma commute1[simp]: "interleave A [] = interleave [] A" by(case_tac A, simp_all) + + +lemma interleave_commute'[rule_format]: "\B. interleave A B = interleave B A" +proof(induct A) + show "\B. interleave [] B = interleave B []" by(simp) +next + show "\a A. \B. interleave A B = interleave B A \ \B. 
interleave (a # A) B = interleave B (a # A)" + apply(rule allI,induct_tac B, simp_all) + apply(subst Set.Un_commute) + apply (elim allE) + apply (erule ssubst)+ + apply (simp only: triv_forall_equality) + done +qed + + +lemma interleave1 [simp] : "(x \ interleave [] A) = (x = A)" +by(induct A, simp_all only: interleave.simps, simp_all) + + +lemma interleave2 [simp] : "(x \ interleave A []) = (x = A)" +by(induct A, simp_all only: interleave.simps, simp_all) + + + +lemma length_interleave_lemma: + "\n < k . length A + length B \ n + \ (x::'a list) \ (interleave A B) + \ length x = (length A + length B)" +proof(induct k arbitrary: A B x) + case 0 fix A::"'a list" and B::"'a list" and x::"'a list" + have * : "\A B. length A + length B \ 0 \ length A = 0" by auto + have ** : "\A B. length A + length B \ 0 \ length B = 0" by auto + show "\n<0. length A + length B \ n + \ x \ interleave A B + \ length x = length A + length B" + by auto + case (Suc k) fix A::"'a list" and B::"'a list" and x::"'a list" + assume H: "\(A::'a list) B x. \n n + \ x \ interleave A B + \ length x = length A + length B" + have H': "\(A::'a list) B x n. x \ interleave A B \ n length A + length B \ n + \ length x = length A + length B" + by(erule H[THEN spec, THEN mp, THEN mp, THEN mp], auto) + + show "\n n + \ x \ interleave A B + \ length x = length A + length B" + apply(case_tac "A", simp_all) + apply(case_tac "B", simp_all) + apply(auto) + apply(case_tac "n", simp_all) + apply(case_tac "list",auto) + apply(rule trans, erule H', auto) + apply(rule trans, erule H', auto) + apply(case_tac "n", simp_all) + apply(case_tac "list",auto) + apply(rule trans, erule H', auto) + apply(rule trans, erule H', auto) + done +qed + + +lemma length_interleave: "x \ (interleave A B) \ length x = (length A + length B)" +apply(rule length_interleave_lemma[rule_format, of "length A + length B" "length A + length B + 1"]) +by auto + + +lemma cardinality: "card(interleave A B) = ( length A * length B + 1)" (* really ? 
*) +oops + +lemma causality : + "\ k i. i < k \ (\ C \ (interleave A B). (\ l m. l < m \ A!i = C!l \ A!k = C!m))" (* really ? *) +oops + + + +end + diff --git a/src/test/Monads.thy b/src/test/Monads.thy new file mode 100644 index 0000000..b03d619 --- /dev/null +++ b/src/test/Monads.thy @@ -0,0 +1,1256 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * Monads.thy --- a base testing theory for sequential computations. + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2007 ETH Zurich, Switzerland + * 2009-2017 Univ. Paris-Sud, France + * 2009-2012 Achim D. Brucker, Germany + * 2015-2017 University Sheffield, UK + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id:$ *) + +chapter {* Basic Monad Theory for Sequential Computations *} + +theory Monads imports Main +begin + +section{* A Generic (Approximative) Notion of Monad *} + +text {* + A (fake) monad is a structure with a return and a bind satisfying three laws. + Since we cannot use generic type constructors in HOL, + we formulate monads using only one type 'a and its monadic representation 'am. +*} + +locale Monad = +fixes returm :: "'a \ 'am" + and bind :: "'am \ ('a \ 'am) \ 'am" (infixr ">>=" 70) +assumes left_unit : "(returm x) >>= f = f x" + and right_unit: "m >>= returm = m" + and assoc : "(m >>= f) >>= g = m >>= (\ x . f x >>= g)" +begin + +definition pipe :: "'am \ 'am \ 'am" +where "pipe f g \ bind f (\ _. g)" + +lemma pipe_left_unit : "pipe (returm x) f = f" +by(simp add: left_unit pipe_def) + + +lemma pipe_right_unit : "pipe f (returm x) = f" +oops (* Not possible to formulate in this Monad Framework generically ! ! ! + In the concrete SE-Monad, we have the (interesting) special case: + "(m;- (return ())) = m" *) + +lemma pipe_assoc : "pipe f (pipe g h) = pipe (pipe f g) h" +by(simp add: assoc pipe_def) + +primrec Mrun :: "('b \ 'a \ 'am) \ 'b list \ 'a \ 'am" +where "Mrun m [] = returm" + | "Mrun m (b#bs) = (\ a . 
Mrun m bs a >>= m b)" + +end + +section{*General Framework for Monad-based Sequence-Test *} +text{* As such, Higher-order Logic as a purely functional specification + formalism has no built-in mechanism for state and state-transitions. + Forms of testing involving state require therefore explicit mechanisms + for their treatment inside the logic; a well-known technique to model + states inside purely functional languages are \emph{monads} made popular + by Wadler and Moggi and extensively used in Haskell. \HOL is powerful + enough to represent the most important \emph{instances} of standard monads; + however, it is not possible to represent monads as such due to well-known + limitations of the Hindley-Milner type-system (no type-constructor-classes). *} + +text{* Here is a variant for state-exception monads, that models precisely + transition functions with preconditions. Next, we declare the + state-backtrack-monad. In all of them, our concept of i/o stepping + functions can be formulated; these are functions mapping input + to a given monad. Later on, we will build the usual concepts of: + \begin{enumerate} + \item deterministic i/o automata, + \item non-deterministic i/o automata, and + \item labelled transition systems (LTS) + \end{enumerate} +*} + +subsection{* Definition : Standard State Exception Monads *} + +subsubsection{* Definition : Core Types and Operators *} + +type_synonym ('o, '\) MON\<^sub>S\<^sub>E = "'\ \ ('o \ '\)" (* = '\ \ ('o \ '\)option *) + +definition bind_SE :: "('o,'\)MON\<^sub>S\<^sub>E \ ('o \ ('o','\)MON\<^sub>S\<^sub>E) \ ('o','\)MON\<^sub>S\<^sub>E" +where "bind_SE f g = (\\. case f \ of None \ None + | Some (out, \') \ g out \')" + +notation bind_SE ("bind\<^sub>S\<^sub>E") + +syntax (xsymbols) + "_bind_SE" :: "[pttrn,('o,'\)MON\<^sub>S\<^sub>E,('o','\)MON\<^sub>S\<^sub>E] \ ('o','\)MON\<^sub>S\<^sub>E" + ("(2 _ \ _; _)" [5,8,8]8) +translations + "x \ f; g" == "CONST bind_SE f (% x . 
g)" + +definition unit_SE :: "'o \ ('o, '\)MON\<^sub>S\<^sub>E" ("(return _)" 8) +where "unit_SE e = (\\. Some(e,\))" +notation unit_SE ("unit\<^sub>S\<^sub>E") + +text{* In the following, we prove the required Monad-laws *} + +lemma bind_right_unit[simp]: "(x \ m; return x) = m" + apply (simp add: unit_SE_def bind_SE_def) + apply (rule ext) + apply (case_tac "m \", simp_all) + done + +lemma bind_left_unit [simp]: "(x \ return c; P x) = P c" + by (simp add: unit_SE_def bind_SE_def) + +lemma bind_assoc[simp]: "(y \ (x \ m; k x); h y) = (x \ m; (y \ k x; h y))" + apply (simp add: unit_SE_def bind_SE_def, rule ext) + apply (case_tac "m \", simp_all) + apply (case_tac "a", simp_all) + done + +interpretation SE : Monad unit_SE bind_SE + by unfold_locales (simp_all) + + +subsubsection{* Definition : More Operators and their Properties *} + +definition fail_SE :: "('o, '\)MON\<^sub>S\<^sub>E" +where "fail_SE = (\\. None)" +notation fail_SE ("fail\<^sub>S\<^sub>E") + +definition assert_SE :: "('\ \ bool) \ (bool, '\)MON\<^sub>S\<^sub>E" +where "assert_SE P = (\\. if P \ then Some(True,\) else None)" +notation assert_SE ("assert\<^sub>S\<^sub>E") + +definition assume_SE :: "('\ \ bool) \ (unit, '\)MON\<^sub>S\<^sub>E" +where "assume_SE P = (\\. if \\ . P \ then Some((), SOME \ . P \) else None)" +notation assume_SE ("assume\<^sub>S\<^sub>E") + + +lemma bind_left_fail_SE[simp] : "(x \ fail\<^sub>S\<^sub>E; P x) = fail\<^sub>S\<^sub>E" + by (simp add: fail_SE_def bind_SE_def) + + +text{* We also provide a "Pipe-free" - variant of the bind operator. +Just a "standard" programming sequential operator without output frills. *} +(* TODO: Eliminate/Modify this. Is a consequence of the Monad-Instantiation. 
*) + + +definition bind_SE' :: "[('\, '\)MON\<^sub>S\<^sub>E, ('\, '\)MON\<^sub>S\<^sub>E] \ ('\, '\)MON\<^sub>S\<^sub>E" (infixr ";-" 60) +where "f ;- g = (_ \ f ; g)" + +lemma bind_assoc'[simp]: "((m;- k);- h ) = (m;- (k;- h))" +by(simp add:bind_SE'_def) + + +lemma bind_left_unit' [simp]: "((return c);- P) = P" + by (simp add: bind_SE'_def) + + +lemma bind_left_fail_SE'[simp]: "(fail\<^sub>S\<^sub>E;- P) = fail\<^sub>S\<^sub>E" + by (simp add: bind_SE'_def) + +lemma bind_right_unit'[simp]: "(m;- (return ())) = m" + by (simp add: bind_SE'_def) + +text{* The bind-operator in the state-exception monad yields already + a semantics for the concept of an input sequence on the meta-level: *} +lemma syntax_test: "(o1 \ f1 ; o2 \ f2; return (post o1 o2)) = X" +oops + +definition if_SE :: "['\ \ bool, ('\, '\)MON\<^sub>S\<^sub>E, ('\, '\)MON\<^sub>S\<^sub>E] \ ('\, '\)MON\<^sub>S\<^sub>E" +where "if_SE c E F = (\\. if c \ then E \ else F \)" + +syntax (xsymbols) + "_if_SE" :: "['\ \ bool,('o,'\)MON\<^sub>S\<^sub>E,('o','\)MON\<^sub>S\<^sub>E] \ ('o','\)MON\<^sub>S\<^sub>E" + ("(if\<^sub>S\<^sub>E _ then _ else _fi)" [5,8,8]8) +translations + "(if\<^sub>S\<^sub>E cond then T1 else T2 fi)" == "CONST if_SE cond T1 T2" + + +subsubsection{* Theory of a Monadic While *} + +text{* Prerequisites *} +fun replicator :: "[('a, '\)MON\<^sub>S\<^sub>E, nat] \ (unit, '\)MON\<^sub>S\<^sub>E" (infixr "^^^" 60) +where "f ^^^ 0 = (return ())" + | "f ^^^ (Suc n) = (f ;- f ^^^ n)" + + +fun replicator2 :: "[('a, '\)MON\<^sub>S\<^sub>E, nat, ('b, '\)MON\<^sub>S\<^sub>E] \ ('b, '\)MON\<^sub>S\<^sub>E" (infixr "^:^" 60) +where "(f ^:^ 0) M = (M )" + | "(f ^:^ (Suc n)) M = (f ;- ((f ^:^ n) M))" + + +text{* First Step : Establishing an embedding between partial functions and relations *} +(* plongement *) +definition Mon2Rel :: "(unit, '\)MON\<^sub>S\<^sub>E \ ('\ \ '\) set" +where "Mon2Rel f = {(x, y). 
(f x = Some((), y))}" +(* ressortir *) +definition Rel2Mon :: " ('\ \ '\) set \ (unit, '\)MON\<^sub>S\<^sub>E " +where "Rel2Mon S = (\ \. if \\'. (\, \') \ S then Some((), SOME \'. (\, \') \ S) else None)" + +lemma Mon2Rel_Rel2Mon_id: assumes det:"single_valued R" shows "(Mon2Rel \ Rel2Mon) R = R" +apply (simp add: comp_def Mon2Rel_def Rel2Mon_def,auto) +apply (case_tac "\\'. (a, \') \ R", auto) +apply (subst (2) some_eq_ex) +using det[simplified single_valued_def] by auto + + +lemma Rel2Mon_Id: "(Rel2Mon \ Mon2Rel) x = x" +apply (rule ext) +apply (auto simp: comp_def Mon2Rel_def Rel2Mon_def) +apply (erule contrapos_pp, drule HOL.not_sym, simp) +done + +lemma single_valued_Mon2Rel: "single_valued (Mon2Rel B)" +by (auto simp: single_valued_def Mon2Rel_def) + +text{* Second Step : Proving an induction principle allowing to establish that lfp remains + deterministic *} + + +(* A little complete partial order theory due to Tobias Nipkow *) +definition chain :: "(nat \ 'a set) \ bool" +where "chain S = (\i. S i \ S(Suc i))" + +lemma chain_total: "chain S ==> S i \ S j \ S j \ S i" +by (metis chain_def le_cases lift_Suc_mono_le) + +definition cont :: "('a set => 'b set) => bool" +where "cont f = (\S. chain S \ f(UN n. S n) = (UN n. f(S n)))" + +lemma mono_if_cont: fixes f :: "'a set \ 'b set" + assumes "cont f" shows "mono f" +proof + fix a b :: "'a set" assume "a \ b" + let ?S = "\n::nat. if n=0 then a else b" + have "chain ?S" using `a \ b` by(auto simp: chain_def) + hence "f(UN n. ?S n) = (UN n. f(?S n))" + using assms by(simp add: cont_def) + moreover have "(UN n. ?S n) = b" using `a \ b` by (auto split: if_splits) + moreover have "(UN n. f(?S n)) = f a \ f b" by (auto split: if_splits) + ultimately show "f a \ f b" by (metis Un_upper1) +qed + +lemma chain_iterates: fixes f :: "'a set \ 'a set" + assumes "mono f" shows "chain(\n. 
(f^^n) {})" +proof- + { fix n have "(f ^^ n) {} \ (f ^^ Suc n) {}" using assms + by(induction n) (auto simp: mono_def) } + thus ?thesis by(auto simp: chain_def) +qed + +theorem lfp_if_cont: + assumes "cont f" shows "lfp f = (\n. (f ^^ n) {})" (is "_ = ?U") +proof + show "lfp f \ ?U" + proof (rule lfp_lowerbound) + have "f ?U = (UN n. (f^^Suc n){})" + using chain_iterates[OF mono_if_cont[OF assms]] assms + by(simp add: cont_def) + also have "\ = (f^^0){} \ \" by simp + also have "\ = ?U" + apply(auto simp del: funpow.simps) + by (metis empty_iff funpow_0 old.nat.exhaust) + finally show "f ?U \ ?U" by simp + qed +next + { fix n p assume "f p \ p" + have "(f^^n){} \ p" + proof(induction n) + case 0 show ?case by simp + next + case Suc + from monoD[OF mono_if_cont[OF assms] Suc] `f p \ p` + show ?case by simp + qed + } + thus "?U \ lfp f" by(auto simp: lfp_def) +qed + + +lemma single_valued_UN_chain: + assumes "chain S" "(!!n. single_valued (S n))" + shows "single_valued(UN n. S n)" +proof(auto simp: single_valued_def) + fix m n x y z assume "(x, y) \ S m" "(x, z) \ S n" + with chain_total[OF assms(1), of m n] assms(2) + show "y = z" by (auto simp: single_valued_def) +qed + +lemma single_valued_lfp: +fixes f :: "('a \ 'a) set \ ('a \ 'a) set" +assumes "cont f" "\r. single_valued r \ single_valued (f r)" +shows "single_valued(lfp f)" +unfolding lfp_if_cont[OF assms(1)] +proof(rule single_valued_UN_chain[OF chain_iterates[OF mono_if_cont[OF assms(1)]]]) + fix n show "single_valued ((f ^^ n) {})" + by(induction n)(auto simp: assms(2)) +qed + + +text{* Third Step: Definition of the Monadic While *} +definition \ :: "['\ \ bool,('\ \ '\) set] \ (('\ \ '\) set \ ('\ \ '\) set)" +where "\ b cd = (\cw. {(s,t). 
if b s then (s, t) \ cd O cw else s = t})" + + +definition while_SE :: "['\ \ bool, (unit, '\)MON\<^sub>S\<^sub>E] \ (unit, '\)MON\<^sub>S\<^sub>E" +where "while_SE c B \ (Rel2Mon(lfp(\ c (Mon2Rel B))))" + +syntax (xsymbols) + "_while_SE" :: "['\ \ bool, (unit, '\)MON\<^sub>S\<^sub>E] \ (unit, '\)MON\<^sub>S\<^sub>E" + ("(while\<^sub>S\<^sub>E _ do _ od)" [8,8]8) +translations + "while\<^sub>S\<^sub>E c do b od" == "CONST while_SE c b" + +lemma cont_\: "cont (\ c b)" +by (auto simp: cont_def \_def) + +text{* The fixpoint theory now allows us to establish that the lfp constructed over + @{term Mon2Rel} remains deterministic *} + +theorem single_valued_lfp_Mon2Rel: "single_valued (lfp(\ c (Mon2Rel B)))" +apply(rule single_valued_lfp, simp_all add: cont_\) +apply(auto simp: \_def single_valued_def) +apply(metis single_valued_Mon2Rel[of "B"] single_valued_def) +done + + +lemma Rel2Mon_if: + "Rel2Mon {(s, t). if b s then (s, t) \ Mon2Rel c O lfp (\ b (Mon2Rel c)) else s = t} \ = + (if b \ then Rel2Mon (Mon2Rel c O lfp (\ b (Mon2Rel c))) \ else Some ((), \))" +by (simp add: Rel2Mon_def) + +lemma Rel2Mon_homomorphism: + assumes determ_X: "single_valued X" and determ_Y: "single_valued Y" + shows "Rel2Mon (X O Y) = (Rel2Mon X) ;- (Rel2Mon Y)" +proof - + have relational_partial_next_in_O: "\x E F. (\y. (x, y) \ (E O F)) \ (\y. (x, y) \ E)" + by (auto) + have some_eq_intro: "\ X x y . single_valued X \ (x, y) \ X \ (SOME y. (x, y) \ X) = y" + by (auto simp: single_valued_def) + + show ?thesis +apply (simp add: Rel2Mon_def bind_SE'_def bind_SE_def) +apply (rule ext, rename_tac "\") +apply (case_tac " \ \'. 
(\, \') \ X O Y") +apply (simp only: HOL.if_True) +apply (frule relational_partial_next_in_O) +apply (auto simp: single_valued_relcomp some_eq_intro determ_X determ_Y relcomp.relcompI) +by blast +qed + + + +text{* Putting everything together, the theory of embedding and the invariance of + determinism of the while-body, gives us the usual unfold-theorem: *} +theorem while_SE_unfold: +"(while\<^sub>S\<^sub>E b do c od) = (if\<^sub>S\<^sub>E b then (c ;- (while\<^sub>S\<^sub>E b do c od)) else return () fi)" +apply (simp add: if_SE_def bind_SE'_def while_SE_def unit_SE_def) +apply (subst lfp_unfold [OF mono_if_cont, OF cont_\]) +apply (rule ext) +apply (subst \_def) +apply (auto simp: Rel2Mon_if Rel2Mon_homomorphism bind_SE'_def Rel2Mon_Id [simplified comp_def] + single_valued_Mon2Rel single_valued_lfp_Mon2Rel ) +done + + +subsection{* Chaining Executions : Definitions of Multi-bind Operators *} + +text{* In order to express execution sequences inside \HOL --- rather +than arguing over a certain pattern of terms on the meta-level --- and +in order to make our theory amenable to formal reasoning over execution sequences, +we represent them as lists of input and generalize the bind-operator +of the state-exception monad accordingly. The approach is straightforward, +but comes with a price: we have to encapsulate all input and output data +into one type, and restrict ourselves to a uniform step function. +Assume that we have a typed interface to a module with +the operations $op_1$, $op_2$, \ldots, $op_n$ with the inputs +$\iota_1$, $\iota_2$, \ldots, $\iota_n$ (outputs are treated analogously). 
+Then we can encode for this interface the general input - type: +\begin{displaymath} +\texttt{datatype}\ \texttt{in}\ =\ op_1\ ::\ \iota_1\ |\ ...\ |\ \iota_n +\end{displaymath} +Obviously, we loose some type-safety in this approach; we have to express +that in traces only \emph{corresponding} input and output belonging to the +same operation will occur; this form of side-conditions have to be expressed +inside \HOL. From the user perspective, this will not make much difference, +since junk-data resulting from too weak typing can be ruled out by adopted +front-ends. +*} + +text{* Note that the subsequent notion of a test-sequence allows the io stepping +function (and the special case of a program under test) to stop execution +\emph{within} the sequence; such premature terminations are characterized by an +output list which is shorter than the input list. + +Intuitively, @{text mbind} corresponds to a sequence of operation calls, separated by +";", in Java. The operation calls may fail (raising an exception), which means that +the state is maintained and the exception can still be caught at the end of the +execution sequence. + +*} + +fun mbind :: "'\ list \ ('\ \ ('o,'\) MON\<^sub>S\<^sub>E) \ ('o list,'\) MON\<^sub>S\<^sub>E" +where "mbind [] iostep \ = Some([], \)" + | "mbind (a#S) iostep \ = + (case iostep a \ of + None \ Some([], \) + | Some (out, \') \ (case mbind S iostep \' of + None \ Some([out],\') + | Some(outs,\'') \ Some(out#outs,\'')))" + +notation mbind ("mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e") (* future name: mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e *) + +text{* This definition is fail-safe; in case of an exception, the current state is maintained, + the computation as a whole is marked as success. 
+ Compare to the fail-strict variant @{text "mbind'"}: *} + +lemma mbind_unit [simp]: + "mbind [] f = (return [])" + by(rule ext, simp add: unit_SE_def) + +text{* The characteristic property of @{term mbind} --- which distinguishes it from + @{text mbind} defined in the sequel --- is that it never fails; it ``swallows'' internal + errors occuring during the computation. *} +lemma mbind_nofailure [simp]: + "mbind S f \ \ None" + apply(rule_tac x=\ in spec) + apply(induct S, auto simp:unit_SE_def) + apply(case_tac "f a x", auto) + apply(erule_tac x="b" in allE) + apply(erule exE, erule exE, simp) + done + +text{* In contrast, we define a fail-strict sequential execution operator. +He has more the characteristic to fail globally whenever one of its operation +steps fails. + +Intuitively speaking, @{text mbind'} corresponds to an execution of operations +where a results in a System-Halt. Another interpretation of @{text mbind'} is to +view it as a kind of @{term foldl} foldl over lists via @{term bind\<^sub>S\<^sub>E}.*} + +fun mbind' :: "'\ list \ ('\ \ ('o,'\) MON\<^sub>S\<^sub>E) \ ('o list,'\) MON\<^sub>S\<^sub>E" +where "mbind' [] iostep \ = Some([], \)" | + "mbind' (a#S) iostep \ = + (case iostep a \ of + None \ None + | Some (out, \') \ (case mbind' S iostep \' of + None \ None (* fail-strict *) + | Some(outs,\'') \ Some(out#outs,\'')))" +notation mbind' ("mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p") (* future name: mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p *) + +lemma mbind'_unit [simp]: + "mbind' [] f = (return [])" + by(rule ext, simp add: unit_SE_def) + +lemma mbind'_bind [simp]: + "(x \ mbind' (a#S) F; M x) = (a \ (F a); (x \ mbind' S F; M (a # x)))" + by(rule ext, rename_tac "z",simp add: bind_SE_def split: option.split) + +declare mbind'.simps[simp del] (* use only more abstract definitions *) + +text{* The next @{text mbind} sequential execution operator is called +Fail-Purge. 
He has more the characteristic to never fail, just "stuttering" +above operation steps that fail. Another alternative in modeling. *} + +fun mbind'' :: "'\ list \ ('\ \ ('o,'\) MON\<^sub>S\<^sub>E) \ ('o list,'\) MON\<^sub>S\<^sub>E" +where "mbind'' [] iostep \ = Some([], \)" | + "mbind'' (a#S) iostep \ = + (case iostep a \ of + None \ mbind'' S iostep \ + | Some (out, \') \ (case mbind'' S iostep \' of + None \ None (* does not occur *) + | Some(outs,\'') \ Some(out#outs,\'')))" + +notation mbind'' ("mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e") (* future name: mbind\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e\<^sub>F\<^sub>a\<^sub>i\<^sub>l *) +declare mbind''.simps[simp del] (* use only more abstract definitions *) + + +text{* mbind' as failure strict operator can be seen as a foldr on bind - + if the types would match \ldots *} + +subsubsection{* Definition : Miscellaneous Operators and their Properties *} + +definition try_SE :: "('o,'\) MON\<^sub>S\<^sub>E \ ('o option,'\) MON\<^sub>S\<^sub>E" ("try\<^sub>S\<^sub>E") +where "try\<^sub>S\<^sub>E ioprog = (\\. case ioprog \ of + None \ Some(None, \) + | Some(outs, \') \ Some(Some outs, \'))" +text{* In contrast, mbind as a failure safe operator can roughly be seen + as a foldr on bind - try: + m1 ; try m2 ; try m3; ... Note, that the rough equivalence only holds for + certain predicates in the sequence - length equivalence modulo None, + for example. However, if a conditional is added, the equivalence + can be made precise: *} + +lemma mbind_try: + "(x \ mbind (a#S) F; M x) = + (a' \ try\<^sub>S\<^sub>E(F a); + if a' = None + then (M []) + else (x \ mbind S F; M (the a' # x)))" +apply(rule ext) +apply(simp add: bind_SE_def try_SE_def) +apply(case_tac "F a x", auto) +apply(simp add: bind_SE_def try_SE_def) +apply(case_tac "mbind S F b", auto) +done + +text{* On this basis, a symbolic evaluation scheme can be established + that reduces mbind-code to try\_SE\_code and ite-cascades. 
*} + +definition alt_SE :: "[('o, '\)MON\<^sub>S\<^sub>E, ('o, '\)MON\<^sub>S\<^sub>E] \ ('o, '\)MON\<^sub>S\<^sub>E" (infixl "\\<^sub>S\<^sub>E" 10) +where "(f \\<^sub>S\<^sub>E g) = (\ \. case f \ of None \ g \ + | Some H \ Some H)" + +definition malt_SE :: "('o, '\)MON\<^sub>S\<^sub>E list \ ('o, '\)MON\<^sub>S\<^sub>E" +where "malt_SE S = foldr alt_SE S fail\<^sub>S\<^sub>E" +notation malt_SE ("\\<^sub>S\<^sub>E") + +lemma malt_SE_mt [simp]: "\\<^sub>S\<^sub>E [] = fail\<^sub>S\<^sub>E" +by(simp add: malt_SE_def) + +lemma malt_SE_cons [simp]: "\\<^sub>S\<^sub>E (a # S) = (a \\<^sub>S\<^sub>E (\\<^sub>S\<^sub>E S))" +by(simp add: malt_SE_def) + + + + +subsection{* Definition and Properties of Valid Execution Sequences*} + +text{* A key-notion in our framework is the \emph{valid} execution +sequence, \ie{} a sequence that: +\begin{enumerate} +\item terminates (not obvious since while), +\item results in a final @{term True}, +\item does not fail globally (but recall the FailSave and FailPurge + variants of @{term mbind}-operators, that handle local exceptions in + one or another way). +\end{enumerate} +Seen from an automata perspective (where the monad - operations correspond to +the step function), valid execution sequences can be used to model ``feasible paths'' +across an automaton. *} + +definition valid_SE :: "'\ \ (bool,'\) MON\<^sub>S\<^sub>E \ bool" (infix "\" 15) +where "(\ \ m) = (m \ \ None \ fst(the (m \)))" +text{* This notation consideres failures as valid -- a definition +inspired by I/O conformance. *} + +subsubsection{* Valid Execution Sequences and their Symbolic Execution *} +lemma exec_unit_SE [simp]: "(\ \ (return P)) = (P)" +by(auto simp: valid_SE_def unit_SE_def) + +lemma exec_unit_SE' [simp]: "(\\<^sub>0 \ (\\. Some (f \, \))) = (f \\<^sub>0)" +by(simp add: valid_SE_def ) + +lemma exec_fail_SE [simp]: "(\ \ fail\<^sub>S\<^sub>E) = False" +by(auto simp: valid_SE_def fail_SE_def) + +lemma exec_fail_SE'[simp]: "\(\\<^sub>0 \ (\\. 
None))" +by(simp add: valid_SE_def ) + +text{* The following the rules are in a sense the heart of the entire symbolic execution approach *} +lemma exec_bind_SE_failure: +"A \ = None \ \(\ \ ((s \ A ; M s)))" +by(simp add: valid_SE_def unit_SE_def bind_SE_def) + +lemma exec_bind_SE_success: +"A \ = Some(b,\') \ (\ \ ((s \ A ; M s))) = (\' \ (M b))" +by(simp add: valid_SE_def unit_SE_def bind_SE_def ) + +lemma exec_bind_SE_success': (* atomic boolean Monad "Query Functions" *) +"M \ = Some(f \,\) \ (\ \ M) = f \" +by(simp add: valid_SE_def unit_SE_def bind_SE_def ) + + + + +lemma exec_bind_SE_success'': +"\ \ ((s \ A ; M s)) \ \ v \'. the(A \) = (v,\') \ \' \ (M v)" +apply(auto simp: valid_SE_def unit_SE_def bind_SE_def) +apply(cases "A \", simp_all) +apply(drule_tac x="A \" and f=the in arg_cong, simp) +apply(rule_tac x="fst aa" in exI) +apply(rule_tac x="snd aa" in exI, auto) +done + + +lemma exec_bind_SE_success''': +"\ \ ((s \ A ; M s)) \ \ a. (A \) = Some a \ (snd a) \ (M (fst a))" +apply(auto simp: valid_SE_def unit_SE_def bind_SE_def) +apply(cases "A \", simp_all) +apply(drule_tac x="A \" and f=the in arg_cong, simp) +apply(rule_tac x="fst aa" in exI) +apply(rule_tac x="snd aa" in exI, auto) +done + + +lemma exec_bind_SE_success'''' : +"\ \ ((s \ A ; M s)) \ \ v \'. A \ = Some(v,\') \ \' \ (M v)" +apply(auto simp: valid_SE_def unit_SE_def bind_SE_def) +apply(cases "A \", simp_all) +apply(drule_tac x="A \" and f=the in arg_cong, simp) +apply(rule_tac x="fst aa" in exI) +apply(rule_tac x="snd aa" in exI, auto) +done + + + + +text{* Recall \verb+mbind_unit+ for the base case. 
*} + +lemma valid_mbind_mt : "(\ \ ( s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e [] f; unit\<^sub>S\<^sub>E (P s))) = P [] " by simp +lemma valid_mbind_mtE: "\ \ ( s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e [] f; unit\<^sub>S\<^sub>E (P s)) \ (P [] \ Q) \ Q" +by(auto simp: valid_mbind_mt) + +lemma valid_mbind'_mt : "(\ \ ( s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p [] f; unit\<^sub>S\<^sub>E (P s))) = P [] " by simp +lemma valid_mbind'_mtE: "\ \ ( s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p [] f; unit\<^sub>S\<^sub>E (P s)) \ (P [] \ Q) \ Q" +by(auto simp: valid_mbind'_mt) + +lemma valid_mbind''_mt : "(\ \ ( s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e [] f; unit\<^sub>S\<^sub>E (P s))) = P [] " +by(simp add: mbind''.simps valid_SE_def bind_SE_def unit_SE_def) +lemma valid_mbind''_mtE: "\ \ ( s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e [] f; unit\<^sub>S\<^sub>E (P s)) \ (P [] \ Q) \ Q" +by(auto simp: valid_mbind''_mt) + + +lemma exec_mbindFSave_failure: +"ioprog a \ = None \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (a#S) ioprog ; M s)) = (\ \ (M []))" +by(simp add: valid_SE_def unit_SE_def bind_SE_def) + +lemma exec_mbindFStop_failure: +"ioprog a \ = None \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (a#S) ioprog ; M s)) = (False)" +by(simp add: exec_bind_SE_failure) + +lemma exec_mbindFPurge_failure: +"ioprog a \ = None \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e (a#S) ioprog ; M s)) = (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e (S) ioprog ; M s))" +by(simp add: valid_SE_def unit_SE_def bind_SE_def mbind''.simps) + + +lemma exec_mbindFSave_success : +"ioprog a \ = Some(b,\') \ + (\ \ (s \ 
mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (a#S) ioprog ; M s)) = + (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S ioprog ; M (b#s)))" +unfolding valid_SE_def unit_SE_def bind_SE_def +by(cases "mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S ioprog \'", auto) + +lemma exec_mbindFStop_success : +"ioprog a \ = Some(b,\') \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (a#S) ioprog ; M s)) = + (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S ioprog ; M (b#s)))" +unfolding valid_SE_def unit_SE_def bind_SE_def +by(cases "mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S ioprog \'", auto simp: mbind'.simps) + +lemma exec_mbindFPurge_success : +"ioprog a \ = Some(b,\') \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e (a#S) ioprog ; M s)) = + (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e S ioprog ; M (b#s)))" +unfolding valid_SE_def unit_SE_def bind_SE_def +by(cases "mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e S ioprog \'", auto simp: mbind''.simps) + +lemma exec_mbindFSave: +"(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (a#S) ioprog ; return (P s))) = + (case ioprog a \ of + None \ (\ \ (return (P []))) + | Some(b,\') \ (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S ioprog ; return (P (b#s)))))" +apply(case_tac "ioprog a \") +apply(auto simp: exec_mbindFSave_failure exec_mbindFSave_success split: prod.splits) +done + +lemma mbind_eq_sexec: +assumes * : "\b \'. 
f a \ = Some(b,\') \ + (os \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S f; P (b#os)) = (os \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S f; P' (b#os))" +shows "( a \ f a; x \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S f; P (a # x)) \ = + ( a \ f a; x \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S f; P'(a # x)) \" + apply(cases "f a \ = None") + apply(subst bind_SE_def, simp) + apply(subst bind_SE_def, simp) + apply auto + apply(subst bind_SE_def, simp) + apply(subst bind_SE_def, simp) +apply(simp add: *) +done + + +lemma mbind_eq_sexec': +assumes * : "\b \'. f a \ = Some(b,\') \ + (P (b))\' = (P' (b))\'" +shows "( a \ f a; P (a)) \ = + ( a \ f a; P'(a)) \" + apply(cases "f a \ = None") + apply(subst bind_SE_def, simp) + apply(subst bind_SE_def, simp) + apply auto + apply(subst bind_SE_def, simp) + apply(subst bind_SE_def, simp) + apply(simp add: *) +done + +lemma mbind'_concat: +"(os \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (S@T) f; P os) = (os \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S f; os' \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p T f; P (os @ os'))" +proof (rule ext, rename_tac "\", induct S arbitrary: \ P) + case Nil show ?case by simp +next + case (Cons a S) show ?case + apply(insert Cons.hyps, simp) + by(rule mbind_eq_sexec',simp) +qed + +lemma assert_suffix_inv : + "\ \ ( _ \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p xs istep; assert\<^sub>S\<^sub>E (P)) + \ \\. 
P \ \ (\ \ (_ \ istep x; assert\<^sub>S\<^sub>E (P))) + \ \ \ ( _ \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (xs @ [x]) istep; assert\<^sub>S\<^sub>E (P))" +apply(subst mbind'_concat, simp) +unfolding bind_SE_def assert_SE_def valid_SE_def +apply(auto split: option.split option.split_asm) +apply(case_tac "aa",simp_all) +apply(case_tac "P bb",simp_all) +apply (metis option.distinct(1)) +apply(case_tac "aa",simp_all) +apply(case_tac "P bb",simp_all) +by (metis option.distinct(1)) + + + +text{* Universal splitting and symbolic execution rule *} +lemma exec_mbindFSave_E: +assumes seq : "(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (a#S) ioprog ; (P s)))" + and none: "ioprog a \ = None \ (\ \ (P [])) \ Q" + and some: "\ b \'. ioprog a \ = Some(b,\') \ (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S ioprog;(P (b#s)))) \ Q " +shows "Q" +using seq +proof(cases "ioprog a \") + case None assume ass:"ioprog a \ = None" show "Q" + apply(rule none[OF ass]) + apply(insert ass, erule_tac ioprog1=ioprog in exec_mbindFSave_failure[THEN iffD1],rule seq) + done +next + case (Some aa) assume ass:"ioprog a \ = Some aa" show "Q" + apply(insert ass,cases "aa",simp, rename_tac "out" "\'") + apply(erule some) + apply(insert ass,simp) + apply(erule_tac ioprog1=ioprog in exec_mbindFSave_success[THEN iffD1],rule seq) + done +qed + +text{* The next rule reveals the particular interest in deduction; + as an elimination rule, it allows for a linear conversion of a validity judgement + @{term "mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p"} over an input list @{term "S"} into a constraint system; without any + branching ... Symbolic execution can even be stopped tactically whenever + @{term "ioprog a \ = Some(b,\')"} comes to a contradiction. 
*} +lemma exec_mbindFStop_E: +assumes seq : "(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (a#S) ioprog ; (P s)))" + and some: "\b \'. ioprog a \ = Some(b,\') \ (\'\ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S ioprog;(P(b#s)))) \ Q" +shows "Q" +using seq +proof(cases "ioprog a \") + case None assume ass:"ioprog a \ = None" show "Q" + apply(insert ass seq) + apply(drule_tac \=\ and S=S and M=P in exec_mbindFStop_failure, simp) + done +next + case (Some aa) assume ass:"ioprog a \ = Some aa" show "Q" + apply(insert ass,cases "aa",simp, rename_tac "out" "\'") + apply(erule some) + apply(insert ass,simp) + apply(erule_tac ioprog1=ioprog in exec_mbindFStop_success[THEN iffD1],rule seq) + done +qed + + +lemma exec_mbindFPurge_E: +assumes seq : "(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e (a#S) ioprog ; (P s)))" + and none: "ioprog a \ = None \ (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e S ioprog;(P (s)))) \ Q" + and some: "\ b \'. 
ioprog a \ = Some(b,\') \ (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e S ioprog;(P (b#s)))) \ Q " +shows "Q" +using seq +proof(cases "ioprog a \") + case None assume ass:"ioprog a \ = None" show "Q" + apply(rule none[OF ass]) + apply(insert ass, erule_tac ioprog1=ioprog in exec_mbindFPurge_failure[THEN iffD1],rule seq) + done +next + case (Some aa) assume ass:"ioprog a \ = Some aa" show "Q" + apply(insert ass,cases "aa",simp, rename_tac "out" "\'") + apply(erule some) + apply(insert ass,simp) + apply(erule_tac ioprog1=ioprog in exec_mbindFPurge_success[THEN iffD1],rule seq) + done +qed + + +lemma assert_disch1 :" P \ \ (\ \ (x \ assert\<^sub>S\<^sub>E P; M x)) = (\ \ (M True))" +by(auto simp: bind_SE_def assert_SE_def valid_SE_def) + +lemma assert_disch2 :" \ P \ \ \ (\ \ (x \ assert\<^sub>S\<^sub>E P ; M s))" +by(auto simp: bind_SE_def assert_SE_def valid_SE_def) + +lemma assert_disch3 :" \ P \ \ \ (\ \ (assert\<^sub>S\<^sub>E P))" +by(auto simp: bind_SE_def assert_SE_def valid_SE_def) + +lemma assert_disch4 :" P \ \ (\ \ (assert\<^sub>S\<^sub>E P))" +by(auto simp: bind_SE_def assert_SE_def valid_SE_def) + +lemma assert_simp : "(\ \ assert\<^sub>S\<^sub>E P) = P \" +by (meson assert_disch3 assert_disch4) + +lemmas assert_D = assert_simp[THEN iffD1] (* legacy *) + +lemma assert_bind_simp : "(\ \ (x \ assert\<^sub>S\<^sub>E P; M x)) = (P \ \ (\ \ (M True)))" +by(auto simp: bind_SE_def assert_SE_def valid_SE_def split: HOL.if_split_asm) + +lemmas assert_bindD = assert_bind_simp[THEN iffD1] (* legacy *) + + +lemma assume_D : "(\ \ (_ \ assume\<^sub>S\<^sub>E P; M)) \ \ \. (P \ \ (\ \ M) )" +apply(auto simp: bind_SE_def assume_SE_def valid_SE_def split: HOL.if_split_asm) +apply(rule_tac x="Eps P" in exI, auto) +apply(subst Hilbert_Choice.someI,assumption,simp) +done + + +lemma assume_E : +assumes * : "\ \ ( _ \ assume\<^sub>S\<^sub>E P; M) " +and ** : "\ \. 
P \ \ \ \ M \ Q" +shows "Q" +apply(insert *) +by(insert *[THEN assume_D], auto intro: **) + +lemma assume_E' : +assumes * : "\ \ assume\<^sub>S\<^sub>E P ;- M" +and ** : "\ \. P \ \ \ \ M \ Q" +shows "Q" +by(insert *[simplified "bind_SE'_def", THEN assume_D], auto intro: **) + + +text{* These two rule prove that the SE Monad in connection with the notion of valid sequence +is actually sufficient for a representation of a Boogie-like language. The SBE monad with explicit +sets of states --- to be shown below --- is strictly speaking not necessary (and will therefore +be discontinued in the development). *} + +term "if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi" + +lemma if_SE_D1 : "P \ \ (\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)) = (\ \ B\<^sub>1)" +by(auto simp: if_SE_def valid_SE_def) + +lemma if_SE_D1' : "P \ \ (\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi);-M) = (\ \ (B\<^sub>1;-M))" +by(auto simp: if_SE_def valid_SE_def bind_SE'_def bind_SE_def) + + +lemma if_SE_D2 : "\ P \ \ (\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)) = (\ \ B\<^sub>2)" +by(auto simp: if_SE_def valid_SE_def) + +lemma if_SE_D2' : "\ P \ \ (\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi);-M) = (\ \ B\<^sub>2;-M)" +by(auto simp: if_SE_def valid_SE_def bind_SE'_def bind_SE_def) + + +lemma if_SE_split_asm : +"(\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)) = ((P \ \ (\ \ B\<^sub>1)) \ (\ P \ \ (\ \ B\<^sub>2)))" +by(cases "P \",auto simp: if_SE_D1 if_SE_D2) + +lemma if_SE_split_asm': +"(\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi);-M) = ((P \ \ (\ \ B\<^sub>1;-M)) \ (\ P \ \ (\ \ B\<^sub>2;-M)))" +by(cases "P \",auto simp: if_SE_D1' if_SE_D2') + + +lemma if_SE_split: +"(\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)) = ((P \ \ (\ \ B\<^sub>1)) \ (\ P \ \ (\ \ B\<^sub>2)))" +by(cases "P \", auto simp: if_SE_D1 if_SE_D2) + + +lemma if_SE_split': +"(\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 
else B\<^sub>2 fi);-M) = ((P \ \ (\ \ B\<^sub>1;-M)) \ (\ P \ \ (\ \ B\<^sub>2;-M)))" +by(cases "P \", auto simp: if_SE_D1' if_SE_D2') + +lemma if_SE_execE: + assumes A: "\ \ ((if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi))" + and B: "P \ \ \ \ (B\<^sub>1) \ Q" + and C: "\ P \\ \ \ (B\<^sub>2) \ Q" + shows "Q" +by(insert A [simplified if_SE_split],cases "P \", simp_all, auto elim: B C) + + +lemma if_SE_execE': + assumes A: "\ \ ((if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi);-M)" + and B: "P \ \ \ \ (B\<^sub>1;-M) \ Q" + and C: "\ P \\ \ \ (B\<^sub>2;-M) \ Q" + shows "Q" +by(insert A [simplified if_SE_split'],cases "P \", simp_all, auto elim: B C) + + +lemma exec_while : +"(\ \ ((while\<^sub>S\<^sub>E b do c od) ;- M)) = + (\ \ ((if\<^sub>S\<^sub>E b then c ;- (while\<^sub>S\<^sub>E b do c od) else unit\<^sub>S\<^sub>E ()fi) ;- M))" +apply(subst while_SE_unfold) +by(simp add: bind_SE'_def ) + +lemmas exec_whileD = exec_while[THEN iffD1] + +lemma if_SE_execE'': +"\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi) ;- M +\ (P \ \ \ \ B\<^sub>1 ;- M \ Q) +\ (\ P \ \ \ \ B\<^sub>2 ;- M \ Q) +\ Q" +by(auto elim: if_SE_execE') + + +lemma [code]: + "(\ \ m) = (case (m \) of None \ False | (Some (x,y)) \ x)" + apply(simp add: valid_SE_def) + apply(cases "m \ = None", simp_all) + apply(insert not_None_eq, auto) +done + + +text{* Test-Refinements will be stated in terms of the failsave @{term mbind}, opting + more generality. The following lemma allows for an optimization both in + test execution as well as in symbolic execution for an important special case of + the post-codition: Whenever the latter has the constraint that the length of input and + output sequence equal each other (that is to say: no failure occured), failsave mbind + can be reduced to failstop mbind ... 
*} +lemma mbindFSave_vs_mbindFStop : + "(\ \ (os \ (mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e \s ioprog); return(length \s = length os \ P \s os))) = + (\ \ (os \ (mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p \s ioprog); return(P \s os)))" + apply(rule_tac x=P in spec) + apply(rule_tac x=\ in spec) + proof(induct "\s") + case Nil show ?case by(simp_all add: mbind_try try_SE_def del: Monads.mbind.simps) + case (Cons a \s) show ?case + apply(rule allI, rename_tac "\",rule allI, rename_tac "P") + apply(insert Cons.hyps) + apply(case_tac "ioprog a \") + apply(simp only: exec_mbindFSave_failure exec_mbindFStop_failure, simp) + apply(simp add: split_paired_all del: Monads.mbind.simps ) + apply(rename_tac "\'") + apply(subst exec_mbindFSave_success, assumption) + apply(subst (2) exec_bind_SE_success, assumption) + apply(erule_tac x="\'" in allE) + apply(erule_tac x="\\s s. P (a # \s) (aa # s)" in allE) (* heureka ! *) + apply(simp) + done + qed + + +lemma mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e_vs_mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p: +assumes A: "\ \ \. ioprog \ \ \ None" +shows "(\ \ (os \ (mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e \s ioprog); P os)) = + (\ \ (os \ (mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p \s ioprog); P os))" +proof(induct "\s") + case Nil show ?case by simp +next + case (Cons a \s) + from Cons.hyps + have B:"\ S f \. mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S f \ \ None " by simp + have C:"\\. mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p \s ioprog \ = mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e \s ioprog \" + apply(induct \s, simp) + apply(rule allI,rename_tac "\") + apply(simp add: Monads.mbind'.simps(2)) + apply(insert A, erule_tac x="a" in allE) + apply(erule_tac x="\" and P="\\ . 
ioprog a \ \ None" in allE) + apply(auto split:option.split) + done + show ?case + apply(insert A,erule_tac x="a" in allE,erule_tac x="\" in allE) + apply(simp, elim exE) + apply(rename_tac "out" "\'") + apply(insert B, erule_tac x=\s in allE, erule_tac x=ioprog in allE, erule_tac x=\' in allE) + apply(subst(asm) not_None_eq, elim exE) + apply(subst Monads.exec_bind_SE_success) + apply(simp split: option.split, auto) + apply(rule_tac s="(\ a b c. a # (fst c)) out \' (aa, b)" in trans, simp,rule refl) + apply(rule_tac s="(\ a b c. (snd c)) out \' (aa, b)" in trans, simp,rule refl) + apply(simp_all) + apply(subst Monads.exec_bind_SE_success, assumption) + apply(subst Monads.exec_bind_SE_success) + apply(rule_tac s="Some (aa, b)" in trans,simp_all add:C) + apply(subst(asm) Monads.exec_bind_SE_success, assumption) + apply(subst(asm) Monads.exec_bind_SE_success) + apply(rule_tac s="Some (aa, b)" in trans,simp_all add:C) + done +qed + + +section{* Valid Test Sequences in the State Exception Backtrack Monad *} +text{* This is still an unstructured merge of executable monad concepts +and specification oriented high-level properties initiating test procedures. *} + + + +section{* Hoare *} +(* STILL VERY EXPERIMENTAL STUFF *) + +definition hoare\<^sub>1 :: "('\ \ bool) \ ('\, '\)MON\<^sub>S\<^sub>E \ ('\ \ '\ \ bool) \ bool" ("\\<^sub>1 ({(1_)}/ (_)/ {(1_)})" 50) +where "(\\<^sub>1{P} M {Q} ) = (\\. \ \ (_ \ assume\<^sub>S\<^sub>E P ; x \ M; assert\<^sub>S\<^sub>E (Q x)))" + + +definition hoare\<^sub>2 :: "('\ \ bool) \ ('\, '\)MON\<^sub>S\<^sub>E \ ('\ \ '\ \ bool) \ bool" ("\\<^sub>2 ({(1_)}/ (_)/ {(1_)})" 50) +where "(\\<^sub>2{P} M {Q} ) = (\\. P \ \ (\ \ (x \ M; assert\<^sub>S\<^sub>E (Q x))))" + + + +find_theorems "assert\<^sub>S\<^sub>E" + +find_theorems "assume\<^sub>S\<^sub>E" + +lemma "P \ \ (_ \ assume\<^sub>S\<^sub>E P ; x \ M; assert\<^sub>S\<^sub>E (\\. (x=X) \ Q x \))" +oops + +lemma "\\. \ X. 
\ \ (_ \ assume\<^sub>S\<^sub>E P ; x \ M; assert\<^sub>S\<^sub>E (\\. x=X \ Q x \))" +oops + + +lemma "\ X. \ \ (_ \ assume\<^sub>S\<^sub>E P ; x \ M; assert\<^sub>S\<^sub>E (\\. x=X \ Q x \)) + \ + \ \ (_ \ assume\<^sub>S\<^sub>E P ; x \ M; assert\<^sub>S\<^sub>E (\\. Q x \))" +unfolding valid_SE_def assume_SE_def assert_SE_def bind_SE_def +by(auto split: if_split HOL.if_split_asm Option.option.split Option.option.split_asm) + + +lemma monadic_sequence_rule: + "\ X \\<^sub>1. (\ \ (_ \ assume\<^sub>S\<^sub>E (\\'. (\=\') \ P \) ; x \ M; assert\<^sub>S\<^sub>E (\\. (x=X) \ (\=\\<^sub>1) \ Q x \))) + \ + (\\<^sub>1 \ (_ \ assume\<^sub>S\<^sub>E (\\. (\=\\<^sub>1) \ Q x \) ; y \ M'; assert\<^sub>S\<^sub>E (\\. R x y \))) + \ + \ \ (_ \ assume\<^sub>S\<^sub>E (\\'. (\=\') \ P \) ; x \ M; y \ M'; assert\<^sub>S\<^sub>E (R x y))" +apply(elim exE impE conjE) +apply(drule Monads.assume_D) +apply(elim exE impE conjE) +unfolding valid_SE_def assume_SE_def assert_SE_def bind_SE_def +apply(auto split: if_split HOL.if_split_asm Option.option.split Option.option.split_asm) +apply (metis (mono_tags, lifting) option.simps(3) someI_ex) +oops + + + +subsection{* Legacy Bindings *} + + +lemma valid_true[simp]: (* legacy: special case *) + "(\ \ (s \ return x ; return (P s))) = P x" by simp + + +(* +lemmas valid_both = exec_mbindFSave (* legacy *) +lemmas valid_success = exec_mbindFSave_success (* legacy *) +lemmas valid_success'' = exec_mbindFSave_success(* legacy *) +lemmas valid_success' = exec_bind_SE_success (* legacy *) +lemmas valid_failure = exec_mbindFSave_failure (* legacy : *) +lemmas valid_failure' = exec_bind_SE_failure (* legacy *) +lemmas valid_failure''=valid_failure (* legacy : *) +lemmas valid_failure''' = exec_mbindFStop_failure (* legacy : *) +lemmas valid_propagate_fail = exec_fail_SE (* legacy *) +lemmas valid_propagate_fail' = exec_fail_SE' (* legacy *) +lemmas valid_propoagate_3' = valid_propagate_fail' (* legacy *) +lemmas valid_propagate_2 = 
exec_bind_SE_success''(* legacy *) +lemmas valid_propagate_1 = exec_unit_SE (* legacy *) +lemmas valid_successElem = exec_bind_SE_success' (* legacy *) +lemmas valid_propagate_2' = exec_bind_SE_success'''(* legacy *) +lemmas valid_propagate_2'' = exec_bind_SE_success'''' (* legacy *) +lemmas valid_propoagate_3 = exec_unit_SE' (* legacy *) + *) +(* legacy ?: +lemma valid_success'': +"ioprog a \ = Some(b,\') \ + (\ \ (s \ mbind (a#S) ioprog ; return (P s))) = + (\' \ (s \ mbind S ioprog ; return (P (b#s))))" +unfolding valid_SE_def unit_SE_def bind_SE_def +by(cases "mbind S ioprog \'", auto) +*) + +subsection{* State Backtrack Monads *} +text{*This subsection is still rudimentary and as such an interesting +formal analogue to the previous monad definitions. It is doubtful that it is +interesting for testing and as a cmputational stucture at all. +Clearly more relevant is ``sequence'' instead of ``set,'' which would +rephrase Isabelle's internal tactic concept. *} + +type_synonym ('o, '\) MON\<^sub>S\<^sub>B = "'\ \ ('o \ '\) set" + +definition bind_SB :: "('o, '\)MON\<^sub>S\<^sub>B \ ('o \ ('o', '\)MON\<^sub>S\<^sub>B) \ ('o', '\)MON\<^sub>S\<^sub>B" +where "bind_SB f g \ = \ ((\(out, \). (g out \)) ` (f \))" +notation bind_SB ("bind\<^sub>S\<^sub>B") + +definition unit_SB :: "'o \ ('o, '\)MON\<^sub>S\<^sub>B" ("(returns _)" 8) +where "unit_SB e = (\\. {(e,\)})" +notation unit_SB ("unit\<^sub>S\<^sub>B") + +syntax (xsymbols) + "_bind_SB" :: "[pttrn,('o,'\)MON\<^sub>S\<^sub>B,('o','\)MON\<^sub>S\<^sub>B] \ ('o','\)MON\<^sub>S\<^sub>B" + ("(2 _ := _; _)" [5,8,8]8) +translations + "x := f; g" == "CONST bind_SB f (% x . 
g)" + + + +lemma bind_left_unit_SB : "(x := returns a; m x) = m a" + by (rule ext,simp add: unit_SB_def bind_SB_def) + +lemma bind_right_unit_SB: "(x := m; returns x) = m" + by (rule ext, simp add: unit_SB_def bind_SB_def) + + +lemma bind_assoc_SB: "(y := (x := m; k x); h y) = (x := m; (y := k x; h y))" + by (rule ext, simp add: unit_SB_def bind_SB_def split_def) + + + +subsection{* State Backtrack Exception Monad (vulgo: Boogie-PL) *} +text{* The following combination of the previous two Monad-Constructions +allows for the semantic foundation of a simple generic assertion language +in the style of Schirmers Simpl-Language or Rustan Leino's Boogie-PL language. +The key is to use the exceptional element None for violations of +the assert-statement. *} +type_synonym ('o, '\) MON\<^sub>S\<^sub>B\<^sub>E = "'\ \ (('o \ '\) set) option" + + +definition bind_SBE :: "('o,'\)MON\<^sub>S\<^sub>B\<^sub>E \ ('o \ ('o','\)MON\<^sub>S\<^sub>B\<^sub>E) \ ('o','\)MON\<^sub>S\<^sub>B\<^sub>E" +where "bind_SBE f g = (\\. case f \ of None \ None + | Some S \ (let S' = (\(out, \'). g out \') ` S + in if None \ S' then None + else Some(\ (the ` S'))))" + +syntax (xsymbols) + "_bind_SBE" :: "[pttrn,('o,'\)MON\<^sub>S\<^sub>B\<^sub>E,('o','\)MON\<^sub>S\<^sub>B\<^sub>E] \ ('o','\)MON\<^sub>S\<^sub>B\<^sub>E" + ("(2 _ :\ _; _)" [5,8,8]8) +translations + "x :\ f; g" == "CONST bind_SBE f (% x . g)" + +definition unit_SBE :: "'o \ ('o, '\)MON\<^sub>S\<^sub>B\<^sub>E" ("(returning _)" 8) +where "unit_SBE e = (\\. Some({(e,\)}))" + +definition assert_SBE :: "('\ \ bool) \ (unit, '\)MON\<^sub>S\<^sub>B\<^sub>E" +where "assert_SBE e = (\\. if e \ then Some({((),\)}) + else None)" +notation assert_SBE ("assert\<^sub>S\<^sub>B\<^sub>E") + +definition assume_SBE :: "('\ \ bool) \ (unit, '\)MON\<^sub>S\<^sub>B\<^sub>E" +where "assume_SBE e = (\\. 
if e \ then Some({((),\)}) + else Some {})" +notation assume_SBE ("assume\<^sub>S\<^sub>B\<^sub>E") + + +definition havoc_SBE :: " (unit, '\)MON\<^sub>S\<^sub>B\<^sub>E" +where "havoc_SBE = (\\. Some({x. True}))" +notation havoc_SBE ("havoc\<^sub>S\<^sub>B\<^sub>E") + + +lemma bind_left_unit_SBE : "(x :\ returning a; m x) = m a" + by (rule ext,simp add: unit_SBE_def bind_SBE_def) + +lemma bind_right_unit_SBE: "(x :\ m; returning x) = m" + apply (rule ext, simp add: unit_SBE_def bind_SBE_def) + apply (case_tac "m x", simp_all add:Let_def) + apply (rule HOL.ccontr, simp add: Set.image_iff) + done + + +lemmas aux = trans[OF HOL.neq_commute,OF Option.not_None_eq] + + +lemma bind_assoc_SBE: "(y :\ (x :\ m; k x); h y) = (x :\ m; (y :\ k x; h y))" +proof (rule ext, rename_tac z, simp add: unit_SBE_def bind_SBE_def, + case_tac "m z", simp_all add: Let_def Set.image_iff, safe, goal_cases) print_cases + case (1 z a aa b ab ba aaa baa) then show ?case + using case_prod_conv option.case_eq_if by force +next + case (2 z a aa b ab ba) then show ?case + apply(rule_tac x="(aa,b)" in bexI, simp_all add:split_def) + apply(erule_tac x="(aa,b)" in ballE) + apply(auto simp: aux image_def split_def intro!: rev_bexI) + done +next + case (3 z a aa b) then show ?case + by (metis (no_types, lifting) case_prod_conv option.case_eq_if) +next + case (4 z a aa b) then show ?case + apply(auto simp: aux image_def split_def intro!: rev_bexI) + apply (auto simp add: Let_def split: if_split_asm option.split_asm) + apply fastforce + done +next + case (5 z a aa b ab ba aaa baa) then show ?case + apply simp apply((erule_tac x="(ab,ba)" in ballE)+) + apply(simp_all add: aux Option.not_None_eq, (erule exE)+, simp add:split_def) + apply(erule rev_bexI,case_tac "None\(\p. 
h (fst p) (snd p))`y",auto simp:split_def) + done + +next + case (6 z a aa b aaa ba) then show ?case + apply simp apply((erule_tac x="(aaa,ba)" in ballE)+) + apply(simp_all add: aux, (erule exE)+, simp add:split_def) + apply(erule rev_bexI, case_tac "None\(\p. h(fst p)(snd p))`y",auto simp:split_def) + done +qed + + +interpretation SBE : Monad unit_SBE bind_SBE + by unfold_locales (simp_all add: bind_left_unit_SBE bind_right_unit_SBE bind_assoc_SBE) + + +definition valid_SBE :: "'\ \ ('a,'\) MON\<^sub>S\<^sub>B\<^sub>E \ bool" (infix "\\<^sub>S\<^sub>B\<^sub>E" 15) +where "\ \\<^sub>S\<^sub>B\<^sub>E m \ (m \ \ None)" +text{* This notation consideres all non-failures as valid. *} + + +lemma assume_assert: "(\ \\<^sub>S\<^sub>B\<^sub>E ( _ :\ assume\<^sub>S\<^sub>B\<^sub>E P ; assert\<^sub>S\<^sub>B\<^sub>E Q)) = (P \ \ Q \)" + by(simp add: valid_SBE_def assume_SBE_def assert_SBE_def bind_SBE_def) + +lemma assert_intro: "Q \ \ \ \\<^sub>S\<^sub>B\<^sub>E (assert\<^sub>S\<^sub>B\<^sub>E Q)" + by(simp add: valid_SBE_def assume_SBE_def assert_SBE_def bind_SBE_def) + +lemma assume_dest: + "\ \ \\<^sub>S\<^sub>B\<^sub>E (x :\ assume\<^sub>S\<^sub>B\<^sub>E Q; M x); Q \' \ \ \ \\<^sub>S\<^sub>B\<^sub>E M ()" + apply(auto simp: valid_SBE_def assume_SBE_def assert_SBE_def bind_SBE_def) + apply(cases "Q \",simp_all) + oops + +text{* This still needs work. What would be needed is a kind + of wp - calculus that comes out of that. So far: nope. *} + +(* TODO: IF THEN ELSE and WHILE + Monadic Laws + Operational Rules. *) + + + + + +end diff --git a/src/test/Observers.thy b/src/test/Observers.thy new file mode 100644 index 0000000..aa38341 --- /dev/null +++ b/src/test/Observers.thy @@ -0,0 +1,200 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * Observers.thy --- the base testing theory for reactive sequence testing. 
+ * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2007, ETH Zurich, Switzerland + * 2009 B. Wolff, Univ. Paris-Sud, France + * 2009 Achim D. Brucker, Germany + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ ******************************************************************************) +(* $Id:$ *) + +chapter {* Observers managing Abstract State*} + +theory Observers imports Monads +begin + + +section{* IO-stepping Function Transfomers *} + +text{* The following adaption combinator converts an input-output + program under test of type: $\iota \Rightarrow \sigma + \rightharpoonup o \times \sigma$ with program state $\sigma$ into a + state transition program that can be processed by mbind. The key + idea to turn mbind into a test-driver for a reactive system is by + providing an internal state $\sigma'$, managed by the test driver, + and external, problem-specific functions ``rebind'' and + ``substitute'' that operate on this internal state. For example, + this internal state can be instantiated with an environment $var + \rightharpoonup value$. The output (or parts of it) can then be + bound to vars in the environment. In contrast, substitute can then + explicit substitute variables occuring in value representations + into pure values, e.g. is can substitue $c~("X")$ into $c~3$ + provided the environment contained the map with $X \leadsto 3$. *} + +text{* The state of the test-driver consists of two parts: the state + of the observer (or: adaptor) $\sigma$ and the internal state $\sigma'$ of the + the step-function of the system under test $ioprog$ is allowed to use. *} + +definition observer :: "['\ \ 'o \ '\, '\ \ '\ \ '\, '\\'\' \ '\ \ 'o \ bool] + \ ('\ \ '\' \ 'o \'\') + \ ('\ \ ('\\'\' \ '\\'\'))" + +where "observer rebind substitute postcond ioprog = + (\ input. (\ (\, \'). let input'= substitute \ input in + case ioprog input' \' of + None \ None (* ioprog failure - eg. timeout ... 
*) + | Some (output, \''') \ let \'' = rebind \ output in + (if postcond (\'',\''') input' output + then Some(\'', \''') + else None (* postcond failure *) )))" + +text{* The subsequent $observer$ version is more powerful: it admits also preconditions +of $ioprog$, which make reference to the observer state $\sigma_{obs}$. The observer-state +may contain an environment binding values to explicit variables. In such a scenario, the +$precond\_solve$ may consist of a \emph{solver} that constructs a solution from +\begin{enumerate} +\item this environment, +\item the observable state of the $ioprog$, +\item the abstract input + (which may be related to a precondition which contains references to explicit + variables) +\end{enumerate} +such that all the explicit variables contained in the preconditions and the +explicit variables in the abstract input are substituted against values +that make the preconditions true. The values must be stored in the +environment and are reported in the observer-state $\sigma_{obs}$. +*} + +definition observer1 :: "['\_obs \ 'o_c \ '\_obs, + '\_obs \ '\ \ '\_a \ ('\_c \ '\_obs), + '\_obs \ '\ \ '\_c \ 'o_c \ bool] + \ ('\_c \ ('o_c, '\)MON\<^sub>S\<^sub>E) + \ ('\_a \ ('o_c, '\_obs \'\)MON\<^sub>S\<^sub>E) " + +where "observer1 rebind precond_solve postcond ioprog = + (\ in_a. (\ (\_obs, \). let (in_c,\_obs') = precond_solve \_obs \ in_a + in case ioprog in_c \ of + None \ None (* ioprog failure - eg. timeout ... *) + | Some (out_c, \') \(let \_obs'' = rebind \_obs' out_c + in if postcond \_obs'' \' in_c out_c + then Some(out_c, (\_obs', \')) + else None (* postcond failure *) )))" + + +definition observer2 :: "['\_obs \ 'o_c \ '\_obs, '\_obs \ '\_a \ '\_c, '\_obs \ '\ \ '\_c \ 'o_c \ bool] + \ ('\_c \ ('o_c, '\)MON\<^sub>S\<^sub>E) + \ ('\_a \ ('o_c, '\_obs \'\)MON\<^sub>S\<^sub>E) " + +where "observer2 rebind substitute postcond ioprog = + (\ in_a. (\ (\_obs, \). 
let in_c = substitute \_obs in_a + in case ioprog in_c \ of + None \ None (* ioprog failure - eg. timeout ... *) + | Some (out_c, \') \(let \_obs' = rebind \_obs out_c + in if postcond \_obs' \' in_c out_c + then Some(out_c, (\_obs', \')) + else None (* postcond failure *) )))" + +text{* Note that this version of the observer is just a +monad-transformer; it transforms the i/o stepping function $ioprog$ +into another stepping function, which is the combined sub-system +consisting of the observer and, for example, a program under test +$\PUT$. The observer takes the \emph{abstract} input $in_a$, +substitutes explicit variables in it by concrete values stored by its +own state $\sigma_{obs}$ and constructs \emph{concrete} input $in_c$, +runs $ioprog$ in this context, and evaluates the return: the concrete +output $out_c$ and the successor state $\sigma'$ are used to extract +from concrete output concrete values and stores them inside its own +successor state $\sigma_{obs}'$. Provided that a post-condition is +passed succesfully, the output and the combined successor-state is +reported as success. + +Note that we made the following testability assumptions: +\begin{enumerate} +\item $ioprog$ behaves wrt. to the reported state and input as a function, i.e. it behaves + deterministically, and +\item it is not necessary to destinguish internal failure and post-condition-failure. + (Modelling Bug? This is superfluous and blind featurism ... One could do this by + introducing an own "weakening"-monad endo-transformer.) +\end{enumerate} + +*} + +text{* observer2 can actually be decomposed into two combinators - one +dealing with the management of explicit variables and one that tackles +post-conditions. *} + +definition observer3 :: "['\_obs \ 'o \ '\_obs, '\_obs \ '\_a \ '\_c] + \ ('\_c \ ('o, '\)MON\<^sub>S\<^sub>E) + \ ('\_a \ ('o, '\_obs \'\)MON\<^sub>S\<^sub>E) " + +where "observer3 rebind substitute ioprog = + (\ in_a. (\ (\_obs, \). 
+ let in_c = substitute \_obs in_a + in case ioprog in_c \ of + None \ None (* ioprog failure - eg. timeout ... *) + | Some (out_c, \') \(let \_obs' = rebind \_obs out_c + in Some(out_c, (\_obs', \')) )))" + + + +definition observer4 :: "['\ \ '\ \ 'o \ bool] + \ ('\ \ ('o, '\)MON\<^sub>S\<^sub>E) + \ ('\ \ ('o, '\)MON\<^sub>S\<^sub>E)" + +where "observer4 postcond ioprog = + (\ input. (\ \. case ioprog input \ of + None \ None (* ioprog failure - eg. timeout ... *) + | Some (output, \') \ (if postcond \' input output + then Some(output, \') + else None (* postcond failure *) )))" + +text{* The following lemma explains the relationsship between $observer2$ and the +decoposed versions $observer3$ and $observer4$. The full equality does not hold - +the reason is that the two kinds of preconditions are different in a subtle way: +the postcondition may make reference to the abstract state. (See our example +\verb+Sequence_test+ based on a symbolic environment in the observer state.) +If the postcondition does not do this, they are equivalent. *} + +find_theorems name:"prod" name:"case" name:"beta" +lemma observer_decompose: + " observer2 r s (\ x. pc) io = (observer3 r s (observer4 pc io))" + apply(rule ext, rule ext) + apply(auto simp: observer2_def observer3_def + observer4_def Let_def case_prod_beta) + apply(case_tac "io (s a x) b", auto) +done + +end diff --git a/src/test/TestEnv.ML b/src/test/TestEnv.ML new file mode 100644 index 0000000..ce89494 --- /dev/null +++ b/src/test/TestEnv.ML @@ -0,0 +1,458 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * TestEnv.ML --- environment for configuration parameters + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2007 ETH Zurich, Switzerland + * 2008-2013 Achim D. Brucker, Germany + * + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) + +signature TESTENV = +sig + type test_derivation_rule = Proof.context -> int -> thm -> (int * tactic) list + + (* Core data structure for the test environment. + Contains tables, parameters, all sorts of "screws" + to control the test generation process. 
*) + type testinfo = + {test_thm_tab : thm Symtab.table, (* name to testthm *) + + type_candicates : typ list, (* type grounding list *) + + cod_term_tab : (typ list -> term) Symtab.table, + (* random generators for terms, + tab assigns to each type constuctor + name a generator that gets the + instances of type constructor as args.*) + abstest_data_tab: (thm list) Symtab.table, + (* assigns name to list of + abstract (local) test data statements *) + test_data_tab : (thm list) Symtab.table, + + pre_safe_tac : Proof.context -> tactic, + pre_normalize_TNF_tac : Proof.context -> tactic, + pre_minimize_TNF_tac : Proof.context -> tactic, + test_derivation_rules : test_derivation_rule Unsynchronized.ref list + }; + + val prelude : bool Config.T + val wrapper : bool Config.T + val toString : string Config.T + val setup_code : string Config.T + val dataconv_code : string Config.T + + val depth : int Config.T (* data structure depth *) + val breadth : int Config.T (* no of free's in testspec *) + + val bound : int Config.T (* global bound for data statements *) + val case_breadth : int Config.T (* no of test data per case, + weakening uniformity *) + val iterations : int Config.T (* number of random attempts to solve a case *) + + val type_range_bound : int Config.T (* effectively used elements + type grounding list *) + + val SMT : bool Config.T + val get_smt_facts : Proof.context -> thm list + + type testenv + val rep_testenv : testenv -> testinfo + + val get_data : Proof.context -> testenv + val map_data : (testenv -> testenv) -> Context.generic -> Context.generic + val get_data_global : theory -> testenv + val map_data_global : (testenv -> testenv) -> theory -> theory + val print_data : Proof.context -> unit + + + val add_test_case : string * thm -> testenv -> testenv + (* add test theorem of "name". + The test-theorem is assumed to consist of either clauses in TNF or + of Test Hypotheses. 
*) + + val del_test_case : string -> testenv -> testenv + val add_test_data : string * thm -> testenv -> testenv + (* used to insert abstract test data registrated under + "string" into the test environment; abstract test data + (or "local test theorems") were used in gen_test_data *) + val del_test_data : string -> testenv -> testenv + (* erase abstract test data from test environment *) + + val thm_tab : testenv -> thm Symtab.table + val absdata_tab : testenv -> thm list Symtab.table + val jdmt_tab : testenv -> thm list Symtab.table + + val thm_tab_update : thm Symtab.table -> testenv -> testenv + val jdmt_tab_update : thm list Symtab.table -> testenv -> testenv + + val get_pre_safe_tac : testenv -> Proof.context -> tactic + val get_pre_normalize_TNF_tac : testenv -> Proof.context -> tactic + val get_pre_minimize_TNF_tac : testenv -> Proof.context -> tactic + + val get_test_derivation_rules : testenv -> test_derivation_rule list + val add_test_derivation_rule : test_derivation_rule -> testenv -> testenv + + val type_candidates_update : typ list -> testenv -> testenv + val pre_safe_tac_update : (Proof.context -> tactic) -> testenv -> testenv + val pre_normalize_TNF_tac_update : (Proof.context -> tactic) -> testenv -> testenv + val pre_minimize_TNF_tac_update : (Proof.context -> tactic) -> testenv -> testenv + + val setup: theory -> theory +end + +structure TestEnv : TESTENV = +struct + +open HOLogic; + +type test_derivation_rule = Proof.context -> int -> thm -> (int * tactic) list + +type testinfo ={test_thm_tab : thm Symtab.table, (* name to testthm *) + + type_candicates : typ list, (* type grounding list *) + + cod_term_tab : (typ list -> term) Symtab.table, + (* random generators for terms, + tab assigns to each type constuctor + name a generator that gets the + instances of type constructor as args.*) + abstest_data_tab: (thm list) Symtab.table, + (* assigns name to list of + data statements *) + test_data_tab : (thm list) Symtab.table, + pre_safe_tac : 
Proof.context -> tactic, + pre_normalize_TNF_tac : Proof.context -> tactic, + pre_minimize_TNF_tac : Proof.context -> tactic, + test_derivation_rules : test_derivation_rule Unsynchronized.ref list + }; + +val (prelude, prelude_setup) = Attrib.config_bool @{binding testgen_prelude} (K true); +val (wrapper, wrapper_setup) = Attrib.config_bool @{binding testgen_wrapper} (K true); +val (toString, toString_setup) = Attrib.config_string @{binding testgen_toString} (K ""); +val (setup_code, setup_code_setup) = Attrib.config_string @{binding testgen_setup_code} (K ""); +val (dataconv_code, dataconv_code_setup) = Attrib.config_string @{binding testgen_dataconv_code} (K ""); +val (depth, depth_setup) = Attrib.config_int @{binding testgen_depth} (K 3); +val (breadth, breadth_setup) = Attrib.config_int @{binding testgen_breadth} (K 1); +val (bound, bound_setup) = Attrib.config_int @{binding testgen_bound} (K 200); +val (case_breadth, case_breadth_setup) = Attrib.config_int @{binding testgen_case_breadth} (K 1); +val (iterations, iterations_setup) = Attrib.config_int @{binding testgen_iterations} (K 25); +val (type_range_bound, type_range_bound_setup) = Attrib.config_int @{binding testgen_type_range_bound} (K 1); +val (SMT, SMT_setup) = Attrib.config_bool @{binding testgen_SMT} (K false); + +structure TestGen_SMT + = Named_Thms (val name = @{binding "testgen_smt_facts"} val description = "Facts for HOL-TestGen SMT solving"); + +val get_smt_facts = TestGen_SMT.get; + +datatype testenv = Testenv of testinfo + +fun rep_testenv (Testenv X) = X; + + +val initial_testenv = Testenv + {test_thm_tab = Symtab.empty, + + type_candicates = [HOLogic.intT,HOLogic.unitT, + HOLogic.boolT, + HOLogic.mk_setT HOLogic.intT, + HOLogic.listT HOLogic.intT], + cod_term_tab = Symtab.empty, + abstest_data_tab= Symtab.empty, + test_data_tab = Symtab.empty, + pre_safe_tac = fn ctxt => all_tac, + pre_normalize_TNF_tac = fn ctxt => all_tac, + pre_minimize_TNF_tac = fn ctxt => all_tac, + 
test_derivation_rules = [] +}; + +fun merge_testenv + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}, + Testenv{test_thm_tab=tttab', + type_candicates=tc', + cod_term_tab=ctt', abstest_data_tab=tdt', + test_data_tab=tjt',pre_safe_tac=pst',pre_normalize_TNF_tac=pnt',pre_minimize_TNF_tac=pmt', + test_derivation_rules = tdr'}) = + + Testenv{test_thm_tab = Symtab.merge (Thm.eq_thm) (tttab,tttab'), + type_candicates = distinct (op=) (tc@tc'), + cod_term_tab = Symtab.empty, + (* don't know what a senseful + definition of override could + be; therefore constraint to the + simplest and most conservative + one *) + abstest_data_tab = Symtab.merge (fn (t1, t2) => List.all Thm.eq_thm (map2 (fn a => fn b => (a, b)) t1 t2)) (tdt,tdt'), + test_data_tab = Symtab.merge (fn (t1, t2) => List.all Thm.eq_thm (map2 (fn a => fn b => (a, b)) t1 t2)) (tjt,tjt'), + pre_safe_tac = pst, + pre_normalize_TNF_tac = pnt, + pre_minimize_TNF_tac = pmt, + test_derivation_rules = distinct (op=) (tdr@tdr')}; + + +(* Context data *) + +structure Data = Generic_Data +( + type T = testenv + val empty = initial_testenv + val extend = I + val merge = merge_testenv +); + +val get_data = Data.get o Context.Proof; +val map_data = Data.map; + +val get_data_global = Data.get o Context.Theory; +val map_data_global = Context.theory_map o map_data; + +fun print_data ctxt = + let val {test_thm_tab, + type_candicates, + cod_term_tab, abstest_data_tab,test_data_tab,pre_safe_tac, + pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules} = + rep_testenv (get_data ctxt) + val depth = Config.get ctxt depth + val breadth = Config.get ctxt breadth + val bound = Config.get ctxt bound + val iterations = Config.get ctxt iterations + fun H (n,thm) = [Pretty.str (n^":"), Display.pretty_thm ctxt thm] + fun H2(n,thms)= + [Pretty.str (n^":"), Pretty.str 
"======"] @ + map (Display.pretty_thm ctxt) thms @ [Pretty.str "======"] + in [Pretty.str ">>> Testenvironment >>>>>>>>>>>>>>>>>", + Pretty.str "+++ Control Data: +++++++++++++++++++", + Pretty.str ("*** default depth: "^Int.toString(depth)), + Pretty.str ("*** default breadth: "^Int.toString(breadth)), + Pretty.str ("*** bound: "^Int.toString(bound)), + Pretty.str ("*** iterations: "^Int.toString(iterations)), + Pretty.str "+++ Testtheoremtable: +++++++++++++++"] @ + maps H (Symtab.dest test_thm_tab) @ + [Pretty.str "+++ Testjudgements: +++++++++++++++++"] @ + maps H2 (Symtab.dest test_data_tab) @ + [Pretty.str "+++ Testdatatable: ++++++++++++++++++"] @ + maps H2 (Symtab.dest abstest_data_tab) @ + [Pretty.str "<<< Testenvironment <<<<<<<<<<<<<<<<<"] + end |> Pretty.chunks |> Pretty.writeln; + + +fun add_test_case (name,thm) + (Testenv {test_thm_tab, + type_candicates, + cod_term_tab, abstest_data_tab,test_data_tab,pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, + test_derivation_rules}) = + Testenv({test_thm_tab=Symtab.update(name,thm)test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, + abstest_data_tab=abstest_data_tab, + test_data_tab=test_data_tab, pre_safe_tac=pre_safe_tac,pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + +fun del_test_case name (Testenv {test_thm_tab, + type_candicates,cod_term_tab, abstest_data_tab, + test_data_tab,pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, + test_derivation_rules}) = + Testenv({test_thm_tab=Symtab.delete_safe name test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, abstest_data_tab=abstest_data_tab, + test_data_tab=test_data_tab, pre_safe_tac=pre_safe_tac,pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + + + +fun add_test_data (name,thm) + (Testenv {test_thm_tab, 
+ type_candicates, + cod_term_tab, abstest_data_tab,test_data_tab,pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, + test_derivation_rules}) = + Testenv({test_thm_tab=test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, + abstest_data_tab=Symtab.cons_list (name,thm) abstest_data_tab, + test_data_tab=test_data_tab,pre_safe_tac=pre_safe_tac,pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + +fun del_test_data name + (Testenv {test_thm_tab, + type_candicates, + cod_term_tab,abstest_data_tab,test_data_tab,pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, + test_derivation_rules}) = + Testenv({test_thm_tab=test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, + abstest_data_tab=Symtab.delete_safe name abstest_data_tab, + test_data_tab=test_data_tab,pre_safe_tac=pre_safe_tac,pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + +fun thm_tab te = #test_thm_tab (rep_testenv te) +fun absdata_tab te = #abstest_data_tab(rep_testenv te) +fun jdmt_tab te = #test_data_tab(rep_testenv te) +(* In the sequel we also use the term "test judgement" synonymously + for "test data statement". 
*) + +fun thm_tab_update thm_tab + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) = + (Testenv{test_thm_tab=thm_tab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}); + +fun jdmt_tab_update jdmt_tab + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) = + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=jdmt_tab,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}); + + +fun type_candidates_update (type_candidates) + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) + = + (Testenv{test_thm_tab=tttab,type_candicates=type_candidates, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}); + +fun pre_safe_tac_update (pre_safe_tat) + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) + = + (Testenv{test_thm_tab=tttab,type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pre_safe_tat,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}); + + +fun pre_normalize_TNF_tac_update (pre_normalize_TNF_tac) + 
(Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) + = + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}); + + +fun pre_minimize_TNF_tac_update (pre_minimize_TNF_tac) + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt, + pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) + = + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = tdr}); + + + + +fun get_pre_safe_tac (Testenv{pre_safe_tac=pst, ...}) = pst; +fun get_pre_normalize_TNF_tac (Testenv{pre_normalize_TNF_tac=pnt, ...}) = pnt; +fun get_pre_minimize_TNF_tac (Testenv{pre_minimize_TNF_tac=pmt, ...}) = pmt; + +fun get_test_derivation_rules (Testenv{test_derivation_rules = tdr, ...}) = map (op !) 
tdr; + +fun add_test_derivation_rule (tdr) + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt, + pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdrs}) + = + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt, + pre_minimize_TNF_tac=pmt, + test_derivation_rules = (Unsynchronized.ref tdr)::tdrs}); + +(* module setup *) + +val setup = + prelude_setup #> + wrapper_setup #> + toString_setup #> + setup_code_setup #> + dataconv_code_setup #> + depth_setup #> + breadth_setup #> + bound_setup #> + case_breadth_setup #> + iterations_setup #> + SMT_setup #> + TestGen_SMT.setup; + +end (*struct *); + + diff --git a/src/test/TestEnv.thy b/src/test/TestEnv.thy new file mode 100644 index 0000000..e5ad3c7 --- /dev/null +++ b/src/test/TestEnv.thy @@ -0,0 +1,726 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * TestEnv.ML --- environment for configuration parameters + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2007 ETH Zurich, Switzerland + * 2008-2013 Achim D. Brucker, Germany + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. 
+ * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +theory TestEnv +imports Main +begin +ML{* + +(* TODO : not correctly implemented : no ABSTRACT testcases. *) + +signature TESTENV = +sig + type test_derivation_rule = Proof.context -> int -> thm -> (int * tactic) list + + (* Core data structure for the test environment. + Contains tables, parameters, all sorts of "screws" + to control the test generation process. 
*) + type testinfo = + {test_thm_tab : thm Symtab.table, (* name to testthm *) + + test_thm_inst_tab : (thm list) Symtab.table, (* instantiated testthm *) + + type_candicates : typ list, (* type grounding list *) + + cod_term_tab : (typ list -> term) Symtab.table, + (* random generators for terms, + tab assigns to each type constuctor + name a generator that gets the + instances of type constructor as args.*) + abstest_data_tab : (thm list) Symtab.table, + (* assigns name to list of + abstract (local) test data statements given + by the user *) + + test_data_tab : (cterm list) Symtab.table, + (* concrete test data table *) + unsolved_PO_tab : (cterm list) Symtab.table, + (* proof obligations left unresolved in selection phase *) + test_thyps_tab : (cterm list) Symtab.table, + (* test-hypothesis *) + + + pre_safe_tac : Proof.context -> tactic, + pre_normalize_TNF_tac : Proof.context -> tactic, + pre_minimize_TNF_tac : Proof.context -> tactic, + test_derivation_rules : test_derivation_rule Unsynchronized.ref list + }; + + val prelude : bool Config.T + val wrapper : bool Config.T + val toString : string Config.T + val setup_code : string Config.T + val dataconv_code : string Config.T + + val depth : int Config.T (* data structure depth *) + val steps : int Config.T (* no of iterations of splittings *) + + val bound : int Config.T (* global bound for data statements *) + val case_steps : int Config.T (* no of test data per case, + weakening uniformity *) + val iterations : int Config.T (* number of random attempts to solve a case *) + + val type_range_bound : int Config.T (* effectively used elements + type grounding list *) + + val SMT : bool Config.T (* use the SMT backend (default: false) *) + val SMT2 : bool Config.T (* use the SMT2 backend (default: false ; false if SMT is set to true) *) + val get_smt_facts : Proof.context -> thm list + + val pon : int Config.T (* test data generation for only the chosen PO; + 0 for all the POs *) + val smt_model : bool Config.T 
+ + val use_metis : bool Config.T (* if true, use metis to check SMT models *) + val use_smt : bool Config.T (* if true, use smt to check SMT models *) + + type testenv + val rep_testenv : testenv -> testinfo + + val get_testenv : Proof.context -> testenv + val map_data : (testenv -> testenv) -> Context.generic -> Context.generic + val get_testenv_global : theory -> testenv + val map_data_global : (testenv -> testenv) -> theory -> theory + val print_testenv : string -> Proof.context -> unit + val get_test_data : Proof.context -> string -> thm list option + + val get_test_data_tab : testenv -> cterm list Symtab.table + val get_unsolved_PO_tab: testenv -> cterm list Symtab.table + val get_test_thyps_tab : testenv -> cterm list Symtab.table + + val add_test_data_tab : string * cterm -> testenv -> testenv + val add_unsolved_PO_tab: string * cterm -> testenv -> testenv + val add_test_thyps_tab : string * cterm -> testenv -> testenv + + val set_test_data_tab : string * (cterm list) -> testenv -> testenv + val set_unsolved_PO_tab: string * (cterm list) -> testenv -> testenv + val set_test_thyps_tab : string * (cterm list) -> testenv -> testenv + + val add_test_case : string * thm -> testenv -> testenv + (* add test theorem of "name". + The test-theorem is assumed to consist of either clauses in TNF or + of Test Hypotheses. 
*) + + val del_test_case : string -> testenv -> testenv + val add_abstest_data : string * thm -> testenv -> testenv + (* used to insert abstract test data registrated under + "string" into the test environment; abstract test data + (or "local test theorems") were used in gen_test_data *) + val del_abstest_data : string -> testenv -> testenv + (* erase abstract test data from test environment *) + + val get_test_thm_tab : testenv -> thm Symtab.table + val get_absdata_tab : testenv -> thm list Symtab.table + val get_test_thm_inst_tab : testenv -> thm list Symtab.table + + val test_thm_tab_update : thm Symtab.table -> testenv -> testenv + val test_thm_inst_tab_update : thm list Symtab.table -> testenv -> testenv + + val get_pre_safe_tac : testenv -> Proof.context -> tactic + val get_pre_normalize_TNF_tac : testenv -> Proof.context -> tactic + val get_pre_minimize_TNF_tac : testenv -> Proof.context -> tactic + + val get_test_derivation_rules : testenv -> test_derivation_rule list + val add_test_derivation_rule : test_derivation_rule -> testenv -> testenv + + val type_candidates_update : typ list -> testenv -> testenv + val pre_safe_tac_update : (Proof.context -> tactic) -> testenv -> testenv + val pre_normalize_TNF_tac_update : (Proof.context -> tactic) -> testenv -> testenv + val pre_minimize_TNF_tac_update : (Proof.context -> tactic) -> testenv -> testenv + + val print_test_case : Proof.context -> string -> int option -> string + val print_test_data : Proof.context -> string -> int option -> string + val print_test_hyps : Proof.context -> string -> int option -> string + val print_unsolved_pos : Proof.context -> string -> int option -> string + + val setup: theory -> theory +end +*} + +ML{* + +structure TestEnv : TESTENV = +struct + +open HOLogic; + +type test_derivation_rule = Proof.context -> int -> thm -> (int * tactic) list + +type testinfo ={test_thm_tab : thm Symtab.table, (* name to testthm *) + + test_thm_inst_tab : (thm list) Symtab.table, + + 
type_candicates : typ list, (* type grounding list *) + + cod_term_tab : (typ list -> term) Symtab.table, + (* random generators for terms, + tab assigns to each type constuctor + name a generator that gets the + instances of type constructor as args.*) + abstest_data_tab : (thm list) Symtab.table, + (* assigns name to list of data statements *) + test_data_tab : (cterm list) Symtab.table, + unsolved_PO_tab : (cterm list) Symtab.table, + test_thyps_tab : (cterm list) Symtab.table, + pre_safe_tac : Proof.context -> tactic, + pre_normalize_TNF_tac : Proof.context -> tactic, + pre_minimize_TNF_tac : Proof.context -> tactic, + test_derivation_rules : test_derivation_rule Unsynchronized.ref list + }; + +val (prelude, prelude_setup) = Attrib.config_bool @{binding testgen_prelude} (K true); +val (wrapper, wrapper_setup) = Attrib.config_bool @{binding testgen_wrapper} (K true); +val (toString, toString_setup) = Attrib.config_string @{binding testgen_toString} (K ""); +val (setup_code, setup_code_setup) = Attrib.config_string @{binding testgen_setup_code} (K ""); +val (dataconv_code, dataconv_code_setup) = Attrib.config_string @{binding testgen_dataconv_code} (K ""); +val (depth, depth_setup) = Attrib.config_int @{binding testgen_depth} (K 3); +val (steps, steps_setup) = Attrib.config_int @{binding testgen_steps} (K 1); +val (bound, bound_setup) = Attrib.config_int @{binding testgen_bound} (K 200); +val (case_steps, case_steps_setup) = Attrib.config_int @{binding testgen_case_steps} (K 1); +val (iterations, iterations_setup) = Attrib.config_int @{binding testgen_iterations} (K 25); +val (type_range_bound, type_range_bound_setup) = Attrib.config_int @{binding testgen_type_range_bound} (K 1); +val (SMT, SMT_setup) = Attrib.config_bool @{binding testgen_SMT} (K false); +val (SMT2, SMT2_setup) = Attrib.config_bool @{binding testgen_SMT2} (K false); +val (pon, pon_setup) = Attrib.config_int @{binding testgen_pon} (K 0); +val (smt_model, smt_model_setup) = Attrib.config_bool 
@{binding testgen_smt_model} (K false); +val (use_metis, use_metis_setup) = Attrib.config_bool @{binding testgen_use_metis} (K false); +val (use_smt, use_smt_setup) = Attrib.config_bool @{binding testgen_use_smt} (K false); + +structure TestGen_SMT + = Named_Thms (val name = @{binding "testgen_smt_facts"} + val description = "Facts for HOL-TestGen SMT solving"); + +val get_smt_facts = TestGen_SMT.get; + +datatype testenv = Testenv of testinfo + +fun rep_testenv (Testenv X) = X; + + +val initial_testenv = Testenv + {test_thm_tab = Symtab.empty, + type_candicates = [HOLogic.intT,HOLogic.unitT, + HOLogic.boolT, + HOLogic.mk_setT HOLogic.intT, + HOLogic.listT HOLogic.intT], + cod_term_tab = Symtab.empty, + abstest_data_tab= Symtab.empty, + test_thm_inst_tab = Symtab.empty, + test_data_tab = Symtab.empty, + unsolved_PO_tab = Symtab.empty, + test_thyps_tab = Symtab.empty, + + pre_safe_tac = fn ctxt => all_tac, + pre_normalize_TNF_tac = fn ctxt => all_tac, + pre_minimize_TNF_tac = fn ctxt => all_tac, + test_derivation_rules = [] + }; + +fun merge_testenv + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab = ttdt, + unsolved_PO_tab = uPOt, + test_thyps_tab = ttht, + test_thm_inst_tab=tjt, pre_safe_tac=pst, + pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}, + Testenv{test_thm_tab=tttab', + type_candicates=tc', + cod_term_tab=ctt', abstest_data_tab=tdt', + test_data_tab = ttdt', + unsolved_PO_tab = uPOt', + test_thyps_tab = ttht', + test_thm_inst_tab=tjt',pre_safe_tac=pst', + pre_normalize_TNF_tac=pnt',pre_minimize_TNF_tac=pmt', + test_derivation_rules = tdr'}) = + + Testenv{test_thm_tab = Symtab.merge (Thm.eq_thm) (tttab,tttab'), + type_candicates = distinct (op=) (tc@tc'), + cod_term_tab = Symtab.empty, + (* don't know what a senseful + definition of override could + be; therefore constraint to the + simplest and most conservative + one *) + abstest_data_tab = Symtab.merge (fn 
(t1, t2) => List.all Thm.eq_thm (map2 (fn a => fn b => (a, b)) t1 t2)) (tdt,tdt'), + test_thm_inst_tab = Symtab.merge (fn (t1, t2) => List.all Thm.eq_thm (map2 (fn a => fn b => (a, b)) t1 t2)) (tjt,tjt'), + test_data_tab = Symtab.merge (fn (t1, t2) => List.all Thm.aconvc (map2 (fn a => fn b => (a, b)) t1 t2)) (ttdt,ttdt'), + unsolved_PO_tab = Symtab.merge (fn (t1, t2) => List.all Thm.aconvc (map2 (fn a => fn b => (a, b)) t1 t2)) (uPOt,uPOt'), + test_thyps_tab = Symtab.merge (fn (t1, t2) => List.all Thm.aconvc (map2 (fn a => fn b => (a, b)) t1 t2)) (ttht,ttht'), + pre_safe_tac = pst, + pre_normalize_TNF_tac = pnt, + pre_minimize_TNF_tac = pmt, + test_derivation_rules = distinct (op=) (tdr@tdr')}; + + +structure Data = Generic_Data +( + type T = testenv + val empty = initial_testenv + val extend = I + val merge = merge_testenv +); + +val get_testenv = Data.get o Context.Proof; +val map_data = Data.map; + +val get_testenv_global = Data.get o Context.Theory; +val map_data_global = Context.theory_map o map_data; + +fun print_testenv envname ctxt = + let val {test_thm_tab, type_candicates, + cod_term_tab, abstest_data_tab,test_thm_inst_tab,pre_safe_tac, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules} = + rep_testenv (get_testenv ctxt) + val depth = Config.get ctxt depth + val steps = Config.get ctxt steps + val bound = Config.get ctxt bound + val iterations = Config.get ctxt iterations + fun H (n,thm) = if n = envname orelse envname = "" + then [Pretty.str (n^":"), Thm.pretty_thm ctxt thm] + else [] + fun H2(n,thms)= if n = envname orelse envname = "" + then [Pretty.str (n^":"), Pretty.str "======"] @ + map (Thm.pretty_thm ctxt) thms @ + [Pretty.str "======"] + else [] + in [Pretty.str ">>> Testenvironment >>>>>>>>>>>>>>>>>", + Pretty.str "+++ Control Data: +++++++++++++++++++", + Pretty.str ("*** default depth: "^Int.toString(depth)), + Pretty.str ("*** default steps: "^Int.toString(steps)), + 
Pretty.str ("*** bound: "^Int.toString(bound)), + Pretty.str ("*** iterations: "^Int.toString(iterations)), + Pretty.str "+++ Testtheoremtable: +++++++++++++++"] @ +(* TODO bu: add cterm construction ... *) + maps H (Symtab.dest test_thm_tab) @ + [Pretty.str "+++ Testjudgements: +++++++++++++++++"] @ + maps H2 (Symtab.dest test_thm_inst_tab) @ + [Pretty.str "+++ Testdatatable: ++++++++++++++++++"] @ + maps H2 (Symtab.dest abstest_data_tab) @ + [Pretty.str "<<< Testenvironment <<<<<<<<<<<<<<<<<"] + end |> Pretty.chunks |> Pretty.writeln; + + +fun add_test_case (name,thm) + (Testenv {test_thm_tab, type_candicates, cod_term_tab, abstest_data_tab,test_thm_inst_tab, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules}) = + Testenv({test_thm_tab=Symtab.update(name,thm)test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, + abstest_data_tab=abstest_data_tab, + test_thm_inst_tab=test_thm_inst_tab, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_safe_tac=pre_safe_tac, + pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + +fun del_test_case name (Testenv {test_thm_tab, + type_candicates,cod_term_tab, abstest_data_tab, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + test_thm_inst_tab,pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, + test_derivation_rules}) = + Testenv({test_thm_tab=Symtab.delete_safe name test_thm_tab, type_candicates=type_candicates, + cod_term_tab=cod_term_tab, abstest_data_tab=abstest_data_tab, + test_thm_inst_tab=test_thm_inst_tab, pre_safe_tac=pre_safe_tac, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = 
test_derivation_rules}); + + + + +fun add_abstest_data (name,thm) + (Testenv {test_thm_tab, type_candicates, + cod_term_tab, abstest_data_tab,test_thm_inst_tab,pre_safe_tac, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_normalize_TNF_tac,pre_minimize_TNF_tac, + test_derivation_rules}) + = Testenv({test_thm_tab=test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, + abstest_data_tab=Symtab.cons_list (name,thm) abstest_data_tab, + test_thm_inst_tab=test_thm_inst_tab,pre_safe_tac=pre_safe_tac, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + +fun del_abstest_data name + (Testenv {test_thm_tab, + type_candicates, + cod_term_tab,abstest_data_tab, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + test_thm_inst_tab,pre_safe_tac, + pre_normalize_TNF_tac,pre_minimize_TNF_tac, + test_derivation_rules}) + = Testenv({test_thm_tab=test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, + abstest_data_tab=Symtab.delete_safe name abstest_data_tab, + test_thm_inst_tab=test_thm_inst_tab,pre_safe_tac=pre_safe_tac, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + +fun get_test_thm_tab te = #test_thm_tab (rep_testenv te) +fun get_absdata_tab te = #abstest_data_tab(rep_testenv te) +fun get_test_thm_inst_tab te = #test_thm_inst_tab(rep_testenv te) + + + +fun get_test_data_tab te = #test_data_tab (rep_testenv te) +fun get_unsolved_PO_tab te = #unsolved_PO_tab (rep_testenv te) +fun get_test_thyps_tab te = #test_thyps_tab(rep_testenv te) + + +fun add_test_data_tab (name,ct) + (Testenv {test_thm_tab, type_candicates, cod_term_tab, 
abstest_data_tab,test_thm_inst_tab, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules}) = + Testenv({test_thm_tab=test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, + abstest_data_tab=abstest_data_tab, + test_thm_inst_tab=test_thm_inst_tab, + test_data_tab = Symtab.cons_list(name,ct) test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_safe_tac=pre_safe_tac, + pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + +fun add_unsolved_PO_tab (name,ct) + (Testenv {test_thm_tab, type_candicates, cod_term_tab, abstest_data_tab,test_thm_inst_tab, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules}) = + Testenv({test_thm_tab=test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, + abstest_data_tab=abstest_data_tab, + test_thm_inst_tab=test_thm_inst_tab, + test_data_tab = test_data_tab, + unsolved_PO_tab = Symtab.cons_list(name,ct) unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_safe_tac=pre_safe_tac, + pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + +fun add_test_thyps_tab (name,ct) + (Testenv {test_thm_tab, type_candicates, cod_term_tab, abstest_data_tab,test_thm_inst_tab, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules}) = + Testenv({test_thm_tab=test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, + abstest_data_tab=abstest_data_tab, + test_thm_inst_tab=test_thm_inst_tab, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = Symtab.cons_list(name,ct) test_thyps_tab, + 
pre_safe_tac=pre_safe_tac, + pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + +fun set_test_data_tab (name,ctl) + (Testenv {test_thm_tab, type_candicates, cod_term_tab, abstest_data_tab,test_thm_inst_tab, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules}) = + Testenv({test_thm_tab=test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, + abstest_data_tab=abstest_data_tab, + test_thm_inst_tab=test_thm_inst_tab, + test_data_tab = Symtab.update(name,ctl) test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_safe_tac=pre_safe_tac, + pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + +fun set_unsolved_PO_tab (name,ctl) + (Testenv {test_thm_tab, type_candicates, cod_term_tab, abstest_data_tab,test_thm_inst_tab, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules}) = + Testenv({test_thm_tab=test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, + abstest_data_tab=abstest_data_tab, + test_thm_inst_tab=test_thm_inst_tab, + test_data_tab = test_data_tab, + unsolved_PO_tab = Symtab.update(name,ctl) unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_safe_tac=pre_safe_tac, + pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + +fun set_test_thyps_tab (name,ctl) + (Testenv {test_thm_tab, type_candicates, cod_term_tab, abstest_data_tab,test_thm_inst_tab, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules}) = + Testenv({test_thm_tab=test_thm_tab, + type_candicates=type_candicates, 
+ cod_term_tab=cod_term_tab, + abstest_data_tab=abstest_data_tab, + test_thm_inst_tab=test_thm_inst_tab, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = Symtab.update(name,ctl) test_thyps_tab, + pre_safe_tac=pre_safe_tac, + pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + +(* In the sequel we also use the term "test judgement" synonymously for "test data statement". *) + +fun test_thm_tab_update thm_tab + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, + abstest_data_tab=tdt, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + test_thm_inst_tab=tjt,pre_safe_tac=pst, + pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) = + (Testenv{test_thm_tab=thm_tab, + type_candicates=tc, + cod_term_tab=ctt, + abstest_data_tab=tdt, + test_thm_inst_tab=tjt,pre_safe_tac=pst, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}); + +fun test_thm_inst_tab_update jdmt_tab + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, + abstest_data_tab=tdt, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + test_thm_inst_tab=tjt,pre_safe_tac=pst, + pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) = + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, + abstest_data_tab=tdt, + test_thm_inst_tab=jdmt_tab, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_safe_tac=pst,pre_normalize_TNF_tac=pnt, + pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}); + + +fun type_candidates_update (type_candidates) + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, + abstest_data_tab=tdt, + test_data_tab, unsolved_PO_tab, 
test_thyps_tab, + test_thm_inst_tab=tjt,pre_safe_tac=pst, + pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) + =(Testenv{test_thm_tab=tttab,type_candicates=type_candidates, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_thm_inst_tab=tjt,pre_safe_tac=pst, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}); + +fun pre_safe_tac_update (pre_safe_tat) + (Testenv{test_thm_tab=tttab, type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_thm_inst_tab=tjt,pre_safe_tac=pst, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) + =(Testenv{test_thm_tab=tttab,type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_thm_inst_tab=tjt, pre_safe_tac=pre_safe_tat, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}); + + +fun pre_normalize_TNF_tac_update (pre_normalize_TNF_tac) + (Testenv{test_thm_tab=tttab, type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_thm_inst_tab=tjt,pre_safe_tac=pst, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) + = (Testenv{test_thm_tab=tttab, type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_thm_inst_tab=tjt, pre_safe_tac=pst, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}); + + +fun pre_minimize_TNF_tac_update (pre_minimize_TNF_tac) + (Testenv{test_thm_tab=tttab, type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_thm_inst_tab=tjt, 
pre_safe_tac=pst, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_normalize_TNF_tac=pnt, pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) + =(Testenv{test_thm_tab=tttab, type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_thm_inst_tab=tjt, pre_safe_tac=pst,pre_normalize_TNF_tac=pnt, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = tdr}); + + + + +fun get_pre_safe_tac (Testenv{pre_safe_tac=pst, ...}) = pst; +fun get_pre_normalize_TNF_tac (Testenv{pre_normalize_TNF_tac=pnt, ...}) = pnt; +fun get_pre_minimize_TNF_tac (Testenv{pre_minimize_TNF_tac=pmt, ...}) = pmt; + +fun get_test_derivation_rules (Testenv{test_derivation_rules = tdr, ...}) = map (op !) tdr; + +fun get_test_data ctxt name = let val Testenv {test_thm_inst_tab,...} = (get_testenv ctxt) + in Symtab.lookup test_thm_inst_tab name end + +fun add_test_derivation_rule (tdr) + (Testenv{test_thm_tab=tttab, type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_thm_inst_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_minimize_TNF_tac=pmt, test_derivation_rules = tdrs}) + = (Testenv{test_thm_tab=tttab, type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_thm_inst_tab=tjt, pre_safe_tac=pst, pre_normalize_TNF_tac=pnt, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_minimize_TNF_tac=pmt, + (* OH MY GOD --- WHAT CRAP IS THIS >>>>> bu *) + test_derivation_rules = (Unsynchronized.ref tdr)::tdrs}); + +(* module setup *) + + + +fun print_test_data_generic f ctxt name index_opt = + let val ctrms = case (Symtab.lookup (f (get_testenv ctxt)) name) of + SOME k => k + | NONE => error "No such test suite" + val print_cterm = Pretty.string_of o (Syntax.pretty_term ctxt) o Thm.term_of + in case index_opt of + NONE => 
String.concatWith "\n" (map print_cterm ctrms) + |SOME ind => print_cterm (nth ctrms ind) + end + + +val print_test_data = print_test_data_generic get_test_data_tab; +val print_test_case = print_test_data_generic get_test_data_tab; (* temporary hack by bu - + should be own implem with own + table*) + +val print_test_hyps = print_test_data_generic get_test_thyps_tab; + +val print_unsolved_pos = print_test_data_generic get_unsolved_PO_tab; + + +val setup = + prelude_setup #> + wrapper_setup #> + toString_setup #> + setup_code_setup #> + dataconv_code_setup #> + depth_setup #> + steps_setup #> + bound_setup #> + case_steps_setup #> + iterations_setup #> + SMT_setup #> + SMT2_setup #> + pon_setup #> + smt_model_setup #> + use_metis_setup #> + use_smt_setup #> + TestGen_SMT.setup; + +end (*struct *); + +*} + +end diff --git a/src/TestGen.thy b/src/test/TestGen.thy similarity index 100% rename from src/TestGen.thy rename to src/test/TestGen.thy diff --git a/src/test/TestLib.thy b/src/test/TestLib.thy new file mode 100644 index 0000000..b3ccc0a --- /dev/null +++ b/src/test/TestLib.thy @@ -0,0 +1,57 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * Testing.thy --- wiring everything together. + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2007 ETH Zurich, Switzerland + * 2008-2012 Achim D. Brucker, Germany + * 2009-2012 Université Paris-Sud, France + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id:$ *) + +chapter {* Summing up the Library *} + +theory + TestLib +imports + Monads + Automata + Interleaving + Observers + EFSM_Toolkit + TestRefinements +begin + +end diff --git a/src/test/TestRefinements.thy b/src/test/TestRefinements.thy new file mode 100644 index 0000000..4fa18cf --- /dev/null +++ b/src/test/TestRefinements.thy @@ -0,0 +1,248 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * TestRefinements.thy --- for sequence testing. + * This file is part of HOL-TestGen. 
+ * + * Copyright (c) 2005-2007, ETH Zurich, Switzerland + * 2009 B. Wolff, Univ. Paris-Sud, France + * 2009 Achim D. Brucker, Germany + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ ******************************************************************************) +(* $Id:$ *) + +chapter {* Test-Refinements (IOCO and Friends) *} + +theory TestRefinements imports Monads Automata +begin + + + +text{* converts infinite trace sets to prefix-closed sets of finite traces, +reconciling the most common different concepts of traces ... *} + +consts cnv :: "(nat \ '\) \ '\ list" +(* NAIV !!! *) + +(* I WOULD PREFER A SYMMETRIC NOTION OF REFINEMENT ... However, this makes the use +of mbind less clear and evident. Sufficient conditions when these two things +are equivalent need to be explored ... *) + + +definition input_refine :: + "[('\,'o,'\) det_io_atm,('\,'o,'\) ndet_io_atm] \ bool" +where + input_refine_def: + "input_refine I SP = + (({det_io_atm.init I} = ndet_io_atm.init SP) \ + (\ t \ cnv ` (in_trace `(etraces SP)). + (det_io_atm.init I) + \ (os \ (mbind t (det_io_atm.step I)) ; + return(length t = length os))))" +notation input_refine ("(_/ \\<^sub>I _)" [51, 51] 50) + +text{* This testing notion essentially says: whenever we can run an +input sequence succesfully on the PUT (the program does not throw an exception), +it is ok. *} + +definition input_output_refine :: + "[('\,'o,'\) det_io_atm,('\,'o,'\) ndet_io_atm] \ bool" +where + input_output_refine_def: + "input_output_refine i s \ + ({det_io_atm.init i} = ndet_io_atm.init s) \ + (\ t \ prefixes (etraces s). + (det_io_atm.init i) + \ (os \ (mbind (map fst t) (det_io_atm.step i)); + return((map snd t) = os)))" + +notation input_output_refine ("(_/ \\<^sub>IO _)" [51, 51] 50) +text{* Our no-frills-approach to I/O conformance testing: +no quiescense, and strict alternation between input and output. *} + +text{* PROBLEM : Tretmanns / Belinfantes notion of ioco is inherently +different from Gaudels, which is refusal based. 
+See definition in accompanying IOCO.thy file: +\begin{verbatim} + +definition ioco :: "[('\,'o option,'\)IO_TS,('\,'o option,'\)IO_TS] \ bool" + (infixl "ioco" 200) + "i ioco s \ (\ t \ Straces(s). + out i (i after t) \ out s (s after t))" + +\end{verbatim} + +*} + + +definition after :: "[('\, 'o, '\) ndet_io_atm, ('\ \ 'o) list] \ '\ set" + (infixl "after" 100) +where "atm after l = {\' . \ t \ rtraces atm. (\' = fst(t (length l)) \ + (\ n \ {0 .. (length l) - 1}. l!n = fst(snd(t n))))}" + +definition out :: "[('\, 'o, '\) ndet_io_atm,'\ set, '\] \ 'o set" +where "out atm ss \ = {a. \\ \ ss. \\'. (a,\') \ ndet_io_atm.step atm \ \}" + +(* OLD: +definition ioco :: "[('\, 'o, '\) det_io_atm,('\, 'o, '\) ndet_io_atm] \ bool" + (infixl "ioco" 200) +where "i ioco s = (({det_io_atm.init i} = ndet_io_atm.init s) \ + (\ t \ cnv ` (etraces s). + \ \ \. + ((det_io_atm.init i) + \ (os \ (mbind (map fst t @ [\]) (det_io_atm.step i)) ; + return(os = map snd t @ [\]))) + \ + \ \ out s (s after t)))" *) + +definition ready :: "[('\, 'o, '\) ndet_io_atm,'\ set] \ '\ set" +where "ready atm ss = {\. \\ \ ss. ndet_io_atm.step atm \ \ \ {}}" + +definition ioco :: "[('\,'o,'\)ndet_io_atm, ('\,'o,'\)ndet_io_atm] \ bool" + (infixl "ioco" 200) +where "i ioco s = (\ t \ prefixes(etraces s). + \ \ \ ready s (s after t). out i (i after t) \ \ out s (s after t) \)" + +definition oico :: "[('\,'o,'\)ndet_io_atm,('\,'o,'\)ndet_io_atm] \ bool" + (infixl "oico" 200) +where "i oico s = (\ t \ prefixes(etraces s). + ready i (i after t) \ ready s (s after t))" + +definition ioco2 :: "[('\,'o,'\)ndet_io_atm, ('\,'o,'\)ndet_io_atm] \ bool" + (infixl "ioco2" 200) +where "i ioco2 s = (\ t \ eprefixes (ndet_io_atm.step s) (rtraces s). + \ \ \ ready s (s after t). + out i (i after t) \ \ out s (s after t) \)" + +definition ico :: "[('\, 'o, '\) det_io_atm,('\, 'o, '\) ndet_io_atm] \ bool" + (infixl "ico" 200) +where "i ico s = (\ t \ prefixes(etraces s). 
+ let i' = det2ndet i in ready i' (i' after t) \ ready s (s after t))" + +lemma full_det_refine: "s = det2ndet s' \ + (det2ndet i) ioco s \ (det2ndet i) oico s \ input_output_refine i s" +apply(safe) +oops + +definition ico2 :: "[('\,'o,'\)ndet_io_atm,('\,'o,'\)ndet_io_atm] \ bool" + (infixl "ico2" 200) +where "i ico2 s \ \ t \ eprefixes (ndet_io_atm.step s) (rtraces s). + ready i (i after t) \ ready s (s after t)" + + +text{* There is lots of potential for optimization. +\begin{itemize} +\item only maximal prefixes +\item draw the $\omega$ tests inside the return +\item compute the $\omega$ by the $\ioprog$, not quantify over it. +\end{itemize} +*} + +section{* Generic Monadic Test Refinement Notions *} + +text{* Generic Notion *} + +definition monadic_test_refinement :: + "('\ \ ('o, '\)MON\<^sub>S\<^sub>E) \ + '\ set \ (* state invariant *) + '\ list set \ (* coverage or test-purpose *) + ('\ list \ 'o list \ '\ \ bool) \ (* post-cond *) + ('\ \ ('o, '\)MON\<^sub>S\<^sub>E) \ bool" + ("_ \\<^sub>\_,_,_\<^sub>\ _" [100,100,100,100,100]101) +where + + + "(MODEL \\<^sub>\Init,CovCrit,conf\<^sub>\ SUT) = + (\ \\<^sub>0 \ Init. \ \s \ CovCrit. \ res. + (\\<^sub>0 \ (os \ mbind \s MODEL; return (conf \s os res))) + \ + (\\<^sub>0 \ (os \ mbind \s SUT; return (conf \s os res))))" + +subsection{* Inclusion Test Refinements *} + +definition incl_ref :: "('\ \ ('o, '\)MON\<^sub>S\<^sub>E) \ + '\ set \ (* state invariant *) + '\ list set \ (* coverage or test-purpose *) + ('\ \ ('o, '\)MON\<^sub>S\<^sub>E) \ bool" + ("_ \\<^sub>I\<^sub>\_,_\<^sub>\ _" [100,100,100,100]101) +where "(S \\<^sub>I\<^sub>\Init,CC\<^sub>\ I) = (S \\<^sub>\Init,CC,(\ is os x. length is = length os \ os=x)\<^sub>\ I)" + + +lemma inclusion_test_I : + "(\\\<^sub>0 \s res. 
+ \\<^sub>0 \ Init \ + \s \ CC \ + \\<^sub>0 \ ( os \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e \s S; unit\<^sub>S\<^sub>E (length \s = length os \ os = res)) + \ + \\<^sub>0 \ ( os \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e \s I; unit\<^sub>S\<^sub>E (length \s = length os \ os = res))) + \ (S \\<^sub>I\<^sub>\Init,CC\<^sub>\ I)" +unfolding incl_ref_def monadic_test_refinement_def +by auto + +lemma inclusion_test_I_opt : + "(\\\<^sub>0 \s res. + \\<^sub>0 \ Init \ + \s \ CC \ + \\<^sub>0 \ ( os \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p \s S; unit\<^sub>S\<^sub>E (os = res)) + \ + \\<^sub>0 \ ( os \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p \s I; unit\<^sub>S\<^sub>E (os = res))) + \ (S \\<^sub>I\<^sub>\Init,CC\<^sub>\ I)" +apply(rule inclusion_test_I) +apply(subst Monads.mbindFSave_vs_mbindFStop) +apply(subst (asm) Monads.mbindFSave_vs_mbindFStop) +by(auto) + + + +subsection{* Inclusion Test Refinements with Abort *} + +text{* The abort scenario allows operations to fail, so violations + to input-enabledness are permitted. The model assumes that a particular + abort-semantics is respected: the operation is assumed to fail explicitly, + \ie{} observably by the tester, and to leave the state unchanged. *} + + +definition incl_aborts_ref :: "('\ \ ('o, '\)MON\<^sub>S\<^sub>E) \ + '\ set \ (* state invariant *) + '\ list set \ (* coverage or test-purpose *) + ('\ \ ('o, '\)MON\<^sub>S\<^sub>E) \ bool" + ("_ \\<^sub>I\<^sub>A\<^sub>\_,_\<^sub>\ _" [100,100,100,100]101) +where "(S \\<^sub>I\<^sub>A\<^sub>\Inv,IS\<^sub>\ I) = (S \\<^sub>\Inv,IS,(\ is os x. 
os=x)\<^sub>\ I)" + + +(* TODO : Deadlock Refinement *) + +(* TODO : Monadic IOCO *) + + +end diff --git a/src/test/TestScript.thy b/src/test/TestScript.thy new file mode 100644 index 0000000..6cf2e42 --- /dev/null +++ b/src/test/TestScript.thy @@ -0,0 +1,164 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * TestScript.thy --- Test Script and Test Data exporter + * This file is part of HOL-TestGen. + * + * Copyright (c) 2011-2015 Achim D. Brucker, Germany + * 2016-2017 The University of Sheffield, UK + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id: TestScript.thy 13147 2017-08-18 19:42:12Z brucker $ *) + + +theory TestScript +imports TestGen + "codegen_fsharp/code_fsharp" +keywords "generate_test_script" :: thy_decl + and "export_test_data" :: diag +begin + +code_printing constant "RSF" \ + (SML) "TestHarness.rsf" + and (Fsharp) "TestHarness.rsf" + and (Scala) "TestHarness.rsf" + +datatype Lazy = "lazy" bool + +code_printing constant "lazy" \ + (SML) "fn () => (_)" + and (Fsharp) "lazy ((_))" + and (Scala) "() => ((_))" + +code_printing type_constructor Lazy \ + (SML) "(unit -> bool)" + and (Fsharp) "Lazy" + and (Scala) "() => Boolean" + +code_printing code_module "Header" \ (SML) +{* + (********************************************************) + (* This file is generated automatically. 
Do not edit *) + (********************************************************) +*} + +ML{* +open List; + fun gen_ts_def dataname thy = let + + val tsname = "test_script" + val fqtsname = (Context.theory_name thy)^"."^dataname^"."^tsname + val tsbnd = Binding.name tsname + val tstype = HOLogic.listT (HOLogic.mk_tupleT[HOLogic.listT @{typ "Lazy"},@{typ "Lazy"}]) + + val (td,_) = (TestGen.get_test_data thy dataname,thy); + + fun def const bnd term thy = snd (Global_Theory.add_defs false [((bnd, + (Logic.mk_equals (const, term))),[Code.add_default_eqn_attribute Code.Equation])] thy) + + val prop_terms = List.map (fn p => let val _ $ ret = Thm.concl_of p in ret end) td; + val prems_terms = map (fn data => (map (fn p => let val _ $ ret = p in ret end) + (Thm.prems_of data))) td; + val spec = map2 (fn x => fn y => (x,y)) prems_terms prop_terms + + fun mk_abs t = Const("TestScript.Lazy.lazy",@{typ "bool"} --> @{typ "Lazy"})$t + + val ts_term = HOLogic.mk_list (HOLogic.mk_tupleT[HOLogic.listT @{typ "Lazy"},@{typ "Lazy"}]) + (map (fn (a,b) => HOLogic.mk_tuple [HOLogic.mk_list @{typ "Lazy"} + (map mk_abs a), mk_abs b]) spec) + in + thy |> (Sign.add_path (space_implode "_" [dataname])) + |> Sign.add_consts [(tsbnd, tstype, NoSyn)] + |> def (Const(fqtsname,tstype)) (tsbnd) ts_term + |> (Sign.parent_path) + end + +*} + + +ML {* (Outer_Syntax.command (("generate_test_script"), Position.none) + "generate code equation representing test script" + (Parse.name >> (fn name => Toplevel.theory (gen_ts_def name)))) +*} + +ML{* +fun export_test_data ctxt filename dataname = + let + val filename = Path.explode filename + val thy = Proof_Context.theory_of ctxt + val master_dir = Resources.master_directory thy + val td = TestGen.get_test_data thy dataname + val today = (Date.toString(Date.fromTimeUniv (Time.now())))^" (UTC)"; + val abs_filename = if (Path.is_absolute filename) then filename else Path.append master_dir filename + val thyname = Context.theory_name thy + (* fun string_of_term ctxt 
= Pretty.unformatted_string_of o (Syntax.unparse_term ctxt) *) + val string_of_term = Sledgehammer_Util.hackish_string_of_term + fun generate () = + let + val num = Int.toString(length td) + fun string_of_prems ctxt [] = "" + | string_of_prems ctxt (p::ps) = ("\n" + ^" "^(string_of_term ctxt p) ^ "\n" + ^" \n")^(string_of_prems ctxt ps) + val thmsstrings = String.concat (map (fn t => "\n" + ^(string_of_prems ctxt (Thm.prems_of t)) + ^" \n" + ^" "^string_of_term ctxt (Thm.prop_of t) ^ "\n" + ^" \n" + ^"\n" + ) td) + in + "\n\n" + ^thmsstrings + end + + val test_data_str = Print_Mode.setmp [] generate (); + val _ = File.write (abs_filename) test_data_str + handle (IO.Io{name=name,...}) => warning ("Could not write \""^(name)^"\".") + in () end +*} + +ML {* + Outer_Syntax.command (("export_test_data"), Position.none) + "export test data to an external file" + (Parse.name -- Parse.name + >> (fn (filename,name) => + (Toplevel.keep (fn state => export_test_data (Toplevel.context_of state) filename name)))); + + *} + +end diff --git a/src/test/TestSequence.thy b/src/test/TestSequence.thy new file mode 100644 index 0000000..861e8df --- /dev/null +++ b/src/test/TestSequence.thy @@ -0,0 +1,1001 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * Monads.thy --- a base testing theory for sequential computations. + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2007, ETH Zurich, Switzerland + * 2009 B. Wolff, Univ. Paris-Sud, France + * 2009, 2012 Achim D. Brucker, Germany + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id:$ *) + +chapter {* Basic Monad Theory for Sequential Computations *} + +theory Monads imports Main +begin + + +section{*General Framework for Monad-based Sequence-Test *} +text{* As such, Higher-order Logic as a purely functional specification + formalism has no built-in mechanism for state and state-transitions. + Forms of testing involving state require therefore explicit mechanisms + for their treatment inside the logic; a well-known technique to model + states inside purely functional languages are \emph{monads} made popular + by Wadler and Moggi and extensively used in Haskell. 
\HOL is powerful + enough to represent the most important standard monads; + however, it is not possible to represent monads as such due to well-known + limitations of the Hindley-Milner type-system. *} + +text{* Here is a variant for state-exception monads, that models precisely + transition functions with preconditions. Next, we declare the + state-backtrack-monad. In all of them, our concept of i/o stepping + functions can be formulated; these are functions mapping input + to a given monad. Later on, we will build the usual concepts of: + \begin{enumerate} + \item deterministic i/o automata, + \item non-deterministic i/o automata, and + \item labelled transition systems (LTS) + \end{enumerate} +*} + +subsection{* Standard State Exception Monads *} +type_synonym ('o, '\) MON\<^sub>S\<^sub>E = "'\ \ ('o \ '\)" (* = '\ \ ('o \ '\)option *) + + +definition bind_SE :: "('o,'\)MON\<^sub>S\<^sub>E \ ('o \ ('o','\)MON\<^sub>S\<^sub>E) \ ('o','\)MON\<^sub>S\<^sub>E" +where "bind_SE f g = (\\. case f \ of None \ None + | Some (out, \') \ g out \')" + +notation bind_SE ("bind\<^sub>S\<^sub>E") + +syntax (xsymbols) + "_bind_SE" :: "[pttrn,('o,'\)MON\<^sub>S\<^sub>E,('o','\)MON\<^sub>S\<^sub>E] \ ('o','\)MON\<^sub>S\<^sub>E" + ("(2 _ \ _; _)" [5,8,8]8) +translations + "x \ f; g" == "CONST bind_SE f (% x . g)" + + +definition unit_SE :: "'o \ ('o, '\)MON\<^sub>S\<^sub>E" ("(return _)" 8) +where "unit_SE e = (\\. Some(e,\))" +notation unit_SE ("unit\<^sub>S\<^sub>E") + +definition fail_SE :: "('o, '\)MON\<^sub>S\<^sub>E" +where "fail_SE = (\\. None)" +notation fail_SE ("fail\<^sub>S\<^sub>E") + +definition assert_SE :: "('\ \ bool) \ (bool, '\)MON\<^sub>S\<^sub>E" +where "assert_SE P = (\\. if P \ then Some(True,\) else None)" +notation assert_SE ("assert\<^sub>S\<^sub>E") + +definition assume_SE :: "('\ \ bool) \ (unit, '\)MON\<^sub>S\<^sub>E" +where "assume_SE P = (\\. if \\ . P \ then Some((), SOME \ . 
P \) else None)" +notation assume_SE ("assume\<^sub>S\<^sub>E") + +text{* The standard monad theorems about unit and associativity: *} + +lemma bind_left_unit [simp]: "(x \ return c; P x) = P c" + by (simp add: unit_SE_def bind_SE_def) + + +lemma bind_left_fail_SE[simp] : "(x \ fail\<^sub>S\<^sub>E; P x) = fail\<^sub>S\<^sub>E" + by (simp add: fail_SE_def bind_SE_def) + + +lemma bind_right_unit[simp]: "(x \ m; return x) = m" + apply (simp add: unit_SE_def bind_SE_def) + apply (rule ext) + apply (case_tac "m \", simp_all) + done + +lemma bind_assoc[simp]: "(y \ (x \ m; k x); h y) = (x \ m; (y \ k x; h y))" + apply (simp add: unit_SE_def bind_SE_def, rule ext) + apply (case_tac "m \", simp_all) + apply (case_tac "a", simp_all) + done + + +text{* The bind-operator in the state-exception monad yields already + a semantics for the concept of an input sequence on the meta-level: *} +lemma syntax_test: "(o1 \ f1 ; o2 \ f2; return (post o1 o2)) = X" +oops + + + +subsection {* "Pipe-free" - variant of the bind. *} + +definition seq_SE :: "[('\, '\)MON\<^sub>S\<^sub>E, ('\, '\)MON\<^sub>S\<^sub>E] \ ('\, '\)MON\<^sub>S\<^sub>E" (infixl ";-" 65) +where "f ;- g = (_ \ f ; g)" + + + +subsection {* Monadic If *} + +definition if_SE :: "['\ \ bool, ('\, '\)MON\<^sub>S\<^sub>E, ('\, '\)MON\<^sub>S\<^sub>E] \ ('\, '\)MON\<^sub>S\<^sub>E" +where "if_SE c E F = (\\. if c \ then E \ else F \)" + +syntax (xsymbols) + "_if_SE" :: "['\ \ bool,('o,'\)MON\<^sub>S\<^sub>E,('o','\)MON\<^sub>S\<^sub>E] \ ('o','\)MON\<^sub>S\<^sub>E" + ("(if\<^sub>S\<^sub>E _ then _ else _fi)" [5,8,8]8) +translations + "(if\<^sub>S\<^sub>E cond then T1 else T2 fi)" == "CONST if_SE cond T1 T2" + + + +subsubsection{* Theory of a Monadic While *} + +text{* First Step : Establishing an embedding between partial functions and relations *} +(* plongement *) +definition Mon2Rel :: "(unit, '\)MON\<^sub>S\<^sub>E \ ('\ \ '\) set" +where "Mon2Rel f = {(x, y). 
(f x = Some((), y))}" +(* ressortir *) +definition Rel2Mon :: " ('\ \ '\) set \ (unit, '\)MON\<^sub>S\<^sub>E " +where "Rel2Mon S = (\ \. if \\'. (\, \') \ S then Some((), SOME \'. (\, \') \ S) else None)" + +lemma Mon2Rel_Rel2Mon_id: assumes det:"single_valued R" shows "(Mon2Rel \ Rel2Mon) R = R" +apply (simp add: comp_def Mon2Rel_def Rel2Mon_def,auto) +apply (case_tac "\\'. (a, \') \ R", auto) +apply (subst some_eq_ex) back +apply (insert det[simplified single_valued_def]) +apply (auto) +done + +lemma Rel2Mon_Id: "(Rel2Mon \ Mon2Rel) x = x" +apply (rule ext) +apply (auto simp: comp_def Mon2Rel_def Rel2Mon_def) +apply (erule contrapos_pp, drule HOL.not_sym, simp) +done + +lemma single_valued_Mon2Rel: "single_valued (Mon2Rel B)" +by (auto simp: single_valued_def Mon2Rel_def) + +text{* Second Step : Proving an induction principle allowing to establish that lfp remains + deterministic *} + + +(* Due to Tobias Nipkow *) +definition chain :: "(nat => 'a set) => bool" +where "chain S = (\i. S i \ S(Suc i))" + +lemma chain_total: "chain S ==> S i \ S j \ S j \ S i" +by (metis chain_def le_cases lift_Suc_mono_le) + +definition cont :: "('a set => 'b set) => bool" +where "cont f = (\S. chain S \ f(UN n. S n) = (UN n. f(S n)))" + +lemma mono_if_cont: fixes f :: "'a set => 'b set" + assumes "cont f" shows "mono f" +proof + fix a b :: "'a set" assume "a \ b" + let ?S = "\n::nat. if n=0 then a else b" + have "chain ?S" using `a \ b` by(auto simp: chain_def) + hence "f(UN n. ?S n) = (UN n. f(?S n))" + using assms by(simp add: cont_def) + moreover have "(UN n. ?S n) = b" using `a \ b` by (auto split: if_splits) + moreover have "(UN n. f(?S n)) = f a \ f b" by (auto split: if_splits) + ultimately show "f a \ f b" by (metis Un_upper1) +qed + +lemma chain_iterates: fixes f :: "'a set => 'a set" + assumes "mono f" shows "chain(\n. 
(f^^n) {})" +proof- + { fix n have "(f ^^ n) {} \ (f ^^ Suc n) {}" using assms + by(induction n) (auto simp: mono_def) } + thus ?thesis by(auto simp: chain_def) +qed + +theorem lfp_if_cont: + assumes "cont f" shows "lfp f = (UN n. (f^^n) {})" (is "_ = ?U") +proof + show "lfp f \ ?U" + proof (rule lfp_lowerbound) + have "f ?U = (UN n. (f^^Suc n){})" + using chain_iterates[OF mono_if_cont[OF assms]] assms + by(simp add: cont_def) + also have "\ = (f^^0){} \ \" by simp + also have "\ = ?U" + by(auto simp del: funpow.simps) (metis not0_implies_Suc) + finally show "f ?U \ ?U" by simp + qed +next + { fix n p assume "f p \ p" + have "(f^^n){} \ p" + proof(induction n) + case 0 show ?case by simp + next + case Suc + from monoD[OF mono_if_cont[OF assms] Suc] `f p \ p` + show ?case by simp + qed + } + thus "?U \ lfp f" by(auto simp: lfp_def) +qed + + +lemma single_valued_UN_chain: + assumes "chain S" "(!!n. single_valued (S n))" + shows "single_valued(UN n. S n)" +proof(auto simp: single_valued_def) + fix m n x y z assume "(x, y) \ S m" "(x, z) \ S n" + with chain_total[OF assms(1), of m n] assms(2) + show "y = z" by (auto simp: single_valued_def) +qed + +lemma single_valued_lfp: +fixes f :: "('a \ 'a) set => ('a \ 'a) set" +assumes "cont f" "\r. single_valued r \ single_valued (f r)" +shows "single_valued(lfp f)" +unfolding lfp_if_cont[OF assms(1)] +proof(rule single_valued_UN_chain[OF chain_iterates[OF mono_if_cont[OF assms(1)]]]) + fix n show "single_valued ((f ^^ n) {})" + by(induction n)(auto simp: assms(2)) +qed + + +text{* Third Step: Definition of the Monadic While *} +definition \ :: "['\ \ bool,('\ \ '\) set] \ (('\ \ '\) set \ ('\ \ '\) set)" +where "\ b cd = (\cw. {(s,t). 
if b s then (s, t) \ cd O cw else s = t})" + + +definition while_SE :: "['\ \ bool, (unit, '\)MON\<^sub>S\<^sub>E] \ (unit, '\)MON\<^sub>S\<^sub>E" +where "while_SE c B \ (Rel2Mon(lfp(\ c (Mon2Rel B))))" + +syntax (xsymbols) + "_while_SE" :: "['\ \ bool, (unit, '\)MON\<^sub>S\<^sub>E] \ (unit, '\)MON\<^sub>S\<^sub>E" + ("(while\<^sub>S\<^sub>E _ do _ od)" [8,8]8) +translations + "while\<^sub>S\<^sub>E c do b od" == "CONST while_SE c b" + +lemma cont_\: "cont (\ c b)" +by (auto simp: cont_def \_def) + +text{* The fixpoint theory now allows us to establish that the lfp constructed over + @{term Mon2Rel} remains deterministic *} + +theorem single_valued_lfp_Mon2Rel: "single_valued (lfp(\ c (Mon2Rel B)))" +apply(rule single_valued_lfp, simp_all add: cont_\) +apply(auto simp: \_def single_valued_def) +apply(metis single_valued_Mon2Rel[of "B"] single_valued_def) +done + + +lemma Rel2Mon_if: + "Rel2Mon {(s, t). if b s then (s, t) \ Mon2Rel c O lfp (\ b (Mon2Rel c)) else s = t} \ = + (if b \ then Rel2Mon (Mon2Rel c O lfp (\ b (Mon2Rel c))) \ else Some ((), \))" +by (simp add: Rel2Mon_def) + +lemma Rel2Mon_homomorphism: + assumes determ_X: "single_valued X" and determ_Y: "single_valued Y" + shows "Rel2Mon (X O Y) = (Rel2Mon X) ;- (Rel2Mon Y)" +proof - + have relational_partial_next_in_O: "!!x E F. (\y. (x, y) \ (E O F)) + \ (\y. (x, y) \ E)" by (auto) +show ?thesis +apply (simp add: Rel2Mon_def seq_SE_def bind_SE_def) +apply (rule ext) +apply (case_tac " \\'. (\, \') \ X O Y") +apply (simp only: HOL.if_True) +apply (frule relational_partial_next_in_O) +apply (auto) +apply (insert determ_X determ_Y) + +defer 1 + +apply (rule_tac x=z in exI) +apply (rule someI2) +apply (assumption)+ +apply (simp add: single_valued_def) +apply (metis) + +apply (erule contrapos_pp) +apply (simp) +apply (rule_tac x=\' in exI) + apply (subgoal_tac "(SOME \'. (\, \') \ X) = \''") + apply (auto) + apply (auto simp: single_valued_def) +apply (fold single_valued_def) + + apply (subgoal_tac "(SOME \'. 
(x, \') \ X) = y") + apply (simp) + apply (subgoal_tac "(SOME \'. (y, \') \ Y) = z") + apply (simp) + apply (subgoal_tac "(SOME \'. (x, \') \ X O Y) = z") + apply (simp) + apply (auto simp: single_valued_def) + (* TODO IMPORTANT *) + + apply (rule someI2) + apply (assumption) + apply (rule someI2) + apply (auto) +oops +(* qed *) + +text{* Putting everything together, the theory of embedding and the invariance of + determinism of the while-body, gives us the usual unfold-theorem: *} +theorem "(while\<^sub>S\<^sub>E b do c od) = (if\<^sub>S\<^sub>E b then (c ;- (while\<^sub>S\<^sub>E b do c od)) else return () fi)" +apply (simp add: if_SE_def seq_SE_def while_SE_def unit_SE_def) +apply (subst lfp_unfold [OF mono_if_cont, OF cont_\]) +apply (rule ext) +apply (subst \_def) +apply (auto simp: Rel2Mon_if Rel2Mon_homomorphism seq_SE_def Rel2Mon_Id [simplified comp_def] + single_valued_Mon2Rel single_valued_lfp_Mon2Rel ) +done + + + +subsection{* Multi-binds *} + +text{* In order to express test-sequences also on the object-level and +to make our theory amenable to formal reasoning over test-sequences, +we represent them as lists of input and generalize the bind-operator +of the state-exception monad accordingly. The approach is straightforward, +but comes with a price: we have to encapsulate all input and output data +into one type. Assume that we have a typed interface to a module with +the operations $op_1$, $op_2$, \ldots, $op_n$ with the inputs +$\iota_1$, $\iota_2$, \ldots, $\iota_n$ (outputs are treated analogously). +Then we can encode for this interface the general input - type: +\begin{displaymath} +\texttt{datatype}\ \texttt{in}\ =\ op_1\ ::\ \iota_1\ |\ ...\ |\ \iota_n +\end{displaymath} +Obviously, we loose some type-safety in this approach; we have to express +that in traces only \emph{corresponding} input and output belonging to the +same operation will occur; this form of side-conditions have to be expressed +inside \HOL. 
From the user perspective, this will not make much difference, +since junk-data resulting from too weak typing can be ruled out by adopted +front-ends. +*} + +text{* Note that the subsequent notion of a test-sequence allows the io stepping +function (and the special case of a program under test) to stop execution +\emph{within} the sequence; such premature terminations are characterized by an +output list which is shorter than the input list. *} + +fun mbind :: "'\ list \ ('\ \ ('o,'\) MON\<^sub>S\<^sub>E) \ ('o list,'\) MON\<^sub>S\<^sub>E" +where "mbind [] iostep \ = Some([], \)" | + "mbind (a#H) iostep \ = + (case iostep a \ of + None \ Some([], \) + | Some (out, \') \ (case mbind H iostep \' of + None \ Some([out],\') + | Some(outs,\'') \ Some(out#outs,\'')))" + +notation mbind ("mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e") (* future name: mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e *) + +text{* This definition is fail-safe; in case of an exception, the current state is maintained, + the computation as a whole is marked as success. + Compare to the fail-strict variant @{text "mbind'"}: *} + +lemma mbind_unit [simp]: + "mbind [] f = (return [])" + by(rule ext, simp add: unit_SE_def) + +text{* The characteristic property of @{term mbind} --- which distinguishes it from + @{text mbind'} defined in the sequel --- is that it never fails; it ``swallows'' internal + errors occuring during the computation. 
*} +lemma mbind_nofailure [simp]: + "mbind S f \ \ None" + apply(rule_tac x=\ in spec) + apply(induct S, auto simp:unit_SE_def) + apply(case_tac "f a x", auto) + apply(erule_tac x="b" in allE) + apply(erule exE, erule exE, simp) + done + + +fun mbind' :: "'\ list \ ('\ \ ('o,'\) MON\<^sub>S\<^sub>E) \ ('o list,'\) MON\<^sub>S\<^sub>E" +where "mbind' [] iostep \ = Some([], \)" | + "mbind' (a#S) iostep \ = + (case iostep a \ of + None \ None + | Some (out, \') \ (case mbind' S iostep \' of + None \ None (* fail-strict *) + | Some(outs,\'') \ Some(out#outs,\'')))" +notation mbind' ("mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p") (* future name: mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p *) + +lemma mbind'_unit [simp]: + "mbind' [] f = (return [])" + by(rule ext, simp add: unit_SE_def) + +lemma mbind'_bind [simp]: + "(x \ mbind' (a#S) F; M x) = (a \ (F a); (x \ mbind' S F; M (a # x)))" + by(rule ext, rename_tac "z",simp add: bind_SE_def split: option.split) + +declare mbind'.simps[simp del] (* use only more abstract definitions *) + + +fun mbind'' :: "'\ list \ ('\ \ ('o,'\) MON\<^sub>S\<^sub>E) \ ('o list,'\) MON\<^sub>S\<^sub>E" +where "mbind'' [] iostep \ = Some([], \)" | + "mbind'' (a#S) iostep \ = + (case iostep a \ of + None \ mbind'' S iostep \ + | Some (out, \') \ (case mbind'' S iostep \' of + None \ None (* does not occur *) + | Some(outs,\'') \ Some(out#outs,\'')))" + +notation mbind'' ("mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e") (* future name: mbind\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e\<^sub>F\<^sub>a\<^sub>i\<^sub>l *) +declare mbind''.simps[simp del] (* use only more abstract definitions *) + + +text{* mbind' as failure strict operator can be seen as a foldr on bind - + if the types would match \ldots *} + +definition try_SE :: "('o,'\) MON\<^sub>S\<^sub>E \ ('o option,'\) MON\<^sub>S\<^sub>E" ("try\<^sub>S\<^sub>E") +where "try\<^sub>S\<^sub>E ioprog = (\\. 
case ioprog \ of + None \ Some(None, \) + | Some(outs, \') \ Some(Some outs, \'))" +text{* In contrast, mbind as a failure safe operator can roughly be seen + as a foldr on bind - try: + m1 ; try m2 ; try m3; ... Note, that the rough equivalence only holds for + certain predicates in the sequence - length equivalence modulo None, + for example. However, if a conditional is added, the equivalence + can be made precise: *} + +lemma mbind_try: + "(x \ mbind (a#S) F; M x) = + (a' \ try\<^sub>S\<^sub>E(F a); + if a' = None + then (M []) + else (x \ mbind S F; M (the a' # x)))" +apply(rule ext) +apply(simp add: bind_SE_def try_SE_def) +apply(case_tac "F a x", auto) +apply(simp add: bind_SE_def try_SE_def) +apply(case_tac "mbind S F b", auto) +done + +text{* On this basis, a symbolic evaluation scheme can be established + that reduces mbind-code to try\_SE\_code and ite-cascades. *} + +definition alt_SE :: "[('o, '\)MON\<^sub>S\<^sub>E, ('o, '\)MON\<^sub>S\<^sub>E] \ ('o, '\)MON\<^sub>S\<^sub>E" (infixl "\\<^sub>S\<^sub>E" 10) +where "(f \\<^sub>S\<^sub>E g) = (\ \. case f \ of None \ g \ + | Some H \ Some H)" + +definition malt_SE :: "('o, '\)MON\<^sub>S\<^sub>E list \ ('o, '\)MON\<^sub>S\<^sub>E" +where "malt_SE S = foldr alt_SE S fail\<^sub>S\<^sub>E" +notation malt_SE ("\\<^sub>S\<^sub>E") + +lemma malt_SE_mt [simp]: "\\<^sub>S\<^sub>E [] = fail\<^sub>S\<^sub>E" +by(simp add: malt_SE_def) + +lemma malt_SE_cons [simp]: "\\<^sub>S\<^sub>E (a # S) = (a \\<^sub>S\<^sub>E (\\<^sub>S\<^sub>E S))" +by(simp add: malt_SE_def) + + +subsection{* State Backtrack Monads *} +text{*This subsection is still rudimentary and as such an interesting +formal analogue to the previous monad definitions. It is doubtful that it is +interesting for testing and as a cmputational stucture at all. +Clearly more relevant is ``sequence'' instead of ``set,'' which would +rephrase Isabelle's internal tactic concept. 
*} + +type_synonym ('o, '\) MON\<^sub>S\<^sub>B = "'\ \ ('o \ '\) set" + +definition bind_SB :: "('o, '\)MON\<^sub>S\<^sub>B \ ('o \ ('o', '\)MON\<^sub>S\<^sub>B) \ ('o', '\)MON\<^sub>S\<^sub>B" +where "bind_SB f g \ = \ ((\(out, \). (g out \)) ` (f \))" +notation bind_SB ("bind\<^sub>S\<^sub>B") + +definition unit_SB :: "'o \ ('o, '\)MON\<^sub>S\<^sub>B" ("(returns _)" 8) +where "unit_SB e = (\\. {(e,\)})" +notation unit_SB ("unit\<^sub>S\<^sub>B") + +syntax (xsymbols) + "_bind_SB" :: "[pttrn,('o,'\)MON\<^sub>S\<^sub>B,('o','\)MON\<^sub>S\<^sub>B] \ ('o','\)MON\<^sub>S\<^sub>B" + ("(2 _ := _; _)" [5,8,8]8) +translations + "x := f; g" == "CONST bind_SB f (% x . g)" + + + +lemma bind_left_unit_SB : "(x := returns a; m x) = m a" + by (rule ext,simp add: unit_SB_def bind_SB_def) + +lemma bind_right_unit_SB: "(x := m; returns x) = m" + by (rule ext, simp add: unit_SB_def bind_SB_def) + + +lemma bind_assoc_SB: "(y := (x := m; k x); h y) = (x := m; (y := k x; h y))" + by (rule ext, simp add: unit_SB_def bind_SB_def split_def) + + +subsection{* State Backtrack Exception Monad (vulgo: Boogie-PL) *} +text{* The following combination of the previous two Monad-Constructions +allows for the semantic foundation of a simple generic assertion language +in the style of Schirmers Simpl-Language or Rustan Leino's Boogie-PL language. +The key is to use the exceptional element None for violations of +the assert-statement. *} +type_synonym ('o, '\) MON\<^sub>S\<^sub>B\<^sub>E = "'\ \ (('o \ '\) set) option" + + +definition bind_SBE :: "('o,'\)MON\<^sub>S\<^sub>B\<^sub>E \ ('o \ ('o','\)MON\<^sub>S\<^sub>B\<^sub>E) \ ('o','\)MON\<^sub>S\<^sub>B\<^sub>E" +where "bind_SBE f g = (\\. case f \ of None \ None + | Some S \ (let S' = (\(out, \'). 
g out \') ` S + in if None \ S' then None + else Some(\ (the ` S'))))" + +syntax (xsymbols) + "_bind_SBE" :: "[pttrn,('o,'\)MON\<^sub>S\<^sub>B\<^sub>E,('o','\)MON\<^sub>S\<^sub>B\<^sub>E] \ ('o','\)MON\<^sub>S\<^sub>B\<^sub>E" + ("(2 _ :\ _; _)" [5,8,8]8) +translations + "x :\ f; g" == "CONST bind_SBE f (% x . g)" + +definition unit_SBE :: "'o \ ('o, '\)MON\<^sub>S\<^sub>B\<^sub>E" ("(returning _)" 8) +where "unit_SBE e = (\\. Some({(e,\)}))" + +definition assert_SBE :: "('\ \ bool) \ (unit, '\)MON\<^sub>S\<^sub>B\<^sub>E" +where "assert_SBE e = (\\. if e \ then Some({((),\)}) + else None)" +notation assert_SBE ("assert\<^sub>S\<^sub>B\<^sub>E") + +definition assume_SBE :: "('\ \ bool) \ (unit, '\)MON\<^sub>S\<^sub>B\<^sub>E" +where "assume_SBE e = (\\. if e \ then Some({((),\)}) + else Some {})" +notation assume_SBE ("assume\<^sub>S\<^sub>B\<^sub>E") + + +definition havoc_SBE :: " (unit, '\)MON\<^sub>S\<^sub>B\<^sub>E" +where "havoc_SBE = (\\. Some({x. True}))" +notation havoc_SBE ("havoc\<^sub>S\<^sub>B\<^sub>E") + + +lemma bind_left_unit_SBE : "(x :\ returning a; m x) = m a" + apply (rule ext,simp add: unit_SBE_def bind_SBE_def) + apply (case_tac "m a x",auto) + done + +lemma bind_right_unit_SBE: "(x :\ m; returning x) = m" + apply (rule ext, simp add: unit_SBE_def bind_SBE_def) + apply (case_tac "m x", simp_all add:Let_def) + apply (rule HOL.ccontr, simp add: Set.image_iff) + done + + +lemmas aux = trans[OF HOL.neq_commute,OF Option.not_None_eq] + +lemma bind_assoc_SBE: "(y :\ (x :\ m; k); h y) = (x :\ m; (y :\ k; h y))" +proof (rule ext, rename_tac z, simp add: unit_SBE_def bind_SBE_def, + case_tac "m z", simp_all add: Let_def Set.image_iff, safe) + case goal1 then show ?case + by(rule_tac x="(a, b)" in bexI, simp_all) +next + case goal2 then show ?case + apply(rule_tac x="(aa,b)" in bexI, simp_all add:split_def) + apply(erule_tac x="(aa,b)" in ballE) + apply(auto simp: aux image_def split_def intro!: rev_bexI) + done +next + case goal3 then show ?case + 
by(rule_tac x="(a, b)" in bexI, simp_all) +next + case goal4 then show ?case + apply(erule_tac Q="None = ?X" in contrapos_pp) + apply(erule_tac x="(aa,b)" and P="\ x. None \ split (\out. k) x" in ballE) + apply(auto simp: aux Option.not_None_eq image_def split_def intro!: rev_bexI) + done +next + case goal5 then show ?case + apply simp apply((erule_tac x="(ab,ba)" in ballE)+) + apply(simp_all add: aux Option.not_None_eq, (erule exE)+, simp add:split_def) + apply(erule rev_bexI,case_tac "None\(\p. h (fst p) (snd p))`y",auto simp:split_def) + done + +next + case goal6 then show ?case + apply simp apply((erule_tac x="(a,b)" in ballE)+) + apply(simp_all add: aux Option.not_None_eq, (erule exE)+, simp add:split_def) + apply(erule rev_bexI, case_tac "None\(\p. h(fst p)(snd p))`y",auto simp:split_def) + done +qed + + + +(* TODO: IF THEN ELSE and WHILE + Monadic Laws + Operational Rules. *) + + + +section{* Valid Execution Sequences in the State Exception Monad *} +text{* This is still an unstructured merge of executable monad concepts +and specification oriented high-level properties initiating test procedures. *} + +definition valid_SE :: "'\ \ (bool,'\) MON\<^sub>S\<^sub>E \ bool" (infix "\" 15) +where "(\ \ m) = (m \ \ None \ fst(the (m \)))" +text{* This notation consideres failures as valid -- a definition +inspired by I/O conformance. BUG: It is not possible to define +this concept once and for all in a Hindley-Milner type-system. +For the moment, we present it only for the state-exception +monad, although for the same definition, this notion is applicable +to other monads as well. *} + + +lemma exec_unit_SE [simp]: "(\ \ (return P)) = (P)" +by(auto simp: valid_SE_def unit_SE_def) + +lemma exec_unit_SE' [simp]: "(\\<^sub>0 \ (\\. Some (f \, \))) = (f \\<^sub>0)" +by(simp add: valid_SE_def ) + +lemma exec_fail_SE [simp]: "(\ \ fail\<^sub>S\<^sub>E) = False" +by(auto simp: valid_SE_def fail_SE_def) + + +lemma exec_fail_SE'[simp]: "\(\\<^sub>0 \ (\\. 
None))" +by(simp add: valid_SE_def ) + +lemma exec_bind_SE_failure: +"A \ = None \ \(\ \ ((s \ A ; M s)))" +by(simp add: valid_SE_def unit_SE_def bind_SE_def) + +lemma exec_bind_SE_success: +"A \ = Some(b,\') \ (\ \ ((s \ A ; M s))) = (\' \ (M b))" +by(simp add: valid_SE_def unit_SE_def bind_SE_def ) + +lemma exec_bind_SE_success': (* atomic boolean Monad "Query Functions" *) +"M \ = Some(f \,\) \ (\ \ M) = f \" +by(simp add: valid_SE_def unit_SE_def bind_SE_def ) + + + +lemma exec_bind_SE_success'': +"\ \ ((s \ A ; M s)) \ \ v \'. the(A \) = (v,\') \ \' \ (M v)" +apply(auto simp: valid_SE_def unit_SE_def bind_SE_def) +apply(cases "A \", simp_all) +apply(simp add: Product_Type.prod_case_unfold) +apply(drule_tac x="A \" and f=the in arg_cong, simp) +apply(rule_tac x="fst aa" in exI) +apply(rule_tac x="snd aa" in exI, auto) +done + + +lemma exec_bind_SE_success''': +"\ \ ((s \ A ; M s)) \ \ a. (A \) = Some a \ (snd a) \ (M (fst a))" +apply(auto simp: valid_SE_def unit_SE_def bind_SE_def) +apply(cases "A \", simp_all) +apply(simp_all add: Product_Type.prod_case_unfold + split: prod.splits) +apply(drule_tac x="A \" and f=the in arg_cong, simp) +apply(rule_tac x="fst aa" in exI) +apply(rule_tac x="snd aa" in exI, auto) +done + + +lemma exec_bind_SE_success'''' : +"\ \ ((s \ A ; M s)) \ \ v \'. A \ = Some(v,\') \ \' \ (M v)" +apply(auto simp: valid_SE_def unit_SE_def bind_SE_def) +apply(cases "A \", simp_all) +apply(simp add: Product_Type.prod_case_unfold) +apply(drule_tac x="A \" and f=the in arg_cong, simp) +apply(rule_tac x="fst aa" in exI) +apply(rule_tac x="snd aa" in exI, auto) +done + + +text{* Recall \verb+mbind_unit+ for the base case. 
*} + +lemma exec_mbindFSave_failure: +"ioprog a \ = None \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (a#S) ioprog ; M s)) = (\ \ (M []))" +by(simp add: valid_SE_def unit_SE_def bind_SE_def) + +lemma exec_mbindFStop_failure: +"ioprog a \ = None \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (a#S) ioprog ; M s)) = (False)" +by(simp add: exec_bind_SE_failure) + +lemma exec_mbindFPurge_failure: +"ioprog a \ = None \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e (a#S) ioprog ; M s)) = (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e (S) ioprog ; M s))" +by(simp add: valid_SE_def unit_SE_def bind_SE_def mbind''.simps) + + +lemma exec_mbindFSave_success : +"ioprog a \ = Some(b,\') \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (a#S) ioprog ; M s)) = + (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S ioprog ; M (b#s)))" +unfolding valid_SE_def unit_SE_def bind_SE_def +by(cases "mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S ioprog \'", auto) + +lemma exec_mbindFStop_success : +"ioprog a \ = Some(b,\') \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (a#S) ioprog ; M s)) = + (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S ioprog ; M (b#s)))" +unfolding valid_SE_def unit_SE_def bind_SE_def +by(cases "mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S ioprog \'", auto simp: mbind'.simps) + +lemma exec_mbindFPurge_success : +"ioprog a \ = Some(b,\') \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e (a#S) ioprog ; M s)) = + (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e S ioprog ; M (b#s)))" +unfolding valid_SE_def unit_SE_def bind_SE_def +by(cases 
"mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e S ioprog \'", auto simp: mbind''.simps) + +lemma exec_mbindFSave: +"(\ \ (s \ mbind (a#S) ioprog ; return (P s))) = + (case ioprog a \ of + None \ (\ \ (return (P []))) + | Some(b,\') \ (\' \ (s \ mbind S ioprog ; return (P (b#s)))))" +apply(case_tac "ioprog a \") +apply(auto simp: exec_mbindFSave_failure exec_mbindFSave_success split: prod.splits) +apply(subst (asm) exec_mbindFSave_success,assumption,simp_all) +by(subst exec_mbindFSave_success,assumption,simp)+ + + +text{* Universal splitting and symbolic execution rule *} +lemma exec_mbindFSave_E: +assumes seq : "(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (a#S) ioprog ; (P s)))" + and none: "ioprog a \ = None \ (\ \ (P [])) \ Q" + and some: "\ b \'. ioprog a \ = Some(b,\') \ (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S ioprog;(P (b#s)))) \ Q " +shows "Q" +using seq +proof(cases "ioprog a \") + case None assume ass:"ioprog a \ = None" show "Q" + apply(rule none[OF ass]) + apply(insert ass, erule_tac ioprog1=ioprog in exec_mbindFSave_failure[THEN iffD1],rule seq) + done +next + case (Some aa) assume ass:"ioprog a \ = Some aa" show "Q" + apply(insert ass,cases "aa",simp, rename_tac "out" "\'") + apply(erule some) + apply(insert ass,simp) + apply(erule_tac ioprog1=ioprog in exec_mbindFSave_success[THEN iffD1],rule seq) + done +qed + +text{* The next rule reveals the particular interest in deduction; + as an elimination rule, it allows for a linear conversion of a validity judgement + @{term "mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p"} over an input list @{term "S"} into a constraint system; without any + branching ... Symbolic execution can even be stopped tactically whenever + @{term "ioprog a \ = Some(b,\')"} comes to a contradiction. 
*} +lemma exec_mbindFStop_E: +assumes seq : "(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (a#S) ioprog ; (P s)))" + and some: "\b \'. ioprog a \ = Some(b,\') \ (\'\ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S ioprog;(P(b#s)))) \ Q" +shows "Q" +using seq +proof(cases "ioprog a \") + case None assume ass:"ioprog a \ = None" show "Q" + apply(insert ass seq) + apply(drule_tac \=\ and S=S and M=P in exec_mbindFStop_failure, simp) + done +next + case (Some aa) assume ass:"ioprog a \ = Some aa" show "Q" + apply(insert ass,cases "aa",simp, rename_tac "out" "\'") + apply(erule some) + apply(insert ass,simp) + apply(erule_tac ioprog1=ioprog in exec_mbindFStop_success[THEN iffD1],rule seq) + done +qed + + +lemma exec_mbindFPurge_E: +assumes seq : "(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e (a#S) ioprog ; (P s)))" + and none: "ioprog a \ = None \ (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e S ioprog;(P (s)))) \ Q" + and some: "\ b \'. 
ioprog a \ = Some(b,\') \ (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e S ioprog;(P (b#s)))) \ Q " +shows "Q" +using seq +proof(cases "ioprog a \") + case None assume ass:"ioprog a \ = None" show "Q" + apply(rule none[OF ass]) + apply(insert ass, erule_tac ioprog1=ioprog in exec_mbindFPurge_failure[THEN iffD1],rule seq) + done +next + case (Some aa) assume ass:"ioprog a \ = Some aa" show "Q" + apply(insert ass,cases "aa",simp, rename_tac "out" "\'") + apply(erule some) + apply(insert ass,simp) + apply(erule_tac ioprog1=ioprog in exec_mbindFPurge_success[THEN iffD1],rule seq) + done +qed + + +lemma assert_disch1 :" P \ \ (\ \ (x \ assert\<^sub>S\<^sub>E P; M x)) = (\ \ (M True))" +by(auto simp: bind_SE_def assert_SE_def valid_SE_def) + +lemma assert_disch2 :" \ P \ \ \ (\ \ (x \ assert\<^sub>S\<^sub>E P ; M s))" +by(auto simp: bind_SE_def assert_SE_def valid_SE_def) + +lemma assert_disch3 :" \ P \ \ \ (\ \ (assert\<^sub>S\<^sub>E P))" +by(auto simp: bind_SE_def assert_SE_def valid_SE_def) + +lemma assert_D : "(\ \ (x \ assert\<^sub>S\<^sub>E P; M x)) \ P \ \ (\ \ (M True))" +by(auto simp: bind_SE_def assert_SE_def valid_SE_def split: HOL.split_if_asm) + +lemma assume_D : "(\ \ (x \ assume\<^sub>S\<^sub>E P; M x)) \ \ \. (P \ \ \ \ (M ()))" +apply(auto simp: bind_SE_def assume_SE_def valid_SE_def split: HOL.split_if_asm) +apply(rule_tac x="Eps P" in exI, auto) +apply(rule_tac x="True" in exI, rule_tac x="b" in exI) +apply(subst Hilbert_Choice.someI,assumption,simp) +apply(subst Hilbert_Choice.someI,assumption,simp) +done +text{* These two rule prove that the SE Monad in connection with the notion of valid sequence +is actually sufficient for a representation of a Boogie-like language. The SBE monad with explicit +sets of states --- to be shown below --- is strictly speaking not necessary (and will therefore +be discontinued in the development). 
*} + +term "if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi" + +lemma if_SE_D1 : "P \ \ (\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)) = (\ \ B\<^sub>1)" +by(auto simp: if_SE_def valid_SE_def) + +lemma if_SE_D2 : "\ P \ \ (\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)) = (\ \ B\<^sub>2)" +by(auto simp: if_SE_def valid_SE_def) + +lemma if_SE_split_asm : " (\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)) = ((P \ \ (\ \ B\<^sub>1)) \ (\ P \ \ (\ \ B\<^sub>2)))" +by(cases "P \",auto simp: if_SE_D1 if_SE_D2) + +lemma if_SE_split : " (\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)) = ((P \ \ (\ \ B\<^sub>1)) \ (\ P \ \ (\ \ B\<^sub>2)))" +by(cases "P \", auto simp: if_SE_D1 if_SE_D2) + + +lemma if_SE_execE: + assumes A: "\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)" + and B: "P \ \ \ \ B\<^sub>1 \ Q" + and C: "\ P \\ \ \ B\<^sub>2 \ Q" + shows "Q" +by(insert A [simplified if_SE_split],cases "P \", simp_all, auto elim: B C) + + +lemma [code]: + "(\ \ m) = (case (m \) of None \ False | (Some (x,y)) \ x)" + apply(simp add: valid_SE_def) + apply(cases "m \ = None", simp_all) + apply(insert not_None_eq, auto) +done + + +text{* Test-Refinements will be stated in terms of the failsave @{term mbind}, opting + more generality. The following lemma allows for an optimization both in + test execution as well as in symbolic execution for an important special case of + the post-codition: Whenever the latter has the constraint that the length of input and + output sequence equal each other (that is to say: no failure occured), failsave mbind + can be reduced to failstop mbind ... 
*} +lemma mbindFSave_vs_mbindFStop : + "(\ \ (os \ (mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e \s ioprog); return(length \s = length os \ P \s os))) = + (\ \ (os \ (mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p \s ioprog); return(P \s os)))" + apply(rule_tac x=P in spec) + apply(rule_tac x=\ in spec) + proof(induct "\s") + case Nil show ?case by(simp_all add: mbind_try try_SE_def del: Monads.mbind.simps) + case (Cons a \s) show ?case + apply(rule allI, rename_tac "\",rule allI, rename_tac "P") + apply(insert Cons.hyps) + apply(case_tac "ioprog a \") + apply(simp only: exec_mbindFSave_failure exec_mbindFStop_failure, simp) + apply(simp add: split_paired_all del: Monads.mbind.simps ) + apply(rename_tac "\'") + apply(subst exec_mbindFSave_success, assumption) + apply(subst (2) exec_bind_SE_success, assumption) + apply(erule_tac x="\'" in allE) + apply(erule_tac x="\\s s. P (a # \s) (aa # s)" in allE) (* heureka ! *) + apply(simp) + done + qed + + +lemma mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e_vs_mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p: +assumes A: "\ \ \. ioprog \ \ \ None" +shows "(\ \ (os \ (mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e \s ioprog); P os)) = + (\ \ (os \ (mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p \s ioprog); P os))" +proof(induct "\s") print_cases + case Nil show ?case by simp +next + case (Cons a \s) + from Cons.hyps + have B:"\ S f \. mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S f \ \ None " by simp + have C:"\\. mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p \s ioprog \ = mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e \s ioprog \" + apply(induct \s, simp) + apply(rule allI,rename_tac "\") + apply(simp add: Monads.mbind'.simps(2)) + apply(insert A, erule_tac x="a" in allE) + apply(erule_tac x="\" and P="\\ . 
ioprog a \ \ None" in allE) + apply(auto split:option.split) + done + show ?case + apply(insert A,erule_tac x="a" in allE,erule_tac x="\" in allE) + apply(simp, elim exE) + apply(rename_tac "out" "\'") + apply(insert B, erule_tac x=\s in allE, erule_tac x=ioprog in allE, erule_tac x=\' in allE) + apply(subst(asm) not_None_eq, elim exE) + apply(subst Monads.exec_bind_SE_success) + apply(simp split: option.split, auto) + apply(rule_tac s="(\ a b c. a # (fst c)) out \' (aa, b)" in trans, simp,rule refl) + apply(rule_tac s="(\ a b c. (snd c)) out \' (aa, b)" in trans, simp,rule refl) + apply(simp_all) + apply(subst Monads.exec_bind_SE_success, assumption) + apply(subst Monads.exec_bind_SE_success) + apply(rule_tac s="Some (aa, b)" in trans,simp_all add:C) + apply(subst(asm) Monads.exec_bind_SE_success, assumption) + apply(subst(asm) Monads.exec_bind_SE_success) + apply(rule_tac s="Some (aa, b)" in trans,simp_all add:C) + done +qed + + +section{* Valid Test Sequences in the State Exception Backtrack Monad *} +text{* This is still an unstructured merge of executable monad concepts +and specification oriented high-level properties initiating test procedures. *} + +definition valid_SBE :: "'\ \ ('a,'\) MON\<^sub>S\<^sub>B\<^sub>E \ bool" (infix "\\<^sub>S\<^sub>B\<^sub>E" 15) +where "\ \\<^sub>S\<^sub>B\<^sub>E m \ (m \ \ None)" +text{* This notation consideres all non-failures as valid. 
*} + + +lemma assume_assert: "(\ \\<^sub>S\<^sub>B\<^sub>E ( _ :\ assume\<^sub>S\<^sub>B\<^sub>E P ; assert\<^sub>S\<^sub>B\<^sub>E Q)) = (P \ \ Q \)" + by(simp add: valid_SBE_def assume_SBE_def assert_SBE_def bind_SBE_def) + +lemma assert_intro: "Q \ \ \ \\<^sub>S\<^sub>B\<^sub>E (assert\<^sub>S\<^sub>B\<^sub>E Q)" + by(simp add: valid_SBE_def assume_SBE_def assert_SBE_def bind_SBE_def) + +lemma assume_dest: + "\ \ \\<^sub>S\<^sub>B\<^sub>E (x :\ assume\<^sub>S\<^sub>B\<^sub>E Q; M x); Q \' \ \ \ \\<^sub>S\<^sub>B\<^sub>E M ()" + apply(auto simp: valid_SBE_def assume_SBE_def assert_SBE_def bind_SBE_def) + apply(cases "Q \",simp_all) + oops + +text{* This still needs work. What would be needed is a kind + of wp - calculus that comes out of that. So far: nope. *} + +subsection{* Legacy Bindings *} + + +lemma valid_true[simp]: (* legacy: special case *) + "(\ \ (s \ return x ; return (P s))) = P x" by simp + + +(* +lemmas valid_both = exec_mbindFSave (* legacy *) +lemmas valid_success = exec_mbindFSave_success (* legacy *) +lemmas valid_success'' = exec_mbindFSave_success(* legacy *) +lemmas valid_success' = exec_bind_SE_success (* legacy *) +lemmas valid_failure = exec_mbindFSave_failure (* legacy : *) +lemmas valid_failure' = exec_bind_SE_failure (* legacy *) +lemmas valid_failure''=valid_failure (* legacy : *) +lemmas valid_failure''' = exec_mbindFStop_failure (* legacy : *) +lemmas valid_propagate_fail = exec_fail_SE (* legacy *) +lemmas valid_propagate_fail' = exec_fail_SE' (* legacy *) +lemmas valid_propoagate_3' = valid_propagate_fail' (* legacy *) +lemmas valid_propagate_2 = exec_bind_SE_success''(* legacy *) +lemmas valid_propagate_1 = exec_unit_SE (* legacy *) +lemmas valid_successElem = exec_bind_SE_success' (* legacy *) +lemmas valid_propagate_2' = exec_bind_SE_success'''(* legacy *) +lemmas valid_propagate_2'' = exec_bind_SE_success'''' (* legacy *) +lemmas valid_propoagate_3 = exec_unit_SE' (* legacy *) + *) +(* legacy ?: +lemma valid_success'': 
+"ioprog a \ = Some(b,\') \ + (\ \ (s \ mbind (a#S) ioprog ; return (P s))) = + (\' \ (s \ mbind S ioprog ; return (P (b#s))))" +unfolding valid_SE_def unit_SE_def bind_SE_def +by(cases "mbind S ioprog \'", auto) +*) + +end diff --git a/src/test/Testing.thy b/src/test/Testing.thy new file mode 100644 index 0000000..94c6ff0 --- /dev/null +++ b/src/test/Testing.thy @@ -0,0 +1,57 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * Testing.thy --- wiring everything together. + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2007 ETH Zurich, Switzerland + * 2008-2015 Achim D. Brucker, Germany + * 2009-2016 Université Paris-Sud, France + * 2016 The University of Sheffield, UK + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id:$ *) + +chapter {* Summing up *} + +theory + Testing +imports + TestGen + TestLib + TestScript + "codegen_fsharp/code_fsharp" +begin + + +end