From d2a175715719897618657baabe931c8636917d9e Mon Sep 17 00:00:00 2001 From: Peyman Derafshkavian Date: Sat, 5 Feb 2022 13:02:51 -0500 Subject: [PATCH 1/8] Add temporary modifications to TestGen (cherry picked from commit 1f306a984404b9c7cf30020614a00d4186e33a2d) --- src/test-gen/examples/ROOT | 83 + src/test-gen/src/ROOT | 20 + src/test-gen/src/test/Automata.thy | 391 +++++ src/test-gen/src/test/EFSM_Toolkit.thy | 167 ++ src/test-gen/src/test/Interleaving.thy | 244 +++ src/test-gen/src/test/Monads.thy | 1256 +++++++++++++++ src/test-gen/src/test/Observers.thy | 200 +++ src/test-gen/src/test/TestEnv.ML | 458 ++++++ src/test-gen/src/test/TestEnv.thy | 726 +++++++++ src/test-gen/src/test/TestGen.thy | 1704 +++++++++++++++++++++ src/test-gen/src/test/TestLib.thy | 57 + src/test-gen/src/test/TestRefinements.thy | 248 +++ src/test-gen/src/test/TestScript.thy | 164 ++ src/test-gen/src/test/TestSequence.thy | 1001 ++++++++++++ src/test-gen/src/test/Testing.thy | 57 + 15 files changed, 6776 insertions(+) create mode 100644 src/test-gen/examples/ROOT create mode 100644 src/test-gen/src/ROOT create mode 100644 src/test-gen/src/test/Automata.thy create mode 100644 src/test-gen/src/test/EFSM_Toolkit.thy create mode 100644 src/test-gen/src/test/Interleaving.thy create mode 100644 src/test-gen/src/test/Monads.thy create mode 100644 src/test-gen/src/test/Observers.thy create mode 100644 src/test-gen/src/test/TestEnv.ML create mode 100644 src/test-gen/src/test/TestEnv.thy create mode 100644 src/test-gen/src/test/TestGen.thy create mode 100644 src/test-gen/src/test/TestLib.thy create mode 100644 src/test-gen/src/test/TestRefinements.thy create mode 100644 src/test-gen/src/test/TestScript.thy create mode 100644 src/test-gen/src/test/TestSequence.thy create mode 100644 src/test-gen/src/test/Testing.thy diff --git a/src/test-gen/examples/ROOT b/src/test-gen/examples/ROOT new file mode 100644 index 0000000..aa6c5c6 --- /dev/null +++ b/src/test-gen/examples/ROOT @@ -0,0 +1,83 @@ +session "HOL-TestGen-Max" in "unit/Max" = "HOL-TestGen" + + theories + Max_test + +session "HOL-TestGen-Triangle" in "unit/Triangle" = "HOL-TestGen" + + theories + Triangle + Triangle_test + +session "HOL-TestGen-List" in "unit/List" = "HOL-TestGen" + + options [document = pdf,document_variants="document:outline=/proof,/ML",document_output=output] + theories + "List_test" + document_files + "root.tex" + "root.bib" + "main.tex" + "titlepage.tex" + +session "HOL-TestGen-ListVerified" in "unit/ListVerified" = "HOL-TestGen" + + theories + List_Verified_test + +session "HOL-TestGen-RBT" in "unit/RBT" = "HOL-TestGen" + + theories + RBT_def + RBT_test + +session "HOL-TestGen-AVL" in "unit/AVL" = "HOL-TestGen" + + theories + AVL_def + AVL_test + +session "HOL-TestGen-SharedMemory" in "sequence/SharedMemory" = "HOL-TestGen" + + options [quick_and_dirty] + theories + SharedMemory_test + +session "HOL-TestGen-Bank" in "sequence/Bank" = "HOL-TestGen" + + options [quick_and_dirty, document = pdf,document_variants="document:outline=/proof,/ML",document_output=output] + theories + Bank + NonDetBank + document_files + "root.tex" + "root.bib" + "main.tex" + "titlepage.tex" + +session "HOL-TestGen-MyKeOS" in "concurrency/MyKeOS" = "HOL-TestGen" + + options [quick_and_dirty, document = pdf,document_variants="document:outline=/proof,/ML",document_output=output] + theories + MyKeOS + MyKeOS_test + MyKeOS_test_conc + + document_files + "root.tex" + "root.bib" + "main.tex" + "titlepage.tex" + +session "HOL-TestGen-MyKeOS2" in "concurrency/MyKeOS2" = 
"HOL-TestGen" + + options [quick_and_dirty(*, document = pdf,document_variants="document:outline=/proof,/ML",document_output=output*)] + theories + MyKeOS + MyKeOS_test_conc + document_files + "root.tex" + "root.bib" + "main.tex" + "titlepage.tex" + + +session "HOL-TestGen-Sequence" in "sequence/RBT" = "HOL-TestGen" + + theories + "../../unit/RBT/RBT_def" + RBT_seq_test + RBT_pfenning_seq_test + +session "HOL-TestGen-MiniFTP" in "reactive_sequence" = "HOL-TestGen" + + theories + "MiniFTP_test" diff --git a/src/test-gen/src/ROOT b/src/test-gen/src/ROOT new file mode 100644 index 0000000..6896e05 --- /dev/null +++ b/src/test-gen/src/ROOT @@ -0,0 +1,20 @@ + +session "HOL-TestGenLib" in "test" = HOL + + description {* HOL-TestGen Basic Libraries without Code-Generators and Testgenerator *} + (*directories "/cygdrive/c/Program Files (x86)/Isabelle HOL/Isabelle2021/hol-testgen-1.9.1/hol-testgen-1.9.1/src"*) + + theories + "TestLib" + +session "HOL-TestGen" (main) = "HOL-TestGenLib" + + description {* HOL-TestGen *} + (*directories "/cygdrive/c/Program Files (x86)/Isabelle HOL/Isabelle2021/hol-testgen-1.9.1/hol-testgen-1.9.1/src"*) + theories + "codegen_fsharp/Code_String_Fsharp" + "codegen_fsharp/Code_Char_chr_Fsharp" + "codegen_fsharp/Code_Integer_Fsharp" + "codegen_fsharp/Code_Char_Fsharp" + "codegen_gdb/Code_gdb_script" + "Testing" + "IOCO" + "SharedMemory" diff --git a/src/test-gen/src/test/Automata.thy b/src/test-gen/src/test/Automata.thy new file mode 100644 index 0000000..f3018b8 --- /dev/null +++ b/src/test-gen/src/test/Automata.thy @@ -0,0 +1,391 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * Automata.thy --- the base testing theory for automatas in sequence testing. + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2007 ETH Zurich, Switzerland + * 2009-2017 Univ. Paris-Sud, France + * 2009-2015 Achim D. Brucker, Germany + * 2015-2017 The University of Sheffield, UK + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id:$ *) + +chapter {* Basic Automaton Theory *} + +theory Automata imports Main +begin + +text{* Re-Definition of the following type synonyms from Monad-Theory - +apart from that, these theories are independent. *} +type_synonym ('o, '\) MON\<^sub>S\<^sub>E = "'\ \ ('o \ '\)" (* = '\ \ ('o \ '\)option *) +type_synonym ('o, '\) MON\<^sub>S\<^sub>B = "'\ \ ('o \ '\) set" +type_synonym ('o, '\) MON\<^sub>S\<^sub>B\<^sub>E = "'\ \ (('o \ '\) set) option" + + +subsection{* Deterministic I/O Automaton *} + + +class fin = + assumes finite: "finite(UNIV::'a set)" + + +record ('\, '\) DA = + init :: "'\" + step :: "'\ \ '\ \ '\" + accept :: "'\ set" + + +typedef ('\::fin, '\::fin) DFA = "{x::('\, '\) DA. True}" +proof - + show "\x. x \ {x. True}" by auto +qed + +record ('\, '\) pDA = + init :: "'\" + step :: "'\ \ '\ \ '\" + accept :: "'\ set" + +typ "'a option" +record ('\, '\) NDA = + init :: "'\" + step :: "('\ \ '\ \ '\)set" + accept :: "'\ set" + + +type_synonym ('\, 'o, '\) ioNDA = "('\ \ 'o, '\) NDA" + +record ('\, '\) EFSM = + init :: "'\" + step :: "('\ \ '\ \ '\)set" + accept :: "'\ set" + + +record ('\, 'o, '\) det_io_atm = + init :: "'\" + step :: "'\ \ ('o, '\) MON\<^sub>S\<^sub>E" + + +subsection{* Non-deterministic I/O automaton *} + +text{* + We will use two styles of non-deterministic automaton: Labelled Transition + Systems (LTS), which are intensively used in the literature, but tend to anihilate + the difference between input and output, and non-deterministic automaton, which + make this difference explicit and which have a closer connection to monads used + for the operational aspects of testing. +*} + +text{* + There we are: labelled transition systems. +*} + +record ('\, 'o, '\) lts = + init :: "'\ set" + step :: "('\ \ ('\ \ 'o) \ '\) set" (* false : this must be "('\ \ ('\ + 'o) \ '\) set" *) + +text{* And, equivalently; non-deterministic io automata. *} + +record ('\, 'o, '\) ndet_io_atm = + init :: "'\ set" + step :: "'\ \ ('o, '\) MON\<^sub>S\<^sub>B" + + +text{* + First, we will prove the fundamental equivalence of these two notions. + + We refrain from a formal definition of explicit conversion functions + and leave this internally in this proof (i.e. the existential witnesses). +*} + +definition det2ndet :: "('\, 'o, '\) det_io_atm \ ('\, 'o, '\) ndet_io_atm" +where "det2ndet A = \ndet_io_atm.init = {det_io_atm.init A}, + ndet_io_atm.step = + \ \ \. if \ \ dom(det_io_atm.step A \) + then {the(det_io_atm.step A \ \)} + else {} \" + +text{* + The following theorem establishes the fact that deterministic + automaton can be injectively embedded in non-deterministic ones. 
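 Injectivity is immediate from the construction: @{term det2ndet} turns the single initial state into a singleton set and every defined step into a one-element result set, so the original deterministic automaton can be recovered from its image.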
+*} +lemma det2ndet_injective : "inj det2ndet" + apply(auto simp: inj_on_def det2ndet_def) + apply(tactic {* Record.split_simp_tac @{context} [] (K ~1) 1*}, simp) + apply(simp (no_asm_simp) add: fun_eq_iff, auto) + apply(drule_tac x=x in fun_cong, drule_tac x=xa in fun_cong) + apply(case_tac "xa \ dom (step x)", simp_all) + apply(case_tac "xa \ dom (stepa x)", + simp_all add: fun_eq_iff[symmetric], auto) + apply(case_tac "xa \ dom (stepa x)", auto simp: fun_eq_iff[symmetric]) + apply(erule contrapos_np, simp) + apply(drule Product_Type.split_paired_All[THEN iffD2])+ + apply(simp only: Option.not_Some_eq) +done + + +text{* + We distinguish two forms of determinism - global determinism, where for each + state and input \emph{at most} one output-successor state + is assigned. +*} +definition deterministic :: "('\, 'o, '\) ndet_io_atm \ bool" +where "deterministic atm = (((\ x. ndet_io_atm.init atm = {x}) \ + (\ \ out. \ p1 \ step atm \ out. + \ p2 \ step atm \ out. + p1 = p2)))" + +text{* In contrast, transition relations *} +definition \deterministic :: "('\, 'o, '\) ndet_io_atm \ bool" +where "\deterministic atm = (\ x. ndet_io_atm.init atm = {x} \ + (\ \ out. + \ p1 \ step atm \ out. + \ p2 \ step atm \ out. + fst p1 = fst p2 \ snd p1 = snd p2))" + + +lemma det2ndet_deterministic: "deterministic (det2ndet atm)" + by(auto simp:deterministic_def det2ndet_def) + +lemma det2ndet_\deterministic: "\deterministic (det2ndet atm)" + by(auto simp: \deterministic_def det2ndet_def) + + +text{* + The following theorem establishes the isomorphism of the two concepts + IO-automata and LTS. We will therefore concentrate in the sequel on IO-Automata, + which have a slightly more realistic operational behaviour: you give the program + under test an input and get a possible set of responses rather than "agreeing + with the program under test" on a set of input-output-pairs. +*} + +definition ndet2lts :: "('\, 'o, '\) ndet_io_atm \ ('\, 'o, '\) lts" +where "ndet2lts A = \lts.init = init A, + lts.step = {(s,io,s').(snd io,s') \ step A (fst io) s}\" + +definition lts2ndet :: " ('\,'o,'\) lts \ ('\, 'o, '\) ndet_io_atm" +where "lts2ndet A = \init = lts.init A, + step = \ i s. {(out,s'). (s, (i,out), s') + \ lts.step A}\" + +lemma ndet_io_atm_isomorph_lts : "bij ndet2lts" + apply (auto simp: bij_def inj_on_def surj_def ndet2lts_def) + apply (tactic {* Record.split_simp_tac @{context} [] (K ~1) 1*}, simp) + apply (rule ext, rule ext, simp add: set_eq_iff) + apply (rule_tac x = "lts2ndet y" in exI) + apply (simp add: lts2ndet_def) + done + + +(* Lemma missing: for deterministic ndet_atm's, det2ndet is even bijective, +i.e the definition above is indeed a characterization. *) + +text{* + The following well-formedness property is important: for every state, + there is a valid transition. Otherwise, some states may never be part of an + (infinite) trace. +*} + +definition is_enabled :: "['\ \ ('o, '\) MON\<^sub>S\<^sub>B, '\ ] \ bool" +where "is_enabled rel \ = (\ \. rel \ \ \ {})" + +definition is_enabled' :: "['\ \ ('o, '\) MON\<^sub>S\<^sub>E, '\ ] \ bool" +where "is_enabled' rel \ = (\ \. \ \ dom(rel \))" + + +definition live_wff:: "('\, 'o, '\) ndet_io_atm \ bool" +where "live_wff atm = (\ \. \ \. step atm \ \ \ {})" + +lemma life_wff_charn: "live_wff atm = (\ \. 
is_enabled (step atm) \)" + by(auto simp: live_wff_def is_enabled_def) + +text{* + There are essentially two approaches: either we disallow non-enabled transition + systems---via life\_wff\_charn---or we restrict our machinery for traces and prefixed closed + sets of runs over them +*} + +section{* Rich Traces and its Derivatives *} +text{* + The easiest way to define the concept of traces is on LTS. Via the injections described + above, we can define notions like deterministic automata rich trace, and i/o automata rich + trace. Moreover, we can easily project event traces or state traces from rich traces. *} + +type_synonym ('\, 'o, '\) trace = "nat \ ('\ \ ('\ \ 'o) \ '\)" +type_synonym ('\, 'o) etrace = "nat \ ('\ \ 'o)" (* event traces *) +type_synonym '\ \trace = "nat \ '\" +type_synonym '\ in_trace = "nat \ '\" +type_synonym 'o out_trace = "nat \ 'o" +type_synonym ('\, 'o, '\) run = "('\ \ ('\ \ 'o) \ '\) list" +type_synonym ('\, 'o) erun = "('\ \ 'o) list" +type_synonym '\ \run = "'\ list" +type_synonym '\ in_run = "'\ list" +type_synonym 'o out_run = "'o list" + +definition rtraces ::"('\, 'o, '\) ndet_io_atm \ ('\, 'o, '\) trace set" +where "rtraces atm = { t. fst(t 0) \ init atm \ + (\ n. fst(t (Suc n)) = snd(snd(t n))) \ + (\ n. if is_enabled (step atm) (fst(t n)) + then t n \ {(s,io,s'). (snd io,s') + \ step atm (fst io) s} + else t n = (fst(t n),undefined,fst(t n)))}" + +lemma init_rtraces[elim!]: "t \ rtraces atm \ fst(t 0) \ init atm" +by(auto simp: rtraces_def) + +lemma post_is_pre_state[elim!]: "t \ rtraces atm \ fst(t (Suc n)) = snd(snd(t n))" +by(auto simp: rtraces_def) + + +lemma enabled_transition[elim!]: +"\t \ rtraces atm; is_enabled (step atm) (fst(t n)) \ + \ t n \ {(s,io,s'). (snd io,s') \ step atm (fst io) s}" +apply(simp add: rtraces_def split_def, safe) +apply (erule allE)+ +apply (auto simp add: split: if_split_asm) +done + +lemma nonenabled_transition[elim!]: "\t \ rtraces atm; \ is_enabled (step atm) (fst(t n)) \ + \ t n = (fst(t n),undefined,fst(t n))" +by(simp add: rtraces_def split_def) + + +text{* + The latter definition solves the problem of inherently finite traces, i.e. those that reach + a state in which they are no longer enabled. They are represented by stuttering steps on + the same state. +*} + +definition fin_rtraces :: "('\, 'o, '\) ndet_io_atm \ ('\, 'o, '\) trace set" +where "fin_rtraces atm = { t . t \ rtraces atm \ + (\ n. \ is_enabled (step atm) (fst(t n)))}" + +lemma fin_rtraces_are_rtraces : "fin_rtraces atm \ rtraces atm" +by(auto simp: rtraces_def fin_rtraces_def) + + +definition \traces ::"('\, 'o, '\) ndet_io_atm \ '\ \trace set" +where "\traces atm = {t . \ rt \ rtraces atm. t = fst o rt }" + +definition etraces ::"('\, 'o, '\) ndet_io_atm \ ('\, 'o) etrace set" +where "etraces atm = {t . \ rt \ rtraces atm. t = fst o snd o rt }" + +definition in_trace :: "('\, 'o) etrace \ '\ in_trace" +where "in_trace rt = fst o rt" + +definition out_trace :: "('\, 'o) etrace \ 'o out_trace" +where "out_trace rt = snd o rt" + +definition prefixes :: "(nat \ '\) set \ '\ list set" +where "prefixes ts = {l. \ t \ ts. \ (n::int). l = map (t o nat) [0..n]}" + +definition rprefixes :: "['\ \ ('o, '\) MON\<^sub>S\<^sub>B, + ('\, 'o, '\) trace set] \ ('\, 'o, '\) run set" +where "rprefixes rel ts = {l. \ t \ ts. \ n. 
(is_enabled rel (fst(t (nat n))) \ + l = map (t o nat) [0..n])}" +definition eprefixes :: "['\ \ ('o, '\) MON\<^sub>S\<^sub>B, + ('\, 'o, '\) trace set] \ ('\, 'o) erun set" +where "eprefixes rel ts = (map (fst o snd)) ` (rprefixes rel ts)" + +definition \prefixes :: "['\ \ ('o, '\) MON\<^sub>S\<^sub>B, + ('\, 'o, '\) trace set] \ '\ \run set" +where "\prefixes rel ts = (map fst) ` (rprefixes rel ts)" + + + +section{* Extensions: Automata with Explicit Final States *} + + +text{* + We model a few widely used variants of automata as record extensions. In particular, we + define automata with final states and internal (output) actions. +*} + +record ('\, 'o, '\) det_io_atm' = "('\, 'o, '\) det_io_atm" + + final :: "'\ set" + +text{* + A natural well-formedness property to be required from this type of atm is as follows: whenever + an atm' is in a final state, the transition operation is undefined. +*} + +definition final_wff:: "('\, 'o, '\) det_io_atm' \ bool" +where "final_wff atm' = + (\\ \ final atm'. \\. \ \ dom (det_io_atm.step atm' \))" + +text{* + Another extension provides the concept of internal actions, which are considered as part of + the output alphabet here. If internal actions are also used for synchronization, further + extensions admitting internal input actions will be necessary, too, which we do not model here. +*} + +record ('\, 'o, '\) det_io_atm'' = "('\, 'o, '\) det_io_atm'" + + internal :: "'o set" + +text{* + A natural well-formedness property to be required from this type of atm is as follows: whenever + an atm' is in a final state, the transition operation is required to provide a state that is + again final and an output that is considered internal. +*} + +definition final_wff2:: "('\, 'o, '\) det_io_atm'' \ bool" +where "final_wff2 atm'' = (\\ \ final atm''. + \ \. \ \ dom (det_io_atm.step atm'' \) \ + (let (out, \') = the(det_io_atm.step atm'' \ \) + in out \ internal atm'' \ \' \ final atm''))" + +text{* + Of course, for this type of extended automata, it is also possible to impose the additional + requirement that the step function is total---undefined steps would then be represented as + steps leading to final states. + + The standard extensions on deterministic automata are also redefined for the non-deterministic + (specification) case. +*} + +record ('\, 'o, '\) ndet_io_atm' = "('\, 'o, '\) ndet_io_atm" + + final :: "'\ set" + +definition final_wff_ndet_io_atm2 :: "('\, 'o, '\) ndet_io_atm' \ bool" + where "final_wff_ndet_io_atm2 atm' = (\\ \ final atm'. \\. ndet_io_atm.step atm' \ \ = {})" + +record ('\, 'o, '\) ndet_io_atm'' = "('\, 'o, '\) ndet_io_atm'" + + internal :: "'o set" + +definition final_wff2_ndet_io_atm2 :: "('\, 'o, '\) ndet_io_atm'' \ bool" + where "final_wff2_ndet_io_atm2 atm'' = + (\\ \ final atm''. + \\. step atm'' \ \ \ {} \ step atm'' \ \ \ internal atm'' \ final atm'')" + +end diff --git a/src/test-gen/src/test/EFSM_Toolkit.thy b/src/test-gen/src/test/EFSM_Toolkit.thy new file mode 100644 index 0000000..67205c8 --- /dev/null +++ b/src/test-gen/src/test/EFSM_Toolkit.thy @@ -0,0 +1,167 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * TestRefinements.thy --- for sequence testing. + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2007, ETH Zurich, Switzerland + * 2009 B. Wolff, Univ. Paris-Sud, France + * 2009 Achim D. Brucker, Germany + * + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id:$ *) + +chapter {* Test-Refinements (IOCO and Friends) *} + +theory EFSM_Toolkit imports Monads Automata +begin + + +definition impl :: "['\\'\\bool, '\ \ ('o,'\)MON\<^sub>S\<^sub>B] \ '\ \ ('o,'\)MON\<^sub>S\<^sub>E" +where "impl pre post \ = (\ \. if pre \ \ + then Some(SOME(out,\'). (out,\') \ post \ \) + else undefined)" + +definition strong_impl :: "['\\'\\bool, '\\('o,'\)MON\<^sub>S\<^sub>B] \ '\\('o, '\)MON\<^sub>S\<^sub>E" +where "strong_impl pre post \ = + (\ \. if pre \ \ + then Some(SOME(out,\'). (out,\') \ post \ \) + else None)" + +definition strong_impl' :: "['\\'\\bool, '\\('o,'\)MON\<^sub>S\<^sub>B] \ '\\('o, '\)MON\<^sub>S\<^sub>E" +where "strong_impl' pre post \ = + (\ \. if pre \ \ + then Some(SOME(out,\'). (out,\') \ post \ \) + else + (if post \ \ \ {} + then Some(SOME(out,\'). (out,\') \ post \ \) + else None))" + + +definition implementable :: "['\ \ '\ \ bool,'\ \ ('o,'\)MON\<^sub>S\<^sub>B] \ bool" +where "implementable pre post =(\ \ \. pre \ \ \(\ out \'. (out,\') \ post \ \ ))" + +definition is_strong_impl :: "['\ \ '\ \ bool, + '\ \ ('o,'\)MON\<^sub>S\<^sub>B, + '\ \ ('o, '\)MON\<^sub>S\<^sub>E] \ bool" +where "is_strong_impl pre post ioprog = + (\ \ \. (\pre \ \ \ ioprog \ \ = None) \ + (pre \ \ \ (\ x. 
ioprog \ \ = Some x)))" + +lemma is_strong_impl : + "is_strong_impl pre post (strong_impl pre post)" +by(simp add: is_strong_impl_def strong_impl_def) + + +text{* This following characterization of implementable +specifications has actually a quite complicated form due to the fact +that post expects its arguments in curried form - should +be improved \ldots *} +lemma implementable_charn: + "\implementable pre post; pre \ \ \ \ + (the(strong_impl pre post \ \)) \ post \ \" +apply(auto simp: implementable_def strong_impl_def) +apply(erule_tac x=\ in allE) +apply(erule_tac x=\ in allE) +apply(simp add: Eps_case_prod) +apply(rule someI_ex, auto) +done + + +locale efsm_det = + fixes pre :: "'\ \ '\ \ bool" + fixes post :: "'\ \ '\ \ ('o \ '\) set" + fixes efsm :: "'\ \ '\ \ ('o \ '\) option" + fixes in_ev:: "'\" + fixes out_ev:: "'\ \ 'o" + fixes upd :: "'\ \ '\" + fixes E :: "'\ \ bool" + assumes SPEC: "efsm \ (strong_impl pre post)" + assumes pre_red : "\ \. pre \ in_ev = E \" + assumes post_red : "\ \. pre \ in_ev \ (SOME(res,\'). (res,\') \ post in_ev \) = (out_ev \,upd \)" +begin + +lemma impl_def:"efsm in_ev = (\\. if E \ then Some(out_ev \, upd \) else None)" + by(rule ext, auto simp: SPEC strong_impl_def pre_red post_red) + +lemma exec_succes: + "(efsm in_ev \ = Some(b,\')) = (E \ \ (b=out_ev \) \ \'= upd \)" + by(auto simp: impl_def split:if_split_asm) + + +lemma exec_failure: + "(efsm in_ev \ = None) = (\ E \)" + by(auto simp: impl_def split:if_split_asm) + + + +lemma exec_mbindFSave_If[simp]: +"(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (in_ev # S) efsm; return (P s))) = + (if E \ + then ((upd \) \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S efsm; return (P (out_ev \ # s)))) + else (\ \ (return (P []))))" +by(auto simp: exec_mbindFSave impl_def) + +lemma exec_mbindFSave_E: +assumes A:"(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (in_ev # S) efsm; return (P s)))" + and B:"E \ \ ((upd \) \ (s\mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S efsm;return(P(out_ev \ # s)))) \ Q" + and C:"\ E \ \\ \ (return (P [])) \ Q" +shows "Q" +apply(insert A, subst (asm) exec_mbindFSave_If, case_tac "E \", simp_all only: if_True if_False) +by(auto elim: B C) + + + +lemma exec_mbindFStop[simp]: +"(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (in_ev#S) efsm; return (P s))) = + (E \ \ ((upd \) \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S efsm; return (P (out_ev \ # s)))))" +apply(rule iffI) +apply(erule Monads.exec_mbindFStop_E,simp add: exec_succes,auto) +apply(subst exec_bind_SE_success[OF exec_succes[THEN iffD2]], auto) +done + + +lemma exec_mbindFStop_E: +assumes A:"(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (in_ev # S) efsm; return (P s)))" + and B:"E \ \ ((upd \) \ (s\mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S efsm;return(P(out_ev \ # s)))) \ Q" +shows "Q" +by(insert A, rule B, simp_all del: mbind'_bind) + +(* +lemmas impl_1'' = exec_failure +lemmas impl_1' = exec_succes +*) + +end + +end diff --git a/src/test-gen/src/test/Interleaving.thy b/src/test-gen/src/test/Interleaving.thy new file mode 100644 index 0000000..f63c0ea --- /dev/null +++ b/src/test-gen/src/test/Interleaving.thy @@ -0,0 +1,244 @@ +theory Interleaving +imports Main +begin + +(* +fun interleave :: "'a list => 'a list => 'a list set" +where "interleave [] [] = {[]}" + 
|"interleave (A) [] = {A}" + |"interleave [] B = {B}" + |"interleave (a#A)(b#B) = (let S = interleave A (b#B); + S' = interleave (a#A) (B) + in (\ x. a # x) ` S \ (\ x. b # x) ` S')" +*) + +fun interleave :: "'a list => 'a list => 'a list set" +where "interleave [] [] = {[]}" + | "interleave (A) [] = {A}" + | "interleave [] B = {B}" + |"interleave (a # A) (b # B) = + (\x. a # x) ` interleave A (b # B) \ + (\x. b # x) ` interleave (a # A) B" + +find_theorems interleave + +value"interleave [1::int,2,3] [11,12]" + + +fun interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t :: "'a list => 'a list => 'a list list" +where "interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t [] [] = [[]]" + |"interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t (A) [] = [A]" + |"interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t [] B = [B]" + |"interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t (a#A)(b#B) = (let S = interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t A (b#B); + S' = interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t (a#A) (B) + in map (\ x. a # x) S @ map (\ x. b # x) S')" + + +value "interleave' [1::int,2,1,2] [2,1]" +value "interleave [1::int,2] [1]" + +(* Junk --- should be associativ application of interleave *) +fun interleave2 :: "'a list => 'a list => 'a list => 'a list set" +where "interleave2 [] [] [] = {[]}" + |"interleave2 (A) [] [] = {A}" + |"interleave2 [] B [] = {B}" + |"interleave2 [] [] C = {C}" + |"interleave2 (A)(B) [] = interleave A B" + |"interleave2 (A)[] (C) = interleave A C" + |"interleave2 [] B (C) = interleave B C" + |"interleave2 (a#A)(b#B) (c#C) = (let S = interleave2 A (b#B) (c#C) ; + S' = interleave2 (a#A) (B) (c#C) ; + S'' = interleave2 (a#A) (b#B) (C) + in (\ x. a # x) ` ( S) \ (\ x. b # x) ` S' \ (\ x. c # x) ` S'')" + +lemma replace : "interleave2 A B C = ((\x\interleave A B. interleave X C))" +oops + +(* +fun interleave1 :: "'a list => 'a list => 'a list => 'a list => 'a list set" +where "interleave1 [] [] [] [] = {[]}" + |"interleave1 (A) [] [] [] = {A}" + |"interleave1 [] B [] []= {B}" + |"interleave1 [] [] C []= {C}" + |"interleave1 [] [] [] D= {D}" + |"interleave1 (A)(B) (C) []= interleave2 A B C" + |"interleave1 (A)(B) [] D = interleave2 A B D" + |"interleave1 (A)[] (C) D= interleave2 A C D" + |"interleave1 [] B (C) D= interleave2 B C D" + |"interleave1 (a#A)(b#B) (c#C) (d#D) = (let S = interleave1 A (b#B) (c#C) (d#D); + S' = interleave1 (a#A) (B) (c#C) (d#D); + S'' = interleave1 (a#A) (b#B) (C) (d#D); + S'''= interleave1 (a#A) (b#B) (c#C) (D) + in (\ x. a # x) ` S \ (\ x. b # x) ` S' \ (\ x. c # x) ` S'' \ (\ x. d # x) ` S''')" + +value "int(card(interleave1 [] [] [2::int,1,2,1] [1,2]))" +*) +fun bus_interleave :: " ('a::order) list \ 'a list \ 'a list set" +where "bus_interleave [] [] = {[]}" + |"bus_interleave (A) [] = {A}" + |"bus_interleave [] B = {B}" + |"bus_interleave (a#A)(b#B) = + (if a \ b then (\ x. a # x) ` bus_interleave A (b#B) + else if b \ a then (\ x. b # x) ` bus_interleave (a#A) (B) + else (let S = bus_interleave A (b#B); + S' = bus_interleave (a#A) (B) + in (\ x. a # x) ` S \ (\ x. b # x) ` S'))" + +fun sync_interleave :: "('a \ bool) \ ('a \ 'a \ bool) \ ('a \ bool) \'a list \ 'a list \ 'a list set" +where "sync_interleave L S R [] [] = {[]}" + |"sync_interleave L S R (A) [] = {A}" + |"sync_interleave L S R [] B = {B}" + |"sync_interleave L S R (a#A)(b#B) = + (if L a \ \ L b then sync_interleave L S R (A) (b#B) + else if \ L a \ L b then sync_interleave L S R (a#A) (B) + else + if L a \ L b \ S a b then (\ x. 
a # x) ` sync_interleave L S R (A) (B) + else (let IL = sync_interleave L S R A (b#B); + IL' = sync_interleave L S R (a#A) (B) + in (\ x. a # x) ` IL \ (\ x. b # x) ` IL'))" + +fun sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t :: "'a \'a list \ 'a list \ 'a list list" +where "sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n [] [] = [[]]" + |"sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n (A) [] = [A]" + |"sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n [] B = [B]" + |"sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n (a#A)(b#B) = + (if a = n \ b \ n then sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n (A) (b#B) + else if a \ n \ b = n then sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n (a#A) (B) + else + if a = n \ b= n then map (\ x. n # x) (sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n (A) (B) ) + else (let S = sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n A (b#B); + S' = sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n (a#A) (B) + in map (\ x. a # x) S @ map (\ x. b # x) S'))" + +value "((sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t 2 [1::int,3,2 ] [1,2]))" + + + +value "sync_interleave' 2 [1::int,3,2 ] [1,2]" + +fun sync_interleave1 :: "'a list \'a list \ 'a list \ 'a list set" +where + "sync_interleave1 [] [] [] = {[]}" + |"sync_interleave1 n [] [] = {[]}" + |"sync_interleave1 [] A [] = {A}" + |"sync_interleave1 [] [] B = {B}" + |"sync_interleave1 n (A) [] = {A}" + |"sync_interleave1 n [] B = {B}" + |"sync_interleave1 [] A B = interleave A B" + |"sync_interleave1 (n#N) (a#A)(b#B) = + (if a = n \ b \ n then sync_interleave1 (n#N) (A) (b#B) + else if a \ n \ b = n then sync_interleave1 (n#N) (a#A) (B) + else + if a = n \ b= n then (\ x. n # x) ` sync_interleave1 (n#N) (A) (B) + else + (let S = sync_interleave1 (n#N) A (b#B); + S' = sync_interleave1 (n#N) (a#A) (B) + in (\ x. a # x) ` S \ (\ x. b # x) ` S'))" + +value "((sync_interleave1 [1,3] [1::int,3,2 ] [1,2]))" +value "((sync_interleave1 [3,1] [1::int,3,2 ] [1,2]))" +value "(sync_interleave1 [3,1] [1::int,2 ] [1,3,2])" + +find_theorems " interleave" +value "int (card (interleave [1::int,2,3,4 ] [8,9,10,11]))" + +term" \ X" + + +lemma "x \ set A \ x \ \(set ` interleave A B)" +proof (induct A B rule: interleave.induct) + case 1 + then show ?case by simp +next + case (2 a A) + then show ?case by simp +next + case (3 b B) + then show ?case by simp +next + case (4 a A b B) + from 4 have "x \ \(set ` interleave (a # A) B)" by simp + then show ?case by auto +qed + + +lemma commute1[simp]: "interleave A [] = interleave [] A" by(case_tac A, simp_all) + + +lemma interleave_commute'[rule_format]: "\B. interleave A B = interleave B A" +proof(induct A) + show "\B. interleave [] B = interleave B []" by(simp) +next + show "\a A. \B. interleave A B = interleave B A \ \B. interleave (a # A) B = interleave B (a # A)" + apply(rule allI,induct_tac B, simp_all) + apply(subst Set.Un_commute) + apply (elim allE) + apply (erule ssubst)+ + apply (simp only: triv_forall_equality) + done +qed + + +lemma interleave1 [simp] : "(x \ interleave [] A) = (x = A)" +by(induct A, simp_all only: interleave.simps, simp_all) + + +lemma interleave2 [simp] : "(x \ interleave A []) = (x = A)" +by(induct A, simp_all only: interleave.simps, simp_all) + + + +lemma length_interleave_lemma: + "\n < k . length A + length B \ n + \ (x::'a list) \ (interleave A B) + \ length x = (length A + length B)" +proof(induct k arbitrary: A B x) + case 0 fix A::"'a list" and B::"'a list" and x::"'a list" + have * : "\A B. 
length A + length B \ 0 \ length A = 0" by auto + have ** : "\A B. length A + length B \ 0 \ length B = 0" by auto + show "\n<0. length A + length B \ n + \ x \ interleave A B + \ length x = length A + length B" + by auto + case (Suc k) fix A::"'a list" and B::"'a list" and x::"'a list" + assume H: "\(A::'a list) B x. \n n + \ x \ interleave A B + \ length x = length A + length B" + have H': "\(A::'a list) B x n. x \ interleave A B \ n length A + length B \ n + \ length x = length A + length B" + by(erule H[THEN spec, THEN mp, THEN mp, THEN mp], auto) + + show "\n n + \ x \ interleave A B + \ length x = length A + length B" + apply(case_tac "A", simp_all) + apply(case_tac "B", simp_all) + apply(auto) + apply(case_tac "n", simp_all) + apply(case_tac "list",auto) + apply(rule trans, erule H', auto) + apply(rule trans, erule H', auto) + apply(case_tac "n", simp_all) + apply(case_tac "list",auto) + apply(rule trans, erule H', auto) + apply(rule trans, erule H', auto) + done +qed + + +lemma length_interleave: "x \ (interleave A B) \ length x = (length A + length B)" +apply(rule length_interleave_lemma[rule_format, of "length A + length B" "length A + length B + 1"]) +by auto + + +lemma cardinality: "card(interleave A B) = ( length A * length B + 1)" (* really ? *) +oops + +lemma causality : + "\ k i. i < k \ (\ C \ (interleave A B). (\ l m. l < m \ A!i = C!l \ A!k = C!m))" (* really ? *) +oops + + + +end + diff --git a/src/test-gen/src/test/Monads.thy b/src/test-gen/src/test/Monads.thy new file mode 100644 index 0000000..b03d619 --- /dev/null +++ b/src/test-gen/src/test/Monads.thy @@ -0,0 +1,1256 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * Monads.thy --- a base testing theory for sequential computations. + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2007 ETH Zurich, Switzerland + * 2009-2017 Univ. Paris-Sud, France + * 2009-2012 Achim D. Brucker, Germany + * 2015-2017 University Sheffield, UK + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id:$ *) + +chapter {* Basic Monad Theory for Sequential Computations *} + +theory Monads imports Main +begin + +section{* A Generic (Approximative) Notion of Monad *} + +text {* + A (fake) monad is a structure with a return and a bind satisfying three laws. + Since we cannot use generic type constructors in HOL, + we formulate monads using only one type 'a and its monadic representation 'am. +*} + +locale Monad = +fixes returm :: "'a \ 'am" + and bind :: "'am \ ('a \ 'am) \ 'am" (infixr ">>=" 70) +assumes left_unit : "(returm x) >>= f = f x" + and right_unit: "m >>= returm = m" + and assoc : "(m >>= f) >>= g = m >>= (\ x . f x >>= g)" +begin + +definition pipe :: "'am \ 'am \ 'am" +where "pipe f g \ bind f (\ _. g)" + +lemma pipe_left_unit : "pipe (returm x) f = f" +by(simp add: left_unit pipe_def) + + +lemma pipe_right_unit : "pipe f (returm x) = f" +oops (* Not possible to formulate in this Monad Framework generically ! ! ! + In the concrete SE-Monad, we have the (interesting) special case: + "(m;- (return ())) = m" *) + +lemma pipe_assoc : "pipe f (pipe g h) = pipe (pipe f g) h" +by(simp add: assoc pipe_def) + +primrec Mrun :: "('b \ 'a \ 'am) \ 'b list \ 'a \ 'am" +where "Mrun m [] = returm" + | "Mrun m (b#bs) = (\ a . Mrun m bs a >>= m b)" + +end + +section{*General Framework for Monad-based Sequence-Test *} +text{* As such, Higher-order Logic as a purely functional specification + formalism has no built-in mechanism for state and state-transitions. + Forms of testing involving state require therefore explicit mechanisms + for their treatment inside the logic; a well-known technique to model + states inside purely functional languages are \emph{monads} made popular + by Wadler and Moggi and extensively used in Haskell. \HOL is powerful + enough to represent the most important \emph{instances} of standard monads; + however, it is not possible to represent monads as such due to well-known + limitations of the Hindley-Milner type-system (no type-constructor-classes). *} + +text{* Here is a variant for state-exception monads, that models precisely + transition functions with preconditions. Next, we declare the + state-backtrack-monad. In all of them, our concept of i/o stepping + functions can be formulated; these are functions mapping input + to a given monad. Later on, we will build the usual concepts of: + \begin{enumerate} + \item deterministic i/o automata, + \item non-deterministic i/o automata, and + \item labelled transition systems (LTS) + \end{enumerate} +*} + +subsection{* Definition : Standard State Exception Monads *} + +subsubsection{* Definition : Core Types and Operators *} + +type_synonym ('o, '\) MON\<^sub>S\<^sub>E = "'\ \ ('o \ '\)" (* = '\ \ ('o \ '\)option *) + +definition bind_SE :: "('o,'\)MON\<^sub>S\<^sub>E \ ('o \ ('o','\)MON\<^sub>S\<^sub>E) \ ('o','\)MON\<^sub>S\<^sub>E" +where "bind_SE f g = (\\. 
case f \ of None \ None + | Some (out, \') \ g out \')" + +notation bind_SE ("bind\<^sub>S\<^sub>E") + +syntax (xsymbols) + "_bind_SE" :: "[pttrn,('o,'\)MON\<^sub>S\<^sub>E,('o','\)MON\<^sub>S\<^sub>E] \ ('o','\)MON\<^sub>S\<^sub>E" + ("(2 _ \ _; _)" [5,8,8]8) +translations + "x \ f; g" == "CONST bind_SE f (% x . g)" + +definition unit_SE :: "'o \ ('o, '\)MON\<^sub>S\<^sub>E" ("(return _)" 8) +where "unit_SE e = (\\. Some(e,\))" +notation unit_SE ("unit\<^sub>S\<^sub>E") + +text{* In the following, we prove the required Monad-laws *} + +lemma bind_right_unit[simp]: "(x \ m; return x) = m" + apply (simp add: unit_SE_def bind_SE_def) + apply (rule ext) + apply (case_tac "m \", simp_all) + done + +lemma bind_left_unit [simp]: "(x \ return c; P x) = P c" + by (simp add: unit_SE_def bind_SE_def) + +lemma bind_assoc[simp]: "(y \ (x \ m; k x); h y) = (x \ m; (y \ k x; h y))" + apply (simp add: unit_SE_def bind_SE_def, rule ext) + apply (case_tac "m \", simp_all) + apply (case_tac "a", simp_all) + done + +interpretation SE : Monad unit_SE bind_SE + by unfold_locales (simp_all) + + +subsubsection{* Definition : More Operators and their Properties *} + +definition fail_SE :: "('o, '\)MON\<^sub>S\<^sub>E" +where "fail_SE = (\\. None)" +notation fail_SE ("fail\<^sub>S\<^sub>E") + +definition assert_SE :: "('\ \ bool) \ (bool, '\)MON\<^sub>S\<^sub>E" +where "assert_SE P = (\\. if P \ then Some(True,\) else None)" +notation assert_SE ("assert\<^sub>S\<^sub>E") + +definition assume_SE :: "('\ \ bool) \ (unit, '\)MON\<^sub>S\<^sub>E" +where "assume_SE P = (\\. if \\ . P \ then Some((), SOME \ . P \) else None)" +notation assume_SE ("assume\<^sub>S\<^sub>E") + + +lemma bind_left_fail_SE[simp] : "(x \ fail\<^sub>S\<^sub>E; P x) = fail\<^sub>S\<^sub>E" + by (simp add: fail_SE_def bind_SE_def) + + +text{* We also provide a "Pipe-free" - variant of the bind operator. +Just a "standard" programming sequential operator without output frills. *} +(* TODO: Eliminate/Modify this. Is a consequence of the Monad-Instantiation. *) + + +definition bind_SE' :: "[('\, '\)MON\<^sub>S\<^sub>E, ('\, '\)MON\<^sub>S\<^sub>E] \ ('\, '\)MON\<^sub>S\<^sub>E" (infixr ";-" 60) +where "f ;- g = (_ \ f ; g)" + +lemma bind_assoc'[simp]: "((m;- k);- h ) = (m;- (k;- h))" +by(simp add:bind_SE'_def) + + +lemma bind_left_unit' [simp]: "((return c);- P) = P" + by (simp add: bind_SE'_def) + + +lemma bind_left_fail_SE'[simp]: "(fail\<^sub>S\<^sub>E;- P) = fail\<^sub>S\<^sub>E" + by (simp add: bind_SE'_def) + +lemma bind_right_unit'[simp]: "(m;- (return ())) = m" + by (simp add: bind_SE'_def) + +text{* The bind-operator in the state-exception monad yields already + a semantics for the concept of an input sequence on the meta-level: *} +lemma syntax_test: "(o1 \ f1 ; o2 \ f2; return (post o1 o2)) = X" +oops + +definition if_SE :: "['\ \ bool, ('\, '\)MON\<^sub>S\<^sub>E, ('\, '\)MON\<^sub>S\<^sub>E] \ ('\, '\)MON\<^sub>S\<^sub>E" +where "if_SE c E F = (\\. 
if c \ then E \ else F \)" + +syntax (xsymbols) + "_if_SE" :: "['\ \ bool,('o,'\)MON\<^sub>S\<^sub>E,('o','\)MON\<^sub>S\<^sub>E] \ ('o','\)MON\<^sub>S\<^sub>E" + ("(if\<^sub>S\<^sub>E _ then _ else _fi)" [5,8,8]8) +translations + "(if\<^sub>S\<^sub>E cond then T1 else T2 fi)" == "CONST if_SE cond T1 T2" + + +subsubsection{* Theory of a Monadic While *} + +text{* Prerequisites *} +fun replicator :: "[('a, '\)MON\<^sub>S\<^sub>E, nat] \ (unit, '\)MON\<^sub>S\<^sub>E" (infixr "^^^" 60) +where "f ^^^ 0 = (return ())" + | "f ^^^ (Suc n) = (f ;- f ^^^ n)" + + +fun replicator2 :: "[('a, '\)MON\<^sub>S\<^sub>E, nat, ('b, '\)MON\<^sub>S\<^sub>E] \ ('b, '\)MON\<^sub>S\<^sub>E" (infixr "^:^" 60) +where "(f ^:^ 0) M = (M )" + | "(f ^:^ (Suc n)) M = (f ;- ((f ^:^ n) M))" + + +text{* First Step : Establishing an embedding between partial functions and relations *} +(* plongement *) +definition Mon2Rel :: "(unit, '\)MON\<^sub>S\<^sub>E \ ('\ \ '\) set" +where "Mon2Rel f = {(x, y). (f x = Some((), y))}" +(* ressortir *) +definition Rel2Mon :: " ('\ \ '\) set \ (unit, '\)MON\<^sub>S\<^sub>E " +where "Rel2Mon S = (\ \. if \\'. (\, \') \ S then Some((), SOME \'. (\, \') \ S) else None)" + +lemma Mon2Rel_Rel2Mon_id: assumes det:"single_valued R" shows "(Mon2Rel \ Rel2Mon) R = R" +apply (simp add: comp_def Mon2Rel_def Rel2Mon_def,auto) +apply (case_tac "\\'. (a, \') \ R", auto) +apply (subst (2) some_eq_ex) +using det[simplified single_valued_def] by auto + + +lemma Rel2Mon_Id: "(Rel2Mon \ Mon2Rel) x = x" +apply (rule ext) +apply (auto simp: comp_def Mon2Rel_def Rel2Mon_def) +apply (erule contrapos_pp, drule HOL.not_sym, simp) +done + +lemma single_valued_Mon2Rel: "single_valued (Mon2Rel B)" +by (auto simp: single_valued_def Mon2Rel_def) + +text{* Second Step : Proving an induction principle allowing to establish that lfp remains + deterministic *} + + +(* A little complete partial order theory due to Tobias Nipkow *) +definition chain :: "(nat \ 'a set) \ bool" +where "chain S = (\i. S i \ S(Suc i))" + +lemma chain_total: "chain S ==> S i \ S j \ S j \ S i" +by (metis chain_def le_cases lift_Suc_mono_le) + +definition cont :: "('a set => 'b set) => bool" +where "cont f = (\S. chain S \ f(UN n. S n) = (UN n. f(S n)))" + +lemma mono_if_cont: fixes f :: "'a set \ 'b set" + assumes "cont f" shows "mono f" +proof + fix a b :: "'a set" assume "a \ b" + let ?S = "\n::nat. if n=0 then a else b" + have "chain ?S" using `a \ b` by(auto simp: chain_def) + hence "f(UN n. ?S n) = (UN n. f(?S n))" + using assms by(simp add: cont_def) + moreover have "(UN n. ?S n) = b" using `a \ b` by (auto split: if_splits) + moreover have "(UN n. f(?S n)) = f a \ f b" by (auto split: if_splits) + ultimately show "f a \ f b" by (metis Un_upper1) +qed + +lemma chain_iterates: fixes f :: "'a set \ 'a set" + assumes "mono f" shows "chain(\n. (f^^n) {})" +proof- + { fix n have "(f ^^ n) {} \ (f ^^ Suc n) {}" using assms + by(induction n) (auto simp: mono_def) } + thus ?thesis by(auto simp: chain_def) +qed + +theorem lfp_if_cont: + assumes "cont f" shows "lfp f = (\n. (f ^^ n) {})" (is "_ = ?U") +proof + show "lfp f \ ?U" + proof (rule lfp_lowerbound) + have "f ?U = (UN n. 
(f^^Suc n){})" + using chain_iterates[OF mono_if_cont[OF assms]] assms + by(simp add: cont_def) + also have "\ = (f^^0){} \ \" by simp + also have "\ = ?U" + apply(auto simp del: funpow.simps) + by (metis empty_iff funpow_0 old.nat.exhaust) + finally show "f ?U \ ?U" by simp + qed +next + { fix n p assume "f p \ p" + have "(f^^n){} \ p" + proof(induction n) + case 0 show ?case by simp + next + case Suc + from monoD[OF mono_if_cont[OF assms] Suc] `f p \ p` + show ?case by simp + qed + } + thus "?U \ lfp f" by(auto simp: lfp_def) +qed + + +lemma single_valued_UN_chain: + assumes "chain S" "(!!n. single_valued (S n))" + shows "single_valued(UN n. S n)" +proof(auto simp: single_valued_def) + fix m n x y z assume "(x, y) \ S m" "(x, z) \ S n" + with chain_total[OF assms(1), of m n] assms(2) + show "y = z" by (auto simp: single_valued_def) +qed + +lemma single_valued_lfp: +fixes f :: "('a \ 'a) set \ ('a \ 'a) set" +assumes "cont f" "\r. single_valued r \ single_valued (f r)" +shows "single_valued(lfp f)" +unfolding lfp_if_cont[OF assms(1)] +proof(rule single_valued_UN_chain[OF chain_iterates[OF mono_if_cont[OF assms(1)]]]) + fix n show "single_valued ((f ^^ n) {})" + by(induction n)(auto simp: assms(2)) +qed + + +text{* Third Step: Definition of the Monadic While *} +definition \ :: "['\ \ bool,('\ \ '\) set] \ (('\ \ '\) set \ ('\ \ '\) set)" +where "\ b cd = (\cw. {(s,t). if b s then (s, t) \ cd O cw else s = t})" + + +definition while_SE :: "['\ \ bool, (unit, '\)MON\<^sub>S\<^sub>E] \ (unit, '\)MON\<^sub>S\<^sub>E" +where "while_SE c B \ (Rel2Mon(lfp(\ c (Mon2Rel B))))" + +syntax (xsymbols) + "_while_SE" :: "['\ \ bool, (unit, '\)MON\<^sub>S\<^sub>E] \ (unit, '\)MON\<^sub>S\<^sub>E" + ("(while\<^sub>S\<^sub>E _ do _ od)" [8,8]8) +translations + "while\<^sub>S\<^sub>E c do b od" == "CONST while_SE c b" + +lemma cont_\: "cont (\ c b)" +by (auto simp: cont_def \_def) + +text{* The fixpoint theory now allows us to establish that the lfp constructed over + @{term Mon2Rel} remains deterministic *} + +theorem single_valued_lfp_Mon2Rel: "single_valued (lfp(\ c (Mon2Rel B)))" +apply(rule single_valued_lfp, simp_all add: cont_\) +apply(auto simp: \_def single_valued_def) +apply(metis single_valued_Mon2Rel[of "B"] single_valued_def) +done + + +lemma Rel2Mon_if: + "Rel2Mon {(s, t). if b s then (s, t) \ Mon2Rel c O lfp (\ b (Mon2Rel c)) else s = t} \ = + (if b \ then Rel2Mon (Mon2Rel c O lfp (\ b (Mon2Rel c))) \ else Some ((), \))" +by (simp add: Rel2Mon_def) + +lemma Rel2Mon_homomorphism: + assumes determ_X: "single_valued X" and determ_Y: "single_valued Y" + shows "Rel2Mon (X O Y) = (Rel2Mon X) ;- (Rel2Mon Y)" +proof - + have relational_partial_next_in_O: "\x E F. (\y. (x, y) \ (E O F)) \ (\y. (x, y) \ E)" + by (auto) + have some_eq_intro: "\ X x y . single_valued X \ (x, y) \ X \ (SOME y. (x, y) \ X) = y" + by (auto simp: single_valued_def) + + show ?thesis +apply (simp add: Rel2Mon_def bind_SE'_def bind_SE_def) +apply (rule ext, rename_tac "\") +apply (case_tac " \ \'. 
(\, \') \ X O Y") +apply (simp only: HOL.if_True) +apply (frule relational_partial_next_in_O) +apply (auto simp: single_valued_relcomp some_eq_intro determ_X determ_Y relcomp.relcompI) +by blast +qed + + + +text{* Putting everything together, the theory of embedding and the invariance of + determinism of the while-body, gives us the usual unfold-theorem: *} +theorem while_SE_unfold: +"(while\<^sub>S\<^sub>E b do c od) = (if\<^sub>S\<^sub>E b then (c ;- (while\<^sub>S\<^sub>E b do c od)) else return () fi)" +apply (simp add: if_SE_def bind_SE'_def while_SE_def unit_SE_def) +apply (subst lfp_unfold [OF mono_if_cont, OF cont_\]) +apply (rule ext) +apply (subst \_def) +apply (auto simp: Rel2Mon_if Rel2Mon_homomorphism bind_SE'_def Rel2Mon_Id [simplified comp_def] + single_valued_Mon2Rel single_valued_lfp_Mon2Rel ) +done + + +subsection{* Chaining Executions : Definitions of Multi-bind Operators *} + +text{* In order to express execution sequences inside \HOL --- rather +than arguing over a certain pattern of terms on the meta-level --- and +in order to make our theory amenable to formal reasoning over execution sequences, +we represent them as lists of input and generalize the bind-operator +of the state-exception monad accordingly. The approach is straightforward, +but comes with a price: we have to encapsulate all input and output data +into one type, and restrict ourselves to a uniform step function. +Assume that we have a typed interface to a module with +the operations $op_1$, $op_2$, \ldots, $op_n$ with the inputs +$\iota_1$, $\iota_2$, \ldots, $\iota_n$ (outputs are treated analogously). +Then we can encode for this interface the general input - type: +\begin{displaymath} +\texttt{datatype}\ \texttt{in}\ =\ op_1\ ::\ \iota_1\ |\ ...\ |\ \iota_n +\end{displaymath} +Obviously, we loose some type-safety in this approach; we have to express +that in traces only \emph{corresponding} input and output belonging to the +same operation will occur; this form of side-conditions have to be expressed +inside \HOL. From the user perspective, this will not make much difference, +since junk-data resulting from too weak typing can be ruled out by adopted +front-ends. +*} + +text{* Note that the subsequent notion of a test-sequence allows the io stepping +function (and the special case of a program under test) to stop execution +\emph{within} the sequence; such premature terminations are characterized by an +output list which is shorter than the input list. + +Intuitively, @{text mbind} corresponds to a sequence of operation calls, separated by +";", in Java. The operation calls may fail (raising an exception), which means that +the state is maintained and the exception can still be caught at the end of the +execution sequence. + +*} + +fun mbind :: "'\ list \ ('\ \ ('o,'\) MON\<^sub>S\<^sub>E) \ ('o list,'\) MON\<^sub>S\<^sub>E" +where "mbind [] iostep \ = Some([], \)" + | "mbind (a#S) iostep \ = + (case iostep a \ of + None \ Some([], \) + | Some (out, \') \ (case mbind S iostep \' of + None \ Some([out],\') + | Some(outs,\'') \ Some(out#outs,\'')))" + +notation mbind ("mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e") (* future name: mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e *) + +text{* This definition is fail-safe; in case of an exception, the current state is maintained, + the computation as a whole is marked as success. 
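 For example, if the second step of a three-step input sequence fails, @{term mbind} discards the remaining inputs and returns the output of the first step together with the state reached after it; the overall result is still a @{term Some} value.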
+ Compare to the fail-strict variant @{text "mbind'"}: *} + +lemma mbind_unit [simp]: + "mbind [] f = (return [])" + by(rule ext, simp add: unit_SE_def) + +text{* The characteristic property of @{term mbind} --- which distinguishes it from + @{text mbind} defined in the sequel --- is that it never fails; it ``swallows'' internal + errors occuring during the computation. *} +lemma mbind_nofailure [simp]: + "mbind S f \ \ None" + apply(rule_tac x=\ in spec) + apply(induct S, auto simp:unit_SE_def) + apply(case_tac "f a x", auto) + apply(erule_tac x="b" in allE) + apply(erule exE, erule exE, simp) + done + +text{* In contrast, we define a fail-strict sequential execution operator. +He has more the characteristic to fail globally whenever one of its operation +steps fails. + +Intuitively speaking, @{text mbind'} corresponds to an execution of operations +where a results in a System-Halt. Another interpretation of @{text mbind'} is to +view it as a kind of @{term foldl} foldl over lists via @{term bind\<^sub>S\<^sub>E}.*} + +fun mbind' :: "'\ list \ ('\ \ ('o,'\) MON\<^sub>S\<^sub>E) \ ('o list,'\) MON\<^sub>S\<^sub>E" +where "mbind' [] iostep \ = Some([], \)" | + "mbind' (a#S) iostep \ = + (case iostep a \ of + None \ None + | Some (out, \') \ (case mbind' S iostep \' of + None \ None (* fail-strict *) + | Some(outs,\'') \ Some(out#outs,\'')))" +notation mbind' ("mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p") (* future name: mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p *) + +lemma mbind'_unit [simp]: + "mbind' [] f = (return [])" + by(rule ext, simp add: unit_SE_def) + +lemma mbind'_bind [simp]: + "(x \ mbind' (a#S) F; M x) = (a \ (F a); (x \ mbind' S F; M (a # x)))" + by(rule ext, rename_tac "z",simp add: bind_SE_def split: option.split) + +declare mbind'.simps[simp del] (* use only more abstract definitions *) + +text{* The next @{text mbind} sequential execution operator is called +Fail-Purge. He has more the characteristic to never fail, just "stuttering" +above operation steps that fail. Another alternative in modeling. *} + +fun mbind'' :: "'\ list \ ('\ \ ('o,'\) MON\<^sub>S\<^sub>E) \ ('o list,'\) MON\<^sub>S\<^sub>E" +where "mbind'' [] iostep \ = Some([], \)" | + "mbind'' (a#S) iostep \ = + (case iostep a \ of + None \ mbind'' S iostep \ + | Some (out, \') \ (case mbind'' S iostep \' of + None \ None (* does not occur *) + | Some(outs,\'') \ Some(out#outs,\'')))" + +notation mbind'' ("mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e") (* future name: mbind\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e\<^sub>F\<^sub>a\<^sub>i\<^sub>l *) +declare mbind''.simps[simp del] (* use only more abstract definitions *) + + +text{* mbind' as failure strict operator can be seen as a foldr on bind - + if the types would match \ldots *} + +subsubsection{* Definition : Miscellaneous Operators and their Properties *} + +definition try_SE :: "('o,'\) MON\<^sub>S\<^sub>E \ ('o option,'\) MON\<^sub>S\<^sub>E" ("try\<^sub>S\<^sub>E") +where "try\<^sub>S\<^sub>E ioprog = (\\. case ioprog \ of + None \ Some(None, \) + | Some(outs, \') \ Some(Some outs, \'))" +text{* In contrast, mbind as a failure safe operator can roughly be seen + as a foldr on bind - try: + m1 ; try m2 ; try m3; ... Note, that the rough equivalence only holds for + certain predicates in the sequence - length equivalence modulo None, + for example. 
However, if a conditional is added, the equivalence + can be made precise: *} + +lemma mbind_try: + "(x \ mbind (a#S) F; M x) = + (a' \ try\<^sub>S\<^sub>E(F a); + if a' = None + then (M []) + else (x \ mbind S F; M (the a' # x)))" +apply(rule ext) +apply(simp add: bind_SE_def try_SE_def) +apply(case_tac "F a x", auto) +apply(simp add: bind_SE_def try_SE_def) +apply(case_tac "mbind S F b", auto) +done + +text{* On this basis, a symbolic evaluation scheme can be established + that reduces mbind-code to try\_SE\_code and ite-cascades. *} + +definition alt_SE :: "[('o, '\)MON\<^sub>S\<^sub>E, ('o, '\)MON\<^sub>S\<^sub>E] \ ('o, '\)MON\<^sub>S\<^sub>E" (infixl "\\<^sub>S\<^sub>E" 10) +where "(f \\<^sub>S\<^sub>E g) = (\ \. case f \ of None \ g \ + | Some H \ Some H)" + +definition malt_SE :: "('o, '\)MON\<^sub>S\<^sub>E list \ ('o, '\)MON\<^sub>S\<^sub>E" +where "malt_SE S = foldr alt_SE S fail\<^sub>S\<^sub>E" +notation malt_SE ("\\<^sub>S\<^sub>E") + +lemma malt_SE_mt [simp]: "\\<^sub>S\<^sub>E [] = fail\<^sub>S\<^sub>E" +by(simp add: malt_SE_def) + +lemma malt_SE_cons [simp]: "\\<^sub>S\<^sub>E (a # S) = (a \\<^sub>S\<^sub>E (\\<^sub>S\<^sub>E S))" +by(simp add: malt_SE_def) + + + + +subsection{* Definition and Properties of Valid Execution Sequences*} + +text{* A key-notion in our framework is the \emph{valid} execution +sequence, \ie{} a sequence that: +\begin{enumerate} +\item terminates (not obvious since while), +\item results in a final @{term True}, +\item does not fail globally (but recall the FailSave and FailPurge + variants of @{term mbind}-operators, that handle local exceptions in + one or another way). +\end{enumerate} +Seen from an automata perspective (where the monad - operations correspond to +the step function), valid execution sequences can be used to model ``feasible paths'' +across an automaton. *} + +definition valid_SE :: "'\ \ (bool,'\) MON\<^sub>S\<^sub>E \ bool" (infix "\" 15) +where "(\ \ m) = (m \ \ None \ fst(the (m \)))" +text{* This notation consideres failures as valid -- a definition +inspired by I/O conformance. *} + +subsubsection{* Valid Execution Sequences and their Symbolic Execution *} +lemma exec_unit_SE [simp]: "(\ \ (return P)) = (P)" +by(auto simp: valid_SE_def unit_SE_def) + +lemma exec_unit_SE' [simp]: "(\\<^sub>0 \ (\\. Some (f \, \))) = (f \\<^sub>0)" +by(simp add: valid_SE_def ) + +lemma exec_fail_SE [simp]: "(\ \ fail\<^sub>S\<^sub>E) = False" +by(auto simp: valid_SE_def fail_SE_def) + +lemma exec_fail_SE'[simp]: "\(\\<^sub>0 \ (\\. None))" +by(simp add: valid_SE_def ) + +text{* The following the rules are in a sense the heart of the entire symbolic execution approach *} +lemma exec_bind_SE_failure: +"A \ = None \ \(\ \ ((s \ A ; M s)))" +by(simp add: valid_SE_def unit_SE_def bind_SE_def) + +lemma exec_bind_SE_success: +"A \ = Some(b,\') \ (\ \ ((s \ A ; M s))) = (\' \ (M b))" +by(simp add: valid_SE_def unit_SE_def bind_SE_def ) + +lemma exec_bind_SE_success': (* atomic boolean Monad "Query Functions" *) +"M \ = Some(f \,\) \ (\ \ M) = f \" +by(simp add: valid_SE_def unit_SE_def bind_SE_def ) + + + + +lemma exec_bind_SE_success'': +"\ \ ((s \ A ; M s)) \ \ v \'. the(A \) = (v,\') \ \' \ (M v)" +apply(auto simp: valid_SE_def unit_SE_def bind_SE_def) +apply(cases "A \", simp_all) +apply(drule_tac x="A \" and f=the in arg_cong, simp) +apply(rule_tac x="fst aa" in exI) +apply(rule_tac x="snd aa" in exI, auto) +done + + +lemma exec_bind_SE_success''': +"\ \ ((s \ A ; M s)) \ \ a. 
(A \) = Some a \ (snd a) \ (M (fst a))" +apply(auto simp: valid_SE_def unit_SE_def bind_SE_def) +apply(cases "A \", simp_all) +apply(drule_tac x="A \" and f=the in arg_cong, simp) +apply(rule_tac x="fst aa" in exI) +apply(rule_tac x="snd aa" in exI, auto) +done + + +lemma exec_bind_SE_success'''' : +"\ \ ((s \ A ; M s)) \ \ v \'. A \ = Some(v,\') \ \' \ (M v)" +apply(auto simp: valid_SE_def unit_SE_def bind_SE_def) +apply(cases "A \", simp_all) +apply(drule_tac x="A \" and f=the in arg_cong, simp) +apply(rule_tac x="fst aa" in exI) +apply(rule_tac x="snd aa" in exI, auto) +done + + + + +text{* Recall \verb+mbind_unit+ for the base case. *} + +lemma valid_mbind_mt : "(\ \ ( s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e [] f; unit\<^sub>S\<^sub>E (P s))) = P [] " by simp +lemma valid_mbind_mtE: "\ \ ( s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e [] f; unit\<^sub>S\<^sub>E (P s)) \ (P [] \ Q) \ Q" +by(auto simp: valid_mbind_mt) + +lemma valid_mbind'_mt : "(\ \ ( s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p [] f; unit\<^sub>S\<^sub>E (P s))) = P [] " by simp +lemma valid_mbind'_mtE: "\ \ ( s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p [] f; unit\<^sub>S\<^sub>E (P s)) \ (P [] \ Q) \ Q" +by(auto simp: valid_mbind'_mt) + +lemma valid_mbind''_mt : "(\ \ ( s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e [] f; unit\<^sub>S\<^sub>E (P s))) = P [] " +by(simp add: mbind''.simps valid_SE_def bind_SE_def unit_SE_def) +lemma valid_mbind''_mtE: "\ \ ( s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e [] f; unit\<^sub>S\<^sub>E (P s)) \ (P [] \ Q) \ Q" +by(auto simp: valid_mbind''_mt) + + +lemma exec_mbindFSave_failure: +"ioprog a \ = None \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (a#S) ioprog ; M s)) = (\ \ (M []))" +by(simp add: valid_SE_def unit_SE_def bind_SE_def) + +lemma exec_mbindFStop_failure: +"ioprog a \ = None \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (a#S) ioprog ; M s)) = (False)" +by(simp add: exec_bind_SE_failure) + +lemma exec_mbindFPurge_failure: +"ioprog a \ = None \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e (a#S) ioprog ; M s)) = (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e (S) ioprog ; M s))" +by(simp add: valid_SE_def unit_SE_def bind_SE_def mbind''.simps) + + +lemma exec_mbindFSave_success : +"ioprog a \ = Some(b,\') \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (a#S) ioprog ; M s)) = + (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S ioprog ; M (b#s)))" +unfolding valid_SE_def unit_SE_def bind_SE_def +by(cases "mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S ioprog \'", auto) + +lemma exec_mbindFStop_success : +"ioprog a \ = Some(b,\') \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (a#S) ioprog ; M s)) = + (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S ioprog ; M (b#s)))" +unfolding valid_SE_def unit_SE_def bind_SE_def +by(cases "mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S ioprog \'", auto simp: mbind'.simps) + +lemma exec_mbindFPurge_success : +"ioprog a \ = Some(b,\') \ + (\ \ (s \ 
mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e (a#S) ioprog ; M s)) = + (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e S ioprog ; M (b#s)))" +unfolding valid_SE_def unit_SE_def bind_SE_def +by(cases "mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e S ioprog \'", auto simp: mbind''.simps) + +lemma exec_mbindFSave: +"(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (a#S) ioprog ; return (P s))) = + (case ioprog a \ of + None \ (\ \ (return (P []))) + | Some(b,\') \ (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S ioprog ; return (P (b#s)))))" +apply(case_tac "ioprog a \") +apply(auto simp: exec_mbindFSave_failure exec_mbindFSave_success split: prod.splits) +done + +lemma mbind_eq_sexec: +assumes * : "\b \'. f a \ = Some(b,\') \ + (os \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S f; P (b#os)) = (os \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S f; P' (b#os))" +shows "( a \ f a; x \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S f; P (a # x)) \ = + ( a \ f a; x \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S f; P'(a # x)) \" + apply(cases "f a \ = None") + apply(subst bind_SE_def, simp) + apply(subst bind_SE_def, simp) + apply auto + apply(subst bind_SE_def, simp) + apply(subst bind_SE_def, simp) +apply(simp add: *) +done + + +lemma mbind_eq_sexec': +assumes * : "\b \'. f a \ = Some(b,\') \ + (P (b))\' = (P' (b))\'" +shows "( a \ f a; P (a)) \ = + ( a \ f a; P'(a)) \" + apply(cases "f a \ = None") + apply(subst bind_SE_def, simp) + apply(subst bind_SE_def, simp) + apply auto + apply(subst bind_SE_def, simp) + apply(subst bind_SE_def, simp) + apply(simp add: *) +done + +lemma mbind'_concat: +"(os \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (S@T) f; P os) = (os \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S f; os' \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p T f; P (os @ os'))" +proof (rule ext, rename_tac "\", induct S arbitrary: \ P) + case Nil show ?case by simp +next + case (Cons a S) show ?case + apply(insert Cons.hyps, simp) + by(rule mbind_eq_sexec',simp) +qed + +lemma assert_suffix_inv : + "\ \ ( _ \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p xs istep; assert\<^sub>S\<^sub>E (P)) + \ \\. P \ \ (\ \ (_ \ istep x; assert\<^sub>S\<^sub>E (P))) + \ \ \ ( _ \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (xs @ [x]) istep; assert\<^sub>S\<^sub>E (P))" +apply(subst mbind'_concat, simp) +unfolding bind_SE_def assert_SE_def valid_SE_def +apply(auto split: option.split option.split_asm) +apply(case_tac "aa",simp_all) +apply(case_tac "P bb",simp_all) +apply (metis option.distinct(1)) +apply(case_tac "aa",simp_all) +apply(case_tac "P bb",simp_all) +by (metis option.distinct(1)) + + + +text{* Universal splitting and symbolic execution rule *} +lemma exec_mbindFSave_E: +assumes seq : "(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (a#S) ioprog ; (P s)))" + and none: "ioprog a \ = None \ (\ \ (P [])) \ Q" + and some: "\ b \'. 
ioprog a \ = Some(b,\') \ (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S ioprog;(P (b#s)))) \ Q " +shows "Q" +using seq +proof(cases "ioprog a \") + case None assume ass:"ioprog a \ = None" show "Q" + apply(rule none[OF ass]) + apply(insert ass, erule_tac ioprog1=ioprog in exec_mbindFSave_failure[THEN iffD1],rule seq) + done +next + case (Some aa) assume ass:"ioprog a \ = Some aa" show "Q" + apply(insert ass,cases "aa",simp, rename_tac "out" "\'") + apply(erule some) + apply(insert ass,simp) + apply(erule_tac ioprog1=ioprog in exec_mbindFSave_success[THEN iffD1],rule seq) + done +qed + +text{* The next rule reveals the particular interest in deduction; + as an elimination rule, it allows for a linear conversion of a validity judgement + @{term "mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p"} over an input list @{term "S"} into a constraint system; without any + branching ... Symbolic execution can even be stopped tactically whenever + @{term "ioprog a \ = Some(b,\')"} comes to a contradiction. *} +lemma exec_mbindFStop_E: +assumes seq : "(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (a#S) ioprog ; (P s)))" + and some: "\b \'. ioprog a \ = Some(b,\') \ (\'\ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S ioprog;(P(b#s)))) \ Q" +shows "Q" +using seq +proof(cases "ioprog a \") + case None assume ass:"ioprog a \ = None" show "Q" + apply(insert ass seq) + apply(drule_tac \=\ and S=S and M=P in exec_mbindFStop_failure, simp) + done +next + case (Some aa) assume ass:"ioprog a \ = Some aa" show "Q" + apply(insert ass,cases "aa",simp, rename_tac "out" "\'") + apply(erule some) + apply(insert ass,simp) + apply(erule_tac ioprog1=ioprog in exec_mbindFStop_success[THEN iffD1],rule seq) + done +qed + + +lemma exec_mbindFPurge_E: +assumes seq : "(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e (a#S) ioprog ; (P s)))" + and none: "ioprog a \ = None \ (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e S ioprog;(P (s)))) \ Q" + and some: "\ b \'. 
ioprog a \ = Some(b,\') \ (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e S ioprog;(P (b#s)))) \ Q " +shows "Q" +using seq +proof(cases "ioprog a \") + case None assume ass:"ioprog a \ = None" show "Q" + apply(rule none[OF ass]) + apply(insert ass, erule_tac ioprog1=ioprog in exec_mbindFPurge_failure[THEN iffD1],rule seq) + done +next + case (Some aa) assume ass:"ioprog a \ = Some aa" show "Q" + apply(insert ass,cases "aa",simp, rename_tac "out" "\'") + apply(erule some) + apply(insert ass,simp) + apply(erule_tac ioprog1=ioprog in exec_mbindFPurge_success[THEN iffD1],rule seq) + done +qed + + +lemma assert_disch1 :" P \ \ (\ \ (x \ assert\<^sub>S\<^sub>E P; M x)) = (\ \ (M True))" +by(auto simp: bind_SE_def assert_SE_def valid_SE_def) + +lemma assert_disch2 :" \ P \ \ \ (\ \ (x \ assert\<^sub>S\<^sub>E P ; M s))" +by(auto simp: bind_SE_def assert_SE_def valid_SE_def) + +lemma assert_disch3 :" \ P \ \ \ (\ \ (assert\<^sub>S\<^sub>E P))" +by(auto simp: bind_SE_def assert_SE_def valid_SE_def) + +lemma assert_disch4 :" P \ \ (\ \ (assert\<^sub>S\<^sub>E P))" +by(auto simp: bind_SE_def assert_SE_def valid_SE_def) + +lemma assert_simp : "(\ \ assert\<^sub>S\<^sub>E P) = P \" +by (meson assert_disch3 assert_disch4) + +lemmas assert_D = assert_simp[THEN iffD1] (* legacy *) + +lemma assert_bind_simp : "(\ \ (x \ assert\<^sub>S\<^sub>E P; M x)) = (P \ \ (\ \ (M True)))" +by(auto simp: bind_SE_def assert_SE_def valid_SE_def split: HOL.if_split_asm) + +lemmas assert_bindD = assert_bind_simp[THEN iffD1] (* legacy *) + + +lemma assume_D : "(\ \ (_ \ assume\<^sub>S\<^sub>E P; M)) \ \ \. (P \ \ (\ \ M) )" +apply(auto simp: bind_SE_def assume_SE_def valid_SE_def split: HOL.if_split_asm) +apply(rule_tac x="Eps P" in exI, auto) +apply(subst Hilbert_Choice.someI,assumption,simp) +done + + +lemma assume_E : +assumes * : "\ \ ( _ \ assume\<^sub>S\<^sub>E P; M) " +and ** : "\ \. P \ \ \ \ M \ Q" +shows "Q" +apply(insert *) +by(insert *[THEN assume_D], auto intro: **) + +lemma assume_E' : +assumes * : "\ \ assume\<^sub>S\<^sub>E P ;- M" +and ** : "\ \. P \ \ \ \ M \ Q" +shows "Q" +by(insert *[simplified "bind_SE'_def", THEN assume_D], auto intro: **) + + +text{* These two rule prove that the SE Monad in connection with the notion of valid sequence +is actually sufficient for a representation of a Boogie-like language. The SBE monad with explicit +sets of states --- to be shown below --- is strictly speaking not necessary (and will therefore +be discontinued in the development). 
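+
+As a small editorial illustration (a sketch that only reuses the combinators introduced
+above; it is not part of the original theory text): a Boogie-like one-liner such as
+@{text "_ \<leftarrow> assume\<^sub>S\<^sub>E (\<lambda>\<sigma>. \<sigma> > (0::int)); _ \<leftarrow> (\<lambda>\<sigma>. Some((), \<sigma> + 1)); assert\<^sub>S\<^sub>E (\<lambda>\<sigma>. 1 < \<sigma>)"}
+is just an ordinary value of type @{text "(bool, int) MON\<^sub>S\<^sub>E"}; its correctness statement is the
+validity claim @{text "\<sigma>\<^sub>0 \<Turnstile> \<dots>"}, and reasoning about it proceeds entirely with the
+@{text assume_D}, @{text assume_E} and @{text assert_disch1}-style rules above.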
*} + +term "if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi" + +lemma if_SE_D1 : "P \ \ (\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)) = (\ \ B\<^sub>1)" +by(auto simp: if_SE_def valid_SE_def) + +lemma if_SE_D1' : "P \ \ (\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi);-M) = (\ \ (B\<^sub>1;-M))" +by(auto simp: if_SE_def valid_SE_def bind_SE'_def bind_SE_def) + + +lemma if_SE_D2 : "\ P \ \ (\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)) = (\ \ B\<^sub>2)" +by(auto simp: if_SE_def valid_SE_def) + +lemma if_SE_D2' : "\ P \ \ (\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi);-M) = (\ \ B\<^sub>2;-M)" +by(auto simp: if_SE_def valid_SE_def bind_SE'_def bind_SE_def) + + +lemma if_SE_split_asm : +"(\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)) = ((P \ \ (\ \ B\<^sub>1)) \ (\ P \ \ (\ \ B\<^sub>2)))" +by(cases "P \",auto simp: if_SE_D1 if_SE_D2) + +lemma if_SE_split_asm': +"(\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi);-M) = ((P \ \ (\ \ B\<^sub>1;-M)) \ (\ P \ \ (\ \ B\<^sub>2;-M)))" +by(cases "P \",auto simp: if_SE_D1' if_SE_D2') + + +lemma if_SE_split: +"(\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)) = ((P \ \ (\ \ B\<^sub>1)) \ (\ P \ \ (\ \ B\<^sub>2)))" +by(cases "P \", auto simp: if_SE_D1 if_SE_D2) + + +lemma if_SE_split': +"(\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi);-M) = ((P \ \ (\ \ B\<^sub>1;-M)) \ (\ P \ \ (\ \ B\<^sub>2;-M)))" +by(cases "P \", auto simp: if_SE_D1' if_SE_D2') + +lemma if_SE_execE: + assumes A: "\ \ ((if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi))" + and B: "P \ \ \ \ (B\<^sub>1) \ Q" + and C: "\ P \\ \ \ (B\<^sub>2) \ Q" + shows "Q" +by(insert A [simplified if_SE_split],cases "P \", simp_all, auto elim: B C) + + +lemma if_SE_execE': + assumes A: "\ \ ((if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi);-M)" + and B: "P \ \ \ \ (B\<^sub>1;-M) \ Q" + and C: "\ P \\ \ \ (B\<^sub>2;-M) \ Q" + shows "Q" +by(insert A [simplified if_SE_split'],cases "P \", simp_all, auto elim: B C) + + +lemma exec_while : +"(\ \ ((while\<^sub>S\<^sub>E b do c od) ;- M)) = + (\ \ ((if\<^sub>S\<^sub>E b then c ;- (while\<^sub>S\<^sub>E b do c od) else unit\<^sub>S\<^sub>E ()fi) ;- M))" +apply(subst while_SE_unfold) +by(simp add: bind_SE'_def ) + +lemmas exec_whileD = exec_while[THEN iffD1] + +lemma if_SE_execE'': +"\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi) ;- M +\ (P \ \ \ \ B\<^sub>1 ;- M \ Q) +\ (\ P \ \ \ \ B\<^sub>2 ;- M \ Q) +\ Q" +by(auto elim: if_SE_execE') + + +lemma [code]: + "(\ \ m) = (case (m \) of None \ False | (Some (x,y)) \ x)" + apply(simp add: valid_SE_def) + apply(cases "m \ = None", simp_all) + apply(insert not_None_eq, auto) +done + + +text{* Test-Refinements will be stated in terms of the failsave @{term mbind}, opting + more generality. The following lemma allows for an optimization both in + test execution as well as in symbolic execution for an important special case of + the post-codition: Whenever the latter has the constraint that the length of input and + output sequence equal each other (that is to say: no failure occured), failsave mbind + can be reduced to failstop mbind ... 
*} +lemma mbindFSave_vs_mbindFStop : + "(\ \ (os \ (mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e \s ioprog); return(length \s = length os \ P \s os))) = + (\ \ (os \ (mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p \s ioprog); return(P \s os)))" + apply(rule_tac x=P in spec) + apply(rule_tac x=\ in spec) + proof(induct "\s") + case Nil show ?case by(simp_all add: mbind_try try_SE_def del: Monads.mbind.simps) + case (Cons a \s) show ?case + apply(rule allI, rename_tac "\",rule allI, rename_tac "P") + apply(insert Cons.hyps) + apply(case_tac "ioprog a \") + apply(simp only: exec_mbindFSave_failure exec_mbindFStop_failure, simp) + apply(simp add: split_paired_all del: Monads.mbind.simps ) + apply(rename_tac "\'") + apply(subst exec_mbindFSave_success, assumption) + apply(subst (2) exec_bind_SE_success, assumption) + apply(erule_tac x="\'" in allE) + apply(erule_tac x="\\s s. P (a # \s) (aa # s)" in allE) (* heureka ! *) + apply(simp) + done + qed + + +lemma mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e_vs_mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p: +assumes A: "\ \ \. ioprog \ \ \ None" +shows "(\ \ (os \ (mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e \s ioprog); P os)) = + (\ \ (os \ (mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p \s ioprog); P os))" +proof(induct "\s") + case Nil show ?case by simp +next + case (Cons a \s) + from Cons.hyps + have B:"\ S f \. mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S f \ \ None " by simp + have C:"\\. mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p \s ioprog \ = mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e \s ioprog \" + apply(induct \s, simp) + apply(rule allI,rename_tac "\") + apply(simp add: Monads.mbind'.simps(2)) + apply(insert A, erule_tac x="a" in allE) + apply(erule_tac x="\" and P="\\ . ioprog a \ \ None" in allE) + apply(auto split:option.split) + done + show ?case + apply(insert A,erule_tac x="a" in allE,erule_tac x="\" in allE) + apply(simp, elim exE) + apply(rename_tac "out" "\'") + apply(insert B, erule_tac x=\s in allE, erule_tac x=ioprog in allE, erule_tac x=\' in allE) + apply(subst(asm) not_None_eq, elim exE) + apply(subst Monads.exec_bind_SE_success) + apply(simp split: option.split, auto) + apply(rule_tac s="(\ a b c. a # (fst c)) out \' (aa, b)" in trans, simp,rule refl) + apply(rule_tac s="(\ a b c. (snd c)) out \' (aa, b)" in trans, simp,rule refl) + apply(simp_all) + apply(subst Monads.exec_bind_SE_success, assumption) + apply(subst Monads.exec_bind_SE_success) + apply(rule_tac s="Some (aa, b)" in trans,simp_all add:C) + apply(subst(asm) Monads.exec_bind_SE_success, assumption) + apply(subst(asm) Monads.exec_bind_SE_success) + apply(rule_tac s="Some (aa, b)" in trans,simp_all add:C) + done +qed + + +section{* Valid Test Sequences in the State Exception Backtrack Monad *} +text{* This is still an unstructured merge of executable monad concepts +and specification oriented high-level properties initiating test procedures. *} + + + +section{* Hoare *} +(* STILL VERY EXPERIMENTAL STUFF *) + +definition hoare\<^sub>1 :: "('\ \ bool) \ ('\, '\)MON\<^sub>S\<^sub>E \ ('\ \ '\ \ bool) \ bool" ("\\<^sub>1 ({(1_)}/ (_)/ {(1_)})" 50) +where "(\\<^sub>1{P} M {Q} ) = (\\. 
\ \ (_ \ assume\<^sub>S\<^sub>E P ; x \ M; assert\<^sub>S\<^sub>E (Q x)))" + + +definition hoare\<^sub>2 :: "('\ \ bool) \ ('\, '\)MON\<^sub>S\<^sub>E \ ('\ \ '\ \ bool) \ bool" ("\\<^sub>2 ({(1_)}/ (_)/ {(1_)})" 50) +where "(\\<^sub>2{P} M {Q} ) = (\\. P \ \ (\ \ (x \ M; assert\<^sub>S\<^sub>E (Q x))))" + + + +find_theorems "assert\<^sub>S\<^sub>E" + +find_theorems "assume\<^sub>S\<^sub>E" + +lemma "P \ \ (_ \ assume\<^sub>S\<^sub>E P ; x \ M; assert\<^sub>S\<^sub>E (\\. (x=X) \ Q x \))" +oops + +lemma "\\. \ X. \ \ (_ \ assume\<^sub>S\<^sub>E P ; x \ M; assert\<^sub>S\<^sub>E (\\. x=X \ Q x \))" +oops + + +lemma "\ X. \ \ (_ \ assume\<^sub>S\<^sub>E P ; x \ M; assert\<^sub>S\<^sub>E (\\. x=X \ Q x \)) + \ + \ \ (_ \ assume\<^sub>S\<^sub>E P ; x \ M; assert\<^sub>S\<^sub>E (\\. Q x \))" +unfolding valid_SE_def assume_SE_def assert_SE_def bind_SE_def +by(auto split: if_split HOL.if_split_asm Option.option.split Option.option.split_asm) + + +lemma monadic_sequence_rule: + "\ X \\<^sub>1. (\ \ (_ \ assume\<^sub>S\<^sub>E (\\'. (\=\') \ P \) ; x \ M; assert\<^sub>S\<^sub>E (\\. (x=X) \ (\=\\<^sub>1) \ Q x \))) + \ + (\\<^sub>1 \ (_ \ assume\<^sub>S\<^sub>E (\\. (\=\\<^sub>1) \ Q x \) ; y \ M'; assert\<^sub>S\<^sub>E (\\. R x y \))) + \ + \ \ (_ \ assume\<^sub>S\<^sub>E (\\'. (\=\') \ P \) ; x \ M; y \ M'; assert\<^sub>S\<^sub>E (R x y))" +apply(elim exE impE conjE) +apply(drule Monads.assume_D) +apply(elim exE impE conjE) +unfolding valid_SE_def assume_SE_def assert_SE_def bind_SE_def +apply(auto split: if_split HOL.if_split_asm Option.option.split Option.option.split_asm) +apply (metis (mono_tags, lifting) option.simps(3) someI_ex) +oops + + + +subsection{* Legacy Bindings *} + + +lemma valid_true[simp]: (* legacy: special case *) + "(\ \ (s \ return x ; return (P s))) = P x" by simp + + +(* +lemmas valid_both = exec_mbindFSave (* legacy *) +lemmas valid_success = exec_mbindFSave_success (* legacy *) +lemmas valid_success'' = exec_mbindFSave_success(* legacy *) +lemmas valid_success' = exec_bind_SE_success (* legacy *) +lemmas valid_failure = exec_mbindFSave_failure (* legacy : *) +lemmas valid_failure' = exec_bind_SE_failure (* legacy *) +lemmas valid_failure''=valid_failure (* legacy : *) +lemmas valid_failure''' = exec_mbindFStop_failure (* legacy : *) +lemmas valid_propagate_fail = exec_fail_SE (* legacy *) +lemmas valid_propagate_fail' = exec_fail_SE' (* legacy *) +lemmas valid_propoagate_3' = valid_propagate_fail' (* legacy *) +lemmas valid_propagate_2 = exec_bind_SE_success''(* legacy *) +lemmas valid_propagate_1 = exec_unit_SE (* legacy *) +lemmas valid_successElem = exec_bind_SE_success' (* legacy *) +lemmas valid_propagate_2' = exec_bind_SE_success'''(* legacy *) +lemmas valid_propagate_2'' = exec_bind_SE_success'''' (* legacy *) +lemmas valid_propoagate_3 = exec_unit_SE' (* legacy *) + *) +(* legacy ?: +lemma valid_success'': +"ioprog a \ = Some(b,\') \ + (\ \ (s \ mbind (a#S) ioprog ; return (P s))) = + (\' \ (s \ mbind S ioprog ; return (P (b#s))))" +unfolding valid_SE_def unit_SE_def bind_SE_def +by(cases "mbind S ioprog \'", auto) +*) + +subsection{* State Backtrack Monads *} +text{*This subsection is still rudimentary and as such an interesting +formal analogue to the previous monad definitions. It is doubtful that it is +interesting for testing and as a cmputational stucture at all. +Clearly more relevant is ``sequence'' instead of ``set,'' which would +rephrase Isabelle's internal tactic concept. 
*} + +type_synonym ('o, '\) MON\<^sub>S\<^sub>B = "'\ \ ('o \ '\) set" + +definition bind_SB :: "('o, '\)MON\<^sub>S\<^sub>B \ ('o \ ('o', '\)MON\<^sub>S\<^sub>B) \ ('o', '\)MON\<^sub>S\<^sub>B" +where "bind_SB f g \ = \ ((\(out, \). (g out \)) ` (f \))" +notation bind_SB ("bind\<^sub>S\<^sub>B") + +definition unit_SB :: "'o \ ('o, '\)MON\<^sub>S\<^sub>B" ("(returns _)" 8) +where "unit_SB e = (\\. {(e,\)})" +notation unit_SB ("unit\<^sub>S\<^sub>B") + +syntax (xsymbols) + "_bind_SB" :: "[pttrn,('o,'\)MON\<^sub>S\<^sub>B,('o','\)MON\<^sub>S\<^sub>B] \ ('o','\)MON\<^sub>S\<^sub>B" + ("(2 _ := _; _)" [5,8,8]8) +translations + "x := f; g" == "CONST bind_SB f (% x . g)" + + + +lemma bind_left_unit_SB : "(x := returns a; m x) = m a" + by (rule ext,simp add: unit_SB_def bind_SB_def) + +lemma bind_right_unit_SB: "(x := m; returns x) = m" + by (rule ext, simp add: unit_SB_def bind_SB_def) + + +lemma bind_assoc_SB: "(y := (x := m; k x); h y) = (x := m; (y := k x; h y))" + by (rule ext, simp add: unit_SB_def bind_SB_def split_def) + + + +subsection{* State Backtrack Exception Monad (vulgo: Boogie-PL) *} +text{* The following combination of the previous two Monad-Constructions +allows for the semantic foundation of a simple generic assertion language +in the style of Schirmers Simpl-Language or Rustan Leino's Boogie-PL language. +The key is to use the exceptional element None for violations of +the assert-statement. *} +type_synonym ('o, '\) MON\<^sub>S\<^sub>B\<^sub>E = "'\ \ (('o \ '\) set) option" + + +definition bind_SBE :: "('o,'\)MON\<^sub>S\<^sub>B\<^sub>E \ ('o \ ('o','\)MON\<^sub>S\<^sub>B\<^sub>E) \ ('o','\)MON\<^sub>S\<^sub>B\<^sub>E" +where "bind_SBE f g = (\\. case f \ of None \ None + | Some S \ (let S' = (\(out, \'). g out \') ` S + in if None \ S' then None + else Some(\ (the ` S'))))" + +syntax (xsymbols) + "_bind_SBE" :: "[pttrn,('o,'\)MON\<^sub>S\<^sub>B\<^sub>E,('o','\)MON\<^sub>S\<^sub>B\<^sub>E] \ ('o','\)MON\<^sub>S\<^sub>B\<^sub>E" + ("(2 _ :\ _; _)" [5,8,8]8) +translations + "x :\ f; g" == "CONST bind_SBE f (% x . g)" + +definition unit_SBE :: "'o \ ('o, '\)MON\<^sub>S\<^sub>B\<^sub>E" ("(returning _)" 8) +where "unit_SBE e = (\\. Some({(e,\)}))" + +definition assert_SBE :: "('\ \ bool) \ (unit, '\)MON\<^sub>S\<^sub>B\<^sub>E" +where "assert_SBE e = (\\. if e \ then Some({((),\)}) + else None)" +notation assert_SBE ("assert\<^sub>S\<^sub>B\<^sub>E") + +definition assume_SBE :: "('\ \ bool) \ (unit, '\)MON\<^sub>S\<^sub>B\<^sub>E" +where "assume_SBE e = (\\. if e \ then Some({((),\)}) + else Some {})" +notation assume_SBE ("assume\<^sub>S\<^sub>B\<^sub>E") + + +definition havoc_SBE :: " (unit, '\)MON\<^sub>S\<^sub>B\<^sub>E" +where "havoc_SBE = (\\. Some({x. 
True}))" +notation havoc_SBE ("havoc\<^sub>S\<^sub>B\<^sub>E") + + +lemma bind_left_unit_SBE : "(x :\ returning a; m x) = m a" + by (rule ext,simp add: unit_SBE_def bind_SBE_def) + +lemma bind_right_unit_SBE: "(x :\ m; returning x) = m" + apply (rule ext, simp add: unit_SBE_def bind_SBE_def) + apply (case_tac "m x", simp_all add:Let_def) + apply (rule HOL.ccontr, simp add: Set.image_iff) + done + + +lemmas aux = trans[OF HOL.neq_commute,OF Option.not_None_eq] + + +lemma bind_assoc_SBE: "(y :\ (x :\ m; k x); h y) = (x :\ m; (y :\ k x; h y))" +proof (rule ext, rename_tac z, simp add: unit_SBE_def bind_SBE_def, + case_tac "m z", simp_all add: Let_def Set.image_iff, safe, goal_cases) print_cases + case (1 z a aa b ab ba aaa baa) then show ?case + using case_prod_conv option.case_eq_if by force +next + case (2 z a aa b ab ba) then show ?case + apply(rule_tac x="(aa,b)" in bexI, simp_all add:split_def) + apply(erule_tac x="(aa,b)" in ballE) + apply(auto simp: aux image_def split_def intro!: rev_bexI) + done +next + case (3 z a aa b) then show ?case + by (metis (no_types, lifting) case_prod_conv option.case_eq_if) +next + case (4 z a aa b) then show ?case + apply(auto simp: aux image_def split_def intro!: rev_bexI) + apply (auto simp add: Let_def split: if_split_asm option.split_asm) + apply fastforce + done +next + case (5 z a aa b ab ba aaa baa) then show ?case + apply simp apply((erule_tac x="(ab,ba)" in ballE)+) + apply(simp_all add: aux Option.not_None_eq, (erule exE)+, simp add:split_def) + apply(erule rev_bexI,case_tac "None\(\p. h (fst p) (snd p))`y",auto simp:split_def) + done + +next + case (6 z a aa b aaa ba) then show ?case + apply simp apply((erule_tac x="(aaa,ba)" in ballE)+) + apply(simp_all add: aux, (erule exE)+, simp add:split_def) + apply(erule rev_bexI, case_tac "None\(\p. h(fst p)(snd p))`y",auto simp:split_def) + done +qed + + +interpretation SBE : Monad unit_SBE bind_SBE + by unfold_locales (simp_all add: bind_left_unit_SBE bind_right_unit_SBE bind_assoc_SBE) + + +definition valid_SBE :: "'\ \ ('a,'\) MON\<^sub>S\<^sub>B\<^sub>E \ bool" (infix "\\<^sub>S\<^sub>B\<^sub>E" 15) +where "\ \\<^sub>S\<^sub>B\<^sub>E m \ (m \ \ None)" +text{* This notation consideres all non-failures as valid. *} + + +lemma assume_assert: "(\ \\<^sub>S\<^sub>B\<^sub>E ( _ :\ assume\<^sub>S\<^sub>B\<^sub>E P ; assert\<^sub>S\<^sub>B\<^sub>E Q)) = (P \ \ Q \)" + by(simp add: valid_SBE_def assume_SBE_def assert_SBE_def bind_SBE_def) + +lemma assert_intro: "Q \ \ \ \\<^sub>S\<^sub>B\<^sub>E (assert\<^sub>S\<^sub>B\<^sub>E Q)" + by(simp add: valid_SBE_def assume_SBE_def assert_SBE_def bind_SBE_def) + +lemma assume_dest: + "\ \ \\<^sub>S\<^sub>B\<^sub>E (x :\ assume\<^sub>S\<^sub>B\<^sub>E Q; M x); Q \' \ \ \ \\<^sub>S\<^sub>B\<^sub>E M ()" + apply(auto simp: valid_SBE_def assume_SBE_def assert_SBE_def bind_SBE_def) + apply(cases "Q \",simp_all) + oops + +text{* This still needs work. What would be needed is a kind + of wp - calculus that comes out of that. So far: nope. *} + +(* TODO: IF THEN ELSE and WHILE + Monadic Laws + Operational Rules. 
*) + + + + + +end diff --git a/src/test-gen/src/test/Observers.thy b/src/test-gen/src/test/Observers.thy new file mode 100644 index 0000000..aa38341 --- /dev/null +++ b/src/test-gen/src/test/Observers.thy @@ -0,0 +1,200 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * Observers.thy --- the base testing theory for reactive sequence testing. + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2007, ETH Zurich, Switzerland + * 2009 B. Wolff, Univ. Paris-Sud, France + * 2009 Achim D. Brucker, Germany + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id:$ *) + +chapter {* Observers managing Abstract State*} + +theory Observers imports Monads +begin + + +section{* IO-stepping Function Transfomers *} + +text{* The following adaption combinator converts an input-output + program under test of type: $\iota \Rightarrow \sigma + \rightharpoonup o \times \sigma$ with program state $\sigma$ into a + state transition program that can be processed by mbind. The key + idea to turn mbind into a test-driver for a reactive system is by + providing an internal state $\sigma'$, managed by the test driver, + and external, problem-specific functions ``rebind'' and + ``substitute'' that operate on this internal state. For example, + this internal state can be instantiated with an environment $var + \rightharpoonup value$. The output (or parts of it) can then be + bound to vars in the environment. In contrast, substitute can then + explicit substitute variables occuring in value representations + into pure values, e.g. is can substitue $c~("X")$ into $c~3$ + provided the environment contained the map with $X \leadsto 3$. 
*} + +text{* The state of the test-driver consists of two parts: the state + of the observer (or: adaptor) $\sigma$ and the internal state $\sigma'$ of the + the step-function of the system under test $ioprog$ is allowed to use. *} + +definition observer :: "['\ \ 'o \ '\, '\ \ '\ \ '\, '\\'\' \ '\ \ 'o \ bool] + \ ('\ \ '\' \ 'o \'\') + \ ('\ \ ('\\'\' \ '\\'\'))" + +where "observer rebind substitute postcond ioprog = + (\ input. (\ (\, \'). let input'= substitute \ input in + case ioprog input' \' of + None \ None (* ioprog failure - eg. timeout ... *) + | Some (output, \''') \ let \'' = rebind \ output in + (if postcond (\'',\''') input' output + then Some(\'', \''') + else None (* postcond failure *) )))" + +text{* The subsequent $observer$ version is more powerful: it admits also preconditions +of $ioprog$, which make reference to the observer state $\sigma_{obs}$. The observer-state +may contain an environment binding values to explicit variables. In such a scenario, the +$precond\_solve$ may consist of a \emph{solver} that constructs a solution from +\begin{enumerate} +\item this environment, +\item the observable state of the $ioprog$, +\item the abstract input + (which may be related to a precondition which contains references to explicit + variables) +\end{enumerate} +such that all the explicit variables contained in the preconditions and the +explicit variables in the abstract input are substituted against values +that make the preconditions true. The values must be stored in the +environment and are reported in the observer-state $\sigma_{obs}$. +*} + +definition observer1 :: "['\_obs \ 'o_c \ '\_obs, + '\_obs \ '\ \ '\_a \ ('\_c \ '\_obs), + '\_obs \ '\ \ '\_c \ 'o_c \ bool] + \ ('\_c \ ('o_c, '\)MON\<^sub>S\<^sub>E) + \ ('\_a \ ('o_c, '\_obs \'\)MON\<^sub>S\<^sub>E) " + +where "observer1 rebind precond_solve postcond ioprog = + (\ in_a. (\ (\_obs, \). let (in_c,\_obs') = precond_solve \_obs \ in_a + in case ioprog in_c \ of + None \ None (* ioprog failure - eg. timeout ... *) + | Some (out_c, \') \(let \_obs'' = rebind \_obs' out_c + in if postcond \_obs'' \' in_c out_c + then Some(out_c, (\_obs', \')) + else None (* postcond failure *) )))" + + +definition observer2 :: "['\_obs \ 'o_c \ '\_obs, '\_obs \ '\_a \ '\_c, '\_obs \ '\ \ '\_c \ 'o_c \ bool] + \ ('\_c \ ('o_c, '\)MON\<^sub>S\<^sub>E) + \ ('\_a \ ('o_c, '\_obs \'\)MON\<^sub>S\<^sub>E) " + +where "observer2 rebind substitute postcond ioprog = + (\ in_a. (\ (\_obs, \). let in_c = substitute \_obs in_a + in case ioprog in_c \ of + None \ None (* ioprog failure - eg. timeout ... *) + | Some (out_c, \') \(let \_obs' = rebind \_obs out_c + in if postcond \_obs' \' in_c out_c + then Some(out_c, (\_obs', \')) + else None (* postcond failure *) )))" + +text{* Note that this version of the observer is just a +monad-transformer; it transforms the i/o stepping function $ioprog$ +into another stepping function, which is the combined sub-system +consisting of the observer and, for example, a program under test +$\PUT$. The observer takes the \emph{abstract} input $in_a$, +substitutes explicit variables in it by concrete values stored by its +own state $\sigma_{obs}$ and constructs \emph{concrete} input $in_c$, +runs $ioprog$ in this context, and evaluates the return: the concrete +output $out_c$ and the successor state $\sigma'$ are used to extract +from concrete output concrete values and stores them inside its own +successor state $\sigma_{obs}'$. 
Provided that a post-condition is +passed succesfully, the output and the combined successor-state is +reported as success. + +Note that we made the following testability assumptions: +\begin{enumerate} +\item $ioprog$ behaves wrt. to the reported state and input as a function, i.e. it behaves + deterministically, and +\item it is not necessary to destinguish internal failure and post-condition-failure. + (Modelling Bug? This is superfluous and blind featurism ... One could do this by + introducing an own "weakening"-monad endo-transformer.) +\end{enumerate} + +*} + +text{* observer2 can actually be decomposed into two combinators - one +dealing with the management of explicit variables and one that tackles +post-conditions. *} + +definition observer3 :: "['\_obs \ 'o \ '\_obs, '\_obs \ '\_a \ '\_c] + \ ('\_c \ ('o, '\)MON\<^sub>S\<^sub>E) + \ ('\_a \ ('o, '\_obs \'\)MON\<^sub>S\<^sub>E) " + +where "observer3 rebind substitute ioprog = + (\ in_a. (\ (\_obs, \). + let in_c = substitute \_obs in_a + in case ioprog in_c \ of + None \ None (* ioprog failure - eg. timeout ... *) + | Some (out_c, \') \(let \_obs' = rebind \_obs out_c + in Some(out_c, (\_obs', \')) )))" + + + +definition observer4 :: "['\ \ '\ \ 'o \ bool] + \ ('\ \ ('o, '\)MON\<^sub>S\<^sub>E) + \ ('\ \ ('o, '\)MON\<^sub>S\<^sub>E)" + +where "observer4 postcond ioprog = + (\ input. (\ \. case ioprog input \ of + None \ None (* ioprog failure - eg. timeout ... *) + | Some (output, \') \ (if postcond \' input output + then Some(output, \') + else None (* postcond failure *) )))" + +text{* The following lemma explains the relationsship between $observer2$ and the +decoposed versions $observer3$ and $observer4$. The full equality does not hold - +the reason is that the two kinds of preconditions are different in a subtle way: +the postcondition may make reference to the abstract state. (See our example +\verb+Sequence_test+ based on a symbolic environment in the observer state.) +If the postcondition does not do this, they are equivalent. *} + +find_theorems name:"prod" name:"case" name:"beta" +lemma observer_decompose: + " observer2 r s (\ x. pc) io = (observer3 r s (observer4 pc io))" + apply(rule ext, rule ext) + apply(auto simp: observer2_def observer3_def + observer4_def Let_def case_prod_beta) + apply(case_tac "io (s a x) b", auto) +done + +end diff --git a/src/test-gen/src/test/TestEnv.ML b/src/test-gen/src/test/TestEnv.ML new file mode 100644 index 0000000..ce89494 --- /dev/null +++ b/src/test-gen/src/test/TestEnv.ML @@ -0,0 +1,458 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * TestEnv.ML --- environment for configuration parameters + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2007 ETH Zurich, Switzerland + * 2008-2013 Achim D. Brucker, Germany + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. 
+ * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) + +signature TESTENV = +sig + type test_derivation_rule = Proof.context -> int -> thm -> (int * tactic) list + + (* Core data structure for the test environment. + Contains tables, parameters, all sorts of "screws" + to control the test generation process. *) + type testinfo = + {test_thm_tab : thm Symtab.table, (* name to testthm *) + + type_candicates : typ list, (* type grounding list *) + + cod_term_tab : (typ list -> term) Symtab.table, + (* random generators for terms, + tab assigns to each type constuctor + name a generator that gets the + instances of type constructor as args.*) + abstest_data_tab: (thm list) Symtab.table, + (* assigns name to list of + abstract (local) test data statements *) + test_data_tab : (thm list) Symtab.table, + + pre_safe_tac : Proof.context -> tactic, + pre_normalize_TNF_tac : Proof.context -> tactic, + pre_minimize_TNF_tac : Proof.context -> tactic, + test_derivation_rules : test_derivation_rule Unsynchronized.ref list + }; + + val prelude : bool Config.T + val wrapper : bool Config.T + val toString : string Config.T + val setup_code : string Config.T + val dataconv_code : string Config.T + + val depth : int Config.T (* data structure depth *) + val breadth : int Config.T (* no of free's in testspec *) + + val bound : int Config.T (* global bound for data statements *) + val case_breadth : int Config.T (* no of test data per case, + weakening uniformity *) + val iterations : int Config.T (* number of random attempts to solve a case *) + + val type_range_bound : int Config.T (* effectively used elements + type grounding list *) + + val SMT : bool Config.T + val get_smt_facts : Proof.context -> thm list + + type testenv + val rep_testenv : testenv -> testinfo + + val get_data : Proof.context -> testenv + val map_data : (testenv -> testenv) -> Context.generic -> Context.generic + val get_data_global : theory -> testenv + val map_data_global : (testenv -> testenv) -> theory -> theory + val print_data : Proof.context -> unit + + + val add_test_case : string * thm -> testenv -> testenv + (* add test theorem of "name". + The test-theorem is assumed to consist of either clauses in TNF or + of Test Hypotheses. 
*) + + val del_test_case : string -> testenv -> testenv + val add_test_data : string * thm -> testenv -> testenv + (* used to insert abstract test data registrated under + "string" into the test environment; abstract test data + (or "local test theorems") were used in gen_test_data *) + val del_test_data : string -> testenv -> testenv + (* erase abstract test data from test environment *) + + val thm_tab : testenv -> thm Symtab.table + val absdata_tab : testenv -> thm list Symtab.table + val jdmt_tab : testenv -> thm list Symtab.table + + val thm_tab_update : thm Symtab.table -> testenv -> testenv + val jdmt_tab_update : thm list Symtab.table -> testenv -> testenv + + val get_pre_safe_tac : testenv -> Proof.context -> tactic + val get_pre_normalize_TNF_tac : testenv -> Proof.context -> tactic + val get_pre_minimize_TNF_tac : testenv -> Proof.context -> tactic + + val get_test_derivation_rules : testenv -> test_derivation_rule list + val add_test_derivation_rule : test_derivation_rule -> testenv -> testenv + + val type_candidates_update : typ list -> testenv -> testenv + val pre_safe_tac_update : (Proof.context -> tactic) -> testenv -> testenv + val pre_normalize_TNF_tac_update : (Proof.context -> tactic) -> testenv -> testenv + val pre_minimize_TNF_tac_update : (Proof.context -> tactic) -> testenv -> testenv + + val setup: theory -> theory +end + +structure TestEnv : TESTENV = +struct + +open HOLogic; + +type test_derivation_rule = Proof.context -> int -> thm -> (int * tactic) list + +type testinfo ={test_thm_tab : thm Symtab.table, (* name to testthm *) + + type_candicates : typ list, (* type grounding list *) + + cod_term_tab : (typ list -> term) Symtab.table, + (* random generators for terms, + tab assigns to each type constuctor + name a generator that gets the + instances of type constructor as args.*) + abstest_data_tab: (thm list) Symtab.table, + (* assigns name to list of + data statements *) + test_data_tab : (thm list) Symtab.table, + pre_safe_tac : Proof.context -> tactic, + pre_normalize_TNF_tac : Proof.context -> tactic, + pre_minimize_TNF_tac : Proof.context -> tactic, + test_derivation_rules : test_derivation_rule Unsynchronized.ref list + }; + +val (prelude, prelude_setup) = Attrib.config_bool @{binding testgen_prelude} (K true); +val (wrapper, wrapper_setup) = Attrib.config_bool @{binding testgen_wrapper} (K true); +val (toString, toString_setup) = Attrib.config_string @{binding testgen_toString} (K ""); +val (setup_code, setup_code_setup) = Attrib.config_string @{binding testgen_setup_code} (K ""); +val (dataconv_code, dataconv_code_setup) = Attrib.config_string @{binding testgen_dataconv_code} (K ""); +val (depth, depth_setup) = Attrib.config_int @{binding testgen_depth} (K 3); +val (breadth, breadth_setup) = Attrib.config_int @{binding testgen_breadth} (K 1); +val (bound, bound_setup) = Attrib.config_int @{binding testgen_bound} (K 200); +val (case_breadth, case_breadth_setup) = Attrib.config_int @{binding testgen_case_breadth} (K 1); +val (iterations, iterations_setup) = Attrib.config_int @{binding testgen_iterations} (K 25); +val (type_range_bound, type_range_bound_setup) = Attrib.config_int @{binding testgen_type_range_bound} (K 1); +val (SMT, SMT_setup) = Attrib.config_bool @{binding testgen_SMT} (K false); + +structure TestGen_SMT + = Named_Thms (val name = @{binding "testgen_smt_facts"} val description = "Facts for HOL-TestGen SMT solving"); + +val get_smt_facts = TestGen_SMT.get; + +datatype testenv = Testenv of testinfo + +fun rep_testenv (Testenv X) = X; + + 
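+(* Editorial illustration (a sketch, not part of the original sources): the values
+   introduced above via Attrib.config_bool/Attrib.config_int are ordinary Isabelle
+   configuration attributes, registered by the setup function at the end of this
+   structure.  A test theory that has loaded HOL-TestGen can therefore tune the
+   test-generation "screws" locally, e.g.
+
+     declare [[testgen_depth = 5, testgen_iterations = 40, testgen_SMT = true]]
+
+   and the test-generation code reads the current values back with Config.get,
+   as in print_data below. *)
+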
+val initial_testenv = Testenv + {test_thm_tab = Symtab.empty, + + type_candicates = [HOLogic.intT,HOLogic.unitT, + HOLogic.boolT, + HOLogic.mk_setT HOLogic.intT, + HOLogic.listT HOLogic.intT], + cod_term_tab = Symtab.empty, + abstest_data_tab= Symtab.empty, + test_data_tab = Symtab.empty, + pre_safe_tac = fn ctxt => all_tac, + pre_normalize_TNF_tac = fn ctxt => all_tac, + pre_minimize_TNF_tac = fn ctxt => all_tac, + test_derivation_rules = [] +}; + +fun merge_testenv + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}, + Testenv{test_thm_tab=tttab', + type_candicates=tc', + cod_term_tab=ctt', abstest_data_tab=tdt', + test_data_tab=tjt',pre_safe_tac=pst',pre_normalize_TNF_tac=pnt',pre_minimize_TNF_tac=pmt', + test_derivation_rules = tdr'}) = + + Testenv{test_thm_tab = Symtab.merge (Thm.eq_thm) (tttab,tttab'), + type_candicates = distinct (op=) (tc@tc'), + cod_term_tab = Symtab.empty, + (* don't know what a senseful + definition of override could + be; therefore constraint to the + simplest and most conservative + one *) + abstest_data_tab = Symtab.merge (fn (t1, t2) => List.all Thm.eq_thm (map2 (fn a => fn b => (a, b)) t1 t2)) (tdt,tdt'), + test_data_tab = Symtab.merge (fn (t1, t2) => List.all Thm.eq_thm (map2 (fn a => fn b => (a, b)) t1 t2)) (tjt,tjt'), + pre_safe_tac = pst, + pre_normalize_TNF_tac = pnt, + pre_minimize_TNF_tac = pmt, + test_derivation_rules = distinct (op=) (tdr@tdr')}; + + +(* Context data *) + +structure Data = Generic_Data +( + type T = testenv + val empty = initial_testenv + val extend = I + val merge = merge_testenv +); + +val get_data = Data.get o Context.Proof; +val map_data = Data.map; + +val get_data_global = Data.get o Context.Theory; +val map_data_global = Context.theory_map o map_data; + +fun print_data ctxt = + let val {test_thm_tab, + type_candicates, + cod_term_tab, abstest_data_tab,test_data_tab,pre_safe_tac, + pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules} = + rep_testenv (get_data ctxt) + val depth = Config.get ctxt depth + val breadth = Config.get ctxt breadth + val bound = Config.get ctxt bound + val iterations = Config.get ctxt iterations + fun H (n,thm) = [Pretty.str (n^":"), Display.pretty_thm ctxt thm] + fun H2(n,thms)= + [Pretty.str (n^":"), Pretty.str "======"] @ + map (Display.pretty_thm ctxt) thms @ [Pretty.str "======"] + in [Pretty.str ">>> Testenvironment >>>>>>>>>>>>>>>>>", + Pretty.str "+++ Control Data: +++++++++++++++++++", + Pretty.str ("*** default depth: "^Int.toString(depth)), + Pretty.str ("*** default breadth: "^Int.toString(breadth)), + Pretty.str ("*** bound: "^Int.toString(bound)), + Pretty.str ("*** iterations: "^Int.toString(iterations)), + Pretty.str "+++ Testtheoremtable: +++++++++++++++"] @ + maps H (Symtab.dest test_thm_tab) @ + [Pretty.str "+++ Testjudgements: +++++++++++++++++"] @ + maps H2 (Symtab.dest test_data_tab) @ + [Pretty.str "+++ Testdatatable: ++++++++++++++++++"] @ + maps H2 (Symtab.dest abstest_data_tab) @ + [Pretty.str "<<< Testenvironment <<<<<<<<<<<<<<<<<"] + end |> Pretty.chunks |> Pretty.writeln; + + +fun add_test_case (name,thm) + (Testenv {test_thm_tab, + type_candicates, + cod_term_tab, abstest_data_tab,test_data_tab,pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, + test_derivation_rules}) = + Testenv({test_thm_tab=Symtab.update(name,thm)test_thm_tab, + type_candicates=type_candicates, + 
cod_term_tab=cod_term_tab, + abstest_data_tab=abstest_data_tab, + test_data_tab=test_data_tab, pre_safe_tac=pre_safe_tac,pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + +fun del_test_case name (Testenv {test_thm_tab, + type_candicates,cod_term_tab, abstest_data_tab, + test_data_tab,pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, + test_derivation_rules}) = + Testenv({test_thm_tab=Symtab.delete_safe name test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, abstest_data_tab=abstest_data_tab, + test_data_tab=test_data_tab, pre_safe_tac=pre_safe_tac,pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + + + +fun add_test_data (name,thm) + (Testenv {test_thm_tab, + type_candicates, + cod_term_tab, abstest_data_tab,test_data_tab,pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, + test_derivation_rules}) = + Testenv({test_thm_tab=test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, + abstest_data_tab=Symtab.cons_list (name,thm) abstest_data_tab, + test_data_tab=test_data_tab,pre_safe_tac=pre_safe_tac,pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + +fun del_test_data name + (Testenv {test_thm_tab, + type_candicates, + cod_term_tab,abstest_data_tab,test_data_tab,pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, + test_derivation_rules}) = + Testenv({test_thm_tab=test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, + abstest_data_tab=Symtab.delete_safe name abstest_data_tab, + test_data_tab=test_data_tab,pre_safe_tac=pre_safe_tac,pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + +fun thm_tab te = #test_thm_tab (rep_testenv te) +fun absdata_tab te = #abstest_data_tab(rep_testenv te) +fun jdmt_tab te = #test_data_tab(rep_testenv te) +(* In the sequel we also use the term "test judgement" synonymously + for "test data statement". 
*) + +fun thm_tab_update thm_tab + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) = + (Testenv{test_thm_tab=thm_tab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}); + +fun jdmt_tab_update jdmt_tab + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) = + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=jdmt_tab,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}); + + +fun type_candidates_update (type_candidates) + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) + = + (Testenv{test_thm_tab=tttab,type_candicates=type_candidates, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}); + +fun pre_safe_tac_update (pre_safe_tat) + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) + = + (Testenv{test_thm_tab=tttab,type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pre_safe_tat,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}); + + +fun pre_normalize_TNF_tac_update (pre_normalize_TNF_tac) + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) + = + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}); + + +fun pre_minimize_TNF_tac_update (pre_minimize_TNF_tac) + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt, + pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) + = + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = tdr}); + + + + +fun get_pre_safe_tac (Testenv{pre_safe_tac=pst, ...}) = pst; +fun get_pre_normalize_TNF_tac (Testenv{pre_normalize_TNF_tac=pnt, ...}) = pnt; +fun get_pre_minimize_TNF_tac (Testenv{pre_minimize_TNF_tac=pmt, ...}) = pmt; + +fun get_test_derivation_rules (Testenv{test_derivation_rules = tdr, ...}) = map (op !) 
tdr; + +fun add_test_derivation_rule (tdr) + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt, + pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdrs}) + = + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt, + pre_minimize_TNF_tac=pmt, + test_derivation_rules = (Unsynchronized.ref tdr)::tdrs}); + +(* module setup *) + +val setup = + prelude_setup #> + wrapper_setup #> + toString_setup #> + setup_code_setup #> + dataconv_code_setup #> + depth_setup #> + breadth_setup #> + bound_setup #> + case_breadth_setup #> + iterations_setup #> + SMT_setup #> + TestGen_SMT.setup; + +end (*struct *); + + diff --git a/src/test-gen/src/test/TestEnv.thy b/src/test-gen/src/test/TestEnv.thy new file mode 100644 index 0000000..e5ad3c7 --- /dev/null +++ b/src/test-gen/src/test/TestEnv.thy @@ -0,0 +1,726 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * TestEnv.ML --- environment for configuration parameters + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2007 ETH Zurich, Switzerland + * 2008-2013 Achim D. Brucker, Germany + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +theory TestEnv +imports Main +begin +ML{* + +(* TODO : not correctly implemented : no ABSTRACT testcases. *) + +signature TESTENV = +sig + type test_derivation_rule = Proof.context -> int -> thm -> (int * tactic) list + + (* Core data structure for the test environment. + Contains tables, parameters, all sorts of "screws" + to control the test generation process. 
*) + type testinfo = + {test_thm_tab : thm Symtab.table, (* name to testthm *) + + test_thm_inst_tab : (thm list) Symtab.table, (* instantiated testthm *) + + type_candicates : typ list, (* type grounding list *) + + cod_term_tab : (typ list -> term) Symtab.table, + (* random generators for terms, + tab assigns to each type constuctor + name a generator that gets the + instances of type constructor as args.*) + abstest_data_tab : (thm list) Symtab.table, + (* assigns name to list of + abstract (local) test data statements given + by the user *) + + test_data_tab : (cterm list) Symtab.table, + (* concrete test data table *) + unsolved_PO_tab : (cterm list) Symtab.table, + (* proof obligations left unresolved in selection phase *) + test_thyps_tab : (cterm list) Symtab.table, + (* test-hypothesis *) + + + pre_safe_tac : Proof.context -> tactic, + pre_normalize_TNF_tac : Proof.context -> tactic, + pre_minimize_TNF_tac : Proof.context -> tactic, + test_derivation_rules : test_derivation_rule Unsynchronized.ref list + }; + + val prelude : bool Config.T + val wrapper : bool Config.T + val toString : string Config.T + val setup_code : string Config.T + val dataconv_code : string Config.T + + val depth : int Config.T (* data structure depth *) + val steps : int Config.T (* no of iterations of splittings *) + + val bound : int Config.T (* global bound for data statements *) + val case_steps : int Config.T (* no of test data per case, + weakening uniformity *) + val iterations : int Config.T (* number of random attempts to solve a case *) + + val type_range_bound : int Config.T (* effectively used elements + type grounding list *) + + val SMT : bool Config.T (* use the SMT backend (default: false) *) + val SMT2 : bool Config.T (* use the SMT2 backend (default: false ; false if SMT is set to true) *) + val get_smt_facts : Proof.context -> thm list + + val pon : int Config.T (* test data generation for only the chosen PO; + 0 for all the POs *) + val smt_model : bool Config.T + + val use_metis : bool Config.T (* if true, use metis to check SMT models *) + val use_smt : bool Config.T (* if true, use smt to check SMT models *) + + type testenv + val rep_testenv : testenv -> testinfo + + val get_testenv : Proof.context -> testenv + val map_data : (testenv -> testenv) -> Context.generic -> Context.generic + val get_testenv_global : theory -> testenv + val map_data_global : (testenv -> testenv) -> theory -> theory + val print_testenv : string -> Proof.context -> unit + val get_test_data : Proof.context -> string -> thm list option + + val get_test_data_tab : testenv -> cterm list Symtab.table + val get_unsolved_PO_tab: testenv -> cterm list Symtab.table + val get_test_thyps_tab : testenv -> cterm list Symtab.table + + val add_test_data_tab : string * cterm -> testenv -> testenv + val add_unsolved_PO_tab: string * cterm -> testenv -> testenv + val add_test_thyps_tab : string * cterm -> testenv -> testenv + + val set_test_data_tab : string * (cterm list) -> testenv -> testenv + val set_unsolved_PO_tab: string * (cterm list) -> testenv -> testenv + val set_test_thyps_tab : string * (cterm list) -> testenv -> testenv + + val add_test_case : string * thm -> testenv -> testenv + (* add test theorem of "name". + The test-theorem is assumed to consist of either clauses in TNF or + of Test Hypotheses. 
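+
+                             A hedged usage sketch in Isabelle/ML (the suite name
+                             "max_test", the theorem my_test_thm and the theory thy
+                             are hypothetical; only functions from this signature
+                             plus Symtab.lookup are used):
+
+                                 val thy' = TestEnv.map_data_global
+                                              (TestEnv.add_test_case ("max_test", my_test_thm)) thy
+                                 val env  = TestEnv.get_testenv_global thy'
+                                 val SOME stored =
+                                       Symtab.lookup (TestEnv.get_test_thm_tab env) "max_test"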
*) + + val del_test_case : string -> testenv -> testenv + val add_abstest_data : string * thm -> testenv -> testenv + (* used to insert abstract test data registrated under + "string" into the test environment; abstract test data + (or "local test theorems") were used in gen_test_data *) + val del_abstest_data : string -> testenv -> testenv + (* erase abstract test data from test environment *) + + val get_test_thm_tab : testenv -> thm Symtab.table + val get_absdata_tab : testenv -> thm list Symtab.table + val get_test_thm_inst_tab : testenv -> thm list Symtab.table + + val test_thm_tab_update : thm Symtab.table -> testenv -> testenv + val test_thm_inst_tab_update : thm list Symtab.table -> testenv -> testenv + + val get_pre_safe_tac : testenv -> Proof.context -> tactic + val get_pre_normalize_TNF_tac : testenv -> Proof.context -> tactic + val get_pre_minimize_TNF_tac : testenv -> Proof.context -> tactic + + val get_test_derivation_rules : testenv -> test_derivation_rule list + val add_test_derivation_rule : test_derivation_rule -> testenv -> testenv + + val type_candidates_update : typ list -> testenv -> testenv + val pre_safe_tac_update : (Proof.context -> tactic) -> testenv -> testenv + val pre_normalize_TNF_tac_update : (Proof.context -> tactic) -> testenv -> testenv + val pre_minimize_TNF_tac_update : (Proof.context -> tactic) -> testenv -> testenv + + val print_test_case : Proof.context -> string -> int option -> string + val print_test_data : Proof.context -> string -> int option -> string + val print_test_hyps : Proof.context -> string -> int option -> string + val print_unsolved_pos : Proof.context -> string -> int option -> string + + val setup: theory -> theory +end +*} + +ML{* + +structure TestEnv : TESTENV = +struct + +open HOLogic; + +type test_derivation_rule = Proof.context -> int -> thm -> (int * tactic) list + +type testinfo ={test_thm_tab : thm Symtab.table, (* name to testthm *) + + test_thm_inst_tab : (thm list) Symtab.table, + + type_candicates : typ list, (* type grounding list *) + + cod_term_tab : (typ list -> term) Symtab.table, + (* random generators for terms, + tab assigns to each type constuctor + name a generator that gets the + instances of type constructor as args.*) + abstest_data_tab : (thm list) Symtab.table, + (* assigns name to list of data statements *) + test_data_tab : (cterm list) Symtab.table, + unsolved_PO_tab : (cterm list) Symtab.table, + test_thyps_tab : (cterm list) Symtab.table, + pre_safe_tac : Proof.context -> tactic, + pre_normalize_TNF_tac : Proof.context -> tactic, + pre_minimize_TNF_tac : Proof.context -> tactic, + test_derivation_rules : test_derivation_rule Unsynchronized.ref list + }; + +val (prelude, prelude_setup) = Attrib.config_bool @{binding testgen_prelude} (K true); +val (wrapper, wrapper_setup) = Attrib.config_bool @{binding testgen_wrapper} (K true); +val (toString, toString_setup) = Attrib.config_string @{binding testgen_toString} (K ""); +val (setup_code, setup_code_setup) = Attrib.config_string @{binding testgen_setup_code} (K ""); +val (dataconv_code, dataconv_code_setup) = Attrib.config_string @{binding testgen_dataconv_code} (K ""); +val (depth, depth_setup) = Attrib.config_int @{binding testgen_depth} (K 3); +val (steps, steps_setup) = Attrib.config_int @{binding testgen_steps} (K 1); +val (bound, bound_setup) = Attrib.config_int @{binding testgen_bound} (K 200); +val (case_steps, case_steps_setup) = Attrib.config_int @{binding testgen_case_steps} (K 1); +val (iterations, iterations_setup) = Attrib.config_int 
@{binding testgen_iterations} (K 25); +val (type_range_bound, type_range_bound_setup) = Attrib.config_int @{binding testgen_type_range_bound} (K 1); +val (SMT, SMT_setup) = Attrib.config_bool @{binding testgen_SMT} (K false); +val (SMT2, SMT2_setup) = Attrib.config_bool @{binding testgen_SMT2} (K false); +val (pon, pon_setup) = Attrib.config_int @{binding testgen_pon} (K 0); +val (smt_model, smt_model_setup) = Attrib.config_bool @{binding testgen_smt_model} (K false); +val (use_metis, use_metis_setup) = Attrib.config_bool @{binding testgen_use_metis} (K false); +val (use_smt, use_smt_setup) = Attrib.config_bool @{binding testgen_use_smt} (K false); + +structure TestGen_SMT + = Named_Thms (val name = @{binding "testgen_smt_facts"} + val description = "Facts for HOL-TestGen SMT solving"); + +val get_smt_facts = TestGen_SMT.get; + +datatype testenv = Testenv of testinfo + +fun rep_testenv (Testenv X) = X; + + +val initial_testenv = Testenv + {test_thm_tab = Symtab.empty, + type_candicates = [HOLogic.intT,HOLogic.unitT, + HOLogic.boolT, + HOLogic.mk_setT HOLogic.intT, + HOLogic.listT HOLogic.intT], + cod_term_tab = Symtab.empty, + abstest_data_tab= Symtab.empty, + test_thm_inst_tab = Symtab.empty, + test_data_tab = Symtab.empty, + unsolved_PO_tab = Symtab.empty, + test_thyps_tab = Symtab.empty, + + pre_safe_tac = fn ctxt => all_tac, + pre_normalize_TNF_tac = fn ctxt => all_tac, + pre_minimize_TNF_tac = fn ctxt => all_tac, + test_derivation_rules = [] + }; + +fun merge_testenv + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab = ttdt, + unsolved_PO_tab = uPOt, + test_thyps_tab = ttht, + test_thm_inst_tab=tjt, pre_safe_tac=pst, + pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}, + Testenv{test_thm_tab=tttab', + type_candicates=tc', + cod_term_tab=ctt', abstest_data_tab=tdt', + test_data_tab = ttdt', + unsolved_PO_tab = uPOt', + test_thyps_tab = ttht', + test_thm_inst_tab=tjt',pre_safe_tac=pst', + pre_normalize_TNF_tac=pnt',pre_minimize_TNF_tac=pmt', + test_derivation_rules = tdr'}) = + + Testenv{test_thm_tab = Symtab.merge (Thm.eq_thm) (tttab,tttab'), + type_candicates = distinct (op=) (tc@tc'), + cod_term_tab = Symtab.empty, + (* don't know what a senseful + definition of override could + be; therefore constraint to the + simplest and most conservative + one *) + abstest_data_tab = Symtab.merge (fn (t1, t2) => List.all Thm.eq_thm (map2 (fn a => fn b => (a, b)) t1 t2)) (tdt,tdt'), + test_thm_inst_tab = Symtab.merge (fn (t1, t2) => List.all Thm.eq_thm (map2 (fn a => fn b => (a, b)) t1 t2)) (tjt,tjt'), + test_data_tab = Symtab.merge (fn (t1, t2) => List.all Thm.aconvc (map2 (fn a => fn b => (a, b)) t1 t2)) (ttdt,ttdt'), + unsolved_PO_tab = Symtab.merge (fn (t1, t2) => List.all Thm.aconvc (map2 (fn a => fn b => (a, b)) t1 t2)) (uPOt,uPOt'), + test_thyps_tab = Symtab.merge (fn (t1, t2) => List.all Thm.aconvc (map2 (fn a => fn b => (a, b)) t1 t2)) (ttht,ttht'), + pre_safe_tac = pst, + pre_normalize_TNF_tac = pnt, + pre_minimize_TNF_tac = pmt, + test_derivation_rules = distinct (op=) (tdr@tdr')}; + + +structure Data = Generic_Data +( + type T = testenv + val empty = initial_testenv + val extend = I + val merge = merge_testenv +); + +val get_testenv = Data.get o Context.Proof; +val map_data = Data.map; + +val get_testenv_global = Data.get o Context.Theory; +val map_data_global = Context.theory_map o map_data; + +fun print_testenv envname ctxt = + let val {test_thm_tab, type_candicates, + cod_term_tab, 
abstest_data_tab,test_thm_inst_tab,pre_safe_tac, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules} = + rep_testenv (get_testenv ctxt) + val depth = Config.get ctxt depth + val steps = Config.get ctxt steps + val bound = Config.get ctxt bound + val iterations = Config.get ctxt iterations + fun H (n,thm) = if n = envname orelse envname = "" + then [Pretty.str (n^":"), Thm.pretty_thm ctxt thm] + else [] + fun H2(n,thms)= if n = envname orelse envname = "" + then [Pretty.str (n^":"), Pretty.str "======"] @ + map (Thm.pretty_thm ctxt) thms @ + [Pretty.str "======"] + else [] + in [Pretty.str ">>> Testenvironment >>>>>>>>>>>>>>>>>", + Pretty.str "+++ Control Data: +++++++++++++++++++", + Pretty.str ("*** default depth: "^Int.toString(depth)), + Pretty.str ("*** default steps: "^Int.toString(steps)), + Pretty.str ("*** bound: "^Int.toString(bound)), + Pretty.str ("*** iterations: "^Int.toString(iterations)), + Pretty.str "+++ Testtheoremtable: +++++++++++++++"] @ +(* TODO bu: add cterm construction ... *) + maps H (Symtab.dest test_thm_tab) @ + [Pretty.str "+++ Testjudgements: +++++++++++++++++"] @ + maps H2 (Symtab.dest test_thm_inst_tab) @ + [Pretty.str "+++ Testdatatable: ++++++++++++++++++"] @ + maps H2 (Symtab.dest abstest_data_tab) @ + [Pretty.str "<<< Testenvironment <<<<<<<<<<<<<<<<<"] + end |> Pretty.chunks |> Pretty.writeln; + + +fun add_test_case (name,thm) + (Testenv {test_thm_tab, type_candicates, cod_term_tab, abstest_data_tab,test_thm_inst_tab, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules}) = + Testenv({test_thm_tab=Symtab.update(name,thm)test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, + abstest_data_tab=abstest_data_tab, + test_thm_inst_tab=test_thm_inst_tab, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_safe_tac=pre_safe_tac, + pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + +fun del_test_case name (Testenv {test_thm_tab, + type_candicates,cod_term_tab, abstest_data_tab, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + test_thm_inst_tab,pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, + test_derivation_rules}) = + Testenv({test_thm_tab=Symtab.delete_safe name test_thm_tab, type_candicates=type_candicates, + cod_term_tab=cod_term_tab, abstest_data_tab=abstest_data_tab, + test_thm_inst_tab=test_thm_inst_tab, pre_safe_tac=pre_safe_tac, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + + + +fun add_abstest_data (name,thm) + (Testenv {test_thm_tab, type_candicates, + cod_term_tab, abstest_data_tab,test_thm_inst_tab,pre_safe_tac, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_normalize_TNF_tac,pre_minimize_TNF_tac, + test_derivation_rules}) + = Testenv({test_thm_tab=test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, + abstest_data_tab=Symtab.cons_list (name,thm) abstest_data_tab, + test_thm_inst_tab=test_thm_inst_tab,pre_safe_tac=pre_safe_tac, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_normalize_TNF_tac=pre_normalize_TNF_tac, + 
pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + +fun del_abstest_data name + (Testenv {test_thm_tab, + type_candicates, + cod_term_tab,abstest_data_tab, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + test_thm_inst_tab,pre_safe_tac, + pre_normalize_TNF_tac,pre_minimize_TNF_tac, + test_derivation_rules}) + = Testenv({test_thm_tab=test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, + abstest_data_tab=Symtab.delete_safe name abstest_data_tab, + test_thm_inst_tab=test_thm_inst_tab,pre_safe_tac=pre_safe_tac, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + +fun get_test_thm_tab te = #test_thm_tab (rep_testenv te) +fun get_absdata_tab te = #abstest_data_tab(rep_testenv te) +fun get_test_thm_inst_tab te = #test_thm_inst_tab(rep_testenv te) + + + +fun get_test_data_tab te = #test_data_tab (rep_testenv te) +fun get_unsolved_PO_tab te = #unsolved_PO_tab (rep_testenv te) +fun get_test_thyps_tab te = #test_thyps_tab(rep_testenv te) + + +fun add_test_data_tab (name,ct) + (Testenv {test_thm_tab, type_candicates, cod_term_tab, abstest_data_tab,test_thm_inst_tab, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules}) = + Testenv({test_thm_tab=test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, + abstest_data_tab=abstest_data_tab, + test_thm_inst_tab=test_thm_inst_tab, + test_data_tab = Symtab.cons_list(name,ct) test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_safe_tac=pre_safe_tac, + pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + +fun add_unsolved_PO_tab (name,ct) + (Testenv {test_thm_tab, type_candicates, cod_term_tab, abstest_data_tab,test_thm_inst_tab, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules}) = + Testenv({test_thm_tab=test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, + abstest_data_tab=abstest_data_tab, + test_thm_inst_tab=test_thm_inst_tab, + test_data_tab = test_data_tab, + unsolved_PO_tab = Symtab.cons_list(name,ct) unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_safe_tac=pre_safe_tac, + pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + +fun add_test_thyps_tab (name,ct) + (Testenv {test_thm_tab, type_candicates, cod_term_tab, abstest_data_tab,test_thm_inst_tab, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules}) = + Testenv({test_thm_tab=test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, + abstest_data_tab=abstest_data_tab, + test_thm_inst_tab=test_thm_inst_tab, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = Symtab.cons_list(name,ct) test_thyps_tab, + pre_safe_tac=pre_safe_tac, + pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + +fun set_test_data_tab (name,ctl) + (Testenv {test_thm_tab, type_candicates, cod_term_tab, 
abstest_data_tab,test_thm_inst_tab, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules}) = + Testenv({test_thm_tab=test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, + abstest_data_tab=abstest_data_tab, + test_thm_inst_tab=test_thm_inst_tab, + test_data_tab = Symtab.update(name,ctl) test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_safe_tac=pre_safe_tac, + pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + +fun set_unsolved_PO_tab (name,ctl) + (Testenv {test_thm_tab, type_candicates, cod_term_tab, abstest_data_tab,test_thm_inst_tab, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules}) = + Testenv({test_thm_tab=test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, + abstest_data_tab=abstest_data_tab, + test_thm_inst_tab=test_thm_inst_tab, + test_data_tab = test_data_tab, + unsolved_PO_tab = Symtab.update(name,ctl) unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_safe_tac=pre_safe_tac, + pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + +fun set_test_thyps_tab (name,ctl) + (Testenv {test_thm_tab, type_candicates, cod_term_tab, abstest_data_tab,test_thm_inst_tab, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules}) = + Testenv({test_thm_tab=test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, + abstest_data_tab=abstest_data_tab, + test_thm_inst_tab=test_thm_inst_tab, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = Symtab.update(name,ctl) test_thyps_tab, + pre_safe_tac=pre_safe_tac, + pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + +(* In the sequel we also use the term "test judgement" synonymously for "test data statement". 
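+
+   An illustrative contrast between the add_*_tab and set_*_tab functions
+   above (the suite name and the cterms ct1..ct3 are hypothetical):
+   add_test_data_tab conses a single entry onto the list stored under a key,
+   whereas set_test_data_tab replaces that list wholesale.
+
+       val te1 = add_test_data_tab ("suite", ct1) te0        yields [ct1]
+       val te2 = add_test_data_tab ("suite", ct2) te1        yields [ct2, ct1]
+       val te3 = set_test_data_tab ("suite", [ct3]) te2      yields [ct3]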
*) + +fun test_thm_tab_update thm_tab + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, + abstest_data_tab=tdt, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + test_thm_inst_tab=tjt,pre_safe_tac=pst, + pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) = + (Testenv{test_thm_tab=thm_tab, + type_candicates=tc, + cod_term_tab=ctt, + abstest_data_tab=tdt, + test_thm_inst_tab=tjt,pre_safe_tac=pst, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}); + +fun test_thm_inst_tab_update jdmt_tab + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, + abstest_data_tab=tdt, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + test_thm_inst_tab=tjt,pre_safe_tac=pst, + pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) = + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, + abstest_data_tab=tdt, + test_thm_inst_tab=jdmt_tab, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_safe_tac=pst,pre_normalize_TNF_tac=pnt, + pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}); + + +fun type_candidates_update (type_candidates) + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, + abstest_data_tab=tdt, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + test_thm_inst_tab=tjt,pre_safe_tac=pst, + pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) + =(Testenv{test_thm_tab=tttab,type_candicates=type_candidates, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_thm_inst_tab=tjt,pre_safe_tac=pst, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}); + +fun pre_safe_tac_update (pre_safe_tat) + (Testenv{test_thm_tab=tttab, type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_thm_inst_tab=tjt,pre_safe_tac=pst, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) + =(Testenv{test_thm_tab=tttab,type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_thm_inst_tab=tjt, pre_safe_tac=pre_safe_tat, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}); + + +fun pre_normalize_TNF_tac_update (pre_normalize_TNF_tac) + (Testenv{test_thm_tab=tttab, type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_thm_inst_tab=tjt,pre_safe_tac=pst, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) + = (Testenv{test_thm_tab=tttab, type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_thm_inst_tab=tjt, pre_safe_tac=pst, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}); + + +fun pre_minimize_TNF_tac_update (pre_minimize_TNF_tac) + (Testenv{test_thm_tab=tttab, type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_thm_inst_tab=tjt, pre_safe_tac=pst, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + 
pre_normalize_TNF_tac=pnt, pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) + =(Testenv{test_thm_tab=tttab, type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_thm_inst_tab=tjt, pre_safe_tac=pst,pre_normalize_TNF_tac=pnt, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = tdr}); + + + + +fun get_pre_safe_tac (Testenv{pre_safe_tac=pst, ...}) = pst; +fun get_pre_normalize_TNF_tac (Testenv{pre_normalize_TNF_tac=pnt, ...}) = pnt; +fun get_pre_minimize_TNF_tac (Testenv{pre_minimize_TNF_tac=pmt, ...}) = pmt; + +fun get_test_derivation_rules (Testenv{test_derivation_rules = tdr, ...}) = map (op !) tdr; + +fun get_test_data ctxt name = let val Testenv {test_thm_inst_tab,...} = (get_testenv ctxt) + in Symtab.lookup test_thm_inst_tab name end + +fun add_test_derivation_rule (tdr) + (Testenv{test_thm_tab=tttab, type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_thm_inst_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_minimize_TNF_tac=pmt, test_derivation_rules = tdrs}) + = (Testenv{test_thm_tab=tttab, type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_thm_inst_tab=tjt, pre_safe_tac=pst, pre_normalize_TNF_tac=pnt, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_minimize_TNF_tac=pmt, + (* OH MY GOD --- WHAT CRAP IS THIS >>>>> bu *) + test_derivation_rules = (Unsynchronized.ref tdr)::tdrs}); + +(* module setup *) + + + +fun print_test_data_generic f ctxt name index_opt = + let val ctrms = case (Symtab.lookup (f (get_testenv ctxt)) name) of + SOME k => k + | NONE => error "No such test suite" + val print_cterm = Pretty.string_of o (Syntax.pretty_term ctxt) o Thm.term_of + in case index_opt of + NONE => String.concatWith "\n" (map print_cterm ctrms) + |SOME ind => print_cterm (nth ctrms ind) + end + + +val print_test_data = print_test_data_generic get_test_data_tab; +val print_test_case = print_test_data_generic get_test_data_tab; (* temporary hack by bu - + should be own implem with own + table*) + +val print_test_hyps = print_test_data_generic get_test_thyps_tab; + +val print_unsolved_pos = print_test_data_generic get_unsolved_PO_tab; + + +val setup = + prelude_setup #> + wrapper_setup #> + toString_setup #> + setup_code_setup #> + dataconv_code_setup #> + depth_setup #> + steps_setup #> + bound_setup #> + case_steps_setup #> + iterations_setup #> + SMT_setup #> + SMT2_setup #> + pon_setup #> + smt_model_setup #> + use_metis_setup #> + use_smt_setup #> + TestGen_SMT.setup; + +end (*struct *); + +*} + +end diff --git a/src/test-gen/src/test/TestGen.thy b/src/test-gen/src/test/TestGen.thy new file mode 100644 index 0000000..6f5bd1e --- /dev/null +++ b/src/test-gen/src/test/TestGen.thy @@ -0,0 +1,1704 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * TestGen.thy --- the core of the HOL-TestGen System. + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2010 ETH Zurich, Switzerland + * 2008-2015 Achim D. Brucker, Germany + * 2009-2017 Universite Paris-Sud, France + * 2015-2017 The University of Sheffield, UK + * + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id:$ *) + +chapter {* The TestGen Core*} + +theory TestGen +imports + Complex_Main + Term_Tactics (*ML*) + clocks (*ML*) + log (* ML *) + TestEnv (* ML *) + BackendUtils + RandomBackend + QuickCheckBackend + SMTBackend + + +keywords "mk_test_suite" :: "qed_global" + and "test_spec" :: "thy_goal" + and "gen_test_data" :: "thy_decl_block" (*"thy_script"*) (* really ? *) + and "print_testenv" :: diag + and "print_conc_tests" :: diag + and "print_abs_tests" :: diag + and "print_thyps" :: diag + and "print_upos" :: diag + and "set_pre_safe_tac" :: diag + and "set_pre_normalize_TNF_tac" :: diag + and "set_pre_minimize_TNF_tac" :: diag +begin + + +section{*General Framework for Unit-Test*} + + +definition PO :: "bool \ bool" + where "PO x = x" + +lemma PO_norm1 [simp]: "PO(PO A) = PO A" by(simp add: PO_def) +lemma PO_norm2 [simp]: "(\ PO A) = (PO(\ A))" by(simp add: PO_def) +lemma PO_norm3 [simp]: "(A \ PO B) = (PO(A \ B))" by(simp add: PO_def) +lemma PO_norm4 [simp]: "(PO A \ B) = (PO(A \ B))" by(simp add: PO_def) +lemma PO_norm5 [simp]: "(A \ PO B) = (PO(A \ B))" by(simp add: PO_def) +lemma PO_norm6 [simp]: "(PO A \ B) = (PO(A \ B))" by(simp add: PO_def) +lemma PO_norm7 [simp]: "(A \ (PO B)) = (PO(A \ B))" by(simp add: PO_def) +lemma PO_norm8 [simp]: "((PO A) \ B) = (PO(A \ B))" by(simp add: PO_def) +lemma PO_norm9 [simp]: "(A = (PO B)) = (PO(A = B))" by(simp add: PO_def) +lemma PO_norm10 [simp]: "((PO A) = B) = (PO(A = B))" by(simp add: PO_def) +lemma PO_norm11 [simp]: "(\x. (PO (A x))) = (PO(\x. A x))" by(simp add: PO_def) +lemma PO_norm12 [simp]: "(\x. (PO (A x))) = (PO(\x. 
A x))" by(simp add: PO_def) + +lemma PO_I : "(PO P) \ Q \ P \ Q" by(simp add: PO_def) (* internal use only *) +lemma PO_rev[elim!]: "\ PO False \ PO False \ False" by auto +lemma PO_grow : "PO(P \ Q) \ R \ (PO P) \ (Q \ R)" by(simp add: PO_def) (* internal use only *) + + +definition THYP :: "bool \ bool" + where "THYP x = x" + +lemma THYP_triv [simp]: "THYP True = True" by(simp add: THYP_def) + +lemma THYP_I : "A \ THYP A" by(simp add: THYP_def) + +lemma THYP_D : "THYP A \ A" by(simp add: THYP_def) + +lemma THYP_E : "\ THYP A; (A \ R) \ \ R" by(simp add: THYP_def) + +lemma THYP_spec : "THYP(\x. P x) \ THYP(P x)" by(simp add: THYP_def) + +lemma THYP_app1 : "\ THYP(A \ B); A \ \ B" by(simp add: THYP_def) + (* to be used with etac in order to apply uniformity hyp *) + +lemma THYP_app1_rev : "\ A; THYP(A \ B) \ \ B" by(simp add: THYP_def) + (* to be used with etac in order to apply uniformity hyp *) + +lemma THYP_app2 : "\ THYP(A); A \ B \ \ B" by(simp add: THYP_def) + (* to be used with RS in order to apply regularity hyp *) + + +lemma THYP_E_reg : "\ size x \ k \ B; THYP((k < size x) \ B)\ \ B" + by(simp add: THYP_def, auto) + (* to be used with rtac in order to introduce regularity hyp *) + + +definition RSF :: "bool" + where "RSF = True" -- {* just to denote random solver failures *} + +lemma RSF_E : "\ RSF \ P \ \ P" + by(auto simp: RSF_def) + + +section {* Tool Setup *} + + +ML{* +infix 1 |$> + +infix 6 DOWNTO + +infix 0 COLLECT_SS + +signature TESTGEN = +sig + val mt_testenv : TestEnv.testenv + + val get_test_thm : theory -> Symtab.key -> thm list + + val trace : bool Config.T + (* switches on debug information *) + + val no_uniformity : bool Config.T + (* switches off uniformity generation *) + + val profiling : bool Config.T + (* switches on timer profiler *) + + val no_finalize : bool Config.T + (* don't generate constraints (POs) *) + + val is_neither_thyp_nor_PO : term -> bool + + val |$> : thm * tactic -> thm + val TRY' : (int -> tactic) -> int -> tactic + val COND' : (int -> thm -> bool) -> (int -> tactic) -> (int -> tactic) -> int -> tactic + val DOWNTO : int * int -> (int -> tactic) -> tactic + val DEPTH_SPLIT : (int -> tactic) -> int -> tactic + val REPEAT' : ('a -> tactic) -> 'a -> tactic + val ALLCASES : (int -> tactic) -> tactic + val to_TNF : Proof.context -> string list -> int -> thm -> thm Seq.seq + val uniformityI_tac : Proof.context -> string list -> int -> tactic + val CLOSURE : (int -> tactic) -> int -> thm -> thm Seq.seq + val case_tac_typ : Proof.context -> string list -> int -> thm -> thm Seq.seq + + val ONLY_POS : (int -> tactic) -> int -> tactic + + val finalize_tac : Proof.context -> string list -> tactic + + val abs_data_tac : Proof.context -> thm list -> int -> tactic + + val mp_fy : Proof.context -> int -> tactic + val all_ify : Proof.context -> string list -> int -> tactic + val thyp_ify : Proof.context -> int -> tactic + + val var_hyp_subst_tac : Proof.context -> int -> tactic + + val gen_test_case_tac : Proof.context -> string list -> tactic + (* "gen_test_case_tac ctxt prognames" is a + tactic that converts all subgoals of current proof state into + test-normal form - i.e. a conjunctive normal form + (instead of DNF), where the conclusions have the + form "P(progname x1 .. xn)", where progname does + not occur in the premisses (the "constraints") + of the test. As a prerequisite, a data-separation + theorem of depth k is resolved for up to >steps< number + of free variables in the current proof-state. 
+ + NOTE: At present, the generation of data-separation + lemmas is limited to "real" free variables and will + not work for parameters of a test specification. Further, + the proof-state is assumed to have only one goal. + *) + + (* Included for complete backward compatibility *) + val gen_test_case_old_tac : Proof.context -> string list -> tactic + + val gen_test_data : string -> Context.generic -> Context.generic + (* "gen_test_data name context" constructs ground type instances of + test theorems associated to "name". In a further step, + "abstract" or local test data were resolved into the resulting test + theorems (if available in the testenv under "name"), remaining + meta variables were instantiated by a "random_solver" + that attempts to generate an instance and tries to solve + the constraints of a goal up to "iterations" times. + + The resulting simplified and "solved" test data theorems + were stored again in the test environment. + *) + + (* TODO: remove these three functions *) + val get_test_data : theory -> string -> thm list + (* get_test_data thy name extracts the test judgements + (i.e. the premisses of test data statement, that are not + test hypothesis or proof obligation.). *) + + val get_test_hyps : theory -> string -> thm list + (* get_test_hyps thy name extracts the test hypothesis + of a !!test data statement!!, not a test theorem. *) + + val get_pos : theory -> string -> thm list + (* extract remaining constraints (proof obligations) *) + + val animate : theory -> string -> unit + (* ground test data statements may be animated, if "P(progname x1 .. xn)" + is executable. "animate" presents test data statements by + unfolding "progname x1 .. xn" and simplifying on a stepwise + basis. (* for the moment, it's just asm_simp_tac with the + standard evaluation trace, in the future its more or less + the same except with slightly improved output *) + *) + val discharge_POs : string -> Proof.context -> tactic + + val setup: theory -> theory +end +*} + + +ML{* + +structure TestGen : TESTGEN = +struct + +open Thm; +open HOLogic; +open BNF_LFP_Compat ; +open Old_Datatype_Aux +(* open TestEnv; *) +(* open Clocks; *) +(* open BNF_LFP_Compat *) +open Int; +open List; +open Ctr_Sugar_Util ; (* rtac dtac etc. *) +open Rule_Insts; (* res_inst_tac etc *) + + + +val (trace, trace_setup) = Attrib.config_bool @{binding testgen_trace} (K false); +val (no_uniformity, no_uniformity_setup) = Attrib.config_bool @{binding no_uniformity} (K false); +val (profiling, profiling_setup) = Attrib.config_bool @{binding testgen_profiling} (K false); +val (no_finalize, no_finalize_setup) = Attrib.config_bool @{binding testgen_no_finalize} (K false); + + +(* ************************************************************************* *) +(* access to datatype package ... *) +(* ************************************************************************* *) + +fun isDataType thy (Type(tn,_)) = is_some (BNF_LFP_Compat.get_info thy [] tn) (* CHECK ME: empty preference list ok ? *) + |isDataType _ _ = false; + + +fun get_case_split_of_dt thy type_name = #exhaust (BNF_LFP_Compat.the_info thy [] type_name); + +fun get_nchotomy_of thy type_name = #nchotomy (BNF_LFP_Compat.the_info thy [] type_name); + + +(* ************************************************************************* *) +(* access to record package ... 
*) +(* ************************************************************************* *) + +fun isRecordType thy (Type(tn,_)) = is_some (Record.get_info thy tn) + |isRecordType _ _ = false; + +fun get_case_split_of_record thy type_name = #cases(the (Record.get_info thy type_name)); + + +(* ************************************************************************* *) +(* term computations ... *) +(* ************************************************************************* *) + +fun max_depth (Free h) = (0,[h]) + |max_depth (Const h)= (0,[]) + |max_depth (Abs(s,tt,t)) = max_depth t + |max_depth (s $ t) = let val (k,S) = max_depth (s) + val (k',S') = max_depth (t) + in if k > k'+1 then (k,S) + else if k = k'+1 then (k, S @ S') + else (k'+1, S') + end + |max_depth (_) = (0,[]); + +(* relies on definitions in Testing.thy ... *) +val is_thyp = exists_Const (fn (@{const_name THYP},_) => true | _ => false) + +fun is_po (Const (@{const_name Trueprop},_) $ (Const (@{const_name PO},_) $ _)) = true + |is_po _ = false; + +fun is_neither_thyp_nor_PO t = (not (is_thyp t)) andalso (not (is_po t)) + +fun is_test_data t = (is_neither_thyp_nor_PO t) andalso not (exists_subterm is_Var t) + +fun dest_TruepropAll (Const ("all", _) $ Abs(s,ty, t)) = + (Const (@{const_name "HOL.All"}, (ty --> HOLogic.boolT) --> HOLogic.boolT) + $ Abs(s,ty , dest_TruepropAll t)) + |dest_TruepropAll t = HOLogic.dest_Trueprop t; + + +fun exists_prog prog = Term.exists_subterm + (fn Free (x, _) => member (op =) prog x + | Const (c, _) => member (op =) prog (Long_Name.base_name c) (* FIXME proper internal name!? *) + | _ => false); + +(* complex type instantiations ... *) + +fun groundT_thm context bound candTs thm = + let val ctxt = Context.proof_of context + val thy = Proof_Context.theory_of ctxt + val thm = Thm.varifyT_global thm + val tvars = Term.add_tvars (list_comb(Thm.concl_of thm,Thm.prems_of thm)) [] + fun ctyp_of_sort x so t = if Sign.of_sort thy (t,so) + then (SOME((x,so), Thm.ctyp_of ctxt t) + handle _ => NONE) (* FIXME avoid handle _ *) + else NONE + fun prodS [] _ = [] + |prodS (a::R) B = (map(fn x => a::x) B) @ (prodS R B) + + fun compute_instsT [] = [[]] + |compute_instsT ((x,so)::R) = prodS (List.mapPartial (ctyp_of_sort x so) candTs) + (compute_instsT R) + in + map (fn x => Drule.instantiate_normalize (x,[]) thm) + (Library.take bound (compute_instsT tvars)) + end; + +(* test + + val thm' = groundT_thm thy + (#type_range_bound(rep_testenv te)) + (#type_candicates(rep_testenv te)) + thm + + *) + + + +(* *********************************************************************** *) +(* *) +(* SOME more universal tacticals -> tctical.ML *) +(* *) +(* *********************************************************************** *) + +fun thm |$> tac = + case Seq.pull (tac thm) of + SOME (m,_) => m + |_ => error"|$> : TACTIC FAILED" + +fun TRY' tac n = TRY(tac n) (* standard combinator, + not implemented so far in Isabelle *) + +fun REPEAT' tac n = REPEAT (tac n) + +fun COND' testfun thenf elsef x prf = + if testfun x prf then thenf x prf else elsef x prf; + + +fun (k DOWNTO (n:int)) = fn tac => (fn st => + let fun doall x = if x < n + then all_tac + else ((tac x) THEN doall(x-1)) + val m = Thm.nprems_of st + in if k <= 0 orelse k > m + then all_tac st + else doall k st + end) + + +fun DEPTH_SPLIT tac n st = +(* PRE tac should only exhausting - i.e. increase + the number of subgoals finitely many times + POST if tac never fails, DEPTH_SPLIT tac will not fail. 
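+
+   Worked example of the index arithmetic in the body below: with m = 5
+   subgoals and n = 2, if tac splits subgoal 2 into three new subgoals then
+   m' = 7, the freshly created subgoals occupy positions n .. n + m' - m,
+   i.e. 2 .. 4, and (4 DOWNTO 2) (DEPTH_SPLIT tac) recurses on each of them
+   from the top down, so that the positions of the subgoals still to be
+   processed are unaffected by further splitting.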
+ *) + let val m = Thm.nprems_of st + val res = st |$> (tac n) + val m' = Thm.nprems_of res + in if m' > m + then ((n + m' - m) DOWNTO n)(DEPTH_SPLIT tac) (res) + else all_tac res + end + +fun SOLVE_ASMS ctxt tac n state = + let val goal = Logic.get_goal (Thm.prop_of state) n + val params = Logic.strip_params goal + val prems = map HOLogic.dest_Trueprop (rev(Logic.strip_imp_prems goal)) + val eqTrueI2 = @{lemma "[|P;P |] ==> True" by auto} + fun decomp thm n = let val mm = (thm RS conjunct1) + in (dtac ctxt (mm RS eqTrueI2) n) THEN + (etac ctxt @{thm TrueE} n) THEN + (decomp (thm RS conjunct2) n) end + handle THM _ => (dtac ctxt (thm RS eqTrueI2) n) THEN + (etac ctxt @{thm TrueE} n) + + in case prems of + [] => all_tac state + |a::r => let val prem = List.foldl HOLogic.mk_conj a r + val prem'= (Term.subst_bounds ((rev(map Free params)),prem)) + val thm = Goal.prove ctxt [] [] (HOLogic.mk_Trueprop prem') tac + in (decomp thm n) state end + handle _ => no_tac state (* could not solve asms ... *) (* FIXME avoid handle _ *) + end + + +fun auto_solver thms {prems: thm list, context: Proof.context} state = + let val thm' = state |$> (TRY(safe_tac (context delrules [notI]))) + val goals = Thm.nprems_of thm' + fun solve_thm thm = FIRSTGOAL (fn n => (rtac context thm n) THEN + (IF_UNSOLVED (SOLVE(auto_tac context)))) + fun solve_thms [] = no_tac + |solve_thms (a::R) = solve_thm a ORELSE solve_thms R + in if goals > 0 then (solve_thms thms) thm' else all_tac thm' + end + + +(* *********************************************************************** *) +(* *) +(* SOME Testing specific tacticals *) +(* *) +(* *********************************************************************** *) + +(* PRE n>0 and n <= nprems_of thm *) +fun is_THYP n thm = is_thyp(Logic.nth_prem(n, Thm.prop_of thm)) + +fun is_PO n thm = is_po(Logic.nth_prem(n, Thm.prop_of thm)) + +fun is_TEST n thm = is_neither_thyp_nor_PO(Logic.nth_prem(n, Thm.prop_of thm)) + +fun ONLY_CASES tac = COND' is_TEST tac (K all_tac) + +fun ALLCASES_before k tac = (k DOWNTO 1) (ONLY_CASES tac) + +fun ALLCASES tac thm = (ALLCASES_before (Thm.nprems_of thm) tac thm) + +fun ONLY_POS tac = COND' is_PO tac (K all_tac) + + +fun unfold ctxt rules = asm_full_simp_tac (put_simpset HOL_basic_ss ctxt addsimps rules) + +(* Optimization potential: safe_tac against something more + specialized should do ... *) +fun if_splitter ctxt cs = asm_full_simp_tac (fold Splitter.add_split @{thms if_splits} (put_simpset HOL_basic_ss ctxt)) + THEN' TRY' (fn n => SELECT_GOAL (safe_tac cs) n) + +(* *********************************************************************** *) +(* *) +(* basic tactics ... *) +(* *) +(* *********************************************************************** *) + +fun strip_tac ctxt i = REPEAT (resolve_tac ctxt [impI, allI] i); (* declared Legacy in 2013-2*) + +(* Pre : A1 ==> .. ==> An ==> An+1 + Post: A1 --> .. 
--> An --> An+1 *) +fun mp_fy ctxt n = (REPEAT(etac ctxt rev_mp n)); (* Version replaced by more efficient one *) +fun mp_fy ctxt n = (REPEAT(ematch_tac ctxt [rev_mp] n)); + +fun all_ify ctxt excpt n state = + let val subgoal = Logic.nth_prem(n, Thm.prop_of state) + val params = Logic.strip_params subgoal + val frees = Term.add_frees subgoal [] + fun sbs(n,t) = res_inst_tac ctxt [((("x",0), Position.none), n)] [] spec + val vars = filter_out (member (op =) excpt o fst) (params @ frees) + in (if null vars + then all_tac + else (List.foldr(fn(x,ta) => (sbs x n) THEN ta) (prune_params_tac ctxt) vars) + (* NEW PATCH rev 12197*) + THEN + all_ify ctxt excpt n (* sbs can fail to eliminate *all* variables + due to alpha-renaming of parameters in + res_inst_tac. This recursive call makes + all_ify exhaustive.*) + (* END NEW PATCH rev 12197*) + ) + (state) + end + + +(*FIXME Free's are actually local consts, and cannot be closed in general + without disrupting Isar locality*) +fun allify_frees_tac ctxt excpt = SUBGOAL (fn (subgoal, i) => + let val thy = Proof_Context.theory_of ctxt + val a = (("'a", 0), @{sort type}) + fun spec_tac (x, T) = + rtac ctxt (Thm.instantiate ([(a, Thm.ctyp_of ctxt T)], [((("x", 0), T), Thm.cterm_of ctxt (Free (x, T)))]) + (Thm.transfer thy @{thm spec})) i + val vars = filter_out (member (op =) excpt o fst) (Term.add_frees subgoal []) + in + if null vars then all_tac + else EVERY (map spec_tac vars) + end) + +fun thyp_ify ctxt n = + EVERY[mp_fy ctxt n, all_ify ctxt [] n, rtac ctxt (@{thm THYP_def} RS @{thm iffD1}) n] + +fun vars_to_frees t = let + val termvars = Term.add_vars t [] + val freenames = Term.add_free_names t [] + val varnames = map (fn ((s, a),_) => s^(Int.toString a)) termvars + val newfreenames = Name.variant_list freenames varnames + val insts = map (fn (x as (_, T), freename) => (Var x, Free (freename, T))) (termvars ~~ newfreenames) +in + Term.subst_atomic insts t +end + +fun convert_goals_to_metahyps (test:(term -> bool)) thm = + let val thy = Thm.theory_of_thm thm + in map ((fn prop => Thm.forall_elim_vars 0 (Thm.assume (Thm.global_cterm_of thy prop))) o vars_to_frees) + (filter test (Thm.prems_of thm)) + end; + + +fun instantiate_tac ctxt insts n thm = + let val goal = Thm.global_cterm_of (Thm.theory_of_thm thm) (Logic.nth_prem(n, Thm.prop_of thm)) + in (rtac ctxt (Drule.instantiate_normalize ([],insts)(Thm.trivial goal)) n +) thm end + +fun var_hyp_subst_tac ctxt no state = + let val prms = Logic.strip_imp_prems(Logic.nth_prem(no, Thm.prop_of state)); + fun is_eq (Const (@{const_name HOL.eq}, _) $ (Var _) $ _) = true + |is_eq (Const (@{const_name HOL.eq}, _) $ _ $ (Var _)) = true + |is_eq _ = false + val i = Library.find_index(fn (Const(@{const_name Trueprop},_)$X) => is_eq X) + (prms) + fun mks thm = if i >= 0 + then ((rotate_tac i no) THEN etac ctxt @{thm thin_refl} no) thm + else no_tac thm + in ((REPEAT mks) + THEN + (full_simp_tac ctxt no)) state + end; + + +(* Code seems to be superseeded by Goals.PARALLEL_GOALS in Isabelle2013-2 ... 
*) +(* parallel tactic application, without any sanity checks *) + +local exception FAILED of unit in + +fun PARALLEL tac st = + let + fun try_tac g = + (case SINGLE tac g of + NONE => raise FAILED () + | SOME g' => g'); + val goals = Drule.strip_imp_prems (Thm.cprop_of st); + val results = Par_List.map (try_tac o Goal.init) goals; + in all_tac st (* ALLGOALS (fn i => retrofit i 1 (nth results (i - 1))) st *) + end + handle FAILED () => Seq.empty; + +end; + + + +(* A specialized map-reduce pattern for constraint-solving in TestGen. The + PO's were extracted, attempts to their solution parallized, and the retrofit + of the results into the test-theorem is again sequential, since it instantiates + the meta-variables. *) +fun PARALLEL_TRYSOLVE_POs ctxt test_solve_tac st = + let + val pon = Config.get ctxt TestEnv.pon + fun filter_pos filter S = + let fun h _ _ [] = [] + | h f n (a::S) = if f a then (n,a)::(h f (n+1) S) else (h f (n+1) S) + fun h_pon _ _ _ [] = [] + | h_pon f n k (a::S) = if f a then (if k = pon then (n,a):: nil + else (h_pon f (n+1) (k+1) S)) + else (h_pon f (n+1) k S) + in if pon = 0 then h filter 1 S else h_pon filter 1 1 S end; + fun try_solve ct = ((Option.map (Goal.finish ctxt)) o + (SINGLE (test_solve_tac ctxt))) + (Goal.init ct); + val goals = Drule.strip_imp_prems (Thm.cprop_of st); + val po_trms = (filter_pos (is_po o Thm.term_of) goals); + val jdmts = Par_List.map (fn(n,ct) => (n, try_solve ct)) po_trms + + in Seq.succeed(foldr (fn ((k, SOME a),thm) => (a RSN (k, thm)) + |((_, NONE),thm) => thm) st jdmts) + end + + (* val PARALLEL = PARALLEL_GOALS *) (* Experiment. See above. bu *) + + +(* applies exhaustively tactic tac ctxt i on subgoal i and all its descendants. + tac can solve current goal i. + PRE: tac must eventually terminate, also in all descendants ( otherwise CLOSURE loops). + PRE: current goal i must be in {1 .. nprems_of st}. + PRE: ctxt must be the ProofContext of st. 
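+   A hedged usage sketch (Isabelle/ML; the choice of asm_full_simp_tac is
+   arbitrary, any terminating tactic of type int -> tactic will do):
+
+       val st' = case Seq.pull (CLOSURE (asm_full_simp_tac ctxt) 1 st) of
+                   SOME (st'', _) => st''
+                 | NONE => st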
+*) +fun CLOSURE tac i st = + let fun closure i k st = if i > k then all_tac st + else let val m = Thm.nprems_of st + in case SINGLE (tac i) st of + NONE => closure (i+1) k st + | SOME st' => let val m' = Thm.nprems_of st' + in if m' = m-1 (* tac eliminated subgoal *) + then closure i (k-1) st' + else closure i (k+(m'-m)) st' + (* superfluous case distinct *) + end + end + in closure i i st end + + + +(* *********************************************************************** *) +(* *) +(* special tactics for TestGen *) +(* *) +(* *********************************************************************** *) + +fun cpat ctxt str = Thm.cterm_of ctxt (Proof_Context.read_term_pattern ctxt str); + +(*PRE : Must not be THYP or PO *) +(* Known Bug: does not work for arbirary mixtures between HOL and Pure Quantifications *) +fun uniformityI_tac ctxt prog = + (if Config.get ctxt no_uniformity + then (fn _ => all_tac) + else SELECT_GOAL + let + val cert = Thm.cterm_of ctxt; + + val core_tac = + SUBGOAL (fn (term, i) => + let fun conv_All (Const (@{const_name All}, ty) $ Abs(n, ty', tt)) = + (Const (@{const_name Ex}, ty) $ Abs (n, ty', conv_All tt)) + |conv_All (Const (@{const_name HOL.implies}, _) $ P $ Q) = + (Const (@{const_name HOL.conj}, [boolT,boolT]--->boolT) $ P $ conv_All Q) + | conv_All tt = tt; + (* val tt = dest_TruepropAll term; *) + val tt = dest_Trueprop term; + val thyp = mk_Trueprop (@{const THYP} $ (@{const HOL.implies} $ conv_All tt $ tt)); + in rtac ctxt (Thm.instantiate ([], [((("psi",0),propT), cert thyp)]) cut_rl) i end); + + val x = cpat ctxt "?x::?'a"; + val T = Thm.typ_of_cterm x; + val count = Unsynchronized.ref 0; + fun new_var () = cert (Var (("?X" ^ Int.toString (Unsynchronized.inc count) ^ "X", 0), T)); + fun exI_tac i st = rtac ctxt (Thm.instantiate ([], [((("x",0),T), new_var ())]) @{thm exI}) i st; + fun check_tac' (t,i) = case t of + @{term Trueprop} $ t' => check_tac' (t',i) + | @{term HOL.conj} $ u $ _ => if exists_prog prog u then + no_tac + else all_tac + + val check_tac = SUBGOAL check_tac' + val intro_PO = (rtac ctxt @{thm PO_I} 1) + THEN (check_tac 1) + THEN REPEAT_DETERM ((rtac ctxt @{thm PO_grow} 1) + THEN (check_tac 1)) + + val finish_tac = (intro_PO THEN (rtac ctxt @{thm "conjI"} 1) THEN (strip_tac ctxt 2)) ORELSE (strip_tac ctxt 1) + in + EVERY [mp_fy ctxt 1, all_ify ctxt prog 1, + core_tac 1, etac ctxt @{thm THYP_app1} 1, + (REPEAT_DETERM o exI_tac) 1, + finish_tac] + end) + + +(* ************************************************************************* *) +(* data separation lemma generation ... *) +(* ************************************************************************* *) + + +fun refine_data_separation_lemma changed ctxt bal prem_no cands thm = +(* - bal controls of the refinement is in balancing mode + (only parameters whose occurrence in the term is + has strictly less than maximum height) + or not. + - prem_no indicates the number of the premise + - cands is the global list of candidates. + In balancing mode, parameters in maximal depth + position were eliminated. 
+ - PRE maxidx is 0 for all mvars + - POST maxidx is 0 for all mvars + + *) +let + val thy = Proof_Context.theory_of ctxt + val prem = Logic.nth_prem(prem_no, Thm.prop_of thm) + val params = Logic.strip_params(prem) + val cands' = if bal then + let val term = HOLogic.dest_Trueprop(hd + (Logic.strip_assums_hyp prem)) + val (k,mp) = max_depth(Term.subst_bounds + ((rev(map Free params)),term)) + in filter(fn p => forall(fn p'=> not(p=p')) (mp)) cands end + else cands; + + val var_no = find_index (fn(s,t)=> exists(fn (name,ty)=> + s = name andalso t = ty) cands') + params + val (name,ty) = if var_no > ~1 then List.nth(params,var_no) + else raise (ERROR("no param found")) + val Type(tn,_) = ty; + val m = get_case_split_of_dt thy tn; + val n = m RSN (prem_no, thm); + (* pre: all occurrences of ?indname t1 ... tn have the same + arguments and types and occur in a critical pattern : + ?indname t1 ... tn = C x1 ... xn + ... + The Thm.adjust_maxidx_thm reflects the fact, that only one + MVar y should have been left - the others are erased due to + hypsubst internally + *) + fun find_mvar_pat indname goal_no thm = + let val goal = Logic.get_goal thm goal_no + val params = Logic.strip_params goal + fun cc(Const(@{const_name Pure.imp}, _) + $ (Const(@{const_name Trueprop}, _) + $ (Const(@{const_name HOL.eq},t) $ A $ B)) + $ C) = (case strip_comb(subst_bounds( + rev(map Free params), A)) of + (Var(s,_),S) =>(if s = indname + then (map dest_Free S, head_of B) + else cc(C)) + | (_, _) => cc C) + | cc _ = error("find_mvar_pat: No pattern found") + in cc (strip_all_body goal) end + fun lifter name S = + let fun ll k [] = if 0 <=k then Bound(k) + else error"find_mvar:impossible lifter" + |ll k ((s,t)::R) = + if s = name then Abs(s,t, ll 0 R) + else Abs(s,t, ll (k+1) R) + in ll (~(length S)) S end + fun typ_of_lifter (name:string) S = + let fun tll S [] = error"find_mvar:impossible type for lifter" + |tll S ((s,t)::R) = + if name = s + then ((rev S)@(t::(map snd R)), t) + else tll (t::S) R + in (op --->)(tll [] S) end + val (lifter_params, constr) = find_mvar_pat ("y",1) prem_no (Thm.prop_of n) + (* ("y",1) is a fact from the invariant maxidx=0 (see below) *) + val subst0 = lifter name lifter_params + val ty_lift = fastype_of subst0 + val ty_constr = fastype_of constr + val subst = Thm.cterm_of ctxt subst0 + val ty_lift_body = body_type ty_lift + val ty_lift_constr = body_type ty_constr + val tyenv = Sign.typ_match thy (ty_lift_constr,ty_lift_body) Vartab.empty + handle Type.TYPE_MATCH => Vartab.empty + (* Hack that works for cases, where non-recursive positions in a type + were GROUND types (like color in RBT_tree). *) + fun tysubs2ctyps(x,(s,t)) = ((x,s),Thm.ctyp_of ctxt t) (*(Thm.ctyp_of ctxt (TVar(x,s)),Thm.ctyp_of ctxt t) *) + val tsubst = map tysubs2ctyps (Vartab.dest tyenv) + val () = (changed:=true) + val var = (("y",1),ty_lift) + val result = (Drule.instantiate_normalize (tsubst, [(var,subst)]) n) + |$> (ALLGOALS(fn n => TRY(hyp_subst_tac_thin true ctxt n))) + |$> prune_params_tac ctxt +in Thm.adjust_maxidx_thm 0 result + (* establishes POST and makes this fact therefore invariant. *) + (* More safe, but also more expensive variant: standard result. *) +end + +(* This is the core procedure of the data_separation_lemma generation, and, + in a way, the core of the test case generator itself. + + It proceeds as follows: in a given theorem thm (usually an exhaustion lemma + itself), the premisses (having the form !!x1..xn. A x1 .. xn ==> B in + general) were selected and "refined" in a "sweep". 
This means that in a + sweep, for each premisse an xi is selected (which must have a data-type, + say ti) and the exhaustion theorem of ti is forward resolved into this + premisse; cleaning it up (by prems_hyp_subst_tac) also erases xi as + parameter of this premisse. + In a next sweep, another xj may be selected and so forth, up to the moment + were no parameters remained in the premisse that existed before the initial + sweep (this the list parameters is the "cand" (candidates) x1 .. xj for + all premisses which precomputed.) + + Then, the candidate list may be recomputed and the process repeated up to + n times. Since any group of sweeps erases one generation of parameters, + the parameter n corresponds to "depth+1" of the data term occuring in the + premisse increases, i.e. from: + + [| ?y = Leaf ==> ?P; + !!a tree1 tree2. ?y = Node a tree1 tree2 ==> ?P |] + ==> ?P + + we get: + + [| ?y = Leaf ==> ?P; !!a. ?y = Node a Leaf Leaf ==> ?P; + !!a aa tree1 tree2a. ?y = Node a Leaf (Node aa tree1 tree2a) ==> ?P; + !!a aa tree1a tree2a. ?y = Node a (Node aa tree1a tree2a) Leaf ==> ?P; + !!a aa tree1a tree2a ab tree1 tree2b. + ?y = Node a (Node aa tree1a tree2a) (Node ab tree1 tree2b) ==> ?P + |] ==> ?P + + This picture is slightly complicated by the fact that Isabelle does not + change the parameters, but may reintroduce parameter erased by a previous + sweep during the forward resolution step. Thus, the procedure may loop. + + Instead of not erasing the parameters during lemma-refinement (this + leads to very large liftings of the meta-variables over these + parameter lists and is thus expensive), we chose to use + a particular test for "maximal" parameters in order to avoid this problem. + + *) +fun nrefine_data_separation_lemma changed ctxt 0 thm = thm + | nrefine_data_separation_lemma changed ctxt n thm = + let + val thy = Proof_Context.theory_of ctxt + val isaDataType = fn(_,ty)=> isDataType thy ty + val thm_params = maps Logic.strip_params (Thm.prems_of thm) + val cands = distinct (op=) (filter isaDataType thm_params) + fun rds b(prno,t) = refine_data_separation_lemma changed ctxt b prno cands t + handle _ => t (* FIXME avoid handle _ *) + val () = (changed:=false) + fun sweep b th = List.foldr (rds b) th (1 upto (length(Thm.prems_of th))) + val thm1 = sweep false thm (* debalancing phase *) + fun repeat_till_stable thm = let val () = (changed:=false) + val thm' = sweep true thm + in if !changed + then repeat_till_stable thm' + else thm' + end + in nrefine_data_separation_lemma changed ctxt + (n-1) + (if !changed + then thm1 (* debalancing phase had no effect *) + else repeat_till_stable thm1) (* balancing phase *) + end; + +(* NOTE: Currently, this core routine is restricted to + "real free" variables; parameters will not be + considered as a simple means to avoid looping ... 
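+
+   In data_separation_tac below, the configured depth (testgen_depth,
+   default 3) both bounds the refinement described above and instantiates
+   k in THYP_E_reg, so data terms deeper than this bound are not enumerated
+   but recorded as a regularity test hypothesis THYP(depth < size x --> _).
+   The bound can be adjusted per theory or proof in Isar, e.g.
+
+       declare [[testgen_depth = 5]]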
*) +fun data_separation_tac ctxt var no state = + let + val thy = Proof_Context.theory_of ctxt + val depth = Config.get ctxt TestEnv.depth + val (na,Type(tn,_)) = var + fun ds_thm tn = nrefine_data_separation_lemma (Unsynchronized.ref false) + ctxt depth + (get_case_split_of_dt thy tn) + + val data_sep_thm = Drule.export_without_context (ds_thm tn) + val brchs = length(Thm.prems_of data_sep_thm) + in ((res_inst_tac ctxt [((("y",0),Position.none),na)] [] data_sep_thm no) + THEN + (res_inst_tac ctxt [((("x",0),Position.none),na), + ((("k",0),Position.none),Int.toString depth)] + [] + @{thm THYP_E_reg} + (no + brchs - 1)) + THEN (thin_tac ctxt (na^"= _") [] (no+brchs))) state + end; + +(* Copied from Isabelle2011 old_term.ML sources, for providing old variable order *) +(*Accumulates the Frees in the term, suppressing duplicates.*) +fun add_term_frees (t, frees: term list) = case t of + Free _ => Ord_List.insert Term_Ord.term_ord t frees + | Abs (_,_,body) => add_term_frees(body,frees) + | f$t => add_term_frees (f, add_term_frees(t, frees)) + | _ => frees; + +fun term_frees t = add_term_frees(t,[]); + +(* Included for complete backward compatibility *) +fun data_separation_old_tac ctxt depth no state = + let + val thy = Proof_Context.theory_of ctxt + val prem = Logic.nth_prem(no, Thm.prop_of state) + val params = Logic.strip_params(prem) + val term = HOLogic.dest_Trueprop(Logic.strip_assums_concl prem) + val frees = map dest_Free (term_frees term) +(* val cands = filter (fn (_,ty) => isDataType thy ty) (params@frees) *) + val cands = filter (fn (_,ty) => isDataType thy ty) (frees) + fun ds_thm tn = + nrefine_data_separation_lemma (Unsynchronized.ref false) ctxt depth (get_case_split_of_dt thy tn) + in (case cands of + [] => no_tac + | (na,Type(tn,_))::_ => + let val data_sep_thm = Drule.export_without_context (ds_thm tn) + val brchs = length(Thm.prems_of data_sep_thm) + in (res_inst_tac ctxt [((("y",0),Position.none),na)] [] data_sep_thm no) + THEN (res_inst_tac ctxt [((("x",0),Position.none),na), + ((("k",0),Position.none),Int.toString depth)] [] + @{thm THYP_E_reg} + (no + brchs - 1)) + THEN (thin_tac ctxt (na^"= _") [] (no+brchs)) + end) + (state) +end + + +(* "Small", non-recursive Version of gen_test_cases: + (TODO for future releases: Integration into the testgen-Kernel). + searches parameter in goal no with typename tname and performs case-split on this + parameter. Note that the type-name must be the full name. + PRE: current goal i must be in {1 .. nprems_of st}. + PRE: ctxt must be the ProofContext of st. *) + (* op member : ('a * 'b -> bool) -> 'b list -> 'a -> bool *) + + fun case_tac_typ ctxt tnames no thm = + let + val prem = Logic.nth_prem(no, Thm.prop_of thm) + val params = Logic.strip_params(prem) + val max = Thm.nprems_of thm + val params = params (* @ free variables TODO !!! *) + fun ty_search (_,Type(s,_)) = member (op =) (tnames) (s) + |ty_search (_,_) = false + fun REP tac st = let val max' = Thm.nprems_of st + in (max'-max+no DOWNTO no) tac st end + in case List.find ty_search params of + SOME (X,_) => EVERY[Induct_Tacs.case_tac ctxt X [] NONE no , + REP (hyp_subst_tac_thin true ctxt), + unfold_tac ctxt [@{thm triv_forall_equality}] ] thm + (* probably not the most efficient - try simp only *) + | NONE => no_tac thm + end; + + + +(* ************************************************************************* *) +(* normal form computation ... 
*) +(* *************************** ********************************************** *) + +fun to_TNF ctxt prog no state = + let + val thy = Proof_Context.theory_of ctxt; + val term = Thm.prop_of state + val prems = Logic.prems_of_goal term no + val concl = Logic.concl_of_goal term no + fun term_string_of_goal_no () = (Pretty.unformatted_string_of o (Syntax.pretty_term ctxt)) + (Logic.list_implies(prems,concl)) + val prems' = filter (exists_prog prog) prems; + val concl' = filter (exists_prog prog) [concl]; + fun subst t = (Thm.cterm_of ctxt (Var(("P",0),HOLogic.boolT)), + Thm.cterm_of ctxt t); + (* swap_tac : tactic * term -> tactic; takes one term containing + "prog" and swap it into the conclusion as disjoint *) + val swap_accumulate = @{lemma "[| P ; ~P | Q |] ==> Q" by auto} + fun swap_tac (tm,tc) = tc THEN + (Term_Tactics.eres_terminst_tac + ctxt + [] + [subst (HOLogic.dest_Trueprop tm)] + swap_accumulate + no) + (* rev_tac swaps term "a" into conclusion *) + fun rev_tac thy a = Term_Tactics.eres_terminst_tac + ctxt + [] + [subst (HOLogic.dest_Trueprop a)] + @{thm rev_notE} + no + in ((case (prems',concl') of + ([],[]) => error ("Subgoal " ^Int.toString(no) + ^" can not be converted into TNF: \n \n" ^ + term_string_of_goal_no()) + (* case that conclusion does not contain "prog", but several + assumptions do: pick one of them, revert first *) + |(a::R,[]) =>(rev_tac thy a + THEN + (List.foldl swap_tac all_tac R)) + |(R,_) => (List.foldl swap_tac all_tac R) + ) + THEN + (simp_tac (put_simpset HOL_ss ctxt) no)) state + end; + + + +fun normalize_TNF ctxt prog top = + List.foldl (fn (arg,t) => t THEN to_TNF ctxt prog arg) + all_tac + ((length (prems_of top)) downto 1) + top + + + +(* TODO: adb *) +fun minimize_TNF ctxt = distinct_subgoals_tac +(* This should do: + - sorting assumptions in all subgoals + - sorting parameters of subgoals + - sortieren der clauses - + weniger asms zuerst (defer_tac) + - eliminieren von assumptions subsumed in other clauses + [| P;R |] ==> A; + [| P;Q;R |] ==> A + ------------------ + [| P;R |] ==> A; + [| P;R |] ==> A + durch thin_tac + - eliminieren von subsumed or's, + a particular weakness of our procedure so far. + [| P;R |] ==> A; + [| ~P;Q |] ==> B + [| R; Q|] ==> A|B; + ------------------ + [| P;R |] ==> A; + [| ~P;Q |] ==> B + - finally distinct_subgoals_tac +*) + + +(* orig version *) +(* +fun gen_test_case_tac thy clasimp depth steps prog = + EVERY[ALLGOALS (asm_full_simp_tac (snd clasimp)), + safe_tac ((global_claset_of thy) addss (global_simpset_of thy)), + normalize_TNF thy prog, + minimize_TNF, + ALLGOALS (fn n => TRY(uniformityI_tac prog n))]; +*) + + + +(* ************************************************************************* *) +(* test case generation ... 
*) +(* ************************************************************************* *) + +fun SORT ctxt rules n thm = let + val matches = maps (fn rule => rule ctxt n thm) rules + val sorted = map (fn (i,tac) => tac) + (sort (fn (x,y) => rev_order (int_ord (fst x, fst y))) matches) +in + (EVERY sorted) thm +end + + + +fun EXEC ctxt tac str = EVERY[if Config.get ctxt profiling + then Clocks.start_clock_tac str + else all_tac, + if Config.get ctxt trace + then print_tac ctxt ("\n:"^str^"\n") + else all_tac, + tac, + if Config.get ctxt profiling + then Clocks.stop_clock_tac str + else all_tac] + +fun EXEC' ctxt tac str n = EXEC ctxt (tac n) str + +fun PRINT_THM ctxt no = + (fn state => (if Config.get ctxt trace + then let val prem = Logic.nth_prem(no, prop_of state) + val str = (Pretty.string_of o (Syntax.pretty_term ctxt)) prem + in print_tac ctxt ("\n goal "^Int.toString(no)^"::"^str^"\n") end + else all_tac) state) + +fun finalize_tac ctxt prog = + EXEC ctxt (ALLCASES(TRY'(uniformityI_tac ctxt prog))) "main uniformity NF" + +fun gen_test_case_tac ctxt prog = let + val steps = Config.get ctxt TestEnv.steps + val testenv = TestEnv.get_testenv ctxt + val rules = TestEnv.get_test_derivation_rules testenv + + val begin_profiling_tac = if Config.get ctxt profiling + then Clocks.start_clock_tac "unnamed_test_thm" THEN Clocks.start_clock_tac "gen_test_cases" + else all_tac + + val end_profiling_tac = if Config.get ctxt profiling + then Clocks.stop_clock_tac "gen_test_cases" THEN Clocks.stop_clock_tac "unnamed_test_thm" + else all_tac + + fun pre_normalize_TNF_tac ctxt = TestEnv.get_pre_normalize_TNF_tac (TestEnv.get_testenv ctxt) ctxt + + val sort_tac = SELECT_GOAL (SORT ctxt rules 1) +in + EVERY[begin_profiling_tac, + EXEC ctxt (ALLCASES((asm_full_simp_tac ctxt))) "pre-simplification", + EXEC ctxt (REPEAT_DETERM_N steps (ALLCASES sort_tac)) "main completed", + EXEC ctxt (pre_normalize_TNF_tac ctxt) "pre_norm", + EXEC ctxt (normalize_TNF ctxt prog) "TNF", + if Config.get ctxt no_finalize + then all_tac + else finalize_tac ctxt prog, + end_profiling_tac] +end + +fun data_separation_rule ctxt no state = +let + val thy = Proof_Context.theory_of ctxt + val prem = Logic.nth_prem(no, prop_of state) + (*val params = Logic.strip_params(prem) *) + val term = HOLogic.dest_Trueprop(Logic.strip_assums_concl prem) + val frees = Term.add_frees term [] + (*val cands = filter (fn (_,ty) => isDataType thy ty) (params@frees) + : LOOPS AT PRESENT SINCE gen_test_cases active on frees *) + val cands = filter (fn (_,ty) => isDataType thy ty) (frees) + fun tac cand = TRY (CHANGED_PROP (ALLCASES (data_separation_tac ctxt cand)) + THEN (ALLCASES (TRY' (hyp_subst_tac_thin true ctxt)))) + THEN (if Config.get ctxt trace + then print_tac ctxt "After data separation" + else all_tac) +in + map (fn cand => (100, tac cand)) cands +end + +fun to_HCNF_rule ctxt no state = + [(50, EXEC ctxt (TestEnv.get_pre_safe_tac (TestEnv.get_testenv ctxt) ctxt) "Simp" + THEN EXEC ctxt (TRY (safe_tac ctxt)) "HCN")] + +fun minimize_rule ctxt no state = + [(10, EXEC ctxt (TestEnv.get_pre_minimize_TNF_tac (TestEnv.get_testenv ctxt) ctxt) "pre_minimize" + THEN EXEC ctxt (minimize_TNF ctxt) "MinimTNF" + THEN (ALLCASES(asm_full_simp_tac ctxt)) + THEN (TRY (safe_tac ctxt)))] + +(* Included for complete backward compatibility *) +fun gen_test_case_old_tac ctxt prog = + let val testenv = TestEnv.get_testenv ctxt + val depth = Config.get ctxt TestEnv.depth + val steps = Config.get ctxt TestEnv.steps + + val prep_HCNF = TestEnv.get_pre_safe_tac testenv + val 
to_HCNF = TRY (safe_tac ctxt) + val pre_normalize_TNF_tac = TestEnv.get_pre_normalize_TNF_tac testenv + val pre_minimize_TNF_tac = TestEnv.get_pre_minimize_TNF_tac testenv + fun trace_tac b str = (if Config.get ctxt trace + then print_tac ctxt ( "\n"^b^":"^str^"\n") + else all_tac) + fun exec phase b str = EVERY[if Config.get ctxt profiling + then Clocks.start_clock_tac str + else all_tac, + phase, trace_tac b str, + if Config.get ctxt profiling + then Clocks.stop_clock_tac str + else all_tac] + + fun standard k b = + EVERY[if Config.get ctxt trace then print_tac ctxt "Enter" else all_tac, + exec (prep_HCNF ctxt) b "Simp", + exec (to_HCNF) b "HCN", + exec (pre_normalize_TNF_tac ctxt) b "pre_norm", + exec (normalize_TNF ctxt prog) b "TNF", + exec (pre_minimize_TNF_tac ctxt) b "pre_minimize", + exec (minimize_TNF ctxt) b "MinimTNF"] + val dst = data_separation_old_tac ctxt depth + + fun pairself f (x, y) = (f x, f y); + + (*True if the two theorems have the same prop field, ignoring hyps, der, etc.*) + val eq_thm_prop = op aconv o pairself Thm.full_prop_of; + + fun main k st =(if k < steps (* not yet all frees per goal + less steps exhausted*) + then (ALLCASES(fn n => TRY(dst n) )) THEN + (* try exhaustion per goal *) + (COND(fn st' => not (eq_thm_prop(st, st'))) + (* something 's changed *) + ( (ALLCASES (fn n => TRY((hyp_subst_tac_thin true ctxt) n) ) ) (* REALLY ? *) + THEN (standard k "1") + THEN (main (k+1)) + ) + (standard k "2")) + else standard k "0" (* run out of steps *)) (st) + val begin_profiling_tac = if Config.get ctxt profiling then + Clocks.start_clock_tac "unnamed_test_thm" THEN Clocks.start_clock_tac "gen_test_cases" + else + all_tac + + val end_profiling_tac = if Config.get ctxt profiling then + Clocks.stop_clock_tac "gen_test_cases" THEN Clocks.stop_clock_tac "unnamed_test_thm" + else + all_tac + in EVERY[begin_profiling_tac, + exec (ALLCASES((asm_full_simp_tac ctxt))) "" "pre-simplification", + exec (main 0) "" "main completed", + exec (ALLCASES(TRY'(uniformityI_tac ctxt prog))) "" "main uniformity NF", + end_profiling_tac] + end; + + +(* ************************************************************************* *) +(* *) +(* testdata generation ... 
*) +(* *) +(* ************************************************************************* *) + + +fun abs_data_tac ctxt atdata = + let fun single_abs_data_tac thm = + Subgoal.FOCUS_PARAMS(fn {context, ...} => + SOLVE (Method.insert_tac context [thm] 1 THEN auto_tac context)) ctxt + in + FIRST' (map single_abs_data_tac atdata) + end + + +fun test_solve_tac ctxt atdata = + let val thy = Proof_Context.theory_of ctxt + val remove_po = EqSubst.eqsubst_tac ctxt [0] [@{thm PO_def}] + val total_iterations = Config.get ctxt TestEnv.iterations + val max_simple_iterations = 50 + val simple_iterations = Int.min(total_iterations, max_simple_iterations) + val further_iterations = Int.max(total_iterations - max_simple_iterations,0) + in + remove_po + THEN' PRINT_THM ctxt + THEN' (FIRST'[EXEC' ctxt (abs_data_tac ctxt atdata) "abs_data", + EXEC' ctxt (RandomBackend.random_inst_tac ctxt simple_iterations) "random_inst", + EXEC' ctxt (QuickCheckBackend.quickcheck_tac ctxt further_iterations) "quickcheck", + EXEC' ctxt (SMTBackend.testgen_smt_tac ctxt) "smt"]) + end + + + +fun is_solved n thm = + let fun is_unsolved_po i = not (null (inter (op =) (BackendUtils.premvars n thm) (BackendUtils.premvars i thm))) + andalso i <> n + in not (exists is_unsolved_po (1 upto (nprems_of thm))) + end + +(* Instantiate the remaining variables in test cases +with random terms *) + +fun finalize_test_tac ctxt n thm = let +in + (COND (is_solved n) (RandomBackend.single_rand_inst_tac ctxt (BackendUtils.premvars n thm)) all_tac) thm +end + + + +fun solve_all ctxt atdata state = + let (* adding a let expression.. in order to check if all free vars were instatiated by smt.. + if true then error code else solve_all*) + val PARALLEL_TRYSOLVE = PARALLEL_TRYSOLVE_POs ctxt (fn ctxt => test_solve_tac ctxt atdata 1) state + + val term = PARALLEL_TRYSOLVE |> Seq.list_of |> + (List.map prop_of) |> HOLogic.mk_list @{typ "term"} + val use_smt = Config.get ctxt TestEnv.use_smt; +in + if (use_smt andalso (Term.exists_subterm (Term.is_Var) term)) + then error("One or more free variables were not instantiated by the solver!") + + else state |$> PARALLEL_TRYSOLVE_POs ctxt (fn ctxt => test_solve_tac ctxt atdata 1) + |$> ALLCASES (finalize_test_tac ctxt) (* cannot parallelize here *) + +end + + + (* *********************************************************************** *) + (* Normalizer *) + (* *********************************************************************** *) + (* motivation: type instantiation may make + constraints of test-cases unsolvable. + Unsolvable cases should be removed before random-solving. + Since constraints with Mvars were not considered + by simptac and arith_tac, they were substituted + against arbitrary free vars aforehand. + +fun norm_tac ctxt n thm = + let val thy = Proof_Context.theory_of ctxt + val m = Thm.nprems_of thm + val prem = Logic.nth_prem(n, prop_of thm) + val k = length (Logic.strip_imp_prems(prem)) + val goal = Thm.trivial(cterm_of thy prem) + val tvars = OldTerm.term_vars prem + val insts = map(fn(x as Var((s,a),t))=> + (cterm_of thy x, + cterm_of thy (Free("XXX"^s^(Int.toString a),t)))) + (tvars) + val cleanup = List.foldr (fn (_,tac) => (atac (n)) THEN tac) + all_tac (1 upto k) + in ( + (rtac (instantiate ([],insts) goal) n) + THEN + (full_simp_tac (simpset_of ctxt) n) (* may erase goal n. + --> test case unsolvable. 
*) + THEN + (fn thm' => if (nprems_of thm' < m+k) (* goal erased *) + then cleanup thm' + else no_tac thm )(* report failure *) + )(thm) + end *) + +fun gen_test_data name context = + let val ctxt = Context.proof_of context + val te = TestEnv.get_testenv ctxt + val bound = Config.get ctxt TestEnv.type_range_bound + val candTs= #type_candicates(TestEnv.rep_testenv te) + val type_grounds = groundT_thm context bound candTs + val testthm = (the(Symtab.lookup(TestEnv.get_test_thm_tab te)name) + handle Option.Option => error("No test theorem: "^name^" available!")) + val atdata = (the(Symtab.lookup(TestEnv.get_absdata_tab te)name) + handle Option.Option => []) + val jdmts = map (solve_all ctxt atdata) (type_grounds testthm) + val te1 = TestEnv.test_thm_inst_tab_update (Symtab.update(name,jdmts)(TestEnv.get_test_thm_inst_tab te)) te + val prems = List.concat (map Thm.prems_of jdmts) + val data = map (Thm.cterm_of ctxt) (filter is_test_data prems) + val te2 = TestEnv.set_test_data_tab (name,data) te1 + val hyps = map (Thm.cterm_of ctxt) (filter is_thyp prems) + val te3 = TestEnv.set_test_thyps_tab (name,hyps) te2 + val pos = map (Thm.cterm_of ctxt) (filter is_po prems) + val te4 = TestEnv.set_unsolved_PO_tab (name,pos) te3 + + val t = LogThy.get_td_delta () + val _ = writeln (String.concat ["Test theorem (gen_test_data) '",name,"': ", + Int.toString (List.length data)," test cases in ", + Time.toString t, " seconds"]) + val _ = if not (null pos) + then writeln (String.concat ["Warning: There were ", Int.toString (List.length pos) , " unsolved POs."]) + else () + val _ = LogThy.append (String.concat [ + Context.theory_name (Proof_Context.theory_of ctxt), ", " ,name, ", ", + "test data, " ,Int.toString (List.length data), + ", " ,Time.toString t,"\n"]) + + in TestEnv.map_data (K te4) context end; + +fun select_goals pred thy name = let + val te = TestEnv.get_testenv_global thy +in + maps (convert_goals_to_metahyps pred) + (the(Symtab.lookup(TestEnv.get_test_thm_inst_tab te)name) + handle Option.Option => error("No data statement: "^name^" available!")) +end + +val get_test_data = select_goals is_test_data + +val get_test_hyps = select_goals is_thyp + +val get_pos = select_goals is_po + +fun get_test_thm thy name = let + val te = TestEnv.get_testenv_global thy +in + (the(Symtab.lookup(TestEnv.get_test_thm_inst_tab te)name) + handle Option.Option => error("No data statement: "^name^" available!")) +end + +fun discharge_POs name context = + let fun use_test_instances_tac name ctxt = + let val te = TestEnv.get_testenv ctxt + val atdata = (the(Symtab.lookup(TestEnv.get_absdata_tab te)name) + handle Option.Option => []) + in (TRY' ( EqSubst.eqsubst_tac ctxt [0] [@{thm PO_def}] + THEN' abs_data_tac ctxt atdata)) + end + in ALLGOALS(ONLY_POS (use_test_instances_tac name context)) end + +(* *********************************************************************** *) +(* *) +(* Initial test environment *) +(* *) +(* *********************************************************************** *) + +val _ = Context.>> (TestEnv.map_data + ((TestEnv.pre_safe_tac_update (fn ctxt => ALLCASES (asm_full_simp_tac ctxt))) + #> (TestEnv.add_test_derivation_rule data_separation_rule) + #> (TestEnv.add_test_derivation_rule to_HCNF_rule) + #> (TestEnv.add_test_derivation_rule minimize_rule))); + +val mt_testenv = TestEnv.get_testenv (Context.the_local_context ()); + +val _ = + Outer_Syntax.command @{command_keyword "print_testenv"} "print test environment" + (Parse.name >> (fn name => Toplevel.keep ((TestEnv.print_testenv name) o 
Toplevel.context_of)));
+
+val options = Scan.optional (Parse.$$$ "(" |-- Parse.!!! (Scan.option Parse.nat--| Parse.$$$ ")")) (NONE);
+
+val _ = let fun print name opts ctxt = writeln (TestEnv.print_test_data ctxt name opts) in
+ Outer_Syntax.command @{command_keyword "print_conc_tests"} "print concrete tests"
+ (options -- Parse.name >> (fn (opts, name) =>
+ Toplevel.keep ((print name opts) o Toplevel.context_of)))
+ end;
+
+val _ = let fun print name opts ctxt = writeln (TestEnv.print_test_case ctxt name opts) in
+ Outer_Syntax.command @{command_keyword "print_abs_tests"} "print abstract tests"
+ (options -- Parse.name >> (fn (opts, name) =>
+ Toplevel.keep ((print name opts) o Toplevel.context_of)))
+ end;
+
+val _ = let fun print name opts ctxt = writeln (TestEnv.print_test_hyps ctxt name opts) in
+ Outer_Syntax.command @{command_keyword "print_thyps"} "print test hypotheses"
+ (options -- Parse.name >> (fn (opts, name) =>
+ Toplevel.keep ((print name opts) o Toplevel.context_of)))
+ end;
+
+val _ = let fun print name opts ctxt = writeln (TestEnv.print_unsolved_pos ctxt name opts) in
+ Outer_Syntax.command @{command_keyword "print_upos"} "print unsolved proof obligations"
+ (options -- Parse.name >> (fn (opts, name) =>
+ Toplevel.keep ((print name opts) o Toplevel.context_of)))
+ end;
+
+
+(* ************************************************************************* *)
+(* miscellaneous functions ... *)
+(* ************************************************************************* *)
+
+
+fun animate thy name = (* HACK - very simplistic implementation *)
+ let val ctxt = Proof_Context.init_global thy |> Config.put simp_trace true
+ fun animate_data_statement thm =
+ (Output.writeln ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>";
+ Pretty.writeln (Thm.pretty_thm ctxt thm);
+ Output.writeln "============================================";
+ (* asm_full_simplify (global_simpset_of(theory_of_thm(thm))) thm; *)
+ asm_full_simplify ctxt thm;
+ Output.writeln "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<");
+ in
+ List.app animate_data_statement (get_test_data thy name)
+ end;
+
+(* module setup *)
+
+val setup =
+ trace_setup #>
+ no_uniformity_setup #>
+ profiling_setup #>
+ no_finalize_setup;
+
+end (*struct *);
+
+*}
+
+setup {* TestEnv.setup #> TestGen.setup *}
+text{* A key concept of HOL/TestGen is the \emph{test suite}. A test suite can be seen as a
+container data-structure that relates a test specification to a number of data, namely the
+test theorem (in TNF), abstract and concrete test data, and specific configuration data.
+Test suites are created at the end of a test-specification statement and its initial test-refinement.
+Later on they are enriched and finally used to generate test drivers and test executions.
+
+So, a test suite with name "X" is eventually run in a test execution outside of HOL/TestGen,
+started by the shell command "run_X". 
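+
+A typical command-level work-flow looks roughly as follows; this is only a sketch, in which
+the suite name "X", the concrete test specification, and the name "prog" of the program under
+test are illustrative placeholders:
+\begin{verbatim}
+  test_spec "pre x --> post x (prog x)"
+  apply(gen_test_cases "prog")
+  mk_test_suite "X"
+
+  gen_test_data "X"
+  print_conc_tests X
+\end{verbatim}
+Here, test_spec opens the test specification as a proof goal, the method gen_test_cases
+derives the symbolic test cases, mk_test_suite stores the resulting test theorem under the
+name "X", gen_test_data instantiates the test cases to concrete test data, and
+print_conc_tests displays them. The commands generate_test_script and export_test_data
+(defined in TestScript.thy) can then be used to export the tests for such an external run.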
+*} + +ML{* + +fun mk_test_suite name = Toplevel.end_proof (fn _ => fn state => + let + val ctxt = Proof.context_of state; + val _ = if Config.get ctxt TestGen.profiling + then (Clocks.start_clock "unnamed_test_thm"; + Clocks.rename_clock "unnamed_test_thm" name; + Clocks.stop_clock name) + else () + + val _ = Proof.assert_backward state; + val {context = goal_ctxt, goal, ...} = Proof.goal state + fun up_thy thy = + let + fun add_to_dm name thm thy = + let + val thy = TestEnv.map_data_global (TestEnv.add_test_case (name,thm)) thy + val t = LogThy.get_tc_delta () + val num_of_tc = List.length + (List.filter (fn tc => TestGen.is_neither_thyp_nor_PO tc) (Thm.prems_of thm)) + val _ = writeln (String.concat ["Test theorem (gen_test_cases) '", + name,"': ", Int.toString num_of_tc, + " test cases in ",Time.toString t, + " seconds"]) + + val _ = LogThy.append (String.concat + [Context.theory_name thy, ", ", + name, ", " ,"test case, " ,Int.toString num_of_tc, + ", ", Time.toString t,"\n"]) + in + thy + end + in + thy |> Sign.add_path (space_implode "_" [name]) + |> (Global_Theory.add_thms [((@{binding test_thm}, goal), [])]) + |> snd + |> Sign.parent_path + |> add_to_dm (space_implode "_" [name]) goal + |> Clocks.flush_clocks + end + + in goal_ctxt |> Local_Theory.background_theory up_thy end); + + +val _ = Outer_Syntax.command + @{command_keyword mk_test_suite} + "store test state (theorem)" + (Parse.name >> mk_test_suite); + + + +(**********************) +fun gen_test_dataT name thy = + let val _ = LogThy.start_td_timer () + val _ = if Config.get_global thy TestGen.profiling + then (Clocks.init_clocks thy;Clocks.start_clock name;Clocks.start_clock "gen_test_data") + else () + + val thy = Context.theory_map (TestGen.gen_test_data name) thy (* call of the core-routine*) + + val t = LogThy.get_td_delta () + val thy = if Config.get_global thy TestGen.profiling + then (Clocks.stop_clock "gen_test_data"; Clocks.stop_clock name; Clocks.flush_clocks thy) + else thy + + val thy = Sign.add_path (space_implode "_" [name]) thy; + val thm = TestGen.get_test_thm thy name + val thy = snd(Global_Theory.add_thmss [((@{binding test_thm_inst},thm),[])] (thy)) + + val thy = Sign.parent_path thy; + in + thy + end + +val _ = Outer_Syntax.command + (*@{command_spec "gen_test_data"}*) + @{command_keyword gen_test_data} + "generate test data" + (Parse.name >> (Toplevel.theory o gen_test_dataT)); + +(**********************) + +val general_statement = + Parse_Spec.statement >> (fn x => ([], Element.Shows x)) || + Parse_Spec.long_statement; + +val _ = Outer_Syntax.local_theory_to_proof + @{command_keyword test_spec} + "define test specification" + (Scan.optional (Parse_Spec.opt_thm_name ":" --| + Scan.ahead (Parse_Spec.includes >> K "" || Parse_Spec.long_statement_keyword)) + Binding.empty_atts -- + Scan.optional Parse_Spec.includes [] -- + general_statement >> + (fn ((a, includes), (elems, concl)) => fn lthy => + let val _ = if Config.get lthy TestGen.profiling + then Clocks.init_clocks (Proof_Context.theory_of lthy) + else () + in + Specification.schematic_theorem_cmd true + "test_spec" NONE (K I) a includes elems concl false lthy + end)); + +*} +ML{* +fun set_wrapper str H src = + let val txt = Input.text_of src + val range = Input.range_of src + val txt' = str ^ "(" ^ txt ^ ")" + in H (Input.source true txt' range) end; + +(* BU : TO BE REVISED BY BU TO 2016. WAS SORT OF A HACK ANYWAY. 
+ Chantal: did revise it; please double check*) +val _ = Outer_Syntax.command + (*@{command_spec "set_pre_safe_tac"}*) + @{command_keyword set_pre_safe_tac} + "configure gen_test_gen: set pre_safe_tac" + (Parse.ML_source >> (Toplevel.theory o + (set_wrapper + "(TestEnv.map_data_global o TestEnv.pre_safe_tac_update)" + Isar_Cmd.setup))); + + +val _ = Outer_Syntax.command + (*@{command_spec "set_pre_normalize_TNF_tac"}*) + @{command_keyword set_pre_normalize_TNF_tac} + "configure gen_test_gen: pre_normalize_TNF_tac" + (Parse.ML_source >> (Toplevel.theory o + (set_wrapper + "(TestEnv.map_data_global o TestEnv.pre_normalize_TNF_tac_update)" + Isar_Cmd.setup))) + +val _ = Outer_Syntax.command + (*@{command_spec "set_pre_minimize_TNF_tac"}*) + @{command_keyword set_pre_minimize_TNF_tac} + "configure gen_test_gen: set pre_minimize_TNF_tac" + (Parse.ML_source >> (Toplevel.theory o + (set_wrapper + "(TestEnv.map_data_global o TestEnv.pre_minimize_TNF_tac_update)" + Isar_Cmd.setup))) + + +fun pretty_cterm_style_generic f ctxt (style, (name,pos:Position.T)) = + let val termS = case (Symtab.lookup (f (TestEnv.get_testenv ctxt)) name) of + SOME k => List.map (style o Thm.term_of) k (* (nth(k,0)) *) + | NONE => error "No such test suite" + in Pretty.big_list "\\\\" (List.map (Thy_Output.pretty_term ctxt) ( termS)) end + +val pretty_thyp_style = pretty_cterm_style_generic TestEnv.get_test_thyps_tab +val pretty_conctest_style = pretty_cterm_style_generic TestEnv.get_test_data_tab +val pretty_uPO_style = pretty_cterm_style_generic TestEnv.get_unsolved_PO_tab + +(* code copy from Thy_Output (2016) since this is not exported there ... *) + +fun basic_entities name scan pretty = + Thy_Output.antiquotation name scan (fn {source, context = ctxt, ...} => + Thy_Output.output ctxt o Thy_Output.maybe_pretty_source pretty ctxt source); + +fun basic_entity name scan = basic_entities name (scan >> single); + +(* end copy *) + +val _ = Theory.setup +( (basic_entity @{binding thyps} (Term_Style.parse -- (Scan.lift (Parse.position Args.name))) + pretty_thyp_style) #> + (basic_entity @{binding uPOs} (Term_Style.parse -- (Scan.lift (Parse.position Args.name))) + pretty_uPO_style) #> + (basic_entity @{binding conctests} (Term_Style.parse -- (Scan.lift (Parse.position Args.name))) + pretty_conctest_style)) + ; +*} + + +attribute_setup test_inst = + {* Scan.lift Args.name >> (fn name => + Thm.declaration_attribute (fn thm => + TestEnv.map_data (TestEnv.add_abstest_data (name, thm)))) *} + "declare theorems for test case generation" + +attribute_setup testgen_type_candidates = + {* Scan.repeat1 Args.typ >> + (fn Ts => + Thm.declaration_attribute (fn _ => + TestEnv.map_data (TestEnv.type_candidates_update Ts))) *} + "declare testgen type candidates" + + +method_setup discharge_POs = + {* Scan.lift (Scan.option(Parse.string) >> + (fn params => fn ctxt => + let val _ = LogThy.start (); + val name = (case params of + NONE => "" + | SOME (s) => s); + in Method.SIMPLE_METHOD (CHANGED_PROP + (TestGen.discharge_POs name ctxt)) + end)) *} + "eliminate POs by test instances" + +method_setup gen_test_cases = + {* Scan.lift (Scan.option (Parse.nat -- Parse.nat) + -- Scan.repeat1 (Scan.unless (Scan.first clasimp_modifiers) Args.name)) + --| Method.sections clasimp_modifiers >> + (fn (params, prog) => fn ctxt => + let val _ = LogThy.start (); + val ctxt' = (case params of + NONE => ctxt + | SOME (depth, steps) => ctxt + |> Config.put TestEnv.depth depth + |> Config.put TestEnv.steps steps); + in Method.SIMPLE_METHOD (CHANGED_PROP + 
(TestGen.gen_test_case_tac ctxt' prog)) + end) *} + "generate symbolic test cases" + +method_setup gen_test_cases_old = + {* Scan.lift (Scan.option (Parse.nat -- Parse.nat) -- + Scan.repeat1 (Scan.unless (Scan.first clasimp_modifiers) Args.name)) --| + Method.sections clasimp_modifiers >> + (fn (params, prog) => fn ctxt => + let val _ = LogThy.start (); + val ctxt' = (case params of + NONE => ctxt + | SOME (depth, steps) => ctxt + |> Config.put TestEnv.depth depth + |> Config.put TestEnv.steps steps); + in Method.SIMPLE_METHOD (CHANGED_PROP + (TestGen.gen_test_case_old_tac ctxt' prog)) + end) + *} + "old variant of gen_test_cases" + +ML{* fn ctxt => HEADGOAL o (K(TestGen.thyp_ify ctxt ))*} +ML{* fn ctxt => METHOD (HEADGOAL o (K(TestGen.thyp_ify ctxt )))*} + + + +ML{* +val _ = + Theory.setup + (Method.setup @{binding thyp_ify} + (Attrib.thms >> (fn _ => fn ctxt => METHOD (HEADGOAL o (K(TestGen.thyp_ify ctxt ))))) + "making testhyps explicit" #> + Method.setup @{binding mp_ify} + (Attrib.thms >> (fn _ => fn ctxt => METHOD (HEADGOAL o (K(TestGen.mp_fy ctxt )) ))) + "fast destruction matching" #> + Method.setup @{binding all_ify} + (Attrib.thms >> (fn _ => fn ctxt => METHOD (HEADGOAL o (K(TestGen.all_ify ctxt [])) ))) + "replace all free variables by quantifiers") +*} + +end diff --git a/src/test-gen/src/test/TestLib.thy b/src/test-gen/src/test/TestLib.thy new file mode 100644 index 0000000..b3ccc0a --- /dev/null +++ b/src/test-gen/src/test/TestLib.thy @@ -0,0 +1,57 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * Testing.thy --- wiring everything together. + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2007 ETH Zurich, Switzerland + * 2008-2012 Achim D. Brucker, Germany + * 2009-2012 Université Paris-Sud, France + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ ******************************************************************************) +(* $Id:$ *) + +chapter {* Summing up the Library *} + +theory + TestLib +imports + Monads + Automata + Interleaving + Observers + EFSM_Toolkit + TestRefinements +begin + +end diff --git a/src/test-gen/src/test/TestRefinements.thy b/src/test-gen/src/test/TestRefinements.thy new file mode 100644 index 0000000..4fa18cf --- /dev/null +++ b/src/test-gen/src/test/TestRefinements.thy @@ -0,0 +1,248 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * TestRefinements.thy --- for sequence testing. + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2007, ETH Zurich, Switzerland + * 2009 B. Wolff, Univ. Paris-Sud, France + * 2009 Achim D. Brucker, Germany + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id:$ *) + +chapter {* Test-Refinements (IOCO and Friends) *} + +theory TestRefinements imports Monads Automata +begin + + + +text{* converts infinite trace sets to prefix-closed sets of finite traces, +reconciling the most common different concepts of traces ... *} + +consts cnv :: "(nat \ '\) \ '\ list" +(* NAIV !!! *) + +(* I WOULD PREFER A SYMMETRIC NOTION OF REFINEMENT ... However, this makes the use +of mbind less clear and evident. Sufficient conditions when these two things +are equivalent need to be explored ... *) + + +definition input_refine :: + "[('\,'o,'\) det_io_atm,('\,'o,'\) ndet_io_atm] \ bool" +where + input_refine_def: + "input_refine I SP = + (({det_io_atm.init I} = ndet_io_atm.init SP) \ + (\ t \ cnv ` (in_trace `(etraces SP)). 
+ (det_io_atm.init I) + \ (os \ (mbind t (det_io_atm.step I)) ; + return(length t = length os))))" +notation input_refine ("(_/ \\<^sub>I _)" [51, 51] 50) + +text{* This testing notion essentially says: whenever we can run an +input sequence succesfully on the PUT (the program does not throw an exception), +it is ok. *} + +definition input_output_refine :: + "[('\,'o,'\) det_io_atm,('\,'o,'\) ndet_io_atm] \ bool" +where + input_output_refine_def: + "input_output_refine i s \ + ({det_io_atm.init i} = ndet_io_atm.init s) \ + (\ t \ prefixes (etraces s). + (det_io_atm.init i) + \ (os \ (mbind (map fst t) (det_io_atm.step i)); + return((map snd t) = os)))" + +notation input_output_refine ("(_/ \\<^sub>IO _)" [51, 51] 50) +text{* Our no-frills-approach to I/O conformance testing: +no quiescense, and strict alternation between input and output. *} + +text{* PROBLEM : Tretmanns / Belinfantes notion of ioco is inherently +different from Gaudels, which is refusal based. +See definition in accompanying IOCO.thy file: +\begin{verbatim} + +definition ioco :: "[('\,'o option,'\)IO_TS,('\,'o option,'\)IO_TS] \ bool" + (infixl "ioco" 200) + "i ioco s \ (\ t \ Straces(s). + out i (i after t) \ out s (s after t))" + +\end{verbatim} + +*} + + +definition after :: "[('\, 'o, '\) ndet_io_atm, ('\ \ 'o) list] \ '\ set" + (infixl "after" 100) +where "atm after l = {\' . \ t \ rtraces atm. (\' = fst(t (length l)) \ + (\ n \ {0 .. (length l) - 1}. l!n = fst(snd(t n))))}" + +definition out :: "[('\, 'o, '\) ndet_io_atm,'\ set, '\] \ 'o set" +where "out atm ss \ = {a. \\ \ ss. \\'. (a,\') \ ndet_io_atm.step atm \ \}" + +(* OLD: +definition ioco :: "[('\, 'o, '\) det_io_atm,('\, 'o, '\) ndet_io_atm] \ bool" + (infixl "ioco" 200) +where "i ioco s = (({det_io_atm.init i} = ndet_io_atm.init s) \ + (\ t \ cnv ` (etraces s). + \ \ \. + ((det_io_atm.init i) + \ (os \ (mbind (map fst t @ [\]) (det_io_atm.step i)) ; + return(os = map snd t @ [\]))) + \ + \ \ out s (s after t)))" *) + +definition ready :: "[('\, 'o, '\) ndet_io_atm,'\ set] \ '\ set" +where "ready atm ss = {\. \\ \ ss. ndet_io_atm.step atm \ \ \ {}}" + +definition ioco :: "[('\,'o,'\)ndet_io_atm, ('\,'o,'\)ndet_io_atm] \ bool" + (infixl "ioco" 200) +where "i ioco s = (\ t \ prefixes(etraces s). + \ \ \ ready s (s after t). out i (i after t) \ \ out s (s after t) \)" + +definition oico :: "[('\,'o,'\)ndet_io_atm,('\,'o,'\)ndet_io_atm] \ bool" + (infixl "oico" 200) +where "i oico s = (\ t \ prefixes(etraces s). + ready i (i after t) \ ready s (s after t))" + +definition ioco2 :: "[('\,'o,'\)ndet_io_atm, ('\,'o,'\)ndet_io_atm] \ bool" + (infixl "ioco2" 200) +where "i ioco2 s = (\ t \ eprefixes (ndet_io_atm.step s) (rtraces s). + \ \ \ ready s (s after t). + out i (i after t) \ \ out s (s after t) \)" + +definition ico :: "[('\, 'o, '\) det_io_atm,('\, 'o, '\) ndet_io_atm] \ bool" + (infixl "ico" 200) +where "i ico s = (\ t \ prefixes(etraces s). + let i' = det2ndet i in ready i' (i' after t) \ ready s (s after t))" + +lemma full_det_refine: "s = det2ndet s' \ + (det2ndet i) ioco s \ (det2ndet i) oico s \ input_output_refine i s" +apply(safe) +oops + +definition ico2 :: "[('\,'o,'\)ndet_io_atm,('\,'o,'\)ndet_io_atm] \ bool" + (infixl "ico2" 200) +where "i ico2 s \ \ t \ eprefixes (ndet_io_atm.step s) (rtraces s). + ready i (i after t) \ ready s (s after t)" + + +text{* There is lots of potential for optimization. 
+\begin{itemize} +\item only maximal prefixes +\item draw the $\omega$ tests inside the return +\item compute the $\omega$ by the $\ioprog$, not quantify over it. +\end{itemize} +*} + +section{* Generic Monadic Test Refinement Notions *} + +text{* Generic Notion *} + +definition monadic_test_refinement :: + "('\ \ ('o, '\)MON\<^sub>S\<^sub>E) \ + '\ set \ (* state invariant *) + '\ list set \ (* coverage or test-purpose *) + ('\ list \ 'o list \ '\ \ bool) \ (* post-cond *) + ('\ \ ('o, '\)MON\<^sub>S\<^sub>E) \ bool" + ("_ \\<^sub>\_,_,_\<^sub>\ _" [100,100,100,100,100]101) +where + + + "(MODEL \\<^sub>\Init,CovCrit,conf\<^sub>\ SUT) = + (\ \\<^sub>0 \ Init. \ \s \ CovCrit. \ res. + (\\<^sub>0 \ (os \ mbind \s MODEL; return (conf \s os res))) + \ + (\\<^sub>0 \ (os \ mbind \s SUT; return (conf \s os res))))" + +subsection{* Inclusion Test Refinements *} + +definition incl_ref :: "('\ \ ('o, '\)MON\<^sub>S\<^sub>E) \ + '\ set \ (* state invariant *) + '\ list set \ (* coverage or test-purpose *) + ('\ \ ('o, '\)MON\<^sub>S\<^sub>E) \ bool" + ("_ \\<^sub>I\<^sub>\_,_\<^sub>\ _" [100,100,100,100]101) +where "(S \\<^sub>I\<^sub>\Init,CC\<^sub>\ I) = (S \\<^sub>\Init,CC,(\ is os x. length is = length os \ os=x)\<^sub>\ I)" + + +lemma inclusion_test_I : + "(\\\<^sub>0 \s res. + \\<^sub>0 \ Init \ + \s \ CC \ + \\<^sub>0 \ ( os \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e \s S; unit\<^sub>S\<^sub>E (length \s = length os \ os = res)) + \ + \\<^sub>0 \ ( os \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e \s I; unit\<^sub>S\<^sub>E (length \s = length os \ os = res))) + \ (S \\<^sub>I\<^sub>\Init,CC\<^sub>\ I)" +unfolding incl_ref_def monadic_test_refinement_def +by auto + +lemma inclusion_test_I_opt : + "(\\\<^sub>0 \s res. + \\<^sub>0 \ Init \ + \s \ CC \ + \\<^sub>0 \ ( os \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p \s S; unit\<^sub>S\<^sub>E (os = res)) + \ + \\<^sub>0 \ ( os \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p \s I; unit\<^sub>S\<^sub>E (os = res))) + \ (S \\<^sub>I\<^sub>\Init,CC\<^sub>\ I)" +apply(rule inclusion_test_I) +apply(subst Monads.mbindFSave_vs_mbindFStop) +apply(subst (asm) Monads.mbindFSave_vs_mbindFStop) +by(auto) + + + +subsection{* Inclusion Test Refinements with Abort *} + +text{* The abort scenario allows operations to fail, so violations + to input-enabledness are permitted. The model assumes that a particular + abort-semantics is respected: the operation is assumed to fail explicitly, + \ie{} observably by the tester, and to leave the state unchanged. *} + + +definition incl_aborts_ref :: "('\ \ ('o, '\)MON\<^sub>S\<^sub>E) \ + '\ set \ (* state invariant *) + '\ list set \ (* coverage or test-purpose *) + ('\ \ ('o, '\)MON\<^sub>S\<^sub>E) \ bool" + ("_ \\<^sub>I\<^sub>A\<^sub>\_,_\<^sub>\ _" [100,100,100,100]101) +where "(S \\<^sub>I\<^sub>A\<^sub>\Inv,IS\<^sub>\ I) = (S \\<^sub>\Inv,IS,(\ is os x. 
os=x)\<^sub>\ I)" + + +(* TODO : Deadlock Refinement *) + +(* TODO : Monadic IOCO *) + + +end diff --git a/src/test-gen/src/test/TestScript.thy b/src/test-gen/src/test/TestScript.thy new file mode 100644 index 0000000..6cf2e42 --- /dev/null +++ b/src/test-gen/src/test/TestScript.thy @@ -0,0 +1,164 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * TestScript.thy --- Test Script and Test Data exporter + * This file is part of HOL-TestGen. + * + * Copyright (c) 2011-2015 Achim D. Brucker, Germany + * 2016-2017 The University of Sheffield, UK + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id: TestScript.thy 13147 2017-08-18 19:42:12Z brucker $ *) + + +theory TestScript +imports TestGen + "codegen_fsharp/code_fsharp" +keywords "generate_test_script" :: thy_decl + and "export_test_data" :: diag +begin + +code_printing constant "RSF" \ + (SML) "TestHarness.rsf" + and (Fsharp) "TestHarness.rsf" + and (Scala) "TestHarness.rsf" + +datatype Lazy = "lazy" bool + +code_printing constant "lazy" \ + (SML) "fn () => (_)" + and (Fsharp) "lazy ((_))" + and (Scala) "() => ((_))" + +code_printing type_constructor Lazy \ + (SML) "(unit -> bool)" + and (Fsharp) "Lazy" + and (Scala) "() => Boolean" + +code_printing code_module "Header" \ (SML) +{* + (********************************************************) + (* This file is generated automatically. 
Do not edit *) + (********************************************************) +*} + +ML{* +open List; + fun gen_ts_def dataname thy = let + + val tsname = "test_script" + val fqtsname = (Context.theory_name thy)^"."^dataname^"."^tsname + val tsbnd = Binding.name tsname + val tstype = HOLogic.listT (HOLogic.mk_tupleT[HOLogic.listT @{typ "Lazy"},@{typ "Lazy"}]) + + val (td,_) = (TestGen.get_test_data thy dataname,thy); + + fun def const bnd term thy = snd (Global_Theory.add_defs false [((bnd, + (Logic.mk_equals (const, term))),[Code.add_default_eqn_attribute Code.Equation])] thy) + + val prop_terms = List.map (fn p => let val _ $ ret = Thm.concl_of p in ret end) td; + val prems_terms = map (fn data => (map (fn p => let val _ $ ret = p in ret end) + (Thm.prems_of data))) td; + val spec = map2 (fn x => fn y => (x,y)) prems_terms prop_terms + + fun mk_abs t = Const("TestScript.Lazy.lazy",@{typ "bool"} --> @{typ "Lazy"})$t + + val ts_term = HOLogic.mk_list (HOLogic.mk_tupleT[HOLogic.listT @{typ "Lazy"},@{typ "Lazy"}]) + (map (fn (a,b) => HOLogic.mk_tuple [HOLogic.mk_list @{typ "Lazy"} + (map mk_abs a), mk_abs b]) spec) + in + thy |> (Sign.add_path (space_implode "_" [dataname])) + |> Sign.add_consts [(tsbnd, tstype, NoSyn)] + |> def (Const(fqtsname,tstype)) (tsbnd) ts_term + |> (Sign.parent_path) + end + +*} + + +ML {* (Outer_Syntax.command (("generate_test_script"), Position.none) + "generate code equation representing test script" + (Parse.name >> (fn name => Toplevel.theory (gen_ts_def name)))) +*} + +ML{* +fun export_test_data ctxt filename dataname = + let + val filename = Path.explode filename + val thy = Proof_Context.theory_of ctxt + val master_dir = Resources.master_directory thy + val td = TestGen.get_test_data thy dataname + val today = (Date.toString(Date.fromTimeUniv (Time.now())))^" (UTC)"; + val abs_filename = if (Path.is_absolute filename) then filename else Path.append master_dir filename + val thyname = Context.theory_name thy + (* fun string_of_term ctxt = Pretty.unformatted_string_of o (Syntax.unparse_term ctxt) *) + val string_of_term = Sledgehammer_Util.hackish_string_of_term + fun generate () = + let + val num = Int.toString(length td) + fun string_of_prems ctxt [] = "" + | string_of_prems ctxt (p::ps) = ("\n" + ^" "^(string_of_term ctxt p) ^ "\n" + ^" \n")^(string_of_prems ctxt ps) + val thmsstrings = String.concat (map (fn t => "\n" + ^(string_of_prems ctxt (Thm.prems_of t)) + ^" \n" + ^" "^string_of_term ctxt (Thm.prop_of t) ^ "\n" + ^" \n" + ^"\n" + ) td) + in + "\n\n" + ^thmsstrings + end + + val test_data_str = Print_Mode.setmp [] generate (); + val _ = File.write (abs_filename) test_data_str + handle (IO.Io{name=name,...}) => warning ("Could not write \""^(name)^"\".") + in () end +*} + +ML {* + Outer_Syntax.command (("export_test_data"), Position.none) + "export test data to an external file" + (Parse.name -- Parse.name + >> (fn (filename,name) => + (Toplevel.keep (fn state => export_test_data (Toplevel.context_of state) filename name)))); + + *} + +end diff --git a/src/test-gen/src/test/TestSequence.thy b/src/test-gen/src/test/TestSequence.thy new file mode 100644 index 0000000..861e8df --- /dev/null +++ b/src/test-gen/src/test/TestSequence.thy @@ -0,0 +1,1001 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * Monads.thy --- a base testing theory for sequential computations. + * This file is part of HOL-TestGen. 
+ * + * Copyright (c) 2005-2007, ETH Zurich, Switzerland + * 2009 B. Wolff, Univ. Paris-Sud, France + * 2009, 2012 Achim D. Brucker, Germany + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id:$ *) + +chapter {* Basic Monad Theory for Sequential Computations *} + +theory Monads imports Main +begin + + +section{*General Framework for Monad-based Sequence-Test *} +text{* As such, Higher-order Logic as a purely functional specification + formalism has no built-in mechanism for state and state-transitions. + Forms of testing involving state require therefore explicit mechanisms + for their treatment inside the logic; a well-known technique to model + states inside purely functional languages are \emph{monads} made popular + by Wadler and Moggi and extensively used in Haskell. \HOL is powerful + enough to represent the most important standard monads; + however, it is not possible to represent monads as such due to well-known + limitations of the Hindley-Milner type-system. *} + +text{* Here is a variant for state-exception monads, that models precisely + transition functions with preconditions. Next, we declare the + state-backtrack-monad. In all of them, our concept of i/o stepping + functions can be formulated; these are functions mapping input + to a given monad. Later on, we will build the usual concepts of: + \begin{enumerate} + \item deterministic i/o automata, + \item non-deterministic i/o automata, and + \item labelled transition systems (LTS) + \end{enumerate} +*} + +subsection{* Standard State Exception Monads *} +type_synonym ('o, '\) MON\<^sub>S\<^sub>E = "'\ \ ('o \ '\)" (* = '\ \ ('o \ '\)option *) + + +definition bind_SE :: "('o,'\)MON\<^sub>S\<^sub>E \ ('o \ ('o','\)MON\<^sub>S\<^sub>E) \ ('o','\)MON\<^sub>S\<^sub>E" +where "bind_SE f g = (\\. 
case f \ of None \ None + | Some (out, \') \ g out \')" + +notation bind_SE ("bind\<^sub>S\<^sub>E") + +syntax (xsymbols) + "_bind_SE" :: "[pttrn,('o,'\)MON\<^sub>S\<^sub>E,('o','\)MON\<^sub>S\<^sub>E] \ ('o','\)MON\<^sub>S\<^sub>E" + ("(2 _ \ _; _)" [5,8,8]8) +translations + "x \ f; g" == "CONST bind_SE f (% x . g)" + + +definition unit_SE :: "'o \ ('o, '\)MON\<^sub>S\<^sub>E" ("(return _)" 8) +where "unit_SE e = (\\. Some(e,\))" +notation unit_SE ("unit\<^sub>S\<^sub>E") + +definition fail_SE :: "('o, '\)MON\<^sub>S\<^sub>E" +where "fail_SE = (\\. None)" +notation fail_SE ("fail\<^sub>S\<^sub>E") + +definition assert_SE :: "('\ \ bool) \ (bool, '\)MON\<^sub>S\<^sub>E" +where "assert_SE P = (\\. if P \ then Some(True,\) else None)" +notation assert_SE ("assert\<^sub>S\<^sub>E") + +definition assume_SE :: "('\ \ bool) \ (unit, '\)MON\<^sub>S\<^sub>E" +where "assume_SE P = (\\. if \\ . P \ then Some((), SOME \ . P \) else None)" +notation assume_SE ("assume\<^sub>S\<^sub>E") + +text{* The standard monad theorems about unit and associativity: *} + +lemma bind_left_unit [simp]: "(x \ return c; P x) = P c" + by (simp add: unit_SE_def bind_SE_def) + + +lemma bind_left_fail_SE[simp] : "(x \ fail\<^sub>S\<^sub>E; P x) = fail\<^sub>S\<^sub>E" + by (simp add: fail_SE_def bind_SE_def) + + +lemma bind_right_unit[simp]: "(x \ m; return x) = m" + apply (simp add: unit_SE_def bind_SE_def) + apply (rule ext) + apply (case_tac "m \", simp_all) + done + +lemma bind_assoc[simp]: "(y \ (x \ m; k x); h y) = (x \ m; (y \ k x; h y))" + apply (simp add: unit_SE_def bind_SE_def, rule ext) + apply (case_tac "m \", simp_all) + apply (case_tac "a", simp_all) + done + + +text{* The bind-operator in the state-exception monad yields already + a semantics for the concept of an input sequence on the meta-level: *} +lemma syntax_test: "(o1 \ f1 ; o2 \ f2; return (post o1 o2)) = X" +oops + + + +subsection {* "Pipe-free" - variant of the bind. *} + +definition seq_SE :: "[('\, '\)MON\<^sub>S\<^sub>E, ('\, '\)MON\<^sub>S\<^sub>E] \ ('\, '\)MON\<^sub>S\<^sub>E" (infixl ";-" 65) +where "f ;- g = (_ \ f ; g)" + + + +subsection {* Monadic If *} + +definition if_SE :: "['\ \ bool, ('\, '\)MON\<^sub>S\<^sub>E, ('\, '\)MON\<^sub>S\<^sub>E] \ ('\, '\)MON\<^sub>S\<^sub>E" +where "if_SE c E F = (\\. if c \ then E \ else F \)" + +syntax (xsymbols) + "_if_SE" :: "['\ \ bool,('o,'\)MON\<^sub>S\<^sub>E,('o','\)MON\<^sub>S\<^sub>E] \ ('o','\)MON\<^sub>S\<^sub>E" + ("(if\<^sub>S\<^sub>E _ then _ else _fi)" [5,8,8]8) +translations + "(if\<^sub>S\<^sub>E cond then T1 else T2 fi)" == "CONST if_SE cond T1 T2" + + + +subsubsection{* Theory of a Monadic While *} + +text{* First Step : Establishing an embedding between partial functions and relations *} +(* plongement *) +definition Mon2Rel :: "(unit, '\)MON\<^sub>S\<^sub>E \ ('\ \ '\) set" +where "Mon2Rel f = {(x, y). (f x = Some((), y))}" +(* ressortir *) +definition Rel2Mon :: " ('\ \ '\) set \ (unit, '\)MON\<^sub>S\<^sub>E " +where "Rel2Mon S = (\ \. if \\'. (\, \') \ S then Some((), SOME \'. (\, \') \ S) else None)" + +lemma Mon2Rel_Rel2Mon_id: assumes det:"single_valued R" shows "(Mon2Rel \ Rel2Mon) R = R" +apply (simp add: comp_def Mon2Rel_def Rel2Mon_def,auto) +apply (case_tac "\\'. 
(a, \') \ R", auto) +apply (subst some_eq_ex) back +apply (insert det[simplified single_valued_def]) +apply (auto) +done + +lemma Rel2Mon_Id: "(Rel2Mon \ Mon2Rel) x = x" +apply (rule ext) +apply (auto simp: comp_def Mon2Rel_def Rel2Mon_def) +apply (erule contrapos_pp, drule HOL.not_sym, simp) +done + +lemma single_valued_Mon2Rel: "single_valued (Mon2Rel B)" +by (auto simp: single_valued_def Mon2Rel_def) + +text{* Second Step : Proving an induction principle allowing to establish that lfp remains + deterministic *} + + +(* Due to Tobias Nipkow *) +definition chain :: "(nat => 'a set) => bool" +where "chain S = (\i. S i \ S(Suc i))" + +lemma chain_total: "chain S ==> S i \ S j \ S j \ S i" +by (metis chain_def le_cases lift_Suc_mono_le) + +definition cont :: "('a set => 'b set) => bool" +where "cont f = (\S. chain S \ f(UN n. S n) = (UN n. f(S n)))" + +lemma mono_if_cont: fixes f :: "'a set => 'b set" + assumes "cont f" shows "mono f" +proof + fix a b :: "'a set" assume "a \ b" + let ?S = "\n::nat. if n=0 then a else b" + have "chain ?S" using `a \ b` by(auto simp: chain_def) + hence "f(UN n. ?S n) = (UN n. f(?S n))" + using assms by(simp add: cont_def) + moreover have "(UN n. ?S n) = b" using `a \ b` by (auto split: if_splits) + moreover have "(UN n. f(?S n)) = f a \ f b" by (auto split: if_splits) + ultimately show "f a \ f b" by (metis Un_upper1) +qed + +lemma chain_iterates: fixes f :: "'a set => 'a set" + assumes "mono f" shows "chain(\n. (f^^n) {})" +proof- + { fix n have "(f ^^ n) {} \ (f ^^ Suc n) {}" using assms + by(induction n) (auto simp: mono_def) } + thus ?thesis by(auto simp: chain_def) +qed + +theorem lfp_if_cont: + assumes "cont f" shows "lfp f = (UN n. (f^^n) {})" (is "_ = ?U") +proof + show "lfp f \ ?U" + proof (rule lfp_lowerbound) + have "f ?U = (UN n. (f^^Suc n){})" + using chain_iterates[OF mono_if_cont[OF assms]] assms + by(simp add: cont_def) + also have "\ = (f^^0){} \ \" by simp + also have "\ = ?U" + by(auto simp del: funpow.simps) (metis not0_implies_Suc) + finally show "f ?U \ ?U" by simp + qed +next + { fix n p assume "f p \ p" + have "(f^^n){} \ p" + proof(induction n) + case 0 show ?case by simp + next + case Suc + from monoD[OF mono_if_cont[OF assms] Suc] `f p \ p` + show ?case by simp + qed + } + thus "?U \ lfp f" by(auto simp: lfp_def) +qed + + +lemma single_valued_UN_chain: + assumes "chain S" "(!!n. single_valued (S n))" + shows "single_valued(UN n. S n)" +proof(auto simp: single_valued_def) + fix m n x y z assume "(x, y) \ S m" "(x, z) \ S n" + with chain_total[OF assms(1), of m n] assms(2) + show "y = z" by (auto simp: single_valued_def) +qed + +lemma single_valued_lfp: +fixes f :: "('a \ 'a) set => ('a \ 'a) set" +assumes "cont f" "\r. single_valued r \ single_valued (f r)" +shows "single_valued(lfp f)" +unfolding lfp_if_cont[OF assms(1)] +proof(rule single_valued_UN_chain[OF chain_iterates[OF mono_if_cont[OF assms(1)]]]) + fix n show "single_valued ((f ^^ n) {})" + by(induction n)(auto simp: assms(2)) +qed + + +text{* Third Step: Definition of the Monadic While *} +definition \ :: "['\ \ bool,('\ \ '\) set] \ (('\ \ '\) set \ ('\ \ '\) set)" +where "\ b cd = (\cw. {(s,t). 
if b s then (s, t) \ cd O cw else s = t})" + + +definition while_SE :: "['\ \ bool, (unit, '\)MON\<^sub>S\<^sub>E] \ (unit, '\)MON\<^sub>S\<^sub>E" +where "while_SE c B \ (Rel2Mon(lfp(\ c (Mon2Rel B))))" + +syntax (xsymbols) + "_while_SE" :: "['\ \ bool, (unit, '\)MON\<^sub>S\<^sub>E] \ (unit, '\)MON\<^sub>S\<^sub>E" + ("(while\<^sub>S\<^sub>E _ do _ od)" [8,8]8) +translations + "while\<^sub>S\<^sub>E c do b od" == "CONST while_SE c b" + +lemma cont_\: "cont (\ c b)" +by (auto simp: cont_def \_def) + +text{* The fixpoint theory now allows us to establish that the lfp constructed over + @{term Mon2Rel} remains deterministic *} + +theorem single_valued_lfp_Mon2Rel: "single_valued (lfp(\ c (Mon2Rel B)))" +apply(rule single_valued_lfp, simp_all add: cont_\) +apply(auto simp: \_def single_valued_def) +apply(metis single_valued_Mon2Rel[of "B"] single_valued_def) +done + + +lemma Rel2Mon_if: + "Rel2Mon {(s, t). if b s then (s, t) \ Mon2Rel c O lfp (\ b (Mon2Rel c)) else s = t} \ = + (if b \ then Rel2Mon (Mon2Rel c O lfp (\ b (Mon2Rel c))) \ else Some ((), \))" +by (simp add: Rel2Mon_def) + +lemma Rel2Mon_homomorphism: + assumes determ_X: "single_valued X" and determ_Y: "single_valued Y" + shows "Rel2Mon (X O Y) = (Rel2Mon X) ;- (Rel2Mon Y)" +proof - + have relational_partial_next_in_O: "!!x E F. (\y. (x, y) \ (E O F)) + \ (\y. (x, y) \ E)" by (auto) +show ?thesis +apply (simp add: Rel2Mon_def seq_SE_def bind_SE_def) +apply (rule ext) +apply (case_tac " \\'. (\, \') \ X O Y") +apply (simp only: HOL.if_True) +apply (frule relational_partial_next_in_O) +apply (auto) +apply (insert determ_X determ_Y) + +defer 1 + +apply (rule_tac x=z in exI) +apply (rule someI2) +apply (assumption)+ +apply (simp add: single_valued_def) +apply (metis) + +apply (erule contrapos_pp) +apply (simp) +apply (rule_tac x=\' in exI) + apply (subgoal_tac "(SOME \'. (\, \') \ X) = \''") + apply (auto) + apply (auto simp: single_valued_def) +apply (fold single_valued_def) + + apply (subgoal_tac "(SOME \'. (x, \') \ X) = y") + apply (simp) + apply (subgoal_tac "(SOME \'. (y, \') \ Y) = z") + apply (simp) + apply (subgoal_tac "(SOME \'. (x, \') \ X O Y) = z") + apply (simp) + apply (auto simp: single_valued_def) + (* TODO IMPORTANT *) + + apply (rule someI2) + apply (assumption) + apply (rule someI2) + apply (auto) +oops +(* qed *) + +text{* Putting everything together, the theory of embedding and the invariance of + determinism of the while-body, gives us the usual unfold-theorem: *} +theorem "(while\<^sub>S\<^sub>E b do c od) = (if\<^sub>S\<^sub>E b then (c ;- (while\<^sub>S\<^sub>E b do c od)) else return () fi)" +apply (simp add: if_SE_def seq_SE_def while_SE_def unit_SE_def) +apply (subst lfp_unfold [OF mono_if_cont, OF cont_\]) +apply (rule ext) +apply (subst \_def) +apply (auto simp: Rel2Mon_if Rel2Mon_homomorphism seq_SE_def Rel2Mon_Id [simplified comp_def] + single_valued_Mon2Rel single_valued_lfp_Mon2Rel ) +done + + + +subsection{* Multi-binds *} + +text{* In order to express test-sequences also on the object-level and +to make our theory amenable to formal reasoning over test-sequences, +we represent them as lists of input and generalize the bind-operator +of the state-exception monad accordingly. The approach is straightforward, +but comes with a price: we have to encapsulate all input and output data +into one type. Assume that we have a typed interface to a module with +the operations $op_1$, $op_2$, \ldots, $op_n$ with the inputs +$\iota_1$, $\iota_2$, \ldots, $\iota_n$ (outputs are treated analogously). 
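+(A purely illustrative instance: a small key-value store offering, say, an operation $lookup$ taking a key and an operation $update$ taking a key/value pair would be wrapped into one input datatype with exactly one constructor per operation, each carrying that operation's input data.)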
+Then we can encode for this interface the general input type: +\begin{displaymath} +\texttt{datatype}\ \texttt{in}\ =\ op_1\ ::\ \iota_1\ |\ ...\ |\ \iota_n +\end{displaymath} +Obviously, we lose some type-safety in this approach; we have to express +that in traces only \emph{corresponding} input and output belonging to the +same operation will occur; this form of side-condition has to be expressed +inside \HOL. From the user perspective, this will not make much difference, +since junk-data resulting from too weak typing can be ruled out by adapted +front-ends. +*} + +text{* Note that the subsequent notion of a test-sequence allows the I/O stepping +function (and the special case of a program under test) to stop execution +\emph{within} the sequence; such premature terminations are characterized by an +output list which is shorter than the input list. *} + +fun mbind :: "'\<iota> list \<Rightarrow> ('\<iota> \<Rightarrow> ('o,'\<sigma>) MON\<^sub>S\<^sub>E) \<Rightarrow> ('o list,'\<sigma>) MON\<^sub>S\<^sub>E" +where "mbind [] iostep \<sigma> = Some([], \<sigma>)" | + "mbind (a#H) iostep \<sigma> = + (case iostep a \<sigma> of + None \<Rightarrow> Some([], \<sigma>) + | Some (out, \<sigma>') \<Rightarrow> (case mbind H iostep \<sigma>' of + None \<Rightarrow> Some([out],\<sigma>') + | Some(outs,\<sigma>'') \<Rightarrow> Some(out#outs,\<sigma>'')))" + +notation mbind ("mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e") (* future name: mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e *) + +text{* This definition is fail-safe; in case of an exception, the current state is maintained, + the computation as a whole is marked as success. + Compare to the fail-strict variant @{text "mbind'"}: *} + +lemma mbind_unit [simp]: + "mbind [] f = (return [])" + by(rule ext, simp add: unit_SE_def) + +text{* The characteristic property of @{term mbind} --- which distinguishes it from + @{text mbind'} defined in the sequel --- is that it never fails; it ``swallows'' internal + errors occurring during the computation.
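+For instance, unfolding the two defining equations above: if the step function succeeds on a first input $a$ with output $out_a$ and successor state $\sigma_a$, but fails on a second input $b$, then $mbind\ [a,b]\ iostep\ \sigma$ evaluates to $Some([out_a],\sigma_a)$; the failing suffix is discarded, while the output prefix and the last reached state are kept and the computation as a whole still succeeds.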
*} +lemma mbind_nofailure [simp]: + "mbind S f \ \ None" + apply(rule_tac x=\ in spec) + apply(induct S, auto simp:unit_SE_def) + apply(case_tac "f a x", auto) + apply(erule_tac x="b" in allE) + apply(erule exE, erule exE, simp) + done + + +fun mbind' :: "'\ list \ ('\ \ ('o,'\) MON\<^sub>S\<^sub>E) \ ('o list,'\) MON\<^sub>S\<^sub>E" +where "mbind' [] iostep \ = Some([], \)" | + "mbind' (a#S) iostep \ = + (case iostep a \ of + None \ None + | Some (out, \') \ (case mbind' S iostep \' of + None \ None (* fail-strict *) + | Some(outs,\'') \ Some(out#outs,\'')))" +notation mbind' ("mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p") (* future name: mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p *) + +lemma mbind'_unit [simp]: + "mbind' [] f = (return [])" + by(rule ext, simp add: unit_SE_def) + +lemma mbind'_bind [simp]: + "(x \ mbind' (a#S) F; M x) = (a \ (F a); (x \ mbind' S F; M (a # x)))" + by(rule ext, rename_tac "z",simp add: bind_SE_def split: option.split) + +declare mbind'.simps[simp del] (* use only more abstract definitions *) + + +fun mbind'' :: "'\ list \ ('\ \ ('o,'\) MON\<^sub>S\<^sub>E) \ ('o list,'\) MON\<^sub>S\<^sub>E" +where "mbind'' [] iostep \ = Some([], \)" | + "mbind'' (a#S) iostep \ = + (case iostep a \ of + None \ mbind'' S iostep \ + | Some (out, \') \ (case mbind'' S iostep \' of + None \ None (* does not occur *) + | Some(outs,\'') \ Some(out#outs,\'')))" + +notation mbind'' ("mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e") (* future name: mbind\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e\<^sub>F\<^sub>a\<^sub>i\<^sub>l *) +declare mbind''.simps[simp del] (* use only more abstract definitions *) + + +text{* mbind' as failure strict operator can be seen as a foldr on bind - + if the types would match \ldots *} + +definition try_SE :: "('o,'\) MON\<^sub>S\<^sub>E \ ('o option,'\) MON\<^sub>S\<^sub>E" ("try\<^sub>S\<^sub>E") +where "try\<^sub>S\<^sub>E ioprog = (\\. case ioprog \ of + None \ Some(None, \) + | Some(outs, \') \ Some(Some outs, \'))" +text{* In contrast, mbind as a failure safe operator can roughly be seen + as a foldr on bind - try: + m1 ; try m2 ; try m3; ... Note, that the rough equivalence only holds for + certain predicates in the sequence - length equivalence modulo None, + for example. However, if a conditional is added, the equivalence + can be made precise: *} + +lemma mbind_try: + "(x \ mbind (a#S) F; M x) = + (a' \ try\<^sub>S\<^sub>E(F a); + if a' = None + then (M []) + else (x \ mbind S F; M (the a' # x)))" +apply(rule ext) +apply(simp add: bind_SE_def try_SE_def) +apply(case_tac "F a x", auto) +apply(simp add: bind_SE_def try_SE_def) +apply(case_tac "mbind S F b", auto) +done + +text{* On this basis, a symbolic evaluation scheme can be established + that reduces mbind-code to try\_SE\_code and ite-cascades. *} + +definition alt_SE :: "[('o, '\)MON\<^sub>S\<^sub>E, ('o, '\)MON\<^sub>S\<^sub>E] \ ('o, '\)MON\<^sub>S\<^sub>E" (infixl "\\<^sub>S\<^sub>E" 10) +where "(f \\<^sub>S\<^sub>E g) = (\ \. 
case f \ of None \ g \ + | Some H \ Some H)" + +definition malt_SE :: "('o, '\)MON\<^sub>S\<^sub>E list \ ('o, '\)MON\<^sub>S\<^sub>E" +where "malt_SE S = foldr alt_SE S fail\<^sub>S\<^sub>E" +notation malt_SE ("\\<^sub>S\<^sub>E") + +lemma malt_SE_mt [simp]: "\\<^sub>S\<^sub>E [] = fail\<^sub>S\<^sub>E" +by(simp add: malt_SE_def) + +lemma malt_SE_cons [simp]: "\\<^sub>S\<^sub>E (a # S) = (a \\<^sub>S\<^sub>E (\\<^sub>S\<^sub>E S))" +by(simp add: malt_SE_def) + + +subsection{* State Backtrack Monads *} +text{*This subsection is still rudimentary and as such an interesting +formal analogue to the previous monad definitions. It is doubtful that it is +interesting for testing and as a cmputational stucture at all. +Clearly more relevant is ``sequence'' instead of ``set,'' which would +rephrase Isabelle's internal tactic concept. *} + +type_synonym ('o, '\) MON\<^sub>S\<^sub>B = "'\ \ ('o \ '\) set" + +definition bind_SB :: "('o, '\)MON\<^sub>S\<^sub>B \ ('o \ ('o', '\)MON\<^sub>S\<^sub>B) \ ('o', '\)MON\<^sub>S\<^sub>B" +where "bind_SB f g \ = \ ((\(out, \). (g out \)) ` (f \))" +notation bind_SB ("bind\<^sub>S\<^sub>B") + +definition unit_SB :: "'o \ ('o, '\)MON\<^sub>S\<^sub>B" ("(returns _)" 8) +where "unit_SB e = (\\. {(e,\)})" +notation unit_SB ("unit\<^sub>S\<^sub>B") + +syntax (xsymbols) + "_bind_SB" :: "[pttrn,('o,'\)MON\<^sub>S\<^sub>B,('o','\)MON\<^sub>S\<^sub>B] \ ('o','\)MON\<^sub>S\<^sub>B" + ("(2 _ := _; _)" [5,8,8]8) +translations + "x := f; g" == "CONST bind_SB f (% x . g)" + + + +lemma bind_left_unit_SB : "(x := returns a; m x) = m a" + by (rule ext,simp add: unit_SB_def bind_SB_def) + +lemma bind_right_unit_SB: "(x := m; returns x) = m" + by (rule ext, simp add: unit_SB_def bind_SB_def) + + +lemma bind_assoc_SB: "(y := (x := m; k x); h y) = (x := m; (y := k x; h y))" + by (rule ext, simp add: unit_SB_def bind_SB_def split_def) + + +subsection{* State Backtrack Exception Monad (vulgo: Boogie-PL) *} +text{* The following combination of the previous two Monad-Constructions +allows for the semantic foundation of a simple generic assertion language +in the style of Schirmers Simpl-Language or Rustan Leino's Boogie-PL language. +The key is to use the exceptional element None for violations of +the assert-statement. *} +type_synonym ('o, '\) MON\<^sub>S\<^sub>B\<^sub>E = "'\ \ (('o \ '\) set) option" + + +definition bind_SBE :: "('o,'\)MON\<^sub>S\<^sub>B\<^sub>E \ ('o \ ('o','\)MON\<^sub>S\<^sub>B\<^sub>E) \ ('o','\)MON\<^sub>S\<^sub>B\<^sub>E" +where "bind_SBE f g = (\\. case f \ of None \ None + | Some S \ (let S' = (\(out, \'). g out \') ` S + in if None \ S' then None + else Some(\ (the ` S'))))" + +syntax (xsymbols) + "_bind_SBE" :: "[pttrn,('o,'\)MON\<^sub>S\<^sub>B\<^sub>E,('o','\)MON\<^sub>S\<^sub>B\<^sub>E] \ ('o','\)MON\<^sub>S\<^sub>B\<^sub>E" + ("(2 _ :\ _; _)" [5,8,8]8) +translations + "x :\ f; g" == "CONST bind_SBE f (% x . g)" + +definition unit_SBE :: "'o \ ('o, '\)MON\<^sub>S\<^sub>B\<^sub>E" ("(returning _)" 8) +where "unit_SBE e = (\\. Some({(e,\)}))" + +definition assert_SBE :: "('\ \ bool) \ (unit, '\)MON\<^sub>S\<^sub>B\<^sub>E" +where "assert_SBE e = (\\. if e \ then Some({((),\)}) + else None)" +notation assert_SBE ("assert\<^sub>S\<^sub>B\<^sub>E") + +definition assume_SBE :: "('\ \ bool) \ (unit, '\)MON\<^sub>S\<^sub>B\<^sub>E" +where "assume_SBE e = (\\. if e \ then Some({((),\)}) + else Some {})" +notation assume_SBE ("assume\<^sub>S\<^sub>B\<^sub>E") + + +definition havoc_SBE :: " (unit, '\)MON\<^sub>S\<^sub>B\<^sub>E" +where "havoc_SBE = (\\. Some({x. 
True}))" +notation havoc_SBE ("havoc\<^sub>S\<^sub>B\<^sub>E") + + +lemma bind_left_unit_SBE : "(x :\ returning a; m x) = m a" + apply (rule ext,simp add: unit_SBE_def bind_SBE_def) + apply (case_tac "m a x",auto) + done + +lemma bind_right_unit_SBE: "(x :\ m; returning x) = m" + apply (rule ext, simp add: unit_SBE_def bind_SBE_def) + apply (case_tac "m x", simp_all add:Let_def) + apply (rule HOL.ccontr, simp add: Set.image_iff) + done + + +lemmas aux = trans[OF HOL.neq_commute,OF Option.not_None_eq] + +lemma bind_assoc_SBE: "(y :\ (x :\ m; k); h y) = (x :\ m; (y :\ k; h y))" +proof (rule ext, rename_tac z, simp add: unit_SBE_def bind_SBE_def, + case_tac "m z", simp_all add: Let_def Set.image_iff, safe) + case goal1 then show ?case + by(rule_tac x="(a, b)" in bexI, simp_all) +next + case goal2 then show ?case + apply(rule_tac x="(aa,b)" in bexI, simp_all add:split_def) + apply(erule_tac x="(aa,b)" in ballE) + apply(auto simp: aux image_def split_def intro!: rev_bexI) + done +next + case goal3 then show ?case + by(rule_tac x="(a, b)" in bexI, simp_all) +next + case goal4 then show ?case + apply(erule_tac Q="None = ?X" in contrapos_pp) + apply(erule_tac x="(aa,b)" and P="\ x. None \ split (\out. k) x" in ballE) + apply(auto simp: aux Option.not_None_eq image_def split_def intro!: rev_bexI) + done +next + case goal5 then show ?case + apply simp apply((erule_tac x="(ab,ba)" in ballE)+) + apply(simp_all add: aux Option.not_None_eq, (erule exE)+, simp add:split_def) + apply(erule rev_bexI,case_tac "None\(\p. h (fst p) (snd p))`y",auto simp:split_def) + done + +next + case goal6 then show ?case + apply simp apply((erule_tac x="(a,b)" in ballE)+) + apply(simp_all add: aux Option.not_None_eq, (erule exE)+, simp add:split_def) + apply(erule rev_bexI, case_tac "None\(\p. h(fst p)(snd p))`y",auto simp:split_def) + done +qed + + + +(* TODO: IF THEN ELSE and WHILE + Monadic Laws + Operational Rules. *) + + + +section{* Valid Execution Sequences in the State Exception Monad *} +text{* This is still an unstructured merge of executable monad concepts +and specification oriented high-level properties initiating test procedures. *} + +definition valid_SE :: "'\ \ (bool,'\) MON\<^sub>S\<^sub>E \ bool" (infix "\" 15) +where "(\ \ m) = (m \ \ None \ fst(the (m \)))" +text{* This notation consideres failures as valid -- a definition +inspired by I/O conformance. BUG: It is not possible to define +this concept once and for all in a Hindley-Milner type-system. +For the moment, we present it only for the state-exception +monad, although for the same definition, this notion is applicable +to other monads as well. *} + + +lemma exec_unit_SE [simp]: "(\ \ (return P)) = (P)" +by(auto simp: valid_SE_def unit_SE_def) + +lemma exec_unit_SE' [simp]: "(\\<^sub>0 \ (\\. Some (f \, \))) = (f \\<^sub>0)" +by(simp add: valid_SE_def ) + +lemma exec_fail_SE [simp]: "(\ \ fail\<^sub>S\<^sub>E) = False" +by(auto simp: valid_SE_def fail_SE_def) + + +lemma exec_fail_SE'[simp]: "\(\\<^sub>0 \ (\\. None))" +by(simp add: valid_SE_def ) + +lemma exec_bind_SE_failure: +"A \ = None \ \(\ \ ((s \ A ; M s)))" +by(simp add: valid_SE_def unit_SE_def bind_SE_def) + +lemma exec_bind_SE_success: +"A \ = Some(b,\') \ (\ \ ((s \ A ; M s))) = (\' \ (M b))" +by(simp add: valid_SE_def unit_SE_def bind_SE_def ) + +lemma exec_bind_SE_success': (* atomic boolean Monad "Query Functions" *) +"M \ = Some(f \,\) \ (\ \ M) = f \" +by(simp add: valid_SE_def unit_SE_def bind_SE_def ) + + + +lemma exec_bind_SE_success'': +"\ \ ((s \ A ; M s)) \ \ v \'. 
the(A \) = (v,\') \ \' \ (M v)" +apply(auto simp: valid_SE_def unit_SE_def bind_SE_def) +apply(cases "A \", simp_all) +apply(simp add: Product_Type.prod_case_unfold) +apply(drule_tac x="A \" and f=the in arg_cong, simp) +apply(rule_tac x="fst aa" in exI) +apply(rule_tac x="snd aa" in exI, auto) +done + + +lemma exec_bind_SE_success''': +"\ \ ((s \ A ; M s)) \ \ a. (A \) = Some a \ (snd a) \ (M (fst a))" +apply(auto simp: valid_SE_def unit_SE_def bind_SE_def) +apply(cases "A \", simp_all) +apply(simp_all add: Product_Type.prod_case_unfold + split: prod.splits) +apply(drule_tac x="A \" and f=the in arg_cong, simp) +apply(rule_tac x="fst aa" in exI) +apply(rule_tac x="snd aa" in exI, auto) +done + + +lemma exec_bind_SE_success'''' : +"\ \ ((s \ A ; M s)) \ \ v \'. A \ = Some(v,\') \ \' \ (M v)" +apply(auto simp: valid_SE_def unit_SE_def bind_SE_def) +apply(cases "A \", simp_all) +apply(simp add: Product_Type.prod_case_unfold) +apply(drule_tac x="A \" and f=the in arg_cong, simp) +apply(rule_tac x="fst aa" in exI) +apply(rule_tac x="snd aa" in exI, auto) +done + + +text{* Recall \verb+mbind_unit+ for the base case. *} + +lemma exec_mbindFSave_failure: +"ioprog a \ = None \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (a#S) ioprog ; M s)) = (\ \ (M []))" +by(simp add: valid_SE_def unit_SE_def bind_SE_def) + +lemma exec_mbindFStop_failure: +"ioprog a \ = None \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (a#S) ioprog ; M s)) = (False)" +by(simp add: exec_bind_SE_failure) + +lemma exec_mbindFPurge_failure: +"ioprog a \ = None \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e (a#S) ioprog ; M s)) = (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e (S) ioprog ; M s))" +by(simp add: valid_SE_def unit_SE_def bind_SE_def mbind''.simps) + + +lemma exec_mbindFSave_success : +"ioprog a \ = Some(b,\') \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (a#S) ioprog ; M s)) = + (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S ioprog ; M (b#s)))" +unfolding valid_SE_def unit_SE_def bind_SE_def +by(cases "mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S ioprog \'", auto) + +lemma exec_mbindFStop_success : +"ioprog a \ = Some(b,\') \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (a#S) ioprog ; M s)) = + (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S ioprog ; M (b#s)))" +unfolding valid_SE_def unit_SE_def bind_SE_def +by(cases "mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S ioprog \'", auto simp: mbind'.simps) + +lemma exec_mbindFPurge_success : +"ioprog a \ = Some(b,\') \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e (a#S) ioprog ; M s)) = + (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e S ioprog ; M (b#s)))" +unfolding valid_SE_def unit_SE_def bind_SE_def +by(cases "mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e S ioprog \'", auto simp: mbind''.simps) + +lemma exec_mbindFSave: +"(\ \ (s \ mbind (a#S) ioprog ; return (P s))) = + (case ioprog a \ of + None \ (\ \ (return (P []))) + | Some(b,\') \ (\' \ (s \ mbind S ioprog ; return (P (b#s)))))" +apply(case_tac "ioprog a \") +apply(auto simp: exec_mbindFSave_failure exec_mbindFSave_success split: prod.splits) 
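+(* After the case distinction on the io-step: the None case has been closed via exec_mbindFSave_failure, while the remaining Some case is finished by the rewrites with exec_mbindFSave_success below. *)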
+apply(subst (asm) exec_mbindFSave_success,assumption,simp_all) +by(subst exec_mbindFSave_success,assumption,simp)+ + + +text{* Universal splitting and symbolic execution rule *} +lemma exec_mbindFSave_E: +assumes seq : "(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (a#S) ioprog ; (P s)))" + and none: "ioprog a \ = None \ (\ \ (P [])) \ Q" + and some: "\ b \'. ioprog a \ = Some(b,\') \ (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S ioprog;(P (b#s)))) \ Q " +shows "Q" +using seq +proof(cases "ioprog a \") + case None assume ass:"ioprog a \ = None" show "Q" + apply(rule none[OF ass]) + apply(insert ass, erule_tac ioprog1=ioprog in exec_mbindFSave_failure[THEN iffD1],rule seq) + done +next + case (Some aa) assume ass:"ioprog a \ = Some aa" show "Q" + apply(insert ass,cases "aa",simp, rename_tac "out" "\'") + apply(erule some) + apply(insert ass,simp) + apply(erule_tac ioprog1=ioprog in exec_mbindFSave_success[THEN iffD1],rule seq) + done +qed + +text{* The next rule reveals the particular interest in deduction; + as an elimination rule, it allows for a linear conversion of a validity judgement + @{term "mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p"} over an input list @{term "S"} into a constraint system; without any + branching ... Symbolic execution can even be stopped tactically whenever + @{term "ioprog a \ = Some(b,\')"} comes to a contradiction. *} +lemma exec_mbindFStop_E: +assumes seq : "(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (a#S) ioprog ; (P s)))" + and some: "\b \'. ioprog a \ = Some(b,\') \ (\'\ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S ioprog;(P(b#s)))) \ Q" +shows "Q" +using seq +proof(cases "ioprog a \") + case None assume ass:"ioprog a \ = None" show "Q" + apply(insert ass seq) + apply(drule_tac \=\ and S=S and M=P in exec_mbindFStop_failure, simp) + done +next + case (Some aa) assume ass:"ioprog a \ = Some aa" show "Q" + apply(insert ass,cases "aa",simp, rename_tac "out" "\'") + apply(erule some) + apply(insert ass,simp) + apply(erule_tac ioprog1=ioprog in exec_mbindFStop_success[THEN iffD1],rule seq) + done +qed + + +lemma exec_mbindFPurge_E: +assumes seq : "(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e (a#S) ioprog ; (P s)))" + and none: "ioprog a \ = None \ (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e S ioprog;(P (s)))) \ Q" + and some: "\ b \'. 
ioprog a \ = Some(b,\') \ (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e S ioprog;(P (b#s)))) \ Q " +shows "Q" +using seq +proof(cases "ioprog a \") + case None assume ass:"ioprog a \ = None" show "Q" + apply(rule none[OF ass]) + apply(insert ass, erule_tac ioprog1=ioprog in exec_mbindFPurge_failure[THEN iffD1],rule seq) + done +next + case (Some aa) assume ass:"ioprog a \ = Some aa" show "Q" + apply(insert ass,cases "aa",simp, rename_tac "out" "\'") + apply(erule some) + apply(insert ass,simp) + apply(erule_tac ioprog1=ioprog in exec_mbindFPurge_success[THEN iffD1],rule seq) + done +qed + + +lemma assert_disch1 :" P \ \ (\ \ (x \ assert\<^sub>S\<^sub>E P; M x)) = (\ \ (M True))" +by(auto simp: bind_SE_def assert_SE_def valid_SE_def) + +lemma assert_disch2 :" \ P \ \ \ (\ \ (x \ assert\<^sub>S\<^sub>E P ; M s))" +by(auto simp: bind_SE_def assert_SE_def valid_SE_def) + +lemma assert_disch3 :" \ P \ \ \ (\ \ (assert\<^sub>S\<^sub>E P))" +by(auto simp: bind_SE_def assert_SE_def valid_SE_def) + +lemma assert_D : "(\ \ (x \ assert\<^sub>S\<^sub>E P; M x)) \ P \ \ (\ \ (M True))" +by(auto simp: bind_SE_def assert_SE_def valid_SE_def split: HOL.split_if_asm) + +lemma assume_D : "(\ \ (x \ assume\<^sub>S\<^sub>E P; M x)) \ \ \. (P \ \ \ \ (M ()))" +apply(auto simp: bind_SE_def assume_SE_def valid_SE_def split: HOL.split_if_asm) +apply(rule_tac x="Eps P" in exI, auto) +apply(rule_tac x="True" in exI, rule_tac x="b" in exI) +apply(subst Hilbert_Choice.someI,assumption,simp) +apply(subst Hilbert_Choice.someI,assumption,simp) +done +text{* These two rule prove that the SE Monad in connection with the notion of valid sequence +is actually sufficient for a representation of a Boogie-like language. The SBE monad with explicit +sets of states --- to be shown below --- is strictly speaking not necessary (and will therefore +be discontinued in the development). *} + +term "if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi" + +lemma if_SE_D1 : "P \ \ (\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)) = (\ \ B\<^sub>1)" +by(auto simp: if_SE_def valid_SE_def) + +lemma if_SE_D2 : "\ P \ \ (\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)) = (\ \ B\<^sub>2)" +by(auto simp: if_SE_def valid_SE_def) + +lemma if_SE_split_asm : " (\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)) = ((P \ \ (\ \ B\<^sub>1)) \ (\ P \ \ (\ \ B\<^sub>2)))" +by(cases "P \",auto simp: if_SE_D1 if_SE_D2) + +lemma if_SE_split : " (\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)) = ((P \ \ (\ \ B\<^sub>1)) \ (\ P \ \ (\ \ B\<^sub>2)))" +by(cases "P \", auto simp: if_SE_D1 if_SE_D2) + + +lemma if_SE_execE: + assumes A: "\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)" + and B: "P \ \ \ \ B\<^sub>1 \ Q" + and C: "\ P \\ \ \ B\<^sub>2 \ Q" + shows "Q" +by(insert A [simplified if_SE_split],cases "P \", simp_all, auto elim: B C) + + +lemma [code]: + "(\ \ m) = (case (m \) of None \ False | (Some (x,y)) \ x)" + apply(simp add: valid_SE_def) + apply(cases "m \ = None", simp_all) + apply(insert not_None_eq, auto) +done + + +text{* Test-Refinements will be stated in terms of the failsave @{term mbind}, opting + more generality. 
The following lemma allows for an optimization both in + test execution as well as in symbolic execution for an important special case of + the post-codition: Whenever the latter has the constraint that the length of input and + output sequence equal each other (that is to say: no failure occured), failsave mbind + can be reduced to failstop mbind ... *} +lemma mbindFSave_vs_mbindFStop : + "(\ \ (os \ (mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e \s ioprog); return(length \s = length os \ P \s os))) = + (\ \ (os \ (mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p \s ioprog); return(P \s os)))" + apply(rule_tac x=P in spec) + apply(rule_tac x=\ in spec) + proof(induct "\s") + case Nil show ?case by(simp_all add: mbind_try try_SE_def del: Monads.mbind.simps) + case (Cons a \s) show ?case + apply(rule allI, rename_tac "\",rule allI, rename_tac "P") + apply(insert Cons.hyps) + apply(case_tac "ioprog a \") + apply(simp only: exec_mbindFSave_failure exec_mbindFStop_failure, simp) + apply(simp add: split_paired_all del: Monads.mbind.simps ) + apply(rename_tac "\'") + apply(subst exec_mbindFSave_success, assumption) + apply(subst (2) exec_bind_SE_success, assumption) + apply(erule_tac x="\'" in allE) + apply(erule_tac x="\\s s. P (a # \s) (aa # s)" in allE) (* heureka ! *) + apply(simp) + done + qed + + +lemma mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e_vs_mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p: +assumes A: "\ \ \. ioprog \ \ \ None" +shows "(\ \ (os \ (mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e \s ioprog); P os)) = + (\ \ (os \ (mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p \s ioprog); P os))" +proof(induct "\s") print_cases + case Nil show ?case by simp +next + case (Cons a \s) + from Cons.hyps + have B:"\ S f \. mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S f \ \ None " by simp + have C:"\\. mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p \s ioprog \ = mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e \s ioprog \" + apply(induct \s, simp) + apply(rule allI,rename_tac "\") + apply(simp add: Monads.mbind'.simps(2)) + apply(insert A, erule_tac x="a" in allE) + apply(erule_tac x="\" and P="\\ . ioprog a \ \ None" in allE) + apply(auto split:option.split) + done + show ?case + apply(insert A,erule_tac x="a" in allE,erule_tac x="\" in allE) + apply(simp, elim exE) + apply(rename_tac "out" "\'") + apply(insert B, erule_tac x=\s in allE, erule_tac x=ioprog in allE, erule_tac x=\' in allE) + apply(subst(asm) not_None_eq, elim exE) + apply(subst Monads.exec_bind_SE_success) + apply(simp split: option.split, auto) + apply(rule_tac s="(\ a b c. a # (fst c)) out \' (aa, b)" in trans, simp,rule refl) + apply(rule_tac s="(\ a b c. (snd c)) out \' (aa, b)" in trans, simp,rule refl) + apply(simp_all) + apply(subst Monads.exec_bind_SE_success, assumption) + apply(subst Monads.exec_bind_SE_success) + apply(rule_tac s="Some (aa, b)" in trans,simp_all add:C) + apply(subst(asm) Monads.exec_bind_SE_success, assumption) + apply(subst(asm) Monads.exec_bind_SE_success) + apply(rule_tac s="Some (aa, b)" in trans,simp_all add:C) + done +qed + + +section{* Valid Test Sequences in the State Exception Backtrack Monad *} +text{* This is still an unstructured merge of executable monad concepts +and specification oriented high-level properties initiating test procedures. 
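+The notion of validity employed in this section mirrors the state-exception case: a computation $m$ is valid in a state $\sigma$ iff it does not fail, i.e. iff $m\ \sigma \neq None$ (cf. the definition of @{text "valid_SBE"} right below).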
*} + +definition valid_SBE :: "'\ \ ('a,'\) MON\<^sub>S\<^sub>B\<^sub>E \ bool" (infix "\\<^sub>S\<^sub>B\<^sub>E" 15) +where "\ \\<^sub>S\<^sub>B\<^sub>E m \ (m \ \ None)" +text{* This notation consideres all non-failures as valid. *} + + +lemma assume_assert: "(\ \\<^sub>S\<^sub>B\<^sub>E ( _ :\ assume\<^sub>S\<^sub>B\<^sub>E P ; assert\<^sub>S\<^sub>B\<^sub>E Q)) = (P \ \ Q \)" + by(simp add: valid_SBE_def assume_SBE_def assert_SBE_def bind_SBE_def) + +lemma assert_intro: "Q \ \ \ \\<^sub>S\<^sub>B\<^sub>E (assert\<^sub>S\<^sub>B\<^sub>E Q)" + by(simp add: valid_SBE_def assume_SBE_def assert_SBE_def bind_SBE_def) + +lemma assume_dest: + "\ \ \\<^sub>S\<^sub>B\<^sub>E (x :\ assume\<^sub>S\<^sub>B\<^sub>E Q; M x); Q \' \ \ \ \\<^sub>S\<^sub>B\<^sub>E M ()" + apply(auto simp: valid_SBE_def assume_SBE_def assert_SBE_def bind_SBE_def) + apply(cases "Q \",simp_all) + oops + +text{* This still needs work. What would be needed is a kind + of wp - calculus that comes out of that. So far: nope. *} + +subsection{* Legacy Bindings *} + + +lemma valid_true[simp]: (* legacy: special case *) + "(\ \ (s \ return x ; return (P s))) = P x" by simp + + +(* +lemmas valid_both = exec_mbindFSave (* legacy *) +lemmas valid_success = exec_mbindFSave_success (* legacy *) +lemmas valid_success'' = exec_mbindFSave_success(* legacy *) +lemmas valid_success' = exec_bind_SE_success (* legacy *) +lemmas valid_failure = exec_mbindFSave_failure (* legacy : *) +lemmas valid_failure' = exec_bind_SE_failure (* legacy *) +lemmas valid_failure''=valid_failure (* legacy : *) +lemmas valid_failure''' = exec_mbindFStop_failure (* legacy : *) +lemmas valid_propagate_fail = exec_fail_SE (* legacy *) +lemmas valid_propagate_fail' = exec_fail_SE' (* legacy *) +lemmas valid_propoagate_3' = valid_propagate_fail' (* legacy *) +lemmas valid_propagate_2 = exec_bind_SE_success''(* legacy *) +lemmas valid_propagate_1 = exec_unit_SE (* legacy *) +lemmas valid_successElem = exec_bind_SE_success' (* legacy *) +lemmas valid_propagate_2' = exec_bind_SE_success'''(* legacy *) +lemmas valid_propagate_2'' = exec_bind_SE_success'''' (* legacy *) +lemmas valid_propoagate_3 = exec_unit_SE' (* legacy *) + *) +(* legacy ?: +lemma valid_success'': +"ioprog a \ = Some(b,\') \ + (\ \ (s \ mbind (a#S) ioprog ; return (P s))) = + (\' \ (s \ mbind S ioprog ; return (P (b#s))))" +unfolding valid_SE_def unit_SE_def bind_SE_def +by(cases "mbind S ioprog \'", auto) +*) + +end diff --git a/src/test-gen/src/test/Testing.thy b/src/test-gen/src/test/Testing.thy new file mode 100644 index 0000000..94c6ff0 --- /dev/null +++ b/src/test-gen/src/test/Testing.thy @@ -0,0 +1,57 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * Testing.thy --- wiring everything together. + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2007 ETH Zurich, Switzerland + * 2008-2015 Achim D. Brucker, Germany + * 2009-2016 Université Paris-Sud, France + * 2016 The University of Sheffield, UK + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id:$ *) + +chapter {* Summing up *} + +theory + Testing +imports + TestGen + TestLib + TestScript + "codegen_fsharp/code_fsharp" +begin + + +end From 6712473df1b6b8abb5befc861d8ee7c5fd4e0b79 Mon Sep 17 00:00:00 2001 From: Peyman Derafshkavian Date: Sat, 5 Feb 2022 15:26:53 -0500 Subject: [PATCH 2/8] Old SMT added (cherry picked from commit 4105105ee7363eaaa1e58b0496c21e189e3a3496) --- src/test-gen/Old_SMT/old_smt_builtin.ML | 231 +++++ src/test-gen/Old_SMT/old_smt_config.ML | 254 +++++ src/test-gen/Old_SMT/old_smt_datatypes.ML | 94 ++ src/test-gen/Old_SMT/old_smt_failure.ML | 61 ++ src/test-gen/Old_SMT/old_smt_normalize.ML | 652 +++++++++++++ src/test-gen/Old_SMT/old_smt_real.ML | 134 +++ src/test-gen/Old_SMT/old_smt_setup_solvers.ML | 189 ++++ src/test-gen/Old_SMT/old_smt_solver.ML | 374 ++++++++ src/test-gen/Old_SMT/old_smt_translate.ML | 589 ++++++++++++ src/test-gen/Old_SMT/old_smt_utils.ML | 221 +++++ src/test-gen/Old_SMT/old_smt_word.ML | 146 +++ src/test-gen/Old_SMT/old_smtlib_interface.ML | 161 ++++ src/test-gen/Old_SMT/old_z3_interface.ML | 239 +++++ src/test-gen/Old_SMT/old_z3_model.ML | 337 +++++++ src/test-gen/Old_SMT/old_z3_proof_literals.ML | 363 +++++++ src/test-gen/Old_SMT/old_z3_proof_methods.ML | 149 +++ src/test-gen/Old_SMT/old_z3_proof_parser.ML | 446 +++++++++ .../Old_SMT/old_z3_proof_reconstruction.ML | 891 ++++++++++++++++++ src/test-gen/Old_SMT/old_z3_proof_tools.ML | 374 ++++++++ 19 files changed, 5905 insertions(+) create mode 100644 src/test-gen/Old_SMT/old_smt_builtin.ML create mode 100644 src/test-gen/Old_SMT/old_smt_config.ML create mode 100644 src/test-gen/Old_SMT/old_smt_datatypes.ML create mode 100644 src/test-gen/Old_SMT/old_smt_failure.ML create mode 100644 src/test-gen/Old_SMT/old_smt_normalize.ML create mode 100644 src/test-gen/Old_SMT/old_smt_real.ML create mode 100644 src/test-gen/Old_SMT/old_smt_setup_solvers.ML create mode 100644 src/test-gen/Old_SMT/old_smt_solver.ML create mode 100644 src/test-gen/Old_SMT/old_smt_translate.ML create mode 100644 src/test-gen/Old_SMT/old_smt_utils.ML create mode 100644 src/test-gen/Old_SMT/old_smt_word.ML create mode 100644 src/test-gen/Old_SMT/old_smtlib_interface.ML create mode 100644 
src/test-gen/Old_SMT/old_z3_interface.ML create mode 100644 src/test-gen/Old_SMT/old_z3_model.ML create mode 100644 src/test-gen/Old_SMT/old_z3_proof_literals.ML create mode 100644 src/test-gen/Old_SMT/old_z3_proof_methods.ML create mode 100644 src/test-gen/Old_SMT/old_z3_proof_parser.ML create mode 100644 src/test-gen/Old_SMT/old_z3_proof_reconstruction.ML create mode 100644 src/test-gen/Old_SMT/old_z3_proof_tools.ML diff --git a/src/test-gen/Old_SMT/old_smt_builtin.ML b/src/test-gen/Old_SMT/old_smt_builtin.ML new file mode 100644 index 0000000..e492ed4 --- /dev/null +++ b/src/test-gen/Old_SMT/old_smt_builtin.ML @@ -0,0 +1,231 @@ +(* Title: HOL/Library/Old_SMT/old_smt_builtin.ML + Author: Sascha Boehme, TU Muenchen + +Tables of types and terms directly supported by SMT solvers. +*) + +signature OLD_SMT_BUILTIN = +sig + (*for experiments*) + val filter_builtins: (typ -> bool) -> Proof.context -> Proof.context + + (*built-in types*) + val add_builtin_typ: Old_SMT_Utils.class -> + typ * (typ -> string option) * (typ -> int -> string option) -> + Context.generic -> Context.generic + val add_builtin_typ_ext: typ * (typ -> bool) -> Context.generic -> + Context.generic + val dest_builtin_typ: Proof.context -> typ -> string option + val is_builtin_typ_ext: Proof.context -> typ -> bool + + (*built-in numbers*) + val dest_builtin_num: Proof.context -> term -> (string * typ) option + val is_builtin_num: Proof.context -> term -> bool + val is_builtin_num_ext: Proof.context -> term -> bool + + (*built-in functions*) + type 'a bfun = Proof.context -> typ -> term list -> 'a + type bfunr = string * int * term list * (term list -> term) + val add_builtin_fun: Old_SMT_Utils.class -> + (string * typ) * bfunr option bfun -> Context.generic -> Context.generic + val add_builtin_fun': Old_SMT_Utils.class -> term * string -> Context.generic -> + Context.generic + val add_builtin_fun_ext: (string * typ) * term list bfun -> + Context.generic -> Context.generic + val add_builtin_fun_ext': string * typ -> Context.generic -> Context.generic + val add_builtin_fun_ext'': string -> Context.generic -> Context.generic + val dest_builtin_fun: Proof.context -> string * typ -> term list -> + bfunr option + val dest_builtin_eq: Proof.context -> term -> term -> bfunr option + val dest_builtin_pred: Proof.context -> string * typ -> term list -> + bfunr option + val dest_builtin_conn: Proof.context -> string * typ -> term list -> + bfunr option + val dest_builtin: Proof.context -> string * typ -> term list -> bfunr option + val dest_builtin_ext: Proof.context -> string * typ -> term list -> + term list option + val is_builtin_fun: Proof.context -> string * typ -> term list -> bool + val is_builtin_fun_ext: Proof.context -> string * typ -> term list -> bool +end + +structure Old_SMT_Builtin: OLD_SMT_BUILTIN = +struct + + +(* built-in tables *) + +datatype ('a, 'b) kind = Ext of 'a | Int of 'b + +type ('a, 'b) ttab = ((typ * ('a, 'b) kind) Ord_List.T) Old_SMT_Utils.dict + +fun typ_ord ((T, _), (U, _)) = + let + fun tord (TVar _, Type _) = GREATER + | tord (Type _, TVar _) = LESS + | tord (Type (n, Ts), Type (m, Us)) = + if n = m then list_ord tord (Ts, Us) + else Term_Ord.typ_ord (T, U) + | tord TU = Term_Ord.typ_ord TU + in tord (T, U) end + +fun insert_ttab cs T f = + Old_SMT_Utils.dict_map_default (cs, []) + (Ord_List.insert typ_ord (perhaps (try Logic.varifyT_global) T, f)) + +fun merge_ttab ttabp = + Old_SMT_Utils.dict_merge (Ord_List.merge typ_ord) ttabp + +fun lookup_ttab ctxt ttab T = + let fun match (U, _) = 
Sign.typ_instance (Proof_Context.theory_of ctxt) (T, U) + in + get_first (find_first match) + (Old_SMT_Utils.dict_lookup ttab (Old_SMT_Config.solver_class_of ctxt)) + end + +type ('a, 'b) btab = ('a, 'b) ttab Symtab.table + +fun insert_btab cs n T f = + Symtab.map_default (n, []) (insert_ttab cs T f) + +fun merge_btab btabp = Symtab.join (K merge_ttab) btabp + +fun lookup_btab ctxt btab (n, T) = + (case Symtab.lookup btab n of + NONE => NONE + | SOME ttab => lookup_ttab ctxt ttab T) + +type 'a bfun = Proof.context -> typ -> term list -> 'a + +type bfunr = string * int * term list * (term list -> term) + +structure Builtins = Generic_Data +( + type T = + (typ -> bool, (typ -> string option) * (typ -> int -> string option)) ttab * + (term list bfun, bfunr option bfun) btab + val empty = ([], Symtab.empty) + val extend = I + fun merge ((t1, b1), (t2, b2)) = (merge_ttab (t1, t2), merge_btab (b1, b2)) +) + +fun filter_ttab keep_T = map (apsnd (filter (keep_T o fst))) + +fun filter_builtins keep_T = + Context.proof_map (Builtins.map (fn (ttab, btab) => + (filter_ttab keep_T ttab, Symtab.map (K (filter_ttab keep_T)) btab))) + + +(* built-in types *) + +fun add_builtin_typ cs (T, f, g) = + Builtins.map (apfst (insert_ttab cs T (Int (f, g)))) + +fun add_builtin_typ_ext (T, f) = + Builtins.map (apfst (insert_ttab Old_SMT_Utils.basicC T (Ext f))) + +fun lookup_builtin_typ ctxt = + lookup_ttab ctxt (fst (Builtins.get (Context.Proof ctxt))) + +fun dest_builtin_typ ctxt T = + (case lookup_builtin_typ ctxt T of + SOME (_, Int (f, _)) => f T + | _ => NONE) + +fun is_builtin_typ_ext ctxt T = + (case lookup_builtin_typ ctxt T of + SOME (_, Int (f, _)) => is_some (f T) + | SOME (_, Ext f) => f T + | NONE => false) + + +(* built-in numbers *) + +fun dest_builtin_num ctxt t = + (case try HOLogic.dest_number t of + NONE => NONE + | SOME (T, i) => + if i < 0 then NONE else + (case lookup_builtin_typ ctxt T of + SOME (_, Int (_, g)) => g T i |> Option.map (rpair T) + | _ => NONE)) + +val is_builtin_num = is_some oo dest_builtin_num + +fun is_builtin_num_ext ctxt t = + (case try HOLogic.dest_number t of + NONE => false + | SOME (T, _) => is_builtin_typ_ext ctxt T) + + +(* built-in functions *) + +fun add_builtin_fun cs ((n, T), f) = + Builtins.map (apsnd (insert_btab cs n T (Int f))) + +fun add_builtin_fun' cs (t, n) = + let + val c as (m, T) = Term.dest_Const t + fun app U ts = Term.list_comb (Const (m, U), ts) + fun bfun _ U ts = SOME (n, length (Term.binder_types T), ts, app U) + in add_builtin_fun cs (c, bfun) end + +fun add_builtin_fun_ext ((n, T), f) = + Builtins.map (apsnd (insert_btab Old_SMT_Utils.basicC n T (Ext f))) + +fun add_builtin_fun_ext' c = add_builtin_fun_ext (c, fn _ => fn _ => I) + +fun add_builtin_fun_ext'' n context = + let val thy = Context.theory_of context + in add_builtin_fun_ext' (n, Sign.the_const_type thy n) context end + +fun lookup_builtin_fun ctxt = + lookup_btab ctxt (snd (Builtins.get (Context.Proof ctxt))) + +fun dest_builtin_fun ctxt (c as (_, T)) ts = + (case lookup_builtin_fun ctxt c of + SOME (_, Int f) => f ctxt T ts + | _ => NONE) + +fun dest_builtin_eq ctxt t u = + let + val aT = TFree (Name.aT, @{sort type}) + val c = (@{const_name HOL.eq}, aT --> aT --> @{typ bool}) + fun mk ts = Term.list_comb (HOLogic.eq_const (Term.fastype_of (hd ts)), ts) + in + dest_builtin_fun ctxt c [] + |> Option.map (fn (n, i, _, _) => (n, i, [t, u], mk)) + end + +fun special_builtin_fun pred ctxt (c as (_, T)) ts = + if pred (Term.body_type T, Term.binder_types T) then + dest_builtin_fun ctxt 
c ts + else NONE + +fun dest_builtin_pred ctxt = special_builtin_fun (equal @{typ bool} o fst) ctxt + +fun dest_builtin_conn ctxt = + special_builtin_fun (forall (equal @{typ bool}) o (op ::)) ctxt + +fun dest_builtin ctxt c ts = + let val t = Term.list_comb (Const c, ts) + in + (case dest_builtin_num ctxt t of + SOME (n, _) => SOME (n, 0, [], K t) + | NONE => dest_builtin_fun ctxt c ts) + end + +fun dest_builtin_fun_ext ctxt (c as (_, T)) ts = + (case lookup_builtin_fun ctxt c of + SOME (_, Int f) => f ctxt T ts |> Option.map (fn (_, _, us, _) => us) + | SOME (_, Ext f) => SOME (f ctxt T ts) + | NONE => NONE) + +fun dest_builtin_ext ctxt c ts = + if is_builtin_num_ext ctxt (Term.list_comb (Const c, ts)) then SOME [] + else dest_builtin_fun_ext ctxt c ts + +fun is_builtin_fun ctxt c ts = is_some (dest_builtin_fun ctxt c ts) + +fun is_builtin_fun_ext ctxt c ts = is_some (dest_builtin_fun_ext ctxt c ts) + +end diff --git a/src/test-gen/Old_SMT/old_smt_config.ML b/src/test-gen/Old_SMT/old_smt_config.ML new file mode 100644 index 0000000..318b2ce --- /dev/null +++ b/src/test-gen/Old_SMT/old_smt_config.ML @@ -0,0 +1,254 @@ +(* Title: HOL/Library/Old_SMT/old_smt_config.ML + Author: Sascha Boehme, TU Muenchen + +Configuration options and diagnostic tools for SMT. +*) + +signature OLD_SMT_CONFIG = +sig + (*solver*) + type solver_info = { + name: string, + class: Proof.context -> Old_SMT_Utils.class, + avail: unit -> bool, + options: Proof.context -> string list } + val add_solver: solver_info -> Context.generic -> Context.generic + val set_solver_options: string * string -> Context.generic -> Context.generic + val is_available: Proof.context -> string -> bool + val available_solvers_of: Proof.context -> string list + val select_solver: string -> Context.generic -> Context.generic + val solver_of: Proof.context -> string + val solver_class_of: Proof.context -> Old_SMT_Utils.class + val solver_options_of: Proof.context -> string list + + (*options*) + val oracle: bool Config.T + val datatypes: bool Config.T + val timeout: real Config.T + val random_seed: int Config.T + val read_only_certificates: bool Config.T + val verbose: bool Config.T + val trace: bool Config.T + val trace_used_facts: bool Config.T + val monomorph_limit: int Config.T + val monomorph_instances: int Config.T + val infer_triggers: bool Config.T + val filter_only_facts: bool Config.T + val debug_files: string Config.T + + (*tools*) + val with_timeout: Proof.context -> ('a -> 'b) -> 'a -> 'b + + (*diagnostics*) + val trace_msg: Proof.context -> ('a -> string) -> 'a -> unit + val verbose_msg: Proof.context -> ('a -> string) -> 'a -> unit + + (*certificates*) + val select_certificates: string -> Context.generic -> Context.generic + val certificates_of: Proof.context -> Cache_IO.cache option + + (*setup*) + val setup: theory -> theory + val print_setup: Proof.context -> unit +end + +structure Old_SMT_Config: OLD_SMT_CONFIG = +struct + +(* solver *) + +type solver_info = { + name: string, + class: Proof.context -> Old_SMT_Utils.class, + avail: unit -> bool, + options: Proof.context -> string list } + +(* FIXME just one data slot (record) per program unit *) +structure Solvers = Generic_Data +( + type T = (solver_info * string list) Symtab.table * string option + val empty = (Symtab.empty, NONE) + val extend = I + fun merge ((ss1, s1), (ss2, s2)) = + (Symtab.merge (K true) (ss1, ss2), merge_options (s1, s2)) +) + +fun set_solver_options (name, options) = + let val opts = String.tokens (Symbol.is_ascii_blank o str) options + in Solvers.map 
(apfst (Symtab.map_entry name (apsnd (K opts)))) end + +fun add_solver (info as {name, ...} : solver_info) context = + if Symtab.defined (fst (Solvers.get context)) name then + error ("Solver already registered: " ^ quote name) + else + context + |> Solvers.map (apfst (Symtab.update (name, (info, [])))) + |> Context.map_theory (Attrib.setup (Binding.name ("old_" ^ name ^ "_options")) + (Scan.lift (@{keyword "="} |-- Args.name) >> + (Thm.declaration_attribute o K o set_solver_options o pair name)) + ("additional command line options for SMT solver " ^ quote name)) + +fun all_solvers_of ctxt = Symtab.keys (fst (Solvers.get (Context.Proof ctxt))) + +fun solver_name_of ctxt = snd (Solvers.get (Context.Proof ctxt)) + +fun is_available ctxt name = + (case Symtab.lookup (fst (Solvers.get (Context.Proof ctxt))) name of + SOME ({avail, ...}, _) => avail () + | NONE => false) + +fun available_solvers_of ctxt = + filter (is_available ctxt) (all_solvers_of ctxt) + +fun warn_solver (Context.Proof ctxt) name = + if Context_Position.is_visible ctxt then + warning ("The SMT solver " ^ quote name ^ " is not installed.") + else () + | warn_solver _ _ = (); + +fun select_solver name context = + let + val ctxt = Context.proof_of context + val upd = Solvers.map (apsnd (K (SOME name))) + in + if not (member (op =) (all_solvers_of ctxt) name) then + error ("Trying to select unknown solver: " ^ quote name) + else if not (is_available ctxt name) then + (warn_solver context name; upd context) + else upd context + end + +fun no_solver_err () = error "No SMT solver selected" + +fun solver_of ctxt = + (case solver_name_of ctxt of + SOME name => name + | NONE => no_solver_err ()) + +fun solver_info_of default select ctxt = + (case Solvers.get (Context.Proof ctxt) of + (solvers, SOME name) => select (Symtab.lookup solvers name) + | (_, NONE) => default ()) + +fun solver_class_of ctxt = + let fun class_of ({class, ...}: solver_info, _) = class ctxt + in solver_info_of no_solver_err (class_of o the) ctxt end + +fun solver_options_of ctxt = + let + fun all_options NONE = [] + | all_options (SOME ({options, ...} : solver_info, opts)) = + opts @ options ctxt + in solver_info_of (K []) all_options ctxt end + +val setup_solver = + Attrib.setup @{binding old_smt_solver} + (Scan.lift (@{keyword "="} |-- Args.name) >> + (Thm.declaration_attribute o K o select_solver)) + "SMT solver configuration" + + +(* options *) + +val oracle = Attrib.setup_config_bool @{binding old_smt_oracle} (K true) +val datatypes = Attrib.setup_config_bool @{binding old_smt_datatypes} (K false) +val timeout = Attrib.setup_config_real @{binding old_smt_timeout} (K 30.0) +val random_seed = Attrib.setup_config_int @{binding old_smt_random_seed} (K 1) +val read_only_certificates = Attrib.setup_config_bool @{binding old_smt_read_only_certificates} (K false) +val verbose = Attrib.setup_config_bool @{binding old_smt_verbose} (K true) +val trace = Attrib.setup_config_bool @{binding old_smt_trace} (K false) +val trace_used_facts = Attrib.setup_config_bool @{binding old_smt_trace_used_facts} (K false) +val monomorph_limit = Attrib.setup_config_int @{binding old_smt_monomorph_limit} (K 10) +val monomorph_instances = Attrib.setup_config_int @{binding old_smt_monomorph_instances} (K 500) +val infer_triggers = Attrib.setup_config_bool @{binding old_smt_infer_triggers} (K false) +val filter_only_facts = Attrib.setup_config_bool @{binding old_smt_filter_only_facts} (K false) +val debug_files = Attrib.setup_config_string @{binding old_smt_debug_files} (K "") + + +(* 
diagnostics *) + +fun cond_trace flag f x = if flag then tracing ("SMT: " ^ f x) else () + +fun verbose_msg ctxt = cond_trace (Config.get ctxt verbose) + +fun trace_msg ctxt = cond_trace (Config.get ctxt trace) + + +(* tools *) + +fun with_timeout ctxt f x = + Timeout.apply (seconds (Config.get ctxt timeout)) f x + handle Timeout.TIMEOUT _ => raise Old_SMT_Failure.SMT Old_SMT_Failure.Time_Out + + +(* certificates *) + +(* FIXME just one data slot (record) per program unit *) +structure Certificates = Generic_Data +( + type T = Cache_IO.cache option + val empty = NONE + val extend = I + fun merge (s, _) = s (* FIXME merge options!? *) +) + +val get_certificates_path = + Option.map (Cache_IO.cache_path_of) o Certificates.get o Context.Proof + +fun select_certificates name context = context |> Certificates.put ( + if name = "" then NONE + else + Path.explode name + |> Path.append (Resources.master_directory (Context.theory_of context)) + |> SOME o Cache_IO.unsynchronized_init) + +val certificates_of = Certificates.get o Context.Proof + +val setup_certificates = + Attrib.setup @{binding old_smt_certificates} + (Scan.lift (@{keyword "="} |-- Args.name) >> + (Thm.declaration_attribute o K o select_certificates)) + "SMT certificates configuration" + + +(* setup *) + +val setup = + setup_solver #> + setup_certificates + +fun print_setup ctxt = + let + fun string_of_bool b = if b then "true" else "false" + + val names = available_solvers_of ctxt + val ns = if null names then ["(none)"] else sort_strings names + val n = the_default "(none)" (solver_name_of ctxt) + val opts = solver_options_of ctxt + + val t = string_of_real (Config.get ctxt timeout) + + val certs_filename = + (case get_certificates_path ctxt of + SOME path => Path.print path + | NONE => "(disabled)") + in + Pretty.writeln (Pretty.big_list "SMT setup:" [ + Pretty.str ("Current SMT solver: " ^ n), + Pretty.str ("Current SMT solver options: " ^ space_implode " " opts), + Pretty.str_list "Available SMT solvers: " "" ns, + Pretty.str ("Current timeout: " ^ t ^ " seconds"), + Pretty.str ("With proofs: " ^ + string_of_bool (not (Config.get ctxt oracle))), + Pretty.str ("Certificates cache: " ^ certs_filename), + Pretty.str ("Fixed certificates: " ^ + string_of_bool (Config.get ctxt read_only_certificates))]) + end + +val _ = + Outer_Syntax.command @{command_keyword old_smt_status} + "show the available SMT solvers, the currently selected SMT solver, \ + \and the values of SMT configuration options" + (Scan.succeed (Toplevel.keep (print_setup o Toplevel.context_of))) + +end diff --git a/src/test-gen/Old_SMT/old_smt_datatypes.ML b/src/test-gen/Old_SMT/old_smt_datatypes.ML new file mode 100644 index 0000000..971dc74 --- /dev/null +++ b/src/test-gen/Old_SMT/old_smt_datatypes.ML @@ -0,0 +1,94 @@ +(* Title: HOL/Library/Old_SMT/old_smt_datatypes.ML + Author: Sascha Boehme, TU Muenchen + +Collector functions for common type declarations and their representation +as algebraic datatypes. 
+*) + +signature OLD_SMT_DATATYPES = +sig + val add_decls: typ -> + (typ * (term * term list) list) list list * Proof.context -> + (typ * (term * term list) list) list list * Proof.context +end + +structure Old_SMT_Datatypes: OLD_SMT_DATATYPES = +struct + +fun mk_selectors T Ts = + Variable.variant_fixes (replicate (length Ts) "select") + #>> map2 (fn U => fn n => Free (n, T --> U)) Ts + + +(* free constructor type declarations *) + +fun get_ctr_sugar_decl ({ctrs, ...} : Ctr_Sugar.ctr_sugar) T Ts ctxt = + let + fun mk_constr ctr0 = + let val ctr = Ctr_Sugar.mk_ctr Ts ctr0 in + mk_selectors T (binder_types (fastype_of ctr)) #>> pair ctr + end + in + fold_map mk_constr ctrs ctxt + |>> (pair T #> single) + end + + +(* typedef declarations *) + +fun get_typedef_decl (({Abs_name, Rep_name, abs_type, rep_type, ...}, {Abs_inverse, ...}) + : Typedef.info) T Ts = + if can (curry (op RS) @{thm UNIV_I}) Abs_inverse then + let + val env = snd (Term.dest_Type abs_type) ~~ Ts + val instT = Term.map_atyps (perhaps (AList.lookup (op =) env)) + + val constr = Const (Abs_name, instT (rep_type --> abs_type)) + val select = Const (Rep_name, instT (abs_type --> rep_type)) + in [(T, [(constr, [select])])] end + else + [] + + +(* collection of declarations *) + +fun declared declss T = exists (exists (equal T o fst)) declss +fun declared' dss T = exists (exists (equal T o fst) o snd) dss + +fun get_decls T n Ts ctxt = + (case Ctr_Sugar.ctr_sugar_of ctxt n of + SOME ctr_sugar => get_ctr_sugar_decl ctr_sugar T Ts ctxt + | NONE => + (case Typedef.get_info ctxt n of + [] => ([], ctxt) + | info :: _ => (get_typedef_decl info T Ts, ctxt))) + +fun add_decls T (declss, ctxt) = + let + fun depends Ts ds = exists (member (op =) (map fst ds)) Ts + + fun add (TFree _) = I + | add (TVar _) = I + | add (T as Type (@{type_name fun}, _)) = + fold add (Term.body_type T :: Term.binder_types T) + | add @{typ bool} = I + | add (T as Type (n, Ts)) = (fn (dss, ctxt1) => + if declared declss T orelse declared' dss T then (dss, ctxt1) + else if Old_SMT_Builtin.is_builtin_typ_ext ctxt1 T then (dss, ctxt1) + else + (case get_decls T n Ts ctxt1 of + ([], _) => (dss, ctxt1) + | (ds, ctxt2) => + let + val constrTs = + maps (map (snd o Term.dest_Const o fst) o snd) ds + val Us = fold (union (op =) o Term.binder_types) constrTs [] + + fun ins [] = [(Us, ds)] + | ins ((Uds as (Us', _)) :: Udss) = + if depends Us' ds then (Us, ds) :: Uds :: Udss + else Uds :: ins Udss + in fold add Us (ins dss, ctxt2) end)) + in add T ([], ctxt) |>> append declss o map snd end + +end diff --git a/src/test-gen/Old_SMT/old_smt_failure.ML b/src/test-gen/Old_SMT/old_smt_failure.ML new file mode 100644 index 0000000..394287c --- /dev/null +++ b/src/test-gen/Old_SMT/old_smt_failure.ML @@ -0,0 +1,61 @@ +(* Title: HOL/Library/Old_SMT/old_smt_failure.ML + Author: Sascha Boehme, TU Muenchen + +Failures and exception of SMT. 
+*) + +signature OLD_SMT_FAILURE = +sig + type counterexample = { + is_real_cex: bool, + free_constraints: term list, + const_defs: term list} + datatype failure = + Counterexample of counterexample | + Time_Out | + Out_Of_Memory | + Abnormal_Termination of int | + Other_Failure of string + val pretty_counterexample: Proof.context -> counterexample -> Pretty.T + val string_of_failure: Proof.context -> failure -> string + exception SMT of failure +end + +structure Old_SMT_Failure: OLD_SMT_FAILURE = +struct + +type counterexample = { + is_real_cex: bool, + free_constraints: term list, + const_defs: term list} + +datatype failure = + Counterexample of counterexample | + Time_Out | + Out_Of_Memory | + Abnormal_Termination of int | + Other_Failure of string + +fun pretty_counterexample ctxt {is_real_cex, free_constraints, const_defs} = + let + val msg = + if is_real_cex then "Counterexample found (possibly spurious)" + else "Potential counterexample found" + in + if null free_constraints andalso null const_defs then Pretty.str msg + else + Pretty.big_list (msg ^ ":") + (map (Syntax.pretty_term ctxt) (free_constraints @ const_defs)) + end + +fun string_of_failure ctxt (Counterexample cex) = + Pretty.string_of (pretty_counterexample ctxt cex) + | string_of_failure _ Time_Out = "Timed out" + | string_of_failure _ Out_Of_Memory = "Ran out of memory" + | string_of_failure _ (Abnormal_Termination err) = + "Solver terminated abnormally with error code " ^ string_of_int err + | string_of_failure _ (Other_Failure msg) = msg + +exception SMT of failure + +end diff --git a/src/test-gen/Old_SMT/old_smt_normalize.ML b/src/test-gen/Old_SMT/old_smt_normalize.ML new file mode 100644 index 0000000..18cf0b7 --- /dev/null +++ b/src/test-gen/Old_SMT/old_smt_normalize.ML @@ -0,0 +1,652 @@ +(* Title: HOL/Library/Old_SMT/old_smt_normalize.ML + Author: Sascha Boehme, TU Muenchen + +Normalization steps on theorems required by SMT solvers. 
+*) + +signature OLD_SMT_NORMALIZE = +sig + val drop_fact_warning: Proof.context -> thm -> unit + val atomize_conv: Proof.context -> conv + type extra_norm = Proof.context -> thm list * thm list -> thm list * thm list + val add_extra_norm: Old_SMT_Utils.class * extra_norm -> Context.generic -> + Context.generic + val normalize: (int * (int option * thm)) list -> Proof.context -> + (int * thm) list * Proof.context + val setup: theory -> theory +end + +structure Old_SMT_Normalize: OLD_SMT_NORMALIZE = +struct + +fun drop_fact_warning ctxt = + Old_SMT_Config.verbose_msg ctxt (prefix "Warning: dropping assumption: " o + Thm.string_of_thm ctxt) + + +(* general theorem normalizations *) + +(** instantiate elimination rules **) + +local + val (cpfalse, cfalse) = + `Old_SMT_Utils.mk_cprop (Thm.cterm_of @{context} @{const False}) + + fun inst f ct thm = + let val cv = f (Drule.strip_imp_concl (Thm.cprop_of thm)) + in Thm.instantiate ([], [(dest_Var (Thm.term_of cv), ct)]) thm end +in + +fun instantiate_elim thm = + (case Thm.concl_of thm of + @{const Trueprop} $ Var (_, @{typ bool}) => inst Thm.dest_arg cfalse thm + | Var _ => inst I cpfalse thm + | _ => thm) + +end + + +(** normalize definitions **) + +fun norm_def thm = + (case Thm.prop_of thm of + @{const Trueprop} $ (Const (@{const_name HOL.eq}, _) $ _ $ Abs _) => + norm_def (thm RS @{thm fun_cong}) + | Const (@{const_name Pure.eq}, _) $ _ $ Abs _ => + norm_def (thm RS @{thm meta_eq_to_obj_eq}) + | _ => thm) + + +(** atomization **) + +fun atomize_conv ctxt ct = + (case Thm.term_of ct of + @{const Pure.imp} $ _ $ _ => + Conv.binop_conv (atomize_conv ctxt) then_conv + Conv.rewr_conv @{thm atomize_imp} + | Const (@{const_name Pure.eq}, _) $ _ $ _ => + Conv.binop_conv (atomize_conv ctxt) then_conv + Conv.rewr_conv @{thm atomize_eq} + | Const (@{const_name Pure.all}, _) $ Abs _ => + Conv.binder_conv (atomize_conv o snd) ctxt then_conv + Conv.rewr_conv @{thm atomize_all} + | _ => Conv.all_conv) ct + +val setup_atomize = + fold Old_SMT_Builtin.add_builtin_fun_ext'' [@{const_name Pure.imp}, + @{const_name Pure.eq}, @{const_name Pure.all}, @{const_name Trueprop}] + + +(** unfold special quantifiers **) + +local + val ex1_def = mk_meta_eq @{lemma + "Ex1 = (%P. EX x. P x & (ALL y. P y --> y = x))" + by (rule ext) (simp only: Ex1_def)} + + val ball_def = mk_meta_eq @{lemma "Ball = (%A P. ALL x. x : A --> P x)" + by (rule ext)+ (rule Ball_def)} + + val bex_def = mk_meta_eq @{lemma "Bex = (%A P. EX x. 
x : A & P x)" + by (rule ext)+ (rule Bex_def)} + + val special_quants = [(@{const_name Ex1}, ex1_def), + (@{const_name Ball}, ball_def), (@{const_name Bex}, bex_def)] + + fun special_quant (Const (n, _)) = AList.lookup (op =) special_quants n + | special_quant _ = NONE + + fun special_quant_conv _ ct = + (case special_quant (Thm.term_of ct) of + SOME thm => Conv.rewr_conv thm + | NONE => Conv.all_conv) ct +in + +fun unfold_special_quants_conv ctxt = + Old_SMT_Utils.if_exists_conv (is_some o special_quant) + (Conv.top_conv special_quant_conv ctxt) + +val setup_unfolded_quants = + fold (Old_SMT_Builtin.add_builtin_fun_ext'' o fst) special_quants + +end + + +(** trigger inference **) + +local + (*** check trigger syntax ***) + + fun dest_trigger (Const (@{const_name pat}, _) $ _) = SOME true + | dest_trigger (Const (@{const_name nopat}, _) $ _) = SOME false + | dest_trigger _ = NONE + + fun eq_list [] = false + | eq_list (b :: bs) = forall (equal b) bs + + fun proper_trigger t = + t + |> these o try HOLogic.dest_list + |> map (map_filter dest_trigger o these o try HOLogic.dest_list) + |> (fn [] => false | bss => forall eq_list bss) + + fun proper_quant inside f t = + (case t of + Const (@{const_name All}, _) $ Abs (_, _, u) => proper_quant true f u + | Const (@{const_name Ex}, _) $ Abs (_, _, u) => proper_quant true f u + | @{const trigger} $ p $ u => + (if inside then f p else false) andalso proper_quant false f u + | Abs (_, _, u) => proper_quant false f u + | u1 $ u2 => proper_quant false f u1 andalso proper_quant false f u2 + | _ => true) + + fun check_trigger_error ctxt t = + error ("SMT triggers must only occur under quantifier and multipatterns " ^ + "must have the same kind: " ^ Syntax.string_of_term ctxt t) + + fun check_trigger_conv ctxt ct = + if proper_quant false proper_trigger (Old_SMT_Utils.term_of ct) then + Conv.all_conv ct + else check_trigger_error ctxt (Thm.term_of ct) + + + (*** infer simple triggers ***) + + fun dest_cond_eq ct = + (case Thm.term_of ct of + Const (@{const_name HOL.eq}, _) $ _ $ _ => Thm.dest_binop ct + | @{const HOL.implies} $ _ $ _ => dest_cond_eq (Thm.dest_arg ct) + | _ => raise CTERM ("no equation", [ct])) + + fun get_constrs thy (Type (n, _)) = these (Old_Datatype_Data.get_constrs thy n) + | get_constrs _ _ = [] + + fun is_constr thy (n, T) = + let fun match (m, U) = m = n andalso Sign.typ_instance thy (T, U) + in can (the o find_first match o get_constrs thy o Term.body_type) T end + + fun is_constr_pat thy t = + (case Term.strip_comb t of + (Free _, []) => true + | (Const c, ts) => is_constr thy c andalso forall (is_constr_pat thy) ts + | _ => false) + + fun is_simp_lhs ctxt t = + (case Term.strip_comb t of + (Const c, ts as _ :: _) => + not (Old_SMT_Builtin.is_builtin_fun_ext ctxt c ts) andalso + forall (is_constr_pat (Proof_Context.theory_of ctxt)) ts + | _ => false) + + fun has_all_vars vs t = + subset (op aconv) (vs, map Free (Term.add_frees t [])) + + fun minimal_pats vs ct = + if has_all_vars vs (Thm.term_of ct) then + (case Thm.term_of ct of + _ $ _ => + (case apply2 (minimal_pats vs) (Thm.dest_comb ct) of + ([], []) => [[ct]] + | (ctss, ctss') => union (eq_set (op aconvc)) ctss ctss') + | _ => []) + else [] + + fun proper_mpat _ _ _ [] = false + | proper_mpat thy gen u cts = + let + val tps = (op ~~) (`gen (map Thm.term_of cts)) + fun some_match u = tps |> exists (fn (t', t) => + Pattern.matches thy (t', u) andalso not (t aconv u)) + in not (Term.exists_subterm some_match u) end + + val pat = + Old_SMT_Utils.mk_const_pat @{theory} 
@{const_name pat} Old_SMT_Utils.destT1 + fun mk_pat ct = Thm.apply (Old_SMT_Utils.instT' ct pat) ct + + fun mk_clist T = apply2 (Thm.cterm_of @{context}) (HOLogic.cons_const T, HOLogic.nil_const T) + fun mk_list (ccons, cnil) f cts = fold_rev (Thm.mk_binop ccons o f) cts cnil + val mk_pat_list = mk_list (mk_clist @{typ pattern}) + val mk_mpat_list = mk_list (mk_clist @{typ "pattern list"}) + fun mk_trigger ctss = mk_mpat_list (mk_pat_list mk_pat) ctss + + val trigger_eq = + mk_meta_eq @{lemma "p = trigger t p" by (simp add: trigger_def)} + + fun insert_trigger_conv [] ct = Conv.all_conv ct + | insert_trigger_conv ctss ct = + let val (ctr, cp) = Thm.dest_binop (Thm.rhs_of trigger_eq) ||> rpair ct + in Thm.instantiate ([], map (apfst (dest_Var o Thm.term_of)) [cp, (ctr, mk_trigger ctss)]) trigger_eq end + + fun infer_trigger_eq_conv outer_ctxt (ctxt, cvs) ct = + let + val (lhs, rhs) = dest_cond_eq ct + + val vs = map Thm.term_of cvs + val thy = Proof_Context.theory_of ctxt + + fun get_mpats ct = + if is_simp_lhs ctxt (Thm.term_of ct) then minimal_pats vs ct + else [] + val gen = Variable.export_terms ctxt outer_ctxt + val filter_mpats = filter (proper_mpat thy gen (Thm.term_of rhs)) + + in insert_trigger_conv (filter_mpats (get_mpats lhs)) ct end + + fun has_trigger (@{const trigger} $ _ $ _) = true + | has_trigger _ = false + + fun try_trigger_conv cv ct = + if Old_SMT_Utils.under_quant has_trigger (Old_SMT_Utils.term_of ct) then + Conv.all_conv ct + else Conv.try_conv cv ct + + fun infer_trigger_conv ctxt = + if Config.get ctxt Old_SMT_Config.infer_triggers then + try_trigger_conv + (Old_SMT_Utils.under_quant_conv (infer_trigger_eq_conv ctxt) ctxt) + else Conv.all_conv +in + +fun trigger_conv ctxt = + Old_SMT_Utils.prop_conv + (check_trigger_conv ctxt then_conv infer_trigger_conv ctxt) + +val setup_trigger = + fold Old_SMT_Builtin.add_builtin_fun_ext'' + [@{const_name pat}, @{const_name nopat}, @{const_name trigger}] + +end + + +(** adding quantifier weights **) + +local + (*** check weight syntax ***) + + val has_no_weight = + not o Term.exists_subterm (fn @{const weight} => true | _ => false) + + fun is_weight (@{const weight} $ w $ t) = + (case try HOLogic.dest_number w of + SOME (_, i) => i >= 0 andalso has_no_weight t + | _ => false) + | is_weight t = has_no_weight t + + fun proper_trigger (@{const trigger} $ _ $ t) = is_weight t + | proper_trigger t = is_weight t + + fun check_weight_error ctxt t = + error ("SMT weight must be a non-negative number and must only occur " ^ + "under the top-most quantifier and an optional trigger: " ^ + Syntax.string_of_term ctxt t) + + fun check_weight_conv ctxt ct = + if Old_SMT_Utils.under_quant proper_trigger (Old_SMT_Utils.term_of ct) then + Conv.all_conv ct + else check_weight_error ctxt (Thm.term_of ct) + + + (*** insertion of weights ***) + + fun under_trigger_conv cv ct = + (case Thm.term_of ct of + @{const trigger} $ _ $ _ => Conv.arg_conv cv + | _ => cv) ct + + val weight_eq = + mk_meta_eq @{lemma "p = weight i p" by (simp add: weight_def)} + fun mk_weight_eq w = + let val cv = Thm.dest_arg1 (Thm.rhs_of weight_eq) + in + Thm.instantiate ([], [(dest_Var (Thm.term_of cv), Numeral.mk_cnumber @{ctyp int} w)]) + weight_eq + end + + fun add_weight_conv NONE _ = Conv.all_conv + | add_weight_conv (SOME weight) ctxt = + let val cv = Conv.rewr_conv (mk_weight_eq weight) + in Old_SMT_Utils.under_quant_conv (K (under_trigger_conv cv)) ctxt end +in + +fun weight_conv weight ctxt = + Old_SMT_Utils.prop_conv + (check_weight_conv ctxt then_conv 
add_weight_conv weight ctxt) + +val setup_weight = Old_SMT_Builtin.add_builtin_fun_ext'' @{const_name weight} + +end + + +(** combined general normalizations **) + +fun gen_normalize1_conv ctxt weight = + atomize_conv ctxt then_conv + unfold_special_quants_conv ctxt then_conv + Thm.beta_conversion true then_conv + trigger_conv ctxt then_conv + weight_conv weight ctxt + +fun gen_normalize1 ctxt weight thm = + thm + |> instantiate_elim + |> norm_def + |> Conv.fconv_rule (Thm.beta_conversion true then_conv Thm.eta_conversion) + |> Drule.forall_intr_vars + |> Conv.fconv_rule (gen_normalize1_conv ctxt weight) + +fun gen_norm1_safe ctxt (i, (weight, thm)) = + (case try (gen_normalize1 ctxt weight) thm of + SOME thm' => SOME (i, thm') + | NONE => (drop_fact_warning ctxt thm; NONE)) + +fun gen_normalize ctxt iwthms = map_filter (gen_norm1_safe ctxt) iwthms + + + +(* unfolding of definitions and theory-specific rewritings *) + +fun expand_head_conv cv ct = + (case Thm.term_of ct of + _ $ _ => + Conv.fun_conv (expand_head_conv cv) then_conv + Conv.try_conv (Thm.beta_conversion false) + | _ => cv) ct + + +(** rewrite bool case expressions as if expressions **) + +local + fun is_case_bool (Const (@{const_name "bool.case_bool"}, _)) = true + | is_case_bool _ = false + + val thm = mk_meta_eq @{lemma + "case_bool = (%x y P. if P then x else y)" by (rule ext)+ simp} + + fun unfold_conv _ = + Old_SMT_Utils.if_true_conv (is_case_bool o Term.head_of) + (expand_head_conv (Conv.rewr_conv thm)) +in + +fun rewrite_case_bool_conv ctxt = + Old_SMT_Utils.if_exists_conv is_case_bool (Conv.top_conv unfold_conv ctxt) + +val setup_case_bool = + Old_SMT_Builtin.add_builtin_fun_ext'' @{const_name "bool.case_bool"} + +end + + +(** unfold abs, min and max **) + +local + val abs_def = mk_meta_eq @{lemma + "abs = (%a::'a::abs_if. if a < 0 then - a else a)" + by (rule ext) (rule abs_if)} + + val min_def = mk_meta_eq @{lemma "min = (%a b. if a <= b then a else b)" + by (rule ext)+ (rule min_def)} + + val max_def = mk_meta_eq @{lemma "max = (%a b. if a <= b then b else a)" + by (rule ext)+ (rule max_def)} + + val defs = [(@{const_name min}, min_def), (@{const_name max}, max_def), + (@{const_name abs}, abs_def)] + + fun is_builtinT ctxt T = + Old_SMT_Builtin.is_builtin_typ_ext ctxt (Term.domain_type T) + + fun abs_min_max ctxt (Const (n, T)) = + (case AList.lookup (op =) defs n of + NONE => NONE + | SOME thm => if is_builtinT ctxt T then SOME thm else NONE) + | abs_min_max _ _ = NONE + + fun unfold_amm_conv ctxt ct = + (case abs_min_max ctxt (Term.head_of (Thm.term_of ct)) of + SOME thm => expand_head_conv (Conv.rewr_conv thm) + | NONE => Conv.all_conv) ct +in + +fun unfold_abs_min_max_conv ctxt = + Old_SMT_Utils.if_exists_conv (is_some o abs_min_max ctxt) + (Conv.top_conv unfold_amm_conv ctxt) + +val setup_abs_min_max = fold (Old_SMT_Builtin.add_builtin_fun_ext'' o fst) defs + +end + + +(** embedding of standard natural number operations into integer operations **) + +local + val nat_embedding = @{lemma + "ALL n. nat (int n) = n" + "ALL i. i >= 0 --> int (nat i) = i" + "ALL i. 
i < 0 --> int (nat i) = 0" + by simp_all} + + val simple_nat_ops = [ + @{const less (nat)}, @{const less_eq (nat)}, + @{const Suc}, @{const plus (nat)}, @{const minus (nat)}] + + val mult_nat_ops = + [@{const times (nat)}, @{const divide (nat)}, @{const modulo (nat)}] + + val nat_ops = simple_nat_ops @ mult_nat_ops + + val nat_consts = nat_ops @ [@{const numeral (nat)}, + @{const zero_class.zero (nat)}, @{const one_class.one (nat)}] + + val nat_int_coercions = [@{const of_nat (int)}, @{const nat}] + + val builtin_nat_ops = nat_int_coercions @ simple_nat_ops + + val is_nat_const = member (op aconv) nat_consts + + fun is_nat_const' @{const of_nat (int)} = true + | is_nat_const' t = is_nat_const t + + val expands = map mk_meta_eq @{lemma + "0 = nat 0" + "1 = nat 1" + "(numeral :: num => nat) = (%i. nat (numeral i))" + "op < = (%a b. int a < int b)" + "op <= = (%a b. int a <= int b)" + "Suc = (%a. nat (int a + 1))" + "op + = (%a b. nat (int a + int b))" + "op - = (%a b. nat (int a - int b))" + "op * = (%a b. nat (int a * int b))" + "op div = (%a b. nat (int a div int b))" + "op mod = (%a b. nat (int a mod int b))" + by (fastforce simp add: nat_mult_distrib nat_div_distrib nat_mod_distrib)+} + + val ints = map mk_meta_eq @{lemma + "int 0 = 0" + "int 1 = 1" + "int (Suc n) = int n + 1" + "int (n + m) = int n + int m" + "int (n - m) = int (nat (int n - int m))" + "int (n * m) = int n * int m" + "int (n div m) = int n div int m" + "int (n mod m) = int n mod int m" + by (auto simp add: of_nat_mult zdiv_int zmod_int)} + + val int_if = mk_meta_eq @{lemma + "int (if P then n else m) = (if P then int n else int m)" + by simp} + + fun mk_number_eq ctxt i lhs = + let + val eq = Old_SMT_Utils.mk_cequals lhs (Numeral.mk_cnumber @{ctyp int} i) + val tac = + Simplifier.simp_tac (put_simpset HOL_ss ctxt addsimps [@{thm of_nat_numeral}]) 1 + in Goal.norm_result ctxt (Goal.prove_internal ctxt [] eq (K tac)) end + + fun ite_conv cv1 cv2 = + Conv.combination_conv (Conv.combination_conv (Conv.arg_conv cv1) cv2) cv2 + + fun int_conv ctxt ct = + (case Thm.term_of ct of + @{const of_nat (int)} $ (n as (@{const numeral (nat)} $ _)) => + Conv.rewr_conv (mk_number_eq ctxt (snd (HOLogic.dest_number n)) ct) + | @{const of_nat (int)} $ _ => + (Conv.rewrs_conv ints then_conv Conv.sub_conv ints_conv ctxt) else_conv + (Conv.rewr_conv int_if then_conv + ite_conv (nat_conv ctxt) (int_conv ctxt)) else_conv + Conv.sub_conv (Conv.top_sweep_conv nat_conv) ctxt + | _ => Conv.no_conv) ct + + and ints_conv ctxt = Conv.top_sweep_conv int_conv ctxt + + and expand_conv ctxt = + Old_SMT_Utils.if_conv (is_nat_const o Term.head_of) + (expand_head_conv (Conv.rewrs_conv expands) then_conv ints_conv ctxt) + (int_conv ctxt) + + and nat_conv ctxt = Old_SMT_Utils.if_exists_conv is_nat_const' + (Conv.top_sweep_conv expand_conv ctxt) + + val uses_nat_int = Term.exists_subterm (member (op aconv) nat_int_coercions) +in + +val nat_as_int_conv = nat_conv + +fun add_nat_embedding thms = + if exists (uses_nat_int o Thm.prop_of) thms then (thms, nat_embedding) + else (thms, []) + +val setup_nat_as_int = + Old_SMT_Builtin.add_builtin_typ_ext (@{typ nat}, K true) #> + fold (Old_SMT_Builtin.add_builtin_fun_ext' o Term.dest_Const) builtin_nat_ops + +end + + +(** normalize numerals **) + +local + (* + rewrite Numeral1 into 1 + rewrite - 0 into 0 + *) + + fun is_irregular_number (Const (@{const_name numeral}, _) $ Const (@{const_name num.One}, _)) = + true + | is_irregular_number (Const (@{const_name uminus}, _) $ Const (@{const_name Groups.zero}, _)) = + true + 
| is_irregular_number _ = + false; + + fun is_strange_number ctxt t = is_irregular_number t andalso Old_SMT_Builtin.is_builtin_num ctxt t; + + val proper_num_ss = + simpset_of (put_simpset HOL_ss @{context} + addsimps @{thms Num.numeral_One minus_zero}) + + fun norm_num_conv ctxt = + Old_SMT_Utils.if_conv (is_strange_number ctxt) + (Simplifier.rewrite (put_simpset proper_num_ss ctxt)) Conv.no_conv +in + +fun normalize_numerals_conv ctxt = + Old_SMT_Utils.if_exists_conv (is_strange_number ctxt) + (Conv.top_sweep_conv norm_num_conv ctxt) + +end + + +(** combined unfoldings and rewritings **) + +fun unfold_conv ctxt = + rewrite_case_bool_conv ctxt then_conv + unfold_abs_min_max_conv ctxt then_conv + nat_as_int_conv ctxt then_conv + Thm.beta_conversion true + +fun unfold1 ctxt = map (apsnd (Conv.fconv_rule (unfold_conv ctxt))) + +fun burrow_ids f ithms = + let + val (is, thms) = split_list ithms + val (thms', extra_thms) = f thms + in (is ~~ thms') @ map (pair ~1) extra_thms end + +fun unfold2 ctxt ithms = + ithms + |> map (apsnd (Conv.fconv_rule (normalize_numerals_conv ctxt))) + |> burrow_ids add_nat_embedding + + + +(* overall normalization *) + +type extra_norm = Proof.context -> thm list * thm list -> thm list * thm list + +structure Extra_Norms = Generic_Data +( + type T = extra_norm Old_SMT_Utils.dict + val empty = [] + val extend = I + fun merge data = Old_SMT_Utils.dict_merge fst data +) + +fun add_extra_norm (cs, norm) = + Extra_Norms.map (Old_SMT_Utils.dict_update (cs, norm)) + +fun apply_extra_norms ctxt ithms = + let + val cs = Old_SMT_Config.solver_class_of ctxt + val es = Old_SMT_Utils.dict_lookup (Extra_Norms.get (Context.Proof ctxt)) cs + in burrow_ids (fold (fn e => e ctxt) es o rpair []) ithms end + +local + val ignored = member (op =) [@{const_name All}, @{const_name Ex}, + @{const_name Let}, @{const_name If}, @{const_name HOL.eq}] + + val schematic_consts_of = + let + fun collect (@{const trigger} $ p $ t) = + collect_trigger p #> collect t + | collect (t $ u) = collect t #> collect u + | collect (Abs (_, _, t)) = collect t + | collect (t as Const (n, _)) = + if not (ignored n) then Monomorph.add_schematic_consts_of t else I + | collect _ = I + and collect_trigger t = + let val dest = these o try HOLogic.dest_list + in fold (fold collect_pat o dest) (dest t) end + and collect_pat (Const (@{const_name pat}, _) $ t) = collect t + | collect_pat (Const (@{const_name nopat}, _) $ t) = collect t + | collect_pat _ = I + in (fn t => collect t Symtab.empty) end +in + +fun monomorph ctxt xthms = + let val (xs, thms) = split_list xthms + in + map (pair 1) thms + |> Monomorph.monomorph schematic_consts_of ctxt + |> maps (uncurry (map o pair)) o map2 pair xs o map (map snd) + end + +end + +fun normalize iwthms ctxt = + iwthms + |> gen_normalize ctxt + |> unfold1 ctxt + |> monomorph ctxt + |> unfold2 ctxt + |> apply_extra_norms ctxt + |> rpair ctxt + +val setup = Context.theory_map ( + setup_atomize #> + setup_unfolded_quants #> + setup_trigger #> + setup_weight #> + setup_case_bool #> + setup_abs_min_max #> + setup_nat_as_int) + +end diff --git a/src/test-gen/Old_SMT/old_smt_real.ML b/src/test-gen/Old_SMT/old_smt_real.ML new file mode 100644 index 0000000..6a2a793 --- /dev/null +++ b/src/test-gen/Old_SMT/old_smt_real.ML @@ -0,0 +1,134 @@ +(* Title: HOL/Library/Old_SMT/old_smt_real.ML + Author: Sascha Boehme, TU Muenchen + +SMT setup for reals. 
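+
+Illustrative note (not part of the original sources): with this setup a
+term such as  x + y < (3 :: real)  is translated using the SMT-LIB sort
+"Real", the function symbols "+" and "<" registered below, and the
+literal rendering "3.0" produced by real_num.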
+*) + +structure Old_SMT_Real: sig end = +struct + + +(* SMT-LIB logic *) + +fun smtlib_logic ts = + if exists (Term.exists_type (Term.exists_subtype (equal @{typ real}))) ts + then SOME "AUFLIRA" + else NONE + + +(* SMT-LIB and Z3 built-ins *) + +local + fun real_num _ i = SOME (string_of_int i ^ ".0") + + fun is_linear [t] = Old_SMT_Utils.is_number t + | is_linear [t, u] = Old_SMT_Utils.is_number t orelse Old_SMT_Utils.is_number u + | is_linear _ = false + + fun mk_times ts = Term.list_comb (@{const times (real)}, ts) + + fun times _ _ ts = if is_linear ts then SOME ("*", 2, ts, mk_times) else NONE +in + +val setup_builtins = + Old_SMT_Builtin.add_builtin_typ Old_SMTLIB_Interface.smtlibC + (@{typ real}, K (SOME "Real"), real_num) #> + fold (Old_SMT_Builtin.add_builtin_fun' Old_SMTLIB_Interface.smtlibC) [ + (@{const less (real)}, "<"), + (@{const less_eq (real)}, "<="), + (@{const uminus (real)}, "~"), + (@{const plus (real)}, "+"), + (@{const minus (real)}, "-") ] #> + Old_SMT_Builtin.add_builtin_fun Old_SMTLIB_Interface.smtlibC + (Term.dest_Const @{const times (real)}, times) #> + Old_SMT_Builtin.add_builtin_fun' Old_Z3_Interface.smtlib_z3C + (@{const times (real)}, "*") #> + Old_SMT_Builtin.add_builtin_fun' Old_Z3_Interface.smtlib_z3C + (@{const divide (real)}, "/") + +end + + +(* Z3 constructors *) + +local + fun z3_mk_builtin_typ (Old_Z3_Interface.Sym ("Real", _)) = SOME @{typ real} + | z3_mk_builtin_typ (Old_Z3_Interface.Sym ("real", _)) = SOME @{typ real} + (*FIXME: delete*) + | z3_mk_builtin_typ _ = NONE + + fun z3_mk_builtin_num _ i T = + if T = @{typ real} then SOME (Numeral.mk_cnumber @{ctyp real} i) + else NONE + + fun mk_nary _ cu [] = cu + | mk_nary ct _ cts = uncurry (fold_rev (Thm.mk_binop ct)) (split_last cts) + + val mk_uminus = Thm.apply (Thm.cterm_of @{context} @{const uminus (real)}) + val add = Thm.cterm_of @{context} @{const plus (real)} + val real0 = Numeral.mk_cnumber @{ctyp real} 0 + val mk_sub = Thm.mk_binop (Thm.cterm_of @{context} @{const minus (real)}) + val mk_mul = Thm.mk_binop (Thm.cterm_of @{context} @{const times (real)}) + val mk_div = Thm.mk_binop (Thm.cterm_of @{context} @{const divide (real)}) + val mk_lt = Thm.mk_binop (Thm.cterm_of @{context} @{const less (real)}) + val mk_le = Thm.mk_binop (Thm.cterm_of @{context} @{const less_eq (real)}) + + fun z3_mk_builtin_fun (Old_Z3_Interface.Sym ("-", _)) [ct] = SOME (mk_uminus ct) + | z3_mk_builtin_fun (Old_Z3_Interface.Sym ("+", _)) cts = + SOME (mk_nary add real0 cts) + | z3_mk_builtin_fun (Old_Z3_Interface.Sym ("-", _)) [ct, cu] = + SOME (mk_sub ct cu) + | z3_mk_builtin_fun (Old_Z3_Interface.Sym ("*", _)) [ct, cu] = + SOME (mk_mul ct cu) + | z3_mk_builtin_fun (Old_Z3_Interface.Sym ("/", _)) [ct, cu] = + SOME (mk_div ct cu) + | z3_mk_builtin_fun (Old_Z3_Interface.Sym ("<", _)) [ct, cu] = + SOME (mk_lt ct cu) + | z3_mk_builtin_fun (Old_Z3_Interface.Sym ("<=", _)) [ct, cu] = + SOME (mk_le ct cu) + | z3_mk_builtin_fun (Old_Z3_Interface.Sym (">", _)) [ct, cu] = + SOME (mk_lt cu ct) + | z3_mk_builtin_fun (Old_Z3_Interface.Sym (">=", _)) [ct, cu] = + SOME (mk_le cu ct) + | z3_mk_builtin_fun _ _ = NONE +in + +val z3_mk_builtins = { + mk_builtin_typ = z3_mk_builtin_typ, + mk_builtin_num = z3_mk_builtin_num, + mk_builtin_fun = (fn _ => fn sym => fn cts => + (case try (Thm.typ_of_cterm o hd) cts of + SOME @{typ real} => z3_mk_builtin_fun sym cts + | _ => NONE)) } + +end + + +(* Z3 proof reconstruction *) + +val real_rules = @{lemma + "0 + (x::real) = x" + "x + 0 = x" + "0 * x = 0" + "1 * x = x" + "x + y = y + x" + 
by auto} + +val real_linarith_proc = + Simplifier.make_simproc @{context} "fast_real_arith" + {lhss = [@{term "(m::real) < n"}, @{term "(m::real) \ n"}, @{term "(m::real) = n"}], + proc = K Lin_Arith.simproc} + + +(* setup *) + +val _ = + Theory.setup + (Context.theory_map ( + Old_SMTLIB_Interface.add_logic (10, smtlib_logic) #> + setup_builtins #> + Old_Z3_Interface.add_mk_builtins z3_mk_builtins #> + fold Old_Z3_Proof_Reconstruction.add_z3_rule real_rules #> + Old_Z3_Proof_Tools.add_simproc real_linarith_proc)) + +end diff --git a/src/test-gen/Old_SMT/old_smt_setup_solvers.ML b/src/test-gen/Old_SMT/old_smt_setup_solvers.ML new file mode 100644 index 0000000..15e01db --- /dev/null +++ b/src/test-gen/Old_SMT/old_smt_setup_solvers.ML @@ -0,0 +1,189 @@ +(* Title: HOL/Library/Old_SMT/old_smt_setup_solvers.ML + Author: Sascha Boehme, TU Muenchen + +Setup SMT solvers. +*) + +signature OLD_SMT_SETUP_SOLVERS = +sig + datatype z3_non_commercial = + Z3_Non_Commercial_Unknown | + Z3_Non_Commercial_Accepted | + Z3_Non_Commercial_Declined + val z3_non_commercial: unit -> z3_non_commercial + val z3_with_extensions: bool Config.T + val setup: theory -> theory +end + +structure Old_SMT_Setup_Solvers: OLD_SMT_SETUP_SOLVERS = +struct + +(* helper functions *) + +fun make_avail name () = getenv ("OLD_" ^ name ^ "_SOLVER") <> "" +fun make_command name () = [getenv ("OLD_" ^ name ^ "_SOLVER")] + +fun outcome_of unsat sat unknown solver_name line = + if String.isPrefix unsat line then Old_SMT_Solver.Unsat + else if String.isPrefix sat line then Old_SMT_Solver.Sat + else if String.isPrefix unknown line then Old_SMT_Solver.Unknown + else raise Old_SMT_Failure.SMT (Old_SMT_Failure.Other_Failure ("Solver " ^ + quote solver_name ^ " failed. Enable SMT tracing by setting the " ^ + "configuration option " ^ quote (Config.name_of Old_SMT_Config.trace) ^ " and " ^ + "see the trace for details.")) + +fun on_first_line test_outcome solver_name lines = + let + val empty_line = (fn "" => true | _ => false) + val split_first = (fn [] => ("", []) | l :: ls => (l, ls)) + val (l, ls) = split_first (snd (take_prefix empty_line lines)) + in (test_outcome solver_name l, ls) end + + +(* CVC3 *) + +local + fun cvc3_options ctxt = [ + "-seed", string_of_int (Config.get ctxt Old_SMT_Config.random_seed), + "-lang", "smtlib", "-output-lang", "presentation", + "-timeout", string_of_int (Real.ceil (Config.get ctxt Old_SMT_Config.timeout))] +in + +val cvc3: Old_SMT_Solver.solver_config = { + name = "cvc3", + class = K Old_SMTLIB_Interface.smtlibC, + avail = make_avail "CVC3", + command = make_command "CVC3", + options = cvc3_options, + default_max_relevant = 400 (* FUDGE *), + supports_filter = false, + outcome = + on_first_line (outcome_of "Unsatisfiable." "Satisfiable." 
"Unknown."), + cex_parser = NONE, + reconstruct = NONE } + +end + + +(* Yices *) + +val yices: Old_SMT_Solver.solver_config = { + name = "yices", + class = K Old_SMTLIB_Interface.smtlibC, + avail = make_avail "YICES", + command = make_command "YICES", + options = (fn ctxt => [ + "--rand-seed=" ^ string_of_int (Config.get ctxt Old_SMT_Config.random_seed), + "--timeout=" ^ + string_of_int (Real.ceil (Config.get ctxt Old_SMT_Config.timeout)), + "--smtlib"]), + default_max_relevant = 350 (* FUDGE *), + supports_filter = false, + outcome = on_first_line (outcome_of "unsat" "sat" "unknown"), + cex_parser = NONE, + reconstruct = NONE } + + +(* Z3 *) + +datatype z3_non_commercial = + Z3_Non_Commercial_Unknown | + Z3_Non_Commercial_Accepted | + Z3_Non_Commercial_Declined + + +local + val accepted = member (op =) ["yes", "Yes", "YES"] + val declined = member (op =) ["no", "No", "NO"] +in + +fun z3_non_commercial () = + let + val flag2 = getenv "OLD_Z3_NON_COMMERCIAL" + in + if accepted flag2 then Z3_Non_Commercial_Accepted + else if declined flag2 then Z3_Non_Commercial_Declined + else Z3_Non_Commercial_Unknown + end + +fun if_z3_non_commercial f = + (case z3_non_commercial () of + Z3_Non_Commercial_Accepted => f () + | Z3_Non_Commercial_Declined => + error (Pretty.string_of (Pretty.para + "The SMT solver Z3 may only be used for non-commercial applications.")) + | Z3_Non_Commercial_Unknown => + error + (Pretty.string_of + (Pretty.para + ("The SMT solver Z3 is not activated. To activate it, set the Isabelle \ + \system option \"z3_non_commercial\" to \"yes\" (e.g. via \ + \the Isabelle/jEdit menu Plugin Options / Isabelle / General).")) ^ + "\n\nSee also " ^ Url.print (Url.explode "http://z3.codeplex.com/license"))) + +end + + +val z3_with_extensions = + Attrib.setup_config_bool @{binding old_z3_with_extensions} (K false) + +local + fun z3_make_command name () = if_z3_non_commercial (make_command name) + + fun z3_options ctxt = + ["-rs:" ^ string_of_int (Config.get ctxt Old_SMT_Config.random_seed), + "MODEL=true", + "SOFT_TIMEOUT=" ^ + string_of_int (Real.ceil (1000.0 * Config.get ctxt Old_SMT_Config.timeout)), + "-smt"] + |> not (Config.get ctxt Old_SMT_Config.oracle) ? + append ["DISPLAY_PROOF=true", "PROOF_MODE=2"] + + fun z3_on_first_or_last_line solver_name lines = + let + fun junk l = + if String.isPrefix "WARNING: Out of allocated virtual memory" l + then raise Old_SMT_Failure.SMT Old_SMT_Failure.Out_Of_Memory + else + String.isPrefix "WARNING" l orelse + String.isPrefix "ERROR" l orelse + forall Symbol.is_ascii_blank (Symbol.explode l) + val lines = filter_out junk lines + fun outcome split = + the_default ("", []) (try split lines) + |>> outcome_of "unsat" "sat" "unknown" solver_name + in + (* Starting with version 4.0, Z3 puts the outcome on the first line of the + output rather than on the last line. 
*) + outcome (fn lines => (hd lines, tl lines)) + handle Old_SMT_Failure.SMT _ => outcome (swap o split_last) + end + + fun select_class ctxt = + if Config.get ctxt z3_with_extensions then Old_Z3_Interface.smtlib_z3C + else Old_SMTLIB_Interface.smtlibC +in + +val z3: Old_SMT_Solver.solver_config = { + name = "z3", + class = select_class, + avail = make_avail "Z3", + command = z3_make_command "Z3", + options = z3_options, + default_max_relevant = 350 (* FUDGE *), + supports_filter = true, + outcome = z3_on_first_or_last_line, + cex_parser = SOME Old_Z3_Model.parse_counterex, + reconstruct = SOME Old_Z3_Proof_Reconstruction.reconstruct } + +end + + +(* overall setup *) + +val setup = + Old_SMT_Solver.add_solver cvc3 #> + Old_SMT_Solver.add_solver yices #> + Old_SMT_Solver.add_solver z3 + +end diff --git a/src/test-gen/Old_SMT/old_smt_solver.ML b/src/test-gen/Old_SMT/old_smt_solver.ML new file mode 100644 index 0000000..da7b8e4 --- /dev/null +++ b/src/test-gen/Old_SMT/old_smt_solver.ML @@ -0,0 +1,374 @@ +(* Title: HOL/Library/Old_SMT/old_smt_solver.ML + Author: Sascha Boehme, TU Muenchen + +SMT solvers registry and SMT tactic. +*) + +signature OLD_SMT_SOLVER = +sig + (*configuration*) + datatype outcome = Unsat | Sat | Unknown + type solver_config = { + name: string, + class: Proof.context -> Old_SMT_Utils.class, + avail: unit -> bool, + command: unit -> string list, + options: Proof.context -> string list, + default_max_relevant: int, + supports_filter: bool, + outcome: string -> string list -> outcome * string list, + cex_parser: (Proof.context -> Old_SMT_Translate.recon -> string list -> + term list * term list) option, + reconstruct: (Proof.context -> Old_SMT_Translate.recon -> string list -> + int list * thm) option } + + (*registry*) + val add_solver: solver_config -> theory -> theory + val solver_name_of: Proof.context -> string + val available_solvers_of: Proof.context -> string list + val apply_solver: Proof.context -> (int * (int option * thm)) list -> + int list * thm + val default_max_relevant: Proof.context -> string -> int + + (*filter*) + type 'a smt_filter_data = + ('a * thm) list * ((int * thm) list * Proof.context) + val smt_filter_preprocess: Proof.context -> thm list -> thm -> + ('a * (int option * thm)) list -> int -> 'a smt_filter_data + val smt_filter_apply: Time.time -> 'a smt_filter_data -> + {outcome: Old_SMT_Failure.failure option, used_facts: ('a * thm) list} + + (*tactic*) + val smt_tac: Proof.context -> thm list -> int -> tactic + val smt_tac': Proof.context -> thm list -> int -> tactic +end + +structure Old_SMT_Solver: OLD_SMT_SOLVER = +struct + + +(* interface to external solvers *) + +local + +fun make_cmd command options problem_path proof_path = + space_implode " " + ("(exec 2>&1;" :: map Bash.string (command () @ options) @ + [File.bash_path problem_path, ")", ">", File.bash_path proof_path]) + +fun trace_and ctxt msg f x = + let val _ = Old_SMT_Config.trace_msg ctxt (fn () => msg) () + in f x end + +fun run ctxt name mk_cmd input = + (case Old_SMT_Config.certificates_of ctxt of + NONE => + if not (Old_SMT_Config.is_available ctxt name) then + error ("The SMT solver " ^ quote name ^ " is not installed.") + else if Config.get ctxt Old_SMT_Config.debug_files = "" then + trace_and ctxt ("Invoking SMT solver " ^ quote name ^ " ...") + (Cache_IO.run mk_cmd) input + else + let + val base_path = Path.explode (Config.get ctxt Old_SMT_Config.debug_files) + val in_path = Path.ext "smt_in" base_path + val out_path = Path.ext "smt_out" base_path + in Cache_IO.raw_run 
mk_cmd input in_path out_path end + | SOME certs => + (case Cache_IO.lookup certs input of + (NONE, key) => + if Config.get ctxt Old_SMT_Config.read_only_certificates then + error ("Bad certificate cache: missing certificate") + else + Cache_IO.run_and_cache certs key mk_cmd input + | (SOME output, _) => + trace_and ctxt ("Using cached certificate from " ^ + Path.print (Cache_IO.cache_path_of certs) ^ " ...") + I output)) + +fun run_solver ctxt name mk_cmd input = + let + fun pretty tag ls = Pretty.string_of (Pretty.big_list tag + (map Pretty.str ls)) + + val _ = Old_SMT_Config.trace_msg ctxt (pretty "Problem:" o split_lines) input + + val {redirected_output=res, output=err, return_code} = + Old_SMT_Config.with_timeout ctxt (run ctxt name mk_cmd) input + val _ = Old_SMT_Config.trace_msg ctxt (pretty "Solver:") err + + val ls = fst (take_suffix (equal "") res) + val _ = Old_SMT_Config.trace_msg ctxt (pretty "Result:") ls + + val _ = return_code <> 0 andalso + raise Old_SMT_Failure.SMT (Old_SMT_Failure.Abnormal_Termination return_code) + in ls end + +fun trace_assms ctxt = + Old_SMT_Config.trace_msg ctxt (Pretty.string_of o + Pretty.big_list "Assertions:" o map (Thm.pretty_thm ctxt o snd)) + +fun trace_recon_data ({context=ctxt, typs, terms, ...} : Old_SMT_Translate.recon) = + let + fun pretty_eq n p = Pretty.block [Pretty.str n, Pretty.str " = ", p] + fun p_typ (n, T) = pretty_eq n (Syntax.pretty_typ ctxt T) + fun p_term (n, t) = pretty_eq n (Syntax.pretty_term ctxt t) + in + Old_SMT_Config.trace_msg ctxt (fn () => + Pretty.string_of (Pretty.big_list "Names:" [ + Pretty.big_list "sorts:" (map p_typ (Symtab.dest typs)), + Pretty.big_list "functions:" (map p_term (Symtab.dest terms))])) () + end + +in + +fun invoke name command ithms ctxt = + let + val options = Old_SMT_Config.solver_options_of ctxt + val comments = ("solver: " ^ name) :: + ("timeout: " ^ string_of_real (Config.get ctxt Old_SMT_Config.timeout)) :: + ("random seed: " ^ + string_of_int (Config.get ctxt Old_SMT_Config.random_seed)) :: + "arguments:" :: options + + val (str, recon as {context=ctxt', ...}) = + ithms + |> tap (trace_assms ctxt) + |> Old_SMT_Translate.translate ctxt comments + ||> tap trace_recon_data + in (run_solver ctxt' name (make_cmd command options) str, recon) end + +end + + +(* configuration *) + +datatype outcome = Unsat | Sat | Unknown + +type solver_config = { + name: string, + class: Proof.context -> Old_SMT_Utils.class, + avail: unit -> bool, + command: unit -> string list, + options: Proof.context -> string list, + default_max_relevant: int, + supports_filter: bool, + outcome: string -> string list -> outcome * string list, + cex_parser: (Proof.context -> Old_SMT_Translate.recon -> string list -> + term list * term list) option, + reconstruct: (Proof.context -> Old_SMT_Translate.recon -> string list -> + int list * thm) option } + + +(* registry *) + +type solver_info = { + command: unit -> string list, + default_max_relevant: int, + supports_filter: bool, + reconstruct: Proof.context -> string list * Old_SMT_Translate.recon -> + int list * thm } + +structure Solvers = Generic_Data +( + type T = solver_info Symtab.table + val empty = Symtab.empty + val extend = I + fun merge data = Symtab.merge (K true) data +) + +local + fun finish outcome cex_parser reconstruct ocl outer_ctxt + (output, (recon as {context=ctxt, ...} : Old_SMT_Translate.recon)) = + (case outcome output of + (Unsat, ls) => + if not (Config.get ctxt Old_SMT_Config.oracle) andalso is_some reconstruct + then the reconstruct outer_ctxt 
recon ls + else ([], ocl ()) + | (result, ls) => + let + val (ts, us) = + (case cex_parser of SOME f => f ctxt recon ls | _ => ([], [])) + in + raise Old_SMT_Failure.SMT (Old_SMT_Failure.Counterexample { + is_real_cex = (result = Sat), + free_constraints = ts, + const_defs = us}) + end) + + val cfalse = Thm.cterm_of @{context} (@{const Trueprop} $ @{const False}) +in + +fun add_solver cfg = + let + val {name, class, avail, command, options, default_max_relevant, + supports_filter, outcome, cex_parser, reconstruct} = cfg + + fun core_oracle () = cfalse + + fun solver ocl = { + command = command, + default_max_relevant = default_max_relevant, + supports_filter = supports_filter, + reconstruct = finish (outcome name) cex_parser reconstruct ocl } + + val info = {name=name, class=class, avail=avail, options=options} + in + Thm.add_oracle (Binding.name name, core_oracle) #-> (fn (_, ocl) => + Context.theory_map (Solvers.map (Symtab.update_new (name, solver ocl)))) #> + Context.theory_map (Old_SMT_Config.add_solver info) + end + +end + +fun get_info ctxt name = + the (Symtab.lookup (Solvers.get (Context.Proof ctxt)) name) + +val solver_name_of = Old_SMT_Config.solver_of + +val available_solvers_of = Old_SMT_Config.available_solvers_of + +fun name_and_info_of ctxt = + let val name = solver_name_of ctxt + in (name, get_info ctxt name) end + +fun gen_preprocess ctxt iwthms = Old_SMT_Normalize.normalize iwthms ctxt + +fun gen_apply (ithms, ctxt) = + let val (name, {command, reconstruct, ...}) = name_and_info_of ctxt + in + (ithms, ctxt) + |-> invoke name command + |> reconstruct ctxt + |>> distinct (op =) + end + +fun apply_solver ctxt = gen_apply o gen_preprocess ctxt + +val default_max_relevant = #default_max_relevant oo get_info + +val supports_filter = #supports_filter o snd o name_and_info_of + + +(* check well-sortedness *) + +val has_topsort = Term.exists_type (Term.exists_subtype (fn + TFree (_, []) => true + | TVar (_, []) => true + | _ => false)) + +(* without this test, we would run into problems when atomizing the rules: *) +fun check_topsort ctxt thm = + if has_topsort (Thm.prop_of thm) then + (Old_SMT_Normalize.drop_fact_warning ctxt thm; TrueI) + else + thm + +fun check_topsorts ctxt iwthms = map (apsnd (apsnd (check_topsort ctxt))) iwthms + + +(* filter *) + +val cnot = Thm.cterm_of @{context} @{const Not} + +fun mk_result outcome xrules = { outcome = outcome, used_facts = xrules } + +type 'a smt_filter_data = ('a * thm) list * ((int * thm) list * Proof.context) + +fun smt_filter_preprocess ctxt facts goal xwthms i = + let + val ctxt = + ctxt + |> Config.put Old_SMT_Config.oracle false + |> Config.put Old_SMT_Config.filter_only_facts true + + val ({context=ctxt', prems, concl, ...}, _) = Subgoal.focus ctxt i NONE goal + fun negate ct = Thm.dest_comb ct ||> Thm.apply cnot |-> Thm.apply + val cprop = + (case try negate (Thm.rhs_of (Old_SMT_Normalize.atomize_conv ctxt' concl)) of + SOME ct => ct + | NONE => raise Old_SMT_Failure.SMT (Old_SMT_Failure.Other_Failure ( + "goal is not a HOL term"))) + in + map snd xwthms + |> map_index I + |> append (map (pair ~1 o pair NONE) (Thm.assume cprop :: prems @ facts)) + |> check_topsorts ctxt' + |> gen_preprocess ctxt' + |> pair (map (apsnd snd) xwthms) + end + +fun smt_filter_apply time_limit (xthms, (ithms, ctxt)) = + let + val ctxt' = + ctxt + |> Config.put Old_SMT_Config.timeout (Time.toReal time_limit) + + fun filter_thms false = K xthms + | filter_thms true = map_filter (try (nth xthms)) o fst + in + (ithms, ctxt') + |> gen_apply + |> 
filter_thms (supports_filter ctxt') + |> mk_result NONE + end + handle Old_SMT_Failure.SMT fail => mk_result (SOME fail) [] + + +(* SMT tactic *) + +local + fun trace_assumptions ctxt iwthms idxs = + let + val wthms = + idxs + |> filter (fn i => i >= 0) + |> map_filter (AList.lookup (op =) iwthms) + in + if Config.get ctxt Old_SMT_Config.trace_used_facts andalso length wthms > 0 + then + tracing (Pretty.string_of (Pretty.big_list "SMT used facts:" + (map (Thm.pretty_thm ctxt o snd) wthms))) + else () + end + + fun solve ctxt iwthms = + iwthms + |> check_topsorts ctxt + |> apply_solver ctxt + |>> trace_assumptions ctxt iwthms + |> snd + + fun str_of ctxt fail = + Old_SMT_Failure.string_of_failure ctxt fail + |> prefix ("Solver " ^ Old_SMT_Config.solver_of ctxt ^ ": ") + + fun safe_solve ctxt iwthms = SOME (solve ctxt iwthms) + handle + Old_SMT_Failure.SMT (fail as Old_SMT_Failure.Counterexample _) => + (Old_SMT_Config.verbose_msg ctxt (str_of ctxt) fail; NONE) + | Old_SMT_Failure.SMT (fail as Old_SMT_Failure.Time_Out) => + error ("SMT: Solver " ^ quote (Old_SMT_Config.solver_of ctxt) ^ ": " ^ + Old_SMT_Failure.string_of_failure ctxt fail ^ " (setting the " ^ + "configuration option " ^ quote (Config.name_of Old_SMT_Config.timeout) ^ " might help)") + | Old_SMT_Failure.SMT fail => error (str_of ctxt fail) + + fun tag_rules thms = map_index (apsnd (pair NONE)) thms + fun tag_prems thms = map (pair ~1 o pair NONE) thms + + fun resolve ctxt (SOME thm) = resolve_tac ctxt [thm] 1 + | resolve _ NONE = no_tac + + fun tac prove ctxt rules = + CONVERSION (Old_SMT_Normalize.atomize_conv ctxt) + THEN' resolve_tac ctxt @{thms ccontr} + THEN' SUBPROOF (fn {context = ctxt', prems, ...} => + resolve ctxt' (prove ctxt' (tag_rules rules @ tag_prems prems))) ctxt +in + +val smt_tac = tac safe_solve +val smt_tac' = tac (SOME oo solve) + +end + +end diff --git a/src/test-gen/Old_SMT/old_smt_translate.ML b/src/test-gen/Old_SMT/old_smt_translate.ML new file mode 100644 index 0000000..ab4a2a2 --- /dev/null +++ b/src/test-gen/Old_SMT/old_smt_translate.ML @@ -0,0 +1,589 @@ +(* Title: HOL/Library/Old_SMT/old_smt_translate.ML + Author: Sascha Boehme, TU Muenchen + +Translate theorems into an SMT intermediate format and serialize them. 
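+
+Illustrative sketch (not part of the original sources; ithms and comments
+are hypothetical placeholders): the solver layer invokes the translation
+roughly as
+
+  val (problem_str, recon) = Old_SMT_Translate.translate ctxt comments ithms
+
+where comments are informational strings for the problem header, ithms
+pairs each assertion with an index, and recon carries the data needed for
+proof reconstruction.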
+*) + +signature OLD_SMT_TRANSLATE = +sig + (*intermediate term structure*) + datatype squant = SForall | SExists + datatype 'a spattern = SPat of 'a list | SNoPat of 'a list + datatype sterm = + SVar of int | + SApp of string * sterm list | + SLet of string * sterm * sterm | + SQua of squant * string list * sterm spattern list * int option * sterm + + (*translation configuration*) + type prefixes = {sort_prefix: string, func_prefix: string} + type sign = { + header: string list, + sorts: string list, + dtyps: (string * (string * (string * string) list) list) list list, + funcs: (string * (string list * string)) list } + type config = { + prefixes: prefixes, + header: term list -> string list, + is_fol: bool, + has_datatypes: bool, + serialize: string list -> sign -> sterm list -> string } + type recon = { + context: Proof.context, + typs: typ Symtab.table, + terms: term Symtab.table, + rewrite_rules: thm list, + assms: (int * thm) list } + + (*translation*) + val add_config: Old_SMT_Utils.class * (Proof.context -> config) -> + Context.generic -> Context.generic + val translate: Proof.context -> string list -> (int * thm) list -> + string * recon +end + +structure Old_SMT_Translate: OLD_SMT_TRANSLATE = +struct + + +(* intermediate term structure *) + +datatype squant = SForall | SExists + +datatype 'a spattern = SPat of 'a list | SNoPat of 'a list + +datatype sterm = + SVar of int | + SApp of string * sterm list | + SLet of string * sterm * sterm | + SQua of squant * string list * sterm spattern list * int option * sterm + + + +(* translation configuration *) + +type prefixes = {sort_prefix: string, func_prefix: string} + +type sign = { + header: string list, + sorts: string list, + dtyps: (string * (string * (string * string) list) list) list list, + funcs: (string * (string list * string)) list } + +type config = { + prefixes: prefixes, + header: term list -> string list, + is_fol: bool, + has_datatypes: bool, + serialize: string list -> sign -> sterm list -> string } + +type recon = { + context: Proof.context, + typs: typ Symtab.table, + terms: term Symtab.table, + rewrite_rules: thm list, + assms: (int * thm) list } + + + +(* translation context *) + +fun make_tr_context {sort_prefix, func_prefix} = + (sort_prefix, 1, Typtab.empty, func_prefix, 1, Termtab.empty) + +fun string_of_index pre i = pre ^ string_of_int i + +fun add_typ T proper (cx as (sp, Tidx, typs, fp, idx, terms)) = + (case Typtab.lookup typs T of + SOME (n, _) => (n, cx) + | NONE => + let + val n = string_of_index sp Tidx + val typs' = Typtab.update (T, (n, proper)) typs + in (n, (sp, Tidx+1, typs', fp, idx, terms)) end) + +fun add_fun t sort (cx as (sp, Tidx, typs, fp, idx, terms)) = + (case Termtab.lookup terms t of + SOME (n, _) => (n, cx) + | NONE => + let + val n = string_of_index fp idx + val terms' = Termtab.update (t, (n, sort)) terms + in (n, (sp, Tidx, typs, fp, idx+1, terms')) end) + +fun sign_of header dtyps (_, _, typs, _, _, terms) = { + header = header, + sorts = Typtab.fold (fn (_, (n, true)) => cons n | _ => I) typs [], + dtyps = dtyps, + funcs = Termtab.fold (fn (_, (n, SOME ss)) => cons (n,ss) | _ => I) terms []} + +fun recon_of ctxt rules thms ithms (_, _, typs, _, _, terms) = + let + fun add_typ (T, (n, _)) = Symtab.update (n, T) + val typs' = Typtab.fold add_typ typs Symtab.empty + + fun add_fun (t, (n, _)) = Symtab.update (n, t) + val terms' = Termtab.fold add_fun terms Symtab.empty + + val assms = map (pair ~1) thms @ ithms + in + {context=ctxt, typs=typs', terms=terms', rewrite_rules=rules, 
assms=assms} + end + + + +(* preprocessing *) + +(** datatype declarations **) + +fun collect_datatypes_and_records (tr_context, ctxt) ts = + let + val (declss, ctxt') = + fold (Term.fold_types Old_SMT_Datatypes.add_decls) ts ([], ctxt) + + fun is_decl_typ T = exists (exists (equal T o fst)) declss + + fun add_typ' T proper = + (case Old_SMT_Builtin.dest_builtin_typ ctxt' T of + SOME n => pair n + | NONE => add_typ T proper) + + fun tr_select sel = + let val T = Term.range_type (Term.fastype_of sel) + in add_fun sel NONE ##>> add_typ' T (not (is_decl_typ T)) end + fun tr_constr (constr, selects) = + add_fun constr NONE ##>> fold_map tr_select selects + fun tr_typ (T, cases) = add_typ' T false ##>> fold_map tr_constr cases + val (declss', tr_context') = fold_map (fold_map tr_typ) declss tr_context + + fun add (constr, selects) = + Termtab.update (constr, length selects) #> + fold (Termtab.update o rpair 1) selects + val funcs = fold (fold (fold add o snd)) declss Termtab.empty + in ((funcs, declss', tr_context', ctxt'), ts) end + (* FIXME: also return necessary datatype and record theorems *) + + +(** eta-expand quantifiers, let expressions and built-ins *) + +local + fun eta f T t = Abs (Name.uu, T, f (Term.incr_boundvars 1 t $ Bound 0)) + + fun exp f T = eta f (Term.domain_type (Term.domain_type T)) + + fun exp2 T q = + let val U = Term.domain_type T + in Abs (Name.uu, U, q $ eta I (Term.domain_type U) (Bound 0)) end + + fun exp2' T l = + let val (U1, U2) = Term.dest_funT T ||> Term.domain_type + in Abs (Name.uu, U1, eta I U2 (l $ Bound 0)) end + + fun expf k i T t = + let val Ts = drop i (fst (Old_SMT_Utils.dest_funT k T)) + in + Term.incr_boundvars (length Ts) t + |> fold_rev (fn i => fn u => u $ Bound i) (0 upto length Ts - 1) + |> fold_rev (fn T => fn u => Abs (Name.uu, T, u)) Ts + end +in + +fun eta_expand ctxt is_fol funcs = + let + fun exp_func t T ts = + (case Termtab.lookup funcs t of + SOME k => + Term.list_comb (t, ts) + |> k <> length ts ? 
expf k (length ts) T + | NONE => Term.list_comb (t, ts)) + + fun expand ((q as Const (@{const_name All}, _)) $ Abs a) = q $ abs_expand a + | expand ((q as Const (@{const_name All}, T)) $ t) = q $ exp expand T t + | expand (q as Const (@{const_name All}, T)) = exp2 T q + | expand ((q as Const (@{const_name Ex}, _)) $ Abs a) = q $ abs_expand a + | expand ((q as Const (@{const_name Ex}, T)) $ t) = q $ exp expand T t + | expand (q as Const (@{const_name Ex}, T)) = exp2 T q + | expand ((l as Const (@{const_name Let}, _)) $ t $ Abs a) = + if is_fol then expand (Term.betapply (Abs a, t)) + else l $ expand t $ abs_expand a + | expand ((l as Const (@{const_name Let}, T)) $ t $ u) = + if is_fol then expand (u $ t) + else l $ expand t $ exp expand (Term.range_type T) u + | expand ((l as Const (@{const_name Let}, T)) $ t) = + if is_fol then + let val U = Term.domain_type (Term.range_type T) + in Abs (Name.uu, U, Bound 0 $ Term.incr_boundvars 1 t) end + else exp2 T (l $ expand t) + | expand (l as Const (@{const_name Let}, T)) = + if is_fol then + let val U = Term.domain_type (Term.range_type T) + in + Abs (Name.uu, Term.domain_type T, Abs (Name.uu, U, + Bound 0 $ Bound 1)) + end + else exp2' T l + | expand t = + (case Term.strip_comb t of + (u as Const (c as (_, T)), ts) => + (case Old_SMT_Builtin.dest_builtin ctxt c ts of + SOME (_, k, us, mk) => + if k = length us then mk (map expand us) + else if k < length us then + chop k (map expand us) |>> mk |> Term.list_comb + else expf k (length ts) T (mk (map expand us)) + | NONE => exp_func u T (map expand ts)) + | (u as Free (_, T), ts) => exp_func u T (map expand ts) + | (Abs a, ts) => Term.list_comb (abs_expand a, map expand ts) + | (u, ts) => Term.list_comb (u, map expand ts)) + + and abs_expand (n, T, t) = Abs (n, T, expand t) + + in map expand end + +end + + +(** introduce explicit applications **) + +local + (* + Make application explicit for functions with varying number of arguments. 
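+
+    Illustrative example (not from the original sources): if a constant
+    g : 'a => 'b => 'c occurs both as  g a b  and as  g a, its minimal
+    arity is at most 1, so  g a b  is rewritten to  fun_app (g a) b
+    while  g a  is kept as a partial application.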
+ *) + + fun add t i = apfst (Termtab.map_default (t, i) (Integer.min i)) + fun add_type T = apsnd (Typtab.update (T, ())) + + fun min_arities t = + (case Term.strip_comb t of + (u as Const _, ts) => add u (length ts) #> fold min_arities ts + | (u as Free _, ts) => add u (length ts) #> fold min_arities ts + | (Abs (_, T, u), ts) => add_type T #> min_arities u #> fold min_arities ts + | (_, ts) => fold min_arities ts) + + fun minimize types t i = + let + fun find_min j [] _ = j + | find_min j (U :: Us) T = + if Typtab.defined types T then j + else find_min (j + 1) Us (U --> T) + + val (Ts, T) = Term.strip_type (Term.type_of t) + in find_min 0 (take i (rev Ts)) T end + + fun app u (t, T) = + (Const (@{const_name fun_app}, T --> T) $ t $ u, Term.range_type T) + + fun apply i t T ts = + let + val (ts1, ts2) = chop i ts + val (_, U) = Old_SMT_Utils.dest_funT i T + in fst (fold app ts2 (Term.list_comb (t, ts1), U)) end +in + +fun intro_explicit_application ctxt funcs ts = + let + val (arities, types) = fold min_arities ts (Termtab.empty, Typtab.empty) + val arities' = Termtab.map (minimize types) arities + + fun app_func t T ts = + if is_some (Termtab.lookup funcs t) then Term.list_comb (t, ts) + else apply (the (Termtab.lookup arities' t)) t T ts + + fun in_list T f t = HOLogic.mk_list T (map f (HOLogic.dest_list t)) + + fun traverse Ts t = + (case Term.strip_comb t of + (q as Const (@{const_name All}, _), [Abs (x, T, u)]) => + q $ Abs (x, T, in_trigger (T :: Ts) u) + | (q as Const (@{const_name Ex}, _), [Abs (x, T, u)]) => + q $ Abs (x, T, in_trigger (T :: Ts) u) + | (q as Const (@{const_name Let}, _), [u1, u2 as Abs _]) => + q $ traverse Ts u1 $ traverse Ts u2 + | (u as Const (c as (_, T)), ts) => + (case Old_SMT_Builtin.dest_builtin ctxt c ts of + SOME (_, k, us, mk) => + let + val (ts1, ts2) = chop k (map (traverse Ts) us) + val U = Term.strip_type T |>> snd o chop k |> (op --->) + in apply 0 (mk ts1) U ts2 end + | NONE => app_func u T (map (traverse Ts) ts)) + | (u as Free (_, T), ts) => app_func u T (map (traverse Ts) ts) + | (u as Bound i, ts) => apply 0 u (nth Ts i) (map (traverse Ts) ts) + | (Abs (n, T, u), ts) => traverses Ts (Abs (n, T, traverse (T::Ts) u)) ts + | (u, ts) => traverses Ts u ts) + and in_trigger Ts ((c as @{const trigger}) $ p $ t) = + c $ in_pats Ts p $ in_weight Ts t + | in_trigger Ts t = in_weight Ts t + and in_pats Ts ps = + in_list @{typ "pattern list"} + (in_list @{typ pattern} (in_pat Ts)) ps + and in_pat Ts ((p as Const (@{const_name pat}, _)) $ t) = + p $ traverse Ts t + | in_pat Ts ((p as Const (@{const_name nopat}, _)) $ t) = + p $ traverse Ts t + | in_pat _ t = raise TERM ("bad pattern", [t]) + and in_weight Ts ((c as @{const weight}) $ w $ t) = + c $ w $ traverse Ts t + | in_weight Ts t = traverse Ts t + and traverses Ts t ts = Term.list_comb (t, map (traverse Ts) ts) + in map (traverse []) ts end + +val fun_app_eq = mk_meta_eq @{thm fun_app_def} + +end + + +(** map HOL formulas to FOL formulas (i.e., separate formulas froms terms) **) + +local + val term_bool = @{lemma "term_true ~= term_false" + by (simp add: term_true_def term_false_def)} + + val is_quant = member (op =) [@{const_name All}, @{const_name Ex}] + + val fol_rules = [ + Let_def, + mk_meta_eq @{thm term_true_def}, + mk_meta_eq @{thm term_false_def}, + @{lemma "P = True == P" by (rule eq_reflection) simp}, + @{lemma "if P then True else False == P" by (rule eq_reflection) simp}] + + fun as_term t = @{const HOL.eq (bool)} $ t $ @{const term_true} + + exception BAD_PATTERN of unit + + fun 
wrap_in_if pat t = + if pat then + raise BAD_PATTERN () + else + @{const If (bool)} $ t $ @{const term_true} $ @{const term_false} + + fun is_builtin_conn_or_pred ctxt c ts = + is_some (Old_SMT_Builtin.dest_builtin_conn ctxt c ts) orelse + is_some (Old_SMT_Builtin.dest_builtin_pred ctxt c ts) + + fun builtin b ctxt c ts = + (case (Const c, ts) of + (@{const HOL.eq (bool)}, [t, u]) => + if t = @{const term_true} orelse u = @{const term_true} then + Old_SMT_Builtin.dest_builtin_eq ctxt t u + else b ctxt c ts + | _ => b ctxt c ts) +in + +fun folify ctxt = + let + fun in_list T f t = HOLogic.mk_list T (map_filter f (HOLogic.dest_list t)) + + fun in_term pat t = + (case Term.strip_comb t of + (@{const True}, []) => @{const term_true} + | (@{const False}, []) => @{const term_false} + | (u as Const (@{const_name If}, _), [t1, t2, t3]) => + if pat then raise BAD_PATTERN () + else u $ in_form t1 $ in_term pat t2 $ in_term pat t3 + | (Const (c as (n, _)), ts) => + if is_builtin_conn_or_pred ctxt c ts then wrap_in_if pat (in_form t) + else if is_quant n then wrap_in_if pat (in_form t) + else Term.list_comb (Const c, map (in_term pat) ts) + | (Free c, ts) => Term.list_comb (Free c, map (in_term pat) ts) + | _ => t) + + and in_weight ((c as @{const weight}) $ w $ t) = c $ w $ in_form t + | in_weight t = in_form t + + and in_pat ((p as Const (@{const_name pat}, _)) $ t) = + p $ in_term true t + | in_pat ((p as Const (@{const_name nopat}, _)) $ t) = + p $ in_term true t + | in_pat t = raise TERM ("bad pattern", [t]) + + and in_pats ps = + in_list @{typ "pattern list"} + (SOME o in_list @{typ pattern} (try in_pat)) ps + + and in_trigger ((c as @{const trigger}) $ p $ t) = + c $ in_pats p $ in_weight t + | in_trigger t = in_weight t + + and in_form t = + (case Term.strip_comb t of + (q as Const (qn, _), [Abs (n, T, u)]) => + if is_quant qn then q $ Abs (n, T, in_trigger u) + else as_term (in_term false t) + | (Const c, ts) => + (case Old_SMT_Builtin.dest_builtin_conn ctxt c ts of + SOME (_, _, us, mk) => mk (map in_form us) + | NONE => + (case Old_SMT_Builtin.dest_builtin_pred ctxt c ts of + SOME (_, _, us, mk) => mk (map (in_term false) us) + | NONE => as_term (in_term false t))) + | _ => as_term (in_term false t)) + in + map in_form #> + cons (Old_SMT_Utils.prop_of term_bool) #> + pair (fol_rules, [term_bool], builtin) + end + +end + + +(* translation into intermediate format *) + +(** utility functions **) + +val quantifier = (fn + @{const_name All} => SOME SForall + | @{const_name Ex} => SOME SExists + | _ => NONE) + +fun group_quant qname Ts (t as Const (q, _) $ Abs (_, T, u)) = + if q = qname then group_quant qname (T :: Ts) u else (Ts, t) + | group_quant _ Ts t = (Ts, t) + +fun dest_weight (@{const weight} $ w $ t) = + (SOME (snd (HOLogic.dest_number w)), t) + | dest_weight t = (NONE, t) + +fun dest_pat (Const (@{const_name pat}, _) $ t) = (t, true) + | dest_pat (Const (@{const_name nopat}, _) $ t) = (t, false) + | dest_pat t = raise TERM ("bad pattern", [t]) + +fun dest_pats [] = I + | dest_pats ts = + (case map dest_pat ts |> split_list ||> distinct (op =) of + (ps, [true]) => cons (SPat ps) + | (ps, [false]) => cons (SNoPat ps) + | _ => raise TERM ("bad multi-pattern", ts)) + +fun dest_trigger (@{const trigger} $ tl $ t) = + (rev (fold (dest_pats o HOLogic.dest_list) (HOLogic.dest_list tl) []), t) + | dest_trigger t = ([], t) + +fun dest_quant qn T t = quantifier qn |> Option.map (fn q => + let + val (Ts, u) = group_quant qn [T] t + val (ps, p) = dest_trigger u + val (w, b) = dest_weight p + in 
(q, rev Ts, ps, w, b) end) + +fun fold_map_pat f (SPat ts) = fold_map f ts #>> SPat + | fold_map_pat f (SNoPat ts) = fold_map f ts #>> SNoPat + + +(** translation from Isabelle terms into SMT intermediate terms **) + +fun intermediate header dtyps builtin ctxt ts trx = + let + fun transT (T as TFree _) = add_typ T true + | transT (T as TVar _) = (fn _ => raise TYPE ("bad SMT type", [T], [])) + | transT (T as Type _) = + (case Old_SMT_Builtin.dest_builtin_typ ctxt T of + SOME n => pair n + | NONE => add_typ T true) + + fun app n ts = SApp (n, ts) + + fun trans t = + (case Term.strip_comb t of + (Const (qn, _), [Abs (_, T, t1)]) => + (case dest_quant qn T t1 of + SOME (q, Ts, ps, w, b) => + fold_map transT Ts ##>> fold_map (fold_map_pat trans) ps ##>> + trans b #>> (fn ((Ts', ps'), b') => SQua (q, Ts', ps', w, b')) + | NONE => raise TERM ("unsupported quantifier", [t])) + | (Const (@{const_name Let}, _), [t1, Abs (_, T, t2)]) => + transT T ##>> trans t1 ##>> trans t2 #>> + (fn ((U, u1), u2) => SLet (U, u1, u2)) + | (u as Const (c as (_, T)), ts) => + (case builtin ctxt c ts of + SOME (n, _, us, _) => fold_map trans us #>> app n + | NONE => transs u T ts) + | (u as Free (_, T), ts) => transs u T ts + | (Bound i, []) => pair (SVar i) + | _ => raise TERM ("bad SMT term", [t])) + + and transs t T ts = + let val (Us, U) = Old_SMT_Utils.dest_funT (length ts) T + in + fold_map transT Us ##>> transT U #-> (fn Up => + add_fun t (SOME Up) ##>> fold_map trans ts #>> SApp) + end + + val (us, trx') = fold_map trans ts trx + in ((sign_of (header ts) dtyps trx', us), trx') end + + + +(* translation *) + +structure Configs = Generic_Data +( + type T = (Proof.context -> config) Old_SMT_Utils.dict + val empty = [] + val extend = I + fun merge data = Old_SMT_Utils.dict_merge fst data +) + +fun add_config (cs, cfg) = Configs.map (Old_SMT_Utils.dict_update (cs, cfg)) + +fun get_config ctxt = + let val cs = Old_SMT_Config.solver_class_of ctxt + in + (case Old_SMT_Utils.dict_get (Configs.get (Context.Proof ctxt)) cs of + SOME cfg => cfg ctxt + | NONE => error ("SMT: no translation configuration found " ^ + "for solver class " ^ quote (Old_SMT_Utils.string_of_class cs))) + end + +fun translate ctxt comments ithms = + let + val {prefixes, is_fol, header, has_datatypes, serialize} = get_config ctxt + + val with_datatypes = + has_datatypes andalso Config.get ctxt Old_SMT_Config.datatypes + + fun no_dtyps (tr_context, ctxt) ts = + ((Termtab.empty, [], tr_context, ctxt), ts) + + val ts1 = map (Envir.beta_eta_contract o Old_SMT_Utils.prop_of o snd) ithms + + val ((funcs, dtyps, tr_context, ctxt1), ts2) = + ((make_tr_context prefixes, ctxt), ts1) + |-> (if with_datatypes then collect_datatypes_and_records else no_dtyps) + + fun is_binder (Const (@{const_name Let}, _) $ _) = true + | is_binder t = Lambda_Lifting.is_quantifier t + + fun mk_trigger ((q as Const (@{const_name All}, _)) $ Abs (n, T, t)) = + q $ Abs (n, T, mk_trigger t) + | mk_trigger (eq as (Const (@{const_name HOL.eq}, T) $ lhs $ _)) = + Term.domain_type T --> @{typ pattern} + |> (fn T => Const (@{const_name pat}, T) $ lhs) + |> HOLogic.mk_list @{typ pattern} o single + |> HOLogic.mk_list @{typ "pattern list"} o single + |> (fn t => @{const trigger} $ t $ eq) + | mk_trigger t = t + + val (ctxt2, ts3) = + ts2 + |> eta_expand ctxt1 is_fol funcs + |> rpair ctxt1 + |-> Lambda_Lifting.lift_lambdas NONE is_binder + |-> (fn (ts', defs) => fn ctxt' => + map mk_trigger defs @ ts' + |> intro_explicit_application ctxt' funcs + |> pair ctxt') + + val ((rewrite_rules, 
extra_thms, builtin), ts4) = + (if is_fol then folify ctxt2 else pair ([], [], I)) ts3 + + val rewrite_rules' = fun_app_eq :: rewrite_rules + in + (ts4, tr_context) + |-> intermediate header dtyps (builtin Old_SMT_Builtin.dest_builtin) ctxt2 + |>> uncurry (serialize comments) + ||> recon_of ctxt2 rewrite_rules' extra_thms ithms + end + +end diff --git a/src/test-gen/Old_SMT/old_smt_utils.ML b/src/test-gen/Old_SMT/old_smt_utils.ML new file mode 100644 index 0000000..8603f1a --- /dev/null +++ b/src/test-gen/Old_SMT/old_smt_utils.ML @@ -0,0 +1,221 @@ +(* Title: HOL/Library/Old_SMT/old_smt_utils.ML + Author: Sascha Boehme, TU Muenchen + +General utility functions. +*) + +signature OLD_SMT_UTILS = +sig + (*basic combinators*) + val repeat: ('a -> 'a option) -> 'a -> 'a + val repeat_yield: ('a -> 'b -> ('a * 'b) option) -> 'a -> 'b -> 'a * 'b + + (*class dictionaries*) + type class = string list + val basicC: class + val string_of_class: class -> string + type 'a dict = (class * 'a) Ord_List.T + val dict_map_default: class * 'a -> ('a -> 'a) -> 'a dict -> 'a dict + val dict_update: class * 'a -> 'a dict -> 'a dict + val dict_merge: ('a * 'a -> 'a) -> 'a dict * 'a dict -> 'a dict + val dict_lookup: 'a dict -> class -> 'a list + val dict_get: 'a dict -> class -> 'a option + + (*types*) + val dest_funT: int -> typ -> typ list * typ + + (*terms*) + val dest_conj: term -> term * term + val dest_disj: term -> term * term + val under_quant: (term -> 'a) -> term -> 'a + val is_number: term -> bool + + (*patterns and instantiations*) + val mk_const_pat: theory -> string -> (ctyp -> 'a) -> 'a * cterm + val destT1: ctyp -> ctyp + val destT2: ctyp -> ctyp + val instTs: ctyp list -> ctyp list * cterm -> cterm + val instT: ctyp -> ctyp * cterm -> cterm + val instT': cterm -> ctyp * cterm -> cterm + + (*certified terms*) + val dest_cabs: cterm -> Proof.context -> cterm * Proof.context + val dest_all_cabs: cterm -> Proof.context -> cterm * Proof.context + val dest_cbinder: cterm -> Proof.context -> cterm * Proof.context + val dest_all_cbinders: cterm -> Proof.context -> cterm * Proof.context + val mk_cprop: cterm -> cterm + val dest_cprop: cterm -> cterm + val mk_cequals: cterm -> cterm -> cterm + val term_of: cterm -> term + val prop_of: thm -> term + + (*conversions*) + val if_conv: (term -> bool) -> conv -> conv -> conv + val if_true_conv: (term -> bool) -> conv -> conv + val if_exists_conv: (term -> bool) -> conv -> conv + val binders_conv: (Proof.context -> conv) -> Proof.context -> conv + val under_quant_conv: (Proof.context * cterm list -> conv) -> + Proof.context -> conv + val prop_conv: conv -> conv +end + +structure Old_SMT_Utils: OLD_SMT_UTILS = +struct + +(* basic combinators *) + +fun repeat f = + let fun rep x = (case f x of SOME y => rep y | NONE => x) + in rep end + +fun repeat_yield f = + let fun rep x y = (case f x y of SOME (x', y') => rep x' y' | NONE => (x, y)) + in rep end + + +(* class dictionaries *) + +type class = string list + +val basicC = [] + +fun string_of_class [] = "basic" + | string_of_class cs = "basic." ^ space_implode "." 
cs + +type 'a dict = (class * 'a) Ord_List.T + +fun class_ord ((cs1, _), (cs2, _)) = + rev_order (list_ord fast_string_ord (cs1, cs2)) + +fun dict_insert (cs, x) d = + if AList.defined (op =) d cs then d + else Ord_List.insert class_ord (cs, x) d + +fun dict_map_default (cs, x) f = + dict_insert (cs, x) #> AList.map_entry (op =) cs f + +fun dict_update (e as (_, x)) = dict_map_default e (K x) + +fun dict_merge val_merge = sort class_ord o AList.join (op =) (K val_merge) + +fun dict_lookup d cs = + let fun match (cs', x) = if is_prefix (op =) cs' cs then SOME x else NONE + in map_filter match d end + +fun dict_get d cs = + (case AList.lookup (op =) d cs of + NONE => (case cs of [] => NONE | _ => dict_get d (take (length cs - 1) cs)) + | SOME x => SOME x) + + +(* types *) + +val dest_funT = + let + fun dest Ts 0 T = (rev Ts, T) + | dest Ts i (Type ("fun", [T, U])) = dest (T::Ts) (i-1) U + | dest _ _ T = raise TYPE ("not a function type", [T], []) + in dest [] end + + +(* terms *) + +fun dest_conj (@{const HOL.conj} $ t $ u) = (t, u) + | dest_conj t = raise TERM ("not a conjunction", [t]) + +fun dest_disj (@{const HOL.disj} $ t $ u) = (t, u) + | dest_disj t = raise TERM ("not a disjunction", [t]) + +fun under_quant f t = + (case t of + Const (@{const_name All}, _) $ Abs (_, _, u) => under_quant f u + | Const (@{const_name Ex}, _) $ Abs (_, _, u) => under_quant f u + | _ => f t) + +val is_number = + let + fun is_num env (Const (@{const_name If}, _) $ _ $ t $ u) = + is_num env t andalso is_num env u + | is_num env (Const (@{const_name Let}, _) $ t $ Abs (_, _, u)) = + is_num (t :: env) u + | is_num env (Bound i) = i < length env andalso is_num env (nth env i) + | is_num _ t = can HOLogic.dest_number t + in is_num [] end + + +(* patterns and instantiations *) + +fun mk_const_pat thy name destT = + let val cpat = Thm.global_cterm_of thy (Const (name, Sign.the_const_type thy name)) + in (destT (Thm.ctyp_of_cterm cpat), cpat) end + +val destT1 = hd o Thm.dest_ctyp +val destT2 = hd o tl o Thm.dest_ctyp + +fun instTs cUs (cTs, ct) = Thm.instantiate_cterm (map (dest_TVar o Thm.typ_of) cTs ~~ cUs, []) ct +fun instT cU (cT, ct) = instTs [cU] ([cT], ct) +fun instT' ct = instT (Thm.ctyp_of_cterm ct) + + +(* certified terms *) + +fun dest_cabs ct ctxt = + (case Thm.term_of ct of + Abs _ => + let val (n, ctxt') = yield_singleton Variable.variant_fixes Name.uu ctxt + in (snd (Thm.dest_abs (SOME n) ct), ctxt') end + | _ => raise CTERM ("no abstraction", [ct])) + +val dest_all_cabs = repeat_yield (try o dest_cabs) + +fun dest_cbinder ct ctxt = + (case Thm.term_of ct of + Const _ $ Abs _ => dest_cabs (Thm.dest_arg ct) ctxt + | _ => raise CTERM ("not a binder", [ct])) + +val dest_all_cbinders = repeat_yield (try o dest_cbinder) + +val mk_cprop = Thm.apply (Thm.cterm_of @{context} @{const Trueprop}) + +fun dest_cprop ct = + (case Thm.term_of ct of + @{const Trueprop} $ _ => Thm.dest_arg ct + | _ => raise CTERM ("not a property", [ct])) + +val equals = mk_const_pat @{theory} @{const_name Pure.eq} destT1 +fun mk_cequals ct cu = Thm.mk_binop (instT' ct equals) ct cu + +val dest_prop = (fn @{const Trueprop} $ t => t | t => t) +fun term_of ct = dest_prop (Thm.term_of ct) +fun prop_of thm = dest_prop (Thm.prop_of thm) + + +(* conversions *) + +fun if_conv pred cv1 cv2 ct = if pred (Thm.term_of ct) then cv1 ct else cv2 ct + +fun if_true_conv pred cv = if_conv pred cv Conv.all_conv + +fun if_exists_conv pred = if_true_conv (Term.exists_subterm pred) + +fun binders_conv cv ctxt = + Conv.binder_conv (binders_conv cv o snd) 
ctxt else_conv cv ctxt + +fun under_quant_conv cv ctxt = + let + fun quant_conv inside ctxt cvs ct = + (case Thm.term_of ct of + Const (@{const_name All}, _) $ Abs _ => + Conv.binder_conv (under_conv cvs) ctxt + | Const (@{const_name Ex}, _) $ Abs _ => + Conv.binder_conv (under_conv cvs) ctxt + | _ => if inside then cv (ctxt, cvs) else Conv.all_conv) ct + and under_conv cvs (cv, ctxt) = quant_conv true ctxt (cv :: cvs) + in quant_conv false ctxt [] end + +fun prop_conv cv ct = + (case Thm.term_of ct of + @{const Trueprop} $ _ => Conv.arg_conv cv ct + | _ => raise CTERM ("not a property", [ct])) + +end diff --git a/src/test-gen/Old_SMT/old_smt_word.ML b/src/test-gen/Old_SMT/old_smt_word.ML new file mode 100644 index 0000000..4303aba --- /dev/null +++ b/src/test-gen/Old_SMT/old_smt_word.ML @@ -0,0 +1,146 @@ +(* Title: HOL/Library/Old_SMT/old_smt_word.ML + Author: Sascha Boehme, TU Muenchen + +SMT setup for words. +*) + +structure Old_SMT_Word: sig end = +struct + +open Word_Lib + +(* SMT-LIB logic *) + +fun smtlib_logic ts = + if exists (Term.exists_type (Term.exists_subtype is_wordT)) ts + then SOME "QF_AUFBV" + else NONE + + +(* SMT-LIB builtins *) + +local + val smtlibC = Old_SMTLIB_Interface.smtlibC + + val wordT = @{typ "'a::len word"} + + fun index1 n i = n ^ "[" ^ string_of_int i ^ "]" + fun index2 n i j = n ^ "[" ^ string_of_int i ^ ":" ^ string_of_int j ^ "]" + + fun word_typ (Type (@{type_name word}, [T])) = + Option.map (index1 "BitVec") (try dest_binT T) + | word_typ _ = NONE + + fun word_num (Type (@{type_name word}, [T])) i = + Option.map (index1 ("bv" ^ string_of_int i)) (try dest_binT T) + | word_num _ _ = NONE + + fun if_fixed pred m n T ts = + let val (Us, U) = Term.strip_type T + in + if pred (U, Us) then + SOME (n, length Us, ts, Term.list_comb o pair (Const (m, T))) + else NONE + end + + fun if_fixed_all m = if_fixed (forall (can dest_wordT) o (op ::)) m + fun if_fixed_args m = if_fixed (forall (can dest_wordT) o snd) m + + fun add_word_fun f (t, n) = + let val (m, _) = Term.dest_Const t + in Old_SMT_Builtin.add_builtin_fun smtlibC (Term.dest_Const t, K (f m n)) end + + fun hd2 xs = hd (tl xs) + + fun mk_nat i = @{const nat} $ HOLogic.mk_number @{typ nat} i + + fun dest_nat (@{const nat} $ n) = snd (HOLogic.dest_number n) + | dest_nat t = raise TERM ("not a natural number", [t]) + + fun mk_shift c [t, u] = Const c $ t $ mk_nat (snd (HOLogic.dest_number u)) + | mk_shift c ts = raise TERM ("bad arguments", Const c :: ts) + + fun shift m n T ts = + let val U = Term.domain_type T + in + (case (can dest_wordT U, try (dest_nat o hd2) ts) of + (true, SOME i) => + SOME (n, 2, [hd ts, HOLogic.mk_number U i], mk_shift (m, T)) + | _ => NONE) (* FIXME: also support non-numerical shifts *) + end + + fun mk_extract c i ts = Term.list_comb (Const c, mk_nat i :: ts) + + fun extract m n T ts = + let val U = Term.range_type (Term.range_type T) + in + (case (try (dest_nat o hd) ts, try dest_wordT U) of + (SOME lb, SOME i) => + SOME (index2 n (i + lb - 1) lb, 1, tl ts, mk_extract (m, T) lb) + | _ => NONE) + end + + fun mk_extend c ts = Term.list_comb (Const c, ts) + + fun extend m n T ts = + let val (U1, U2) = Term.dest_funT T + in + (case (try dest_wordT U1, try dest_wordT U2) of + (SOME i, SOME j) => + if j-i >= 0 then SOME (index1 n (j-i), 1, ts, mk_extend (m, T)) + else NONE + | _ => NONE) + end + + fun mk_rotate c i ts = Term.list_comb (Const c, mk_nat i :: ts) + + fun rotate m n T ts = + let val U = Term.domain_type (Term.range_type T) + in + (case (can dest_wordT U, try (dest_nat o 
hd) ts) of + (true, SOME i) => SOME (index1 n i, 1, tl ts, mk_rotate (m, T) i) + | _ => NONE) + end +in + +val setup_builtins = + Old_SMT_Builtin.add_builtin_typ smtlibC (wordT, word_typ, word_num) #> + fold (add_word_fun if_fixed_all) [ + (@{term "uminus :: 'a::len word => _"}, "bvneg"), + (@{term "plus :: 'a::len word => _"}, "bvadd"), + (@{term "minus :: 'a::len word => _"}, "bvsub"), + (@{term "times :: 'a::len word => _"}, "bvmul"), + (@{term "bitNOT :: 'a::len word => _"}, "bvnot"), + (@{term "bitAND :: 'a::len word => _"}, "bvand"), + (@{term "bitOR :: 'a::len word => _"}, "bvor"), + (@{term "bitXOR :: 'a::len word => _"}, "bvxor"), + (@{term "word_cat :: 'a::len word => _"}, "concat") ] #> + fold (add_word_fun shift) [ + (@{term "shiftl :: 'a::len word => _ "}, "bvshl"), + (@{term "shiftr :: 'a::len word => _"}, "bvlshr"), + (@{term "sshiftr :: 'a::len word => _"}, "bvashr") ] #> + add_word_fun extract + (@{term "slice :: _ => 'a::len word => _"}, "extract") #> + fold (add_word_fun extend) [ + (@{term "ucast :: 'a::len word => _"}, "zero_extend"), + (@{term "scast :: 'a::len word => _"}, "sign_extend") ] #> + fold (add_word_fun rotate) [ + (@{term word_rotl}, "rotate_left"), + (@{term word_rotr}, "rotate_right") ] #> + fold (add_word_fun if_fixed_args) [ + (@{term "less :: 'a::len word => _"}, "bvult"), + (@{term "less_eq :: 'a::len word => _"}, "bvule"), + (@{term word_sless}, "bvslt"), + (@{term word_sle}, "bvsle") ] + +end + + +(* setup *) + +val _ = + Theory.setup + (Context.theory_map + (Old_SMTLIB_Interface.add_logic (20, smtlib_logic) #> setup_builtins)) + +end diff --git a/src/test-gen/Old_SMT/old_smtlib_interface.ML b/src/test-gen/Old_SMT/old_smtlib_interface.ML new file mode 100644 index 0000000..dc00faa --- /dev/null +++ b/src/test-gen/Old_SMT/old_smtlib_interface.ML @@ -0,0 +1,161 @@ +(* Title: HOL/Library/Old_SMT/old_smtlib_interface.ML + Author: Sascha Boehme, TU Muenchen + +Interface to SMT solvers based on the SMT-LIB format. 
+*) + +signature OLD_SMTLIB_INTERFACE = +sig + val smtlibC: Old_SMT_Utils.class + val add_logic: int * (term list -> string option) -> Context.generic -> + Context.generic + val translate_config: Proof.context -> Old_SMT_Translate.config + val setup: theory -> theory +end + +structure Old_SMTLIB_Interface: OLD_SMTLIB_INTERFACE = +struct + + +val smtlibC = ["smtlib"] + + +(* builtins *) + +local + fun int_num _ i = SOME (string_of_int i) + + fun is_linear [t] = Old_SMT_Utils.is_number t + | is_linear [t, u] = Old_SMT_Utils.is_number t orelse Old_SMT_Utils.is_number u + | is_linear _ = false + + fun times _ _ ts = + let val mk = Term.list_comb o pair @{const times (int)} + in if is_linear ts then SOME ("*", 2, ts, mk) else NONE end +in + +val setup_builtins = + Old_SMT_Builtin.add_builtin_typ smtlibC (@{typ int}, K (SOME "Int"), int_num) #> + fold (Old_SMT_Builtin.add_builtin_fun' smtlibC) [ + (@{const True}, "true"), + (@{const False}, "false"), + (@{const Not}, "not"), + (@{const HOL.conj}, "and"), + (@{const HOL.disj}, "or"), + (@{const HOL.implies}, "implies"), + (@{const HOL.eq (bool)}, "iff"), + (@{const HOL.eq ('a)}, "="), + (@{const If (bool)}, "if_then_else"), + (@{const If ('a)}, "ite"), + (@{const less (int)}, "<"), + (@{const less_eq (int)}, "<="), + (@{const uminus (int)}, "~"), + (@{const plus (int)}, "+"), + (@{const minus (int)}, "-") ] #> + Old_SMT_Builtin.add_builtin_fun smtlibC + (Term.dest_Const @{const times (int)}, times) + +end + + +(* serialization *) + +(** header **) + +fun fst_int_ord ((i1, _), (i2, _)) = int_ord (i1, i2) + +structure Logics = Generic_Data +( + type T = (int * (term list -> string option)) list + val empty = [] + val extend = I + fun merge data = Ord_List.merge fst_int_ord data +) + +fun add_logic pf = Logics.map (Ord_List.insert fst_int_ord pf) + +fun choose_logic ctxt ts = + let + fun choose [] = "AUFLIA" + | choose ((_, f) :: fs) = (case f ts of SOME s => s | NONE => choose fs) + in [":logic " ^ choose (Logics.get (Context.Proof ctxt))] end + + +(** serialization **) + +val add = Buffer.add +fun sep f = add " " #> f +fun enclose l r f = sep (add l #> f #> add r) +val par = enclose "(" ")" +fun app n f = (fn [] => sep (add n) | xs => par (add n #> fold f xs)) +fun line f = f #> add "\n" + +fun var i = add "?v" #> add (string_of_int i) + +fun sterm l (Old_SMT_Translate.SVar i) = sep (var (l - i - 1)) + | sterm l (Old_SMT_Translate.SApp (n, ts)) = app n (sterm l) ts + | sterm _ (Old_SMT_Translate.SLet _) = + raise Fail "SMT-LIB: unsupported let expression" + | sterm l (Old_SMT_Translate.SQua (q, ss, ps, w, t)) = + let + fun quant Old_SMT_Translate.SForall = add "forall" + | quant Old_SMT_Translate.SExists = add "exists" + val vs = map_index (apfst (Integer.add l)) ss + fun var_decl (i, s) = par (var i #> sep (add s)) + val sub = sterm (l + length ss) + fun pat kind ts = sep (add kind #> enclose "{" " }" (fold sub ts)) + fun pats (Old_SMT_Translate.SPat ts) = pat ":pat" ts + | pats (Old_SMT_Translate.SNoPat ts) = pat ":nopat" ts + fun weight NONE = I + | weight (SOME i) = + sep (add ":weight { " #> add (string_of_int i) #> add " }") + in + par (quant q #> fold var_decl vs #> sub t #> fold pats ps #> weight w) + end + +fun ssort sorts = sort fast_string_ord sorts +fun fsort funcs = sort (prod_ord fast_string_ord (K EQUAL)) funcs + +fun sdatatypes decls = + let + fun con (n, []) = sep (add n) + | con (n, sels) = par (add n #> + fold (fn (n, s) => par (add n #> sep (add s))) sels) + fun dtyp (n, decl) = add n #> fold con decl + in line (add ":datatypes 
" #> par (fold (par o dtyp) decls)) end + +fun serialize comments {header, sorts, dtyps, funcs} ts = + Buffer.empty + |> line (add "(benchmark Isabelle") + |> line (add ":status unknown") + |> fold (line o add) header + |> length sorts > 0 ? + line (add ":extrasorts" #> par (fold (sep o add) (ssort sorts))) + |> fold sdatatypes dtyps + |> length funcs > 0 ? ( + line (add ":extrafuns" #> add " (") #> + fold (fn (f, (ss, s)) => + line (sep (app f (sep o add) (ss @ [s])))) (fsort funcs) #> + line (add ")")) + |> fold (fn t => line (add ":assumption" #> sterm 0 t)) ts + |> line (add ":formula true)") + |> fold (fn str => line (add "; " #> add str)) comments + |> Buffer.content + + +(* interface *) + +fun translate_config ctxt = { + prefixes = { + sort_prefix = "S", + func_prefix = "f"}, + header = choose_logic ctxt, + is_fol = true, + has_datatypes = false, + serialize = serialize} + +val setup = Context.theory_map ( + setup_builtins #> + Old_SMT_Translate.add_config (smtlibC, translate_config)) + +end diff --git a/src/test-gen/Old_SMT/old_z3_interface.ML b/src/test-gen/Old_SMT/old_z3_interface.ML new file mode 100644 index 0000000..ec9f3d6 --- /dev/null +++ b/src/test-gen/Old_SMT/old_z3_interface.ML @@ -0,0 +1,239 @@ +(* Title: HOL/Library/Old_SMT/old_z3_interface.ML + Author: Sascha Boehme, TU Muenchen + +Interface to Z3 based on a relaxed version of SMT-LIB. +*) + +signature OLD_Z3_INTERFACE = +sig + val smtlib_z3C: Old_SMT_Utils.class + val setup: theory -> theory + + datatype sym = Sym of string * sym list + type mk_builtins = { + mk_builtin_typ: sym -> typ option, + mk_builtin_num: theory -> int -> typ -> cterm option, + mk_builtin_fun: theory -> sym -> cterm list -> cterm option } + val add_mk_builtins: mk_builtins -> Context.generic -> Context.generic + val mk_builtin_typ: Proof.context -> sym -> typ option + val mk_builtin_num: Proof.context -> int -> typ -> cterm option + val mk_builtin_fun: Proof.context -> sym -> cterm list -> cterm option + + val is_builtin_theory_term: Proof.context -> term -> bool +end + +structure Old_Z3_Interface: OLD_Z3_INTERFACE = +struct + +val smtlib_z3C = Old_SMTLIB_Interface.smtlibC @ ["z3"] + + + +(* interface *) + +local + fun translate_config ctxt = + let + val {prefixes, header, is_fol, serialize, ...} = + Old_SMTLIB_Interface.translate_config ctxt + in + {prefixes=prefixes, header=header, is_fol=is_fol, serialize=serialize, + has_datatypes=true} + end + + fun is_div_mod @{const divide (int)} = true + | is_div_mod @{const modulo (int)} = true + | is_div_mod _ = false + + val div_by_z3div = @{lemma + "ALL k l. k div l = ( + if k = 0 | l = 0 then 0 + else if (0 < k & 0 < l) | (k < 0 & 0 < l) then z3div k l + else z3div (-k) (-l))" + by (simp add: z3div_def)} + + val mod_by_z3mod = @{lemma + "ALL k l. 
k mod l = ( + if l = 0 then k + else if k = 0 then 0 + else if (0 < k & 0 < l) | (k < 0 & 0 < l) then z3mod k l + else - z3mod (-k) (-l))" + by (simp add: z3mod_def)} + + val have_int_div_mod = + exists (Term.exists_subterm is_div_mod o Thm.prop_of) + + fun add_div_mod _ (thms, extra_thms) = + if have_int_div_mod thms orelse have_int_div_mod extra_thms then + (thms, div_by_z3div :: mod_by_z3mod :: extra_thms) + else (thms, extra_thms) + + val setup_builtins = + Old_SMT_Builtin.add_builtin_fun' smtlib_z3C (@{const times (int)}, "*") #> + Old_SMT_Builtin.add_builtin_fun' smtlib_z3C (@{const z3div}, "div") #> + Old_SMT_Builtin.add_builtin_fun' smtlib_z3C (@{const z3mod}, "mod") +in + +val setup = Context.theory_map ( + setup_builtins #> + Old_SMT_Normalize.add_extra_norm (smtlib_z3C, add_div_mod) #> + Old_SMT_Translate.add_config (smtlib_z3C, translate_config)) + +end + + + +(* constructors *) + +datatype sym = Sym of string * sym list + + +(** additional constructors **) + +type mk_builtins = { + mk_builtin_typ: sym -> typ option, + mk_builtin_num: theory -> int -> typ -> cterm option, + mk_builtin_fun: theory -> sym -> cterm list -> cterm option } + +fun chained _ [] = NONE + | chained f (b :: bs) = (case f b of SOME y => SOME y | NONE => chained f bs) + +fun chained_mk_builtin_typ bs sym = + chained (fn {mk_builtin_typ=mk, ...} : mk_builtins => mk sym) bs + +fun chained_mk_builtin_num ctxt bs i T = + let val thy = Proof_Context.theory_of ctxt + in chained (fn {mk_builtin_num=mk, ...} : mk_builtins => mk thy i T) bs end + +fun chained_mk_builtin_fun ctxt bs s cts = + let val thy = Proof_Context.theory_of ctxt + in chained (fn {mk_builtin_fun=mk, ...} : mk_builtins => mk thy s cts) bs end + +fun fst_int_ord ((i1, _), (i2, _)) = int_ord (i1, i2) + +structure Mk_Builtins = Generic_Data +( + type T = (int * mk_builtins) list + val empty = [] + val extend = I + fun merge data = Ord_List.merge fst_int_ord data +) + +fun add_mk_builtins mk = + Mk_Builtins.map (Ord_List.insert fst_int_ord (serial (), mk)) + +fun get_mk_builtins ctxt = map snd (Mk_Builtins.get (Context.Proof ctxt)) + + +(** basic and additional constructors **) + +fun mk_builtin_typ _ (Sym ("Bool", _)) = SOME @{typ bool} + | mk_builtin_typ _ (Sym ("Int", _)) = SOME @{typ int} + | mk_builtin_typ _ (Sym ("bool", _)) = SOME @{typ bool} (*FIXME: legacy*) + | mk_builtin_typ _ (Sym ("int", _)) = SOME @{typ int} (*FIXME: legacy*) + | mk_builtin_typ ctxt sym = chained_mk_builtin_typ (get_mk_builtins ctxt) sym + +fun mk_builtin_num _ i @{typ int} = SOME (Numeral.mk_cnumber @{ctyp int} i) + | mk_builtin_num ctxt i T = + chained_mk_builtin_num ctxt (get_mk_builtins ctxt) i T + +val mk_true = Thm.cterm_of @{context} (@{const Not} $ @{const False}) +val mk_false = Thm.cterm_of @{context} @{const False} +val mk_not = Thm.apply (Thm.cterm_of @{context} @{const Not}) +val mk_implies = Thm.mk_binop (Thm.cterm_of @{context} @{const HOL.implies}) +val mk_iff = Thm.mk_binop (Thm.cterm_of @{context} @{const HOL.eq (bool)}) +val conj = Thm.cterm_of @{context} @{const HOL.conj} +val disj = Thm.cterm_of @{context} @{const HOL.disj} + +fun mk_nary _ cu [] = cu + | mk_nary ct _ cts = uncurry (fold_rev (Thm.mk_binop ct)) (split_last cts) + +val eq = Old_SMT_Utils.mk_const_pat @{theory} @{const_name HOL.eq} Old_SMT_Utils.destT1 +fun mk_eq ct cu = Thm.mk_binop (Old_SMT_Utils.instT' ct eq) ct cu + +val if_term = + Old_SMT_Utils.mk_const_pat @{theory} @{const_name If} + (Old_SMT_Utils.destT1 o Old_SMT_Utils.destT2) +fun mk_if cc ct cu = + Thm.mk_binop (Thm.apply 
(Old_SMT_Utils.instT' ct if_term) cc) ct cu + +val nil_term = + Old_SMT_Utils.mk_const_pat @{theory} @{const_name Nil} Old_SMT_Utils.destT1 +val cons_term = + Old_SMT_Utils.mk_const_pat @{theory} @{const_name Cons} Old_SMT_Utils.destT1 +fun mk_list cT cts = + fold_rev (Thm.mk_binop (Old_SMT_Utils.instT cT cons_term)) cts + (Old_SMT_Utils.instT cT nil_term) + +val distinct = Old_SMT_Utils.mk_const_pat @{theory} @{const_name distinct} + (Old_SMT_Utils.destT1 o Old_SMT_Utils.destT1) +fun mk_distinct [] = mk_true + | mk_distinct (cts as (ct :: _)) = + Thm.apply (Old_SMT_Utils.instT' ct distinct) + (mk_list (Thm.ctyp_of_cterm ct) cts) + +val access = + Old_SMT_Utils.mk_const_pat @{theory} @{const_name fun_app} Old_SMT_Utils.destT1 +fun mk_access array = Thm.apply (Old_SMT_Utils.instT' array access) array + +val update = Old_SMT_Utils.mk_const_pat @{theory} @{const_name fun_upd} + (Thm.dest_ctyp o Old_SMT_Utils.destT1) +fun mk_update array index value = + let val cTs = Thm.dest_ctyp (Thm.ctyp_of_cterm array) + in + Thm.apply (Thm.mk_binop (Old_SMT_Utils.instTs cTs update) array index) value + end + +val mk_uminus = Thm.apply (Thm.cterm_of @{context} @{const uminus (int)}) +val add = Thm.cterm_of @{context} @{const plus (int)} +val int0 = Numeral.mk_cnumber @{ctyp int} 0 +val mk_sub = Thm.mk_binop (Thm.cterm_of @{context} @{const minus (int)}) +val mk_mul = Thm.mk_binop (Thm.cterm_of @{context} @{const times (int)}) +val mk_div = Thm.mk_binop (Thm.cterm_of @{context} @{const z3div}) +val mk_mod = Thm.mk_binop (Thm.cterm_of @{context} @{const z3mod}) +val mk_lt = Thm.mk_binop (Thm.cterm_of @{context} @{const less (int)}) +val mk_le = Thm.mk_binop (Thm.cterm_of @{context} @{const less_eq (int)}) + +fun mk_builtin_fun ctxt sym cts = + (case (sym, cts) of + (Sym ("true", _), []) => SOME mk_true + | (Sym ("false", _), []) => SOME mk_false + | (Sym ("not", _), [ct]) => SOME (mk_not ct) + | (Sym ("and", _), _) => SOME (mk_nary conj mk_true cts) + | (Sym ("or", _), _) => SOME (mk_nary disj mk_false cts) + | (Sym ("implies", _), [ct, cu]) => SOME (mk_implies ct cu) + | (Sym ("iff", _), [ct, cu]) => SOME (mk_iff ct cu) + | (Sym ("~", _), [ct, cu]) => SOME (mk_iff ct cu) + | (Sym ("xor", _), [ct, cu]) => SOME (mk_not (mk_iff ct cu)) + | (Sym ("if", _), [ct1, ct2, ct3]) => SOME (mk_if ct1 ct2 ct3) + | (Sym ("ite", _), [ct1, ct2, ct3]) => SOME (mk_if ct1 ct2 ct3) (* FIXME: remove *) + | (Sym ("=", _), [ct, cu]) => SOME (mk_eq ct cu) + | (Sym ("distinct", _), _) => SOME (mk_distinct cts) + | (Sym ("select", _), [ca, ck]) => SOME (Thm.apply (mk_access ca) ck) + | (Sym ("store", _), [ca, ck, cv]) => SOME (mk_update ca ck cv) + | _ => + (case (sym, try (Thm.typ_of_cterm o hd) cts, cts) of + (Sym ("+", _), SOME @{typ int}, _) => SOME (mk_nary add int0 cts) + | (Sym ("-", _), SOME @{typ int}, [ct]) => SOME (mk_uminus ct) + | (Sym ("-", _), SOME @{typ int}, [ct, cu]) => SOME (mk_sub ct cu) + | (Sym ("*", _), SOME @{typ int}, [ct, cu]) => SOME (mk_mul ct cu) + | (Sym ("div", _), SOME @{typ int}, [ct, cu]) => SOME (mk_div ct cu) + | (Sym ("mod", _), SOME @{typ int}, [ct, cu]) => SOME (mk_mod ct cu) + | (Sym ("<", _), SOME @{typ int}, [ct, cu]) => SOME (mk_lt ct cu) + | (Sym ("<=", _), SOME @{typ int}, [ct, cu]) => SOME (mk_le ct cu) + | (Sym (">", _), SOME @{typ int}, [ct, cu]) => SOME (mk_lt cu ct) + | (Sym (">=", _), SOME @{typ int}, [ct, cu]) => SOME (mk_le cu ct) + | _ => chained_mk_builtin_fun ctxt (get_mk_builtins ctxt) sym cts)) + + + +(* abstraction *) + +fun is_builtin_theory_term ctxt t = + if 
Old_SMT_Builtin.is_builtin_num ctxt t then true + else + (case Term.strip_comb t of + (Const c, ts) => Old_SMT_Builtin.is_builtin_fun ctxt c ts + | _ => false) + +end diff --git a/src/test-gen/Old_SMT/old_z3_model.ML b/src/test-gen/Old_SMT/old_z3_model.ML new file mode 100644 index 0000000..b61f104 --- /dev/null +++ b/src/test-gen/Old_SMT/old_z3_model.ML @@ -0,0 +1,337 @@ +(* Title: HOL/Library/Old_SMT/old_z3_model.ML + Author: Sascha Boehme and Philipp Meyer, TU Muenchen + +Parser for counterexamples generated by Z3. +*) + +signature OLD_Z3_MODEL = +sig + val parse_counterex: Proof.context -> Old_SMT_Translate.recon -> string list -> + term list * term list +end + +structure Old_Z3_Model: OLD_Z3_MODEL = +struct + + +(* counterexample expressions *) + +datatype expr = True | False | Number of int * int option | Value of int | + Array of array | App of string * expr list +and array = Fresh of expr | Store of (array * expr) * expr + + +(* parsing *) + +val space = Scan.many Symbol.is_ascii_blank +fun spaced p = p --| space +fun in_parens p = spaced (Scan.$$ "(") |-- p --| spaced (Scan.$$ ")") +fun in_braces p = spaced (Scan.$$ "{") |-- p --| spaced (Scan.$$ "}") + +val digit = (fn + "0" => SOME 0 | "1" => SOME 1 | "2" => SOME 2 | "3" => SOME 3 | + "4" => SOME 4 | "5" => SOME 5 | "6" => SOME 6 | "7" => SOME 7 | + "8" => SOME 8 | "9" => SOME 9 | _ => NONE) + +val nat_num = spaced (Scan.repeat1 (Scan.some digit) >> + (fn ds => fold (fn d => fn i => i * 10 + d) ds 0)) +val int_num = spaced (Scan.optional ($$ "-" >> K (fn i => ~i)) I :|-- + (fn sign => nat_num >> sign)) + +val is_char = Symbol.is_ascii_letter orf Symbol.is_ascii_digit orf + member (op =) (raw_explode "_+*-/%~=<>$&|?!.@^#") +val name = spaced (Scan.many1 is_char >> implode) + +fun $$$ s = spaced (Scan.this_string s) + +fun array_expr st = st |> in_parens ( + $$$ "const" |-- expr >> Fresh || + $$$ "store" |-- array_expr -- expr -- expr >> Store) + +and expr st = st |> ( + $$$ "true" >> K True || + $$$ "false" >> K False || + int_num -- Scan.option ($$$ "/" |-- int_num) >> Number || + $$$ "val!" 
|-- nat_num >> Value || + name >> (App o rpair []) || + array_expr >> Array || + in_parens (name -- Scan.repeat1 expr) >> App) + +fun args st = ($$$ "->" >> K [] || expr ::: args) st +val args_case = args -- expr +val else_case = $$$ "else" -- $$$ "->" |-- expr >> pair ([] : expr list) + +val func = + let fun cases st = (else_case >> single || args_case ::: cases) st + in in_braces cases end + +val cex = space |-- + Scan.repeat (name --| $$$ "->" -- (func || expr >> (single o pair []))) + +fun resolve terms ((n, k), cases) = + (case Symtab.lookup terms n of + NONE => NONE + | SOME t => SOME ((t, k), cases)) + +fun annotate _ (_, []) = NONE + | annotate terms (n, [([], c)]) = resolve terms ((n, 0), (c, [])) + | annotate _ (_, [_]) = NONE + | annotate terms (n, cases as (args, _) :: _) = + let val (cases', (_, else_case)) = split_last cases + in resolve terms ((n, length args), (else_case, cases')) end + +fun read_cex terms ls = + maps (cons "\n" o raw_explode) ls + |> try (fst o Scan.finite Symbol.stopper cex) + |> the_default [] + |> map_filter (annotate terms) + + +(* translation into terms *) + +fun max_value vs = + let + fun max_val_expr (Value i) = Integer.max i + | max_val_expr (App (_, es)) = fold max_val_expr es + | max_val_expr (Array a) = max_val_array a + | max_val_expr _ = I + + and max_val_array (Fresh e) = max_val_expr e + | max_val_array (Store ((a, e1), e2)) = + max_val_array a #> max_val_expr e1 #> max_val_expr e2 + + fun max_val (_, (ec, cs)) = + max_val_expr ec #> fold (fn (es, e) => fold max_val_expr (e :: es)) cs + + in fold max_val vs ~1 end + +fun with_context terms f vs = fst (fold_map f vs (terms, max_value vs + 1)) + +fun get_term n T es (cx as (terms, next_val)) = + (case Symtab.lookup terms n of + SOME t => ((t, es), cx) + | NONE => + let val t = Var (("skolem", next_val), T) + in ((t, []), (Symtab.update (n, t) terms, next_val + 1)) end) + +fun trans_expr _ True = pair @{const True} + | trans_expr _ False = pair @{const False} + | trans_expr T (Number (i, NONE)) = pair (HOLogic.mk_number T i) + | trans_expr T (Number (i, SOME j)) = + pair (Const (@{const_name divide}, [T, T] ---> T) $ + HOLogic.mk_number T i $ HOLogic.mk_number T j) + | trans_expr T (Value i) = pair (Var (("value", i), T)) + | trans_expr T (Array a) = trans_array T a + | trans_expr T (App (n, es)) = get_term n T es #-> (fn (t, es') => + let val Ts = fst (Old_SMT_Utils.dest_funT (length es') (Term.fastype_of t)) + in + fold_map (uncurry trans_expr) (Ts ~~ es') #>> Term.list_comb o pair t + end) + +and trans_array T a = + let val (dT, rT) = Term.dest_funT T + in + (case a of + Fresh e => trans_expr rT e #>> (fn t => Abs ("x", dT, t)) + | Store ((a', e1), e2) => + trans_array T a' ##>> trans_expr dT e1 ##>> trans_expr rT e2 #>> + (fn ((m, k), v) => + Const (@{const_name fun_upd}, [T, dT, rT] ---> T) $ m $ k $ v)) + end + +fun trans_pattern T ([], e) = trans_expr T e #>> pair [] + | trans_pattern T (arg :: args, e) = + trans_expr (Term.domain_type T) arg ##>> + trans_pattern (Term.range_type T) (args, e) #>> + (fn (arg', (args', e')) => (arg' :: args', e')) + +fun mk_fun_upd T U = Const (@{const_name fun_upd}, [T --> U, T, U, T] ---> U) + +fun mk_update ([], u) _ = u + | mk_update ([t], u) f = + uncurry mk_fun_upd (Term.dest_funT (Term.fastype_of f)) $ f $ t $ u + | mk_update (t :: ts, u) f = + let + val (dT, rT) = Term.dest_funT (Term.fastype_of f) + val (dT', rT') = Term.dest_funT rT + in + mk_fun_upd dT rT $ f $ t $ + mk_update (ts, u) (absdummy dT' (Const ("_", rT'))) + end + +fun mk_lambda Ts 
(t, pats) = + fold_rev absdummy Ts t |> fold mk_update pats + +fun translate ((t, k), (e, cs)) = + let + val T = Term.fastype_of t + val (Us, U) = Old_SMT_Utils.dest_funT k (Term.fastype_of t) + + fun mk_full_def u' pats = + pats + |> filter_out (fn (_, u) => u aconv u') + |> HOLogic.mk_eq o pair t o mk_lambda Us o pair u' + + fun mk_eq (us, u) = HOLogic.mk_eq (Term.list_comb (t, us), u) + fun mk_eqs u' [] = [HOLogic.mk_eq (t, u')] + | mk_eqs _ pats = map mk_eq pats + in + trans_expr U e ##>> + (if k = 0 then pair [] else fold_map (trans_pattern T) cs) #>> + (fn (u', pats) => (mk_eqs u' pats, mk_full_def u' pats)) + end + + +(* normalization *) + +fun partition_eqs f = + let + fun part t (xs, ts) = + (case try HOLogic.dest_eq t of + SOME (l, r) => (case f l r of SOME x => (x::xs, ts) | _ => (xs, t::ts)) + | NONE => (xs, t :: ts)) + in (fn ts => fold part ts ([], [])) end + +fun first_eq pred = + let + fun part _ [] = NONE + | part us (t :: ts) = + (case try (pred o HOLogic.dest_eq) t of + SOME (SOME lr) => SOME (lr, fold cons us ts) + | _ => part (t :: us) ts) + in (fn ts => part [] ts) end + +fun replace_vars tab = + let + fun repl v = the_default v (AList.lookup (op aconv) tab v) + fun replace (v as Var _) = repl v + | replace (v as Free _) = repl v + | replace t = t + in map (Term.map_aterms replace) end + +fun remove_int_nat_coercions (eqs, defs) = + let + fun mk_nat_num t i = + (case try HOLogic.dest_number i of + SOME (_, n) => SOME (t, HOLogic.mk_number @{typ nat} n) + | NONE => NONE) + fun nat_of (@{const of_nat (int)} $ (t as Var _)) i = mk_nat_num t i + | nat_of (@{const nat} $ i) (t as Var _) = mk_nat_num t i + | nat_of _ _ = NONE + val (nats, eqs') = partition_eqs nat_of eqs + + fun is_coercion t = + (case try HOLogic.dest_eq t of + SOME (@{const of_nat (int)}, _) => true + | SOME (@{const nat}, _) => true + | _ => false) + in apply2 (replace_vars nats) (eqs', filter_out is_coercion defs) end + +fun unfold_funapp (eqs, defs) = + let + fun unfold_app (Const (@{const_name fun_app}, _) $ f $ t) = f $ t + | unfold_app t = t + fun unfold_eq ((eq as Const (@{const_name HOL.eq}, _)) $ t $ u) = + eq $ unfold_app t $ u + | unfold_eq t = t + + fun is_fun_app t = + (case try HOLogic.dest_eq t of + SOME (Const (@{const_name fun_app}, _), _) => true + | _ => false) + + in (map unfold_eq eqs, filter_out is_fun_app defs) end + +val unfold_eqs = + let + val is_ground = not o Term.exists_subterm Term.is_Var + fun is_non_rec (v, t) = not (Term.exists_subterm (equal v) t) + + fun rewr_var (l as Var _, r) = if is_ground r then SOME (l, r) else NONE + | rewr_var (r, l as Var _) = if is_ground r then SOME (l, r) else NONE + | rewr_var _ = NONE + + fun rewr_free' e = if is_non_rec e then SOME e else NONE + fun rewr_free (e as (Free _, _)) = rewr_free' e + | rewr_free (e as (_, Free _)) = rewr_free' (swap e) + | rewr_free _ = NONE + + fun is_trivial (Const (@{const_name HOL.eq}, _) $ t $ u) = t aconv u + | is_trivial _ = false + + fun replace r = replace_vars [r] #> filter_out is_trivial + + fun unfold_vars (es, ds) = + (case first_eq rewr_var es of + SOME (lr, es') => unfold_vars (apply2 (replace lr) (es', ds)) + | NONE => (es, ds)) + + fun unfold_frees ues (es, ds) = + (case first_eq rewr_free es of + SOME (lr, es') => + apply2 (replace lr) (es', ds) + |> unfold_frees (HOLogic.mk_eq lr :: replace lr ues) + | NONE => (ues @ es, ds)) + + in unfold_vars #> unfold_frees [] end + +fun swap_free ((eq as Const (@{const_name HOL.eq}, _)) $ t $ (u as Free _)) = + eq $ u $ t + | swap_free t = t + +fun 
frees_for_vars ctxt (eqs, defs) = + let + fun fresh_free i T (cx as (frees, ctxt)) = + (case Inttab.lookup frees i of + SOME t => (t, cx) + | NONE => + let + val (n, ctxt') = yield_singleton Variable.variant_fixes "" ctxt + val t = Free (n, T) + in (t, (Inttab.update (i, t) frees, ctxt')) end) + + fun repl_var (Var ((_, i), T)) = fresh_free i T + | repl_var (t $ u) = repl_var t ##>> repl_var u #>> op $ + | repl_var (Abs (n, T, t)) = repl_var t #>> (fn t' => Abs (n, T, t')) + | repl_var t = pair t + in + (Inttab.empty, ctxt) + |> fold_map repl_var eqs + ||>> fold_map repl_var defs + |> fst + end + + +(* overall procedure *) + +val is_free_constraint = Term.exists_subterm (fn Free _ => true | _ => false) + +fun is_free_def (Const (@{const_name HOL.eq}, _) $ Free _ $ _) = true + | is_free_def _ = false + +fun defined tp = + try (apply2 (fst o HOLogic.dest_eq)) tp + |> the_default false o Option.map (op aconv) + +fun add_free_defs free_cs defs = + let val (free_defs, defs') = List.partition is_free_def defs + in (free_cs @ filter_out (member defined free_cs) free_defs, defs') end + +fun is_const_def (Const (@{const_name HOL.eq}, _) $ Const _ $ _) = true + | is_const_def _ = false + +fun parse_counterex ctxt ({terms, ...} : Old_SMT_Translate.recon) ls = + read_cex terms ls + |> with_context terms translate + |> apfst flat o split_list + |> remove_int_nat_coercions + |> unfold_funapp + |> unfold_eqs + |>> map swap_free + |>> filter is_free_constraint + |-> add_free_defs + |> frees_for_vars ctxt + ||> filter is_const_def + +end + diff --git a/src/test-gen/Old_SMT/old_z3_proof_literals.ML b/src/test-gen/Old_SMT/old_z3_proof_literals.ML new file mode 100644 index 0000000..89ce7d1 --- /dev/null +++ b/src/test-gen/Old_SMT/old_z3_proof_literals.ML @@ -0,0 +1,363 @@ +(* Title: HOL/Library/Old_SMT/old_z3_proof_literals.ML + Author: Sascha Boehme, TU Muenchen + +Proof tools related to conjunctions and disjunctions. 
+*) + +signature OLD_Z3_PROOF_LITERALS = +sig + (*literal table*) + type littab = thm Termtab.table + val make_littab: thm list -> littab + val insert_lit: thm -> littab -> littab + val delete_lit: thm -> littab -> littab + val lookup_lit: littab -> term -> thm option + val get_first_lit: (term -> bool) -> littab -> thm option + + (*rules*) + val true_thm: thm + val rewrite_true: thm + + (*properties*) + val is_conj: term -> bool + val is_disj: term -> bool + val exists_lit: bool -> (term -> bool) -> term -> bool + val negate: cterm -> cterm + + (*proof tools*) + val explode: bool -> bool -> bool -> term list -> thm -> thm list + val join: bool -> littab -> term -> thm + val prove_conj_disj_eq: cterm -> thm +end + +structure Old_Z3_Proof_Literals: OLD_Z3_PROOF_LITERALS = +struct + + + +(* literal table *) + +type littab = thm Termtab.table + +fun make_littab thms = + fold (Termtab.update o `Old_SMT_Utils.prop_of) thms Termtab.empty + +fun insert_lit thm = Termtab.update (`Old_SMT_Utils.prop_of thm) +fun delete_lit thm = Termtab.delete (Old_SMT_Utils.prop_of thm) +fun lookup_lit lits = Termtab.lookup lits +fun get_first_lit f = + Termtab.get_first (fn (t, thm) => if f t then SOME thm else NONE) + + + +(* rules *) + +val true_thm = @{lemma "~False" by simp} +val rewrite_true = @{lemma "True == ~ False" by simp} + + + +(* properties and term operations *) + +val is_neg = (fn @{const Not} $ _ => true | _ => false) +fun is_neg' f = (fn @{const Not} $ t => f t | _ => false) +val is_dneg = is_neg' is_neg +val is_conj = (fn @{const HOL.conj} $ _ $ _ => true | _ => false) +val is_disj = (fn @{const HOL.disj} $ _ $ _ => true | _ => false) + +fun dest_disj_term' f = (fn + @{const Not} $ (@{const HOL.disj} $ t $ u) => SOME (f t, f u) + | _ => NONE) + +val dest_conj_term = (fn @{const HOL.conj} $ t $ u => SOME (t, u) | _ => NONE) +val dest_disj_term = + dest_disj_term' (fn @{const Not} $ t => t | t => @{const Not} $ t) + +fun exists_lit is_conj P = + let + val dest = if is_conj then dest_conj_term else dest_disj_term + fun exists t = P t orelse + (case dest t of + SOME (t1, t2) => exists t1 orelse exists t2 + | NONE => false) + in exists end + +val negate = Thm.apply (Thm.cterm_of @{context} @{const Not}) + + + +(* proof tools *) + +(** explosion of conjunctions and disjunctions **) + +local + val precomp = Old_Z3_Proof_Tools.precompose2 + + fun destc ct = Thm.dest_binop (Thm.dest_arg ct) + val dest_conj1 = precomp destc @{thm conjunct1} + val dest_conj2 = precomp destc @{thm conjunct2} + fun dest_conj_rules t = + dest_conj_term t |> Option.map (K (dest_conj1, dest_conj2)) + + fun destd f ct = f (Thm.dest_binop (Thm.dest_arg (Thm.dest_arg ct))) + val dn1 = apfst Thm.dest_arg and dn2 = apsnd Thm.dest_arg + val dest_disj1 = precomp (destd I) @{lemma "~(P | Q) ==> ~P" by fast} + val dest_disj2 = precomp (destd dn1) @{lemma "~(~P | Q) ==> P" by fast} + val dest_disj3 = precomp (destd I) @{lemma "~(P | Q) ==> ~Q" by fast} + val dest_disj4 = precomp (destd dn2) @{lemma "~(P | ~Q) ==> Q" by fast} + + fun dest_disj_rules t = + (case dest_disj_term' is_neg t of + SOME (true, true) => SOME (dest_disj2, dest_disj4) + | SOME (true, false) => SOME (dest_disj2, dest_disj3) + | SOME (false, true) => SOME (dest_disj1, dest_disj4) + | SOME (false, false) => SOME (dest_disj1, dest_disj3) + | NONE => NONE) + + fun destn ct = [Thm.dest_arg (Thm.dest_arg (Thm.dest_arg ct))] + val dneg_rule = Old_Z3_Proof_Tools.precompose destn @{thm notnotD} +in + +(* + explode a term into literals and collect all rules to be able to 
deduce + particular literals afterwards +*) +fun explode_term is_conj = + let + val dest = if is_conj then dest_conj_term else dest_disj_term + val dest_rules = if is_conj then dest_conj_rules else dest_disj_rules + + fun add (t, rs) = Termtab.map_default (t, rs) + (fn rs' => if length rs' < length rs then rs' else rs) + + fun explode1 rules t = + (case dest t of + SOME (t1, t2) => + let val (rule1, rule2) = the (dest_rules t) + in + explode1 (rule1 :: rules) t1 #> + explode1 (rule2 :: rules) t2 #> + add (t, rev rules) + end + | NONE => add (t, rev rules)) + + fun explode0 (@{const Not} $ (@{const Not} $ t)) = + Termtab.make [(t, [dneg_rule])] + | explode0 t = explode1 [] t Termtab.empty + + in explode0 end + +(* + extract a literal by applying previously collected rules +*) +fun extract_lit thm rules = fold Old_Z3_Proof_Tools.compose rules thm + + +(* + explode a theorem into its literals +*) +fun explode is_conj full keep_intermediate stop_lits = + let + val dest_rules = if is_conj then dest_conj_rules else dest_disj_rules + val tab = fold (Termtab.update o rpair ()) stop_lits Termtab.empty + + fun explode1 thm = + if Termtab.defined tab (Old_SMT_Utils.prop_of thm) then cons thm + else + (case dest_rules (Old_SMT_Utils.prop_of thm) of + SOME (rule1, rule2) => + explode2 rule1 thm #> + explode2 rule2 thm #> + keep_intermediate ? cons thm + | NONE => cons thm) + + and explode2 dest_rule thm = + if full orelse + exists_lit is_conj (Termtab.defined tab) (Old_SMT_Utils.prop_of thm) + then explode1 (Old_Z3_Proof_Tools.compose dest_rule thm) + else cons (Old_Z3_Proof_Tools.compose dest_rule thm) + + fun explode0 thm = + if not is_conj andalso is_dneg (Old_SMT_Utils.prop_of thm) + then [Old_Z3_Proof_Tools.compose dneg_rule thm] + else explode1 thm [] + + in explode0 end + +end + + + +(** joining of literals to conjunctions or disjunctions **) + +local + fun on_cprem i f thm = f (Thm.cprem_of thm i) + fun on_cprop f thm = f (Thm.cprop_of thm) + fun precomp2 f g thm = (on_cprem 1 f thm, on_cprem 2 g thm, f, g, thm) + fun comp2 (cv1, cv2, f, g, rule) thm1 thm2 = + Thm.instantiate ([], + [(dest_Var (Thm.term_of cv1), on_cprop f thm1), + (dest_Var (Thm.term_of cv2), on_cprop g thm2)]) rule + |> Old_Z3_Proof_Tools.discharge thm1 |> Old_Z3_Proof_Tools.discharge thm2 + + fun d1 ct = Thm.dest_arg ct and d2 ct = Thm.dest_arg (Thm.dest_arg ct) + + val conj_rule = precomp2 d1 d1 @{thm conjI} + fun comp_conj ((_, thm1), (_, thm2)) = comp2 conj_rule thm1 thm2 + + val disj1 = precomp2 d2 d2 @{lemma "~P ==> ~Q ==> ~(P | Q)" by fast} + val disj2 = precomp2 d2 d1 @{lemma "~P ==> Q ==> ~(P | ~Q)" by fast} + val disj3 = precomp2 d1 d2 @{lemma "P ==> ~Q ==> ~(~P | Q)" by fast} + val disj4 = precomp2 d1 d1 @{lemma "P ==> Q ==> ~(~P | ~Q)" by fast} + + fun comp_disj ((false, thm1), (false, thm2)) = comp2 disj1 thm1 thm2 + | comp_disj ((false, thm1), (true, thm2)) = comp2 disj2 thm1 thm2 + | comp_disj ((true, thm1), (false, thm2)) = comp2 disj3 thm1 thm2 + | comp_disj ((true, thm1), (true, thm2)) = comp2 disj4 thm1 thm2 + + fun dest_conj (@{const HOL.conj} $ t $ u) = ((false, t), (false, u)) + | dest_conj t = raise TERM ("dest_conj", [t]) + + val neg = (fn @{const Not} $ t => (true, t) | t => (false, @{const Not} $ t)) + fun dest_disj (@{const Not} $ (@{const HOL.disj} $ t $ u)) = (neg t, neg u) + | dest_disj t = raise TERM ("dest_disj", [t]) + + val precomp = Old_Z3_Proof_Tools.precompose + val dnegE = precomp (single o d2 o d1) @{thm notnotD} + val dnegI = precomp (single o d1) @{lemma "P ==> ~~P" by fast} + fun 
as_dneg f t = f (@{const Not} $ (@{const Not} $ t)) + + val precomp2 = Old_Z3_Proof_Tools.precompose2 + fun dni f = apsnd f o Thm.dest_binop o f o d1 + val negIffE = precomp2 (dni d1) @{lemma "~(P = (~Q)) ==> Q = P" by fast} + val negIffI = precomp2 (dni I) @{lemma "P = Q ==> ~(Q = (~P))" by fast} + val iff_const = @{const HOL.eq (bool)} + fun as_negIff f (@{const HOL.eq (bool)} $ t $ u) = + f (@{const Not} $ (iff_const $ u $ (@{const Not} $ t))) + | as_negIff _ _ = NONE +in + +fun join is_conj littab t = + let + val comp = if is_conj then comp_conj else comp_disj + val dest = if is_conj then dest_conj else dest_disj + + val lookup = lookup_lit littab + + fun lookup_rule t = + (case t of + @{const Not} $ (@{const Not} $ t) => + (Old_Z3_Proof_Tools.compose dnegI, lookup t) + | @{const Not} $ (@{const HOL.eq (bool)} $ t $ (@{const Not} $ u)) => + (Old_Z3_Proof_Tools.compose negIffI, lookup (iff_const $ u $ t)) + | @{const Not} $ ((eq as Const (@{const_name HOL.eq}, _)) $ t $ u) => + let fun rewr lit = lit COMP @{thm not_sym} + in (rewr, lookup (@{const Not} $ (eq $ u $ t))) end + | _ => + (case as_dneg lookup t of + NONE => (Old_Z3_Proof_Tools.compose negIffE, as_negIff lookup t) + | x => (Old_Z3_Proof_Tools.compose dnegE, x))) + + fun join1 (s, t) = + (case lookup t of + SOME lit => (s, lit) + | NONE => + (case lookup_rule t of + (rewrite, SOME lit) => (s, rewrite lit) + | (_, NONE) => (s, comp (apply2 join1 (dest t))))) + + in snd (join1 (if is_conj then (false, t) else (true, t))) end + +end + + + +(** proving equality of conjunctions or disjunctions **) + +fun iff_intro thm1 thm2 = thm2 COMP (thm1 COMP @{thm iffI}) + +local + val cp1 = @{lemma "(~P) = (~Q) ==> P = Q" by simp} + val cp2 = @{lemma "(~P) = Q ==> P = (~Q)" by fastforce} + val cp3 = @{lemma "P = (~Q) ==> (~P) = Q" by simp} +in +fun contrapos1 prove (ct, cu) = prove (negate ct, negate cu) COMP cp1 +fun contrapos2 prove (ct, cu) = prove (negate ct, Thm.dest_arg cu) COMP cp2 +fun contrapos3 prove (ct, cu) = prove (Thm.dest_arg ct, negate cu) COMP cp3 +end + + +local + val contra_rule = @{lemma "P ==> ~P ==> False" by (rule notE)} + fun contra_left conj thm = + let + val rules = explode_term conj (Old_SMT_Utils.prop_of thm) + fun contra_lits (t, rs) = + (case t of + @{const Not} $ u => Termtab.lookup rules u |> Option.map (pair rs) + | _ => NONE) + in + (case Termtab.lookup rules @{const False} of + SOME rs => extract_lit thm rs + | NONE => + the (Termtab.get_first contra_lits rules) + |> apply2 (extract_lit thm) + |> (fn (nlit, plit) => nlit COMP (plit COMP contra_rule))) + end + + val falseE_v = dest_Var (Thm.term_of (Thm.dest_arg (Thm.dest_arg (Thm.cprop_of @{thm FalseE})))) + fun contra_right ct = Thm.instantiate ([], [(falseE_v, ct)]) @{thm FalseE} +in +fun contradict conj ct = + iff_intro (Old_Z3_Proof_Tools.under_assumption (contra_left conj) ct) + (contra_right ct) +end + + +local + fun prove_eq l r (cl, cr) = + let + fun explode' is_conj = explode is_conj true (l <> r) [] + fun make_tab is_conj thm = make_littab (true_thm :: explode' is_conj thm) + fun prove is_conj ct tab = join is_conj tab (Thm.term_of ct) + + val thm1 = Old_Z3_Proof_Tools.under_assumption (prove r cr o make_tab l) cl + val thm2 = Old_Z3_Proof_Tools.under_assumption (prove l cl o make_tab r) cr + in iff_intro thm1 thm2 end + + datatype conj_disj = CONJ | DISJ | NCON | NDIS + fun kind_of t = + if is_conj t then SOME CONJ + else if is_disj t then SOME DISJ + else if is_neg' is_conj t then SOME NCON + else if is_neg' is_disj t then SOME NDIS + else NONE 
+in + +fun prove_conj_disj_eq ct = + let val cp as (cl, cr) = Thm.dest_binop (Thm.dest_arg ct) + in + (case (kind_of (Thm.term_of cl), Thm.term_of cr) of + (SOME CONJ, @{const False}) => contradict true cl + | (SOME DISJ, @{const Not} $ @{const False}) => + contrapos2 (contradict false o fst) cp + | (kl, _) => + (case (kl, kind_of (Thm.term_of cr)) of + (SOME CONJ, SOME CONJ) => prove_eq true true cp + | (SOME CONJ, SOME NDIS) => prove_eq true false cp + | (SOME CONJ, _) => prove_eq true true cp + | (SOME DISJ, SOME DISJ) => contrapos1 (prove_eq false false) cp + | (SOME DISJ, SOME NCON) => contrapos2 (prove_eq false true) cp + | (SOME DISJ, _) => contrapos1 (prove_eq false false) cp + | (SOME NCON, SOME NCON) => contrapos1 (prove_eq true true) cp + | (SOME NCON, SOME DISJ) => contrapos3 (prove_eq true false) cp + | (SOME NCON, NONE) => contrapos3 (prove_eq true false) cp + | (SOME NDIS, SOME NDIS) => prove_eq false false cp + | (SOME NDIS, SOME CONJ) => prove_eq false true cp + | (SOME NDIS, NONE) => prove_eq false true cp + | _ => raise CTERM ("prove_conj_disj_eq", [ct]))) + end + +end + +end diff --git a/src/test-gen/Old_SMT/old_z3_proof_methods.ML b/src/test-gen/Old_SMT/old_z3_proof_methods.ML new file mode 100644 index 0000000..c27174d --- /dev/null +++ b/src/test-gen/Old_SMT/old_z3_proof_methods.ML @@ -0,0 +1,149 @@ +(* Title: HOL/Library/Old_SMT/old_z3_proof_methods.ML + Author: Sascha Boehme, TU Muenchen + +Proof methods for Z3 proof reconstruction. +*) + +signature OLD_Z3_PROOF_METHODS = +sig + val prove_injectivity: Proof.context -> cterm -> thm + val prove_ite: Proof.context -> cterm -> thm +end + +structure Old_Z3_Proof_Methods: OLD_Z3_PROOF_METHODS = +struct + + +fun apply tac st = + (case Seq.pull (tac 1 st) of + NONE => raise THM ("tactic failed", 1, [st]) + | SOME (st', _) => st') + + + +(* if-then-else *) + +val pull_ite = mk_meta_eq + @{lemma "f (if P then x else y) = (if P then f x else f y)" by simp} + +fun pull_ites_conv ct = + (Conv.rewr_conv pull_ite then_conv + Conv.binop_conv (Conv.try_conv pull_ites_conv)) ct + +fun prove_ite ctxt = + Old_Z3_Proof_Tools.by_tac ctxt ( + CONVERSION (Conv.arg_conv (Conv.arg1_conv pull_ites_conv)) + THEN' resolve_tac ctxt @{thms refl}) + + + +(* injectivity *) + +local + +val B = @{typ bool} +fun mk_univ T = Const (@{const_name top}, HOLogic.mk_setT T) +fun mk_inj_on T U = + Const (@{const_name inj_on}, (T --> U) --> HOLogic.mk_setT T --> B) +fun mk_inv_into T U = + Const (@{const_name inv_into}, [HOLogic.mk_setT T, T --> U, U] ---> T) + +fun mk_inv_of ctxt ct = + let + val (dT, rT) = Term.dest_funT (Thm.typ_of_cterm ct) + val inv = Thm.cterm_of ctxt (mk_inv_into dT rT) + val univ = Thm.cterm_of ctxt (mk_univ dT) + in Thm.mk_binop inv univ ct end + +fun mk_inj_prop ctxt ct = + let + val (dT, rT) = Term.dest_funT (Thm.typ_of_cterm ct) + val inj = Thm.cterm_of ctxt (mk_inj_on dT rT) + val univ = Thm.cterm_of ctxt (mk_univ dT) + in Old_SMT_Utils.mk_cprop (Thm.mk_binop inj ct univ) end + + +val disjE = @{lemma "~P | Q ==> P ==> Q" by fast} + +fun prove_inj_prop ctxt def lhs = + let + val (ct, ctxt') = Old_SMT_Utils.dest_all_cabs (Thm.rhs_of def) ctxt + val rule = disjE OF [Object_Logic.rulify ctxt' (Thm.assume lhs)] + in + Goal.init (mk_inj_prop ctxt' (Thm.dest_arg ct)) + |> apply (resolve_tac ctxt' @{thms injI}) + |> apply (Tactic.solve_tac ctxt' [rule, rule RS @{thm sym}]) + |> Goal.norm_result ctxt' o Goal.finish ctxt' + |> singleton (Variable.export ctxt' ctxt) + end + +fun prove_rhs ctxt def lhs = + Old_Z3_Proof_Tools.by_tac ctxt ( + 
CONVERSION (Conv.top_sweep_conv (K (Conv.rewr_conv def)) ctxt) + THEN' REPEAT_ALL_NEW (match_tac ctxt @{thms allI}) + THEN' resolve_tac ctxt [@{thm inv_f_f} OF [prove_inj_prop ctxt def lhs]]) + + +fun expand thm ct = + let + val cpat = Thm.dest_arg (Thm.rhs_of thm) + val (cl, cr) = Thm.dest_binop (Thm.dest_arg (Thm.dest_arg1 ct)) + val thm1 = Thm.instantiate (Thm.match (cpat, cl)) thm + val thm2 = Thm.instantiate (Thm.match (cpat, cr)) thm + in Conv.arg_conv (Conv.binop_conv (Conv.rewrs_conv [thm1, thm2])) ct end + +fun prove_lhs ctxt rhs = + let + val eq = Thm.symmetric (mk_meta_eq (Object_Logic.rulify ctxt (Thm.assume rhs))) + val conv = Old_SMT_Utils.binders_conv (K (expand eq)) ctxt + in + Old_Z3_Proof_Tools.by_tac ctxt ( + CONVERSION (Old_SMT_Utils.prop_conv conv) + THEN' Simplifier.simp_tac (put_simpset HOL_ss ctxt)) + end + + +fun mk_inv_def ctxt rhs = + let + val (ct, ctxt') = + Old_SMT_Utils.dest_all_cbinders (Old_SMT_Utils.dest_cprop rhs) ctxt + val (cl, cv) = Thm.dest_binop ct + val (cg, (cargs, cf)) = Drule.strip_comb cl ||> split_last + val cu = fold_rev Thm.lambda cargs (mk_inv_of ctxt' (Thm.lambda cv cf)) + in Thm.assume (Old_SMT_Utils.mk_cequals cg cu) end + +fun prove_inj_eq ctxt ct = + let + val (lhs, rhs) = + apply2 Old_SMT_Utils.mk_cprop (Thm.dest_binop (Old_SMT_Utils.dest_cprop ct)) + val lhs_thm = Thm.implies_intr rhs (prove_lhs ctxt rhs lhs) + val rhs_thm = + Thm.implies_intr lhs (prove_rhs ctxt (mk_inv_def ctxt rhs) lhs rhs) + in lhs_thm COMP (rhs_thm COMP @{thm iffI}) end + + +val swap_eq_thm = mk_meta_eq @{thm eq_commute} +val swap_disj_thm = mk_meta_eq @{thm disj_commute} + +fun swap_conv dest eq = + Old_SMT_Utils.if_true_conv ((op <) o apply2 Term.size_of_term o dest) + (Conv.rewr_conv eq) + +val swap_eq_conv = swap_conv HOLogic.dest_eq swap_eq_thm +val swap_disj_conv = swap_conv Old_SMT_Utils.dest_disj swap_disj_thm + +fun norm_conv ctxt = + swap_eq_conv then_conv + Conv.arg1_conv (Old_SMT_Utils.binders_conv (K swap_disj_conv) ctxt) then_conv + Conv.arg_conv (Old_SMT_Utils.binders_conv (K swap_eq_conv) ctxt) + +in + +fun prove_injectivity ctxt = + Old_Z3_Proof_Tools.by_tac ctxt ( + CONVERSION (Old_SMT_Utils.prop_conv (norm_conv ctxt)) + THEN' CSUBGOAL (uncurry (resolve_tac ctxt o single o prove_inj_eq ctxt))) + +end + +end diff --git a/src/test-gen/Old_SMT/old_z3_proof_parser.ML b/src/test-gen/Old_SMT/old_z3_proof_parser.ML new file mode 100644 index 0000000..aa44b11 --- /dev/null +++ b/src/test-gen/Old_SMT/old_z3_proof_parser.ML @@ -0,0 +1,446 @@ +(* Title: HOL/Library/Old_SMT/old_z3_proof_parser.ML + Author: Sascha Boehme, TU Muenchen + +Parser for Z3 proofs. 
+*) + +signature OLD_Z3_PROOF_PARSER = +sig + (*proof rules*) + datatype rule = True_Axiom | Asserted | Goal | Modus_Ponens | Reflexivity | + Symmetry | Transitivity | Transitivity_Star | Monotonicity | Quant_Intro | + Distributivity | And_Elim | Not_Or_Elim | Rewrite | Rewrite_Star | + Pull_Quant | Pull_Quant_Star | Push_Quant | Elim_Unused_Vars | + Dest_Eq_Res | Quant_Inst | Hypothesis | Lemma | Unit_Resolution | + Iff_True | Iff_False | Commutativity | Def_Axiom | Intro_Def | Apply_Def | + Iff_Oeq | Nnf_Pos | Nnf_Neg | Nnf_Star | Cnf_Star | Skolemize | + Modus_Ponens_Oeq | Th_Lemma of string list + val string_of_rule: rule -> string + + (*proof parser*) + datatype proof_step = Proof_Step of { + rule: rule, + args: cterm list, + prems: int list, + prop: cterm } + val parse: Proof.context -> typ Symtab.table -> term Symtab.table -> + string list -> + (int * cterm) list * (int * proof_step) list * string list * Proof.context +end + +structure Old_Z3_Proof_Parser: OLD_Z3_PROOF_PARSER = +struct + + +(* proof rules *) + +datatype rule = True_Axiom | Asserted | Goal | Modus_Ponens | Reflexivity | + Symmetry | Transitivity | Transitivity_Star | Monotonicity | Quant_Intro | + Distributivity | And_Elim | Not_Or_Elim | Rewrite | Rewrite_Star | + Pull_Quant | Pull_Quant_Star | Push_Quant | Elim_Unused_Vars | Dest_Eq_Res | + Quant_Inst | Hypothesis | Lemma | Unit_Resolution | Iff_True | Iff_False | + Commutativity | Def_Axiom | Intro_Def | Apply_Def | Iff_Oeq | Nnf_Pos | + Nnf_Neg | Nnf_Star | Cnf_Star | Skolemize | Modus_Ponens_Oeq | + Th_Lemma of string list + +val rule_names = Symtab.make [ + ("true-axiom", True_Axiom), + ("asserted", Asserted), + ("goal", Goal), + ("mp", Modus_Ponens), + ("refl", Reflexivity), + ("symm", Symmetry), + ("trans", Transitivity), + ("trans*", Transitivity_Star), + ("monotonicity", Monotonicity), + ("quant-intro", Quant_Intro), + ("distributivity", Distributivity), + ("and-elim", And_Elim), + ("not-or-elim", Not_Or_Elim), + ("rewrite", Rewrite), + ("rewrite*", Rewrite_Star), + ("pull-quant", Pull_Quant), + ("pull-quant*", Pull_Quant_Star), + ("push-quant", Push_Quant), + ("elim-unused", Elim_Unused_Vars), + ("der", Dest_Eq_Res), + ("quant-inst", Quant_Inst), + ("hypothesis", Hypothesis), + ("lemma", Lemma), + ("unit-resolution", Unit_Resolution), + ("iff-true", Iff_True), + ("iff-false", Iff_False), + ("commutativity", Commutativity), + ("def-axiom", Def_Axiom), + ("intro-def", Intro_Def), + ("apply-def", Apply_Def), + ("iff~", Iff_Oeq), + ("nnf-pos", Nnf_Pos), + ("nnf-neg", Nnf_Neg), + ("nnf*", Nnf_Star), + ("cnf*", Cnf_Star), + ("sk", Skolemize), + ("mp~", Modus_Ponens_Oeq), + ("th-lemma", Th_Lemma [])] + +fun string_of_rule (Th_Lemma args) = space_implode " " ("th-lemma" :: args) + | string_of_rule r = + let fun eq_rule (s, r') = if r = r' then SOME s else NONE + in the (Symtab.get_first eq_rule rule_names) end + + + +(* certified terms and variables *) + +val (var_prefix, decl_prefix) = ("v", "sk") +(* + "decl_prefix" is for skolem constants (represented by free variables), + "var_prefix" is for pseudo-schematic variables (schematic with respect + to the Z3 proof, but represented by free variables). + + Both prefixes must be distinct to avoid name interferences. + More precisely, the naming of pseudo-schematic variables must be + context-independent modulo the current proof context to be able to + use fast inference kernel rules during proof reconstruction. 
+*) + +fun mk_inst ctxt vars = + let + val max = fold (Integer.max o fst) vars 0 + val ns = fst (Variable.variant_fixes (replicate (max + 1) var_prefix) ctxt) + fun mk (i, v) = + (dest_Var (Thm.term_of v), Thm.cterm_of ctxt (Free (nth ns i, Thm.typ_of_cterm v))) + in map mk vars end + +fun close ctxt (ct, vars) = + let + val inst = mk_inst ctxt vars + val names = fold (Term.add_free_names o Thm.term_of o snd) inst [] + in (Thm.instantiate_cterm ([], inst) ct, names) end + + +fun mk_bound ctxt (i, T) = + let val ct = Thm.cterm_of ctxt (Var ((Name.uu, 0), T)) + in (ct, [(i, ct)]) end + +local + fun mk_quant1 ctxt q T (ct, vars) = + let + val cv = + (case AList.lookup (op =) vars 0 of + SOME cv => cv + | _ => Thm.cterm_of ctxt (Var ((Name.uu, Thm.maxidx_of_cterm ct + 1), T))) + fun dec (i, v) = if i = 0 then NONE else SOME (i-1, v) + val vars' = map_filter dec vars + in (Thm.apply (Old_SMT_Utils.instT' cv q) (Thm.lambda cv ct), vars') end + + fun quant name = + Old_SMT_Utils.mk_const_pat @{theory} name (Old_SMT_Utils.destT1 o Old_SMT_Utils.destT1) + val forall = quant @{const_name All} + val exists = quant @{const_name Ex} +in + +fun mk_quant is_forall ctxt = + fold_rev (mk_quant1 ctxt (if is_forall then forall else exists)) + +end + +local + fun prep (ct, vars) (maxidx, all_vars) = + let + val maxidx' = maxidx + Thm.maxidx_of_cterm ct + 1 + + fun part (i, cv) = + (case AList.lookup (op =) all_vars i of + SOME cu => apfst (if cu aconvc cv then I else cons (cv, cu)) + | NONE => + let val cv' = Thm.incr_indexes_cterm maxidx cv + in apfst (cons (cv, cv')) #> apsnd (cons (i, cv')) end) + + val (inst, vars') = + if null vars then ([], vars) + else fold part vars ([], []) + + in + (Thm.instantiate_cterm ([], map (apfst (dest_Var o Thm.term_of)) inst) ct, + (maxidx', vars' @ all_vars)) + end +in +fun mk_fun f ts = + let val (cts, (_, vars)) = fold_map prep ts (0, []) + in f cts |> Option.map (rpair vars) end +end + + + +(* proof parser *) + +datatype proof_step = Proof_Step of { + rule: rule, + args: cterm list, + prems: int list, + prop: cterm } + + +(** parser context **) + +val not_false = Thm.cterm_of @{context} (@{const Not} $ @{const False}) + +fun make_context ctxt typs terms = + let + val ctxt' = + ctxt + |> Symtab.fold (Variable.declare_typ o snd) typs + |> Symtab.fold (Variable.declare_term o snd) terms + + fun cert @{const True} = not_false + | cert t = Thm.cterm_of ctxt' t + + in (typs, Symtab.map (K cert) terms, Inttab.empty, [], [], ctxt') end + +fun fresh_name n (typs, terms, exprs, steps, vars, ctxt) = + let val (n', ctxt') = yield_singleton Variable.variant_fixes n ctxt + in (n', (typs, terms, exprs, steps, vars, ctxt')) end + +fun context_of (_, _, _, _, _, ctxt) = ctxt + +fun add_decl (n, T) (cx as (_, terms, _, _, _, _)) = + (case Symtab.lookup terms n of + SOME _ => cx + | NONE => cx |> fresh_name (decl_prefix ^ n) + |> (fn (m, (typs, terms, exprs, steps, vars, ctxt)) => + let + val upd = Symtab.update (n, Thm.cterm_of ctxt (Free (m, T))) + in (typs, upd terms, exprs, steps, vars, ctxt) end)) + +fun mk_typ (typs, _, _, _, _, ctxt) (s as Old_Z3_Interface.Sym (n, _)) = + (case Old_Z3_Interface.mk_builtin_typ ctxt s of + SOME T => SOME T + | NONE => Symtab.lookup typs n) + +fun mk_num (_, _, _, _, _, ctxt) (i, T) = + mk_fun (K (Old_Z3_Interface.mk_builtin_num ctxt i T)) [] + +fun mk_app (_, terms, _, _, _, ctxt) (s as Old_Z3_Interface.Sym (n, _), es) = + mk_fun (fn cts => + (case Old_Z3_Interface.mk_builtin_fun ctxt s cts of + SOME ct => SOME ct + | NONE => + Symtab.lookup terms n 
|> Option.map (Drule.list_comb o rpair cts))) es + +fun add_expr k t (typs, terms, exprs, steps, vars, ctxt) = + (typs, terms, Inttab.update (k, t) exprs, steps, vars, ctxt) + +fun lookup_expr (_, _, exprs, _, _, _) = Inttab.lookup exprs + +fun add_proof_step k ((r, args), prop) cx = + let + val (typs, terms, exprs, steps, vars, ctxt) = cx + val (ct, vs) = close ctxt prop + fun part (SOME e, _) (cts, ps) = (close ctxt e :: cts, ps) + | part (NONE, i) (cts, ps) = (cts, i :: ps) + val (args', prems) = fold (part o `(lookup_expr cx)) args ([], []) + val (cts, vss) = split_list args' + val step = Proof_Step {rule=r, args=rev cts, prems=rev prems, + prop = Old_SMT_Utils.mk_cprop ct} + val vars' = fold (union (op =)) (vs :: vss) vars + in (typs, terms, exprs, (k, step) :: steps, vars', ctxt) end + +fun finish (_, _, _, steps, vars, ctxt) = + let + fun coll (p as (k, Proof_Step {prems, rule, prop, ...})) (ars, ps, ids) = + (case rule of + Asserted => ((k, prop) :: ars, ps, ids) + | Goal => ((k, prop) :: ars, ps, ids) + | _ => + if Inttab.defined ids k then + (ars, p :: ps, fold (Inttab.update o rpair ()) prems ids) + else (ars, ps, ids)) + + val (ars, steps', _) = fold coll steps ([], [], Inttab.make [(~1, ())]) + in (ars, steps', vars, ctxt) end + + +(** core parser **) + +fun parse_exn line_no msg = raise Old_SMT_Failure.SMT (Old_SMT_Failure.Other_Failure + ("Z3 proof parser (line " ^ string_of_int line_no ^ "): " ^ msg)) + +fun scan_exn msg ((line_no, _), _) = parse_exn line_no msg + +fun with_info f cx = + (case f ((NONE, 1), cx) of + ((SOME _, _), cx') => cx' + | ((_, line_no), _) => parse_exn line_no "bad proof") + +fun parse_line _ _ (st as ((SOME _, _), _)) = st + | parse_line scan line ((_, line_no), cx) = + let val st = ((line_no, cx), raw_explode line) + in + (case Scan.catch (Scan.finite' Symbol.stopper (Scan.option scan)) st of + (SOME r, ((_, cx'), _)) => ((r, line_no+1), cx') + | (NONE, _) => parse_exn line_no ("bad proof line: " ^ quote line)) + end + +fun with_context f x ((line_no, cx), st) = + let val (y, cx') = f x cx + in (y, ((line_no, cx'), st)) end + + +fun lookup_context f x (st as ((_, cx), _)) = (f cx x, st) + + +(** parser combinators and parsers for basic entities **) + +fun $$ s = Scan.lift (Scan.$$ s) +fun this s = Scan.lift (Scan.this_string s) +val is_blank = Symbol.is_ascii_blank +fun blank st = Scan.lift (Scan.many1 is_blank) st +fun sep scan = blank |-- scan +fun seps scan = Scan.repeat (sep scan) +fun seps1 scan = Scan.repeat1 (sep scan) +fun seps_by scan_sep scan = scan ::: Scan.repeat (scan_sep |-- scan) + +val lpar = "(" and rpar = ")" +val lbra = "[" and rbra = "]" +fun par scan = $$ lpar |-- scan --| $$ rpar +fun bra scan = $$ lbra |-- scan --| $$ rbra + +val digit = (fn + "0" => SOME 0 | "1" => SOME 1 | "2" => SOME 2 | "3" => SOME 3 | + "4" => SOME 4 | "5" => SOME 5 | "6" => SOME 6 | "7" => SOME 7 | + "8" => SOME 8 | "9" => SOME 9 | _ => NONE) + +fun digits st = (Scan.lift (Scan.many1 Symbol.is_ascii_digit) >> implode) st + +fun nat_num st = (Scan.lift (Scan.repeat1 (Scan.some digit)) >> (fn ds => + fold (fn d => fn i => i * 10 + d) ds 0)) st + +fun int_num st = (Scan.optional ($$ "-" >> K (fn i => ~i)) I :|-- + (fn sign => nat_num >> sign)) st + +val is_char = Symbol.is_ascii_letter orf Symbol.is_ascii_digit orf + member (op =) (raw_explode "_+*-/%~=<>$&|?!.@^#") + +fun name st = (Scan.lift (Scan.many1 is_char) >> implode) st + +fun sym st = (name -- + Scan.optional (bra (seps_by ($$ ":") sym)) [] >> Old_Z3_Interface.Sym) st + +fun id st = ($$ "#" |-- 
nat_num) st + + +(** parsers for various parts of Z3 proofs **) + +fun sort st = Scan.first [ + this "array" |-- bra (sort --| $$ ":" -- sort) >> (op -->), + par (this "->" |-- seps1 sort) >> ((op --->) o split_last), + sym :|-- (fn s as Old_Z3_Interface.Sym (n, _) => lookup_context mk_typ s :|-- (fn + SOME T => Scan.succeed T + | NONE => scan_exn ("unknown sort: " ^ quote n)))] st + +fun bound st = (par (this ":var" |-- sep nat_num -- sep sort) :|-- + lookup_context (mk_bound o context_of)) st + +fun numb (n as (i, _)) = lookup_context mk_num n :|-- (fn + SOME n' => Scan.succeed n' + | NONE => scan_exn ("unknown number: " ^ quote (string_of_int i))) + +fun appl (app as (Old_Z3_Interface.Sym (n, _), _)) = + lookup_context mk_app app :|-- (fn + SOME app' => Scan.succeed app' + | NONE => scan_exn ("unknown function symbol: " ^ quote n)) + +fun bv_size st = (digits >> (fn sz => + Old_Z3_Interface.Sym ("bv", [Old_Z3_Interface.Sym (sz, [])]))) st + +fun bv_number_sort st = (bv_size :|-- lookup_context mk_typ :|-- (fn + SOME cT => Scan.succeed cT + | NONE => scan_exn ("unknown sort: " ^ quote "bv"))) st + +fun bv_number st = + (this "bv" |-- bra (nat_num --| $$ ":" -- bv_number_sort) :|-- numb) st + +fun frac_number st = ( + int_num --| $$ "/" -- int_num --| this "::" -- sort :|-- (fn ((i, j), T) => + numb (i, T) -- numb (j, T) :|-- (fn (n, m) => + appl (Old_Z3_Interface.Sym ("/", []), [n, m])))) st + +fun plain_number st = (int_num --| this "::" -- sort :|-- numb) st + +fun number st = Scan.first [bv_number, frac_number, plain_number] st + +fun constant st = ((sym >> rpair []) :|-- appl) st + +fun expr_id st = (id :|-- (fn i => lookup_context lookup_expr i :|-- (fn + SOME e => Scan.succeed e + | NONE => scan_exn ("unknown term id: " ^ quote (string_of_int i))))) st + +fun arg st = Scan.first [expr_id, number, constant] st + +fun application st = par ((sym -- Scan.repeat1 (sep arg)) :|-- appl) st + +fun variables st = par (this "vars" |-- seps1 (par (name |-- sep sort))) st + +fun pats st = seps (par ((this ":pat" || this ":nopat") |-- seps1 id)) st + +val ctrue = Thm.cterm_of @{context} @{const True} + +fun pattern st = par (this "pattern" |-- Scan.repeat1 (sep arg) >> + (the o mk_fun (K (SOME ctrue)))) st + +fun quant_kind st = st |> ( + this "forall" >> K (mk_quant true o context_of) || + this "exists" >> K (mk_quant false o context_of)) + +fun quantifier st = + (par (quant_kind -- sep variables --| pats -- sep arg) :|-- + lookup_context (fn cx => fn ((mk_q, Ts), body) => mk_q cx Ts body)) st + +fun expr k = + Scan.first [bound, quantifier, pattern, application, number, constant] :|-- + with_context (pair NONE oo add_expr k) + +val rule_arg = id + (* if this is modified, then 'th_lemma_arg' needs reviewing *) + +fun th_lemma_arg st = Scan.unless (sep rule_arg >> K "" || $$ rbra) (sep name) st + +fun rule_name st = ((name >> `(Symtab.lookup rule_names)) :|-- (fn + (SOME (Th_Lemma _), _) => Scan.repeat th_lemma_arg >> Th_Lemma + | (SOME r, _) => Scan.succeed r + | (NONE, n) => scan_exn ("unknown proof rule: " ^ quote n))) st + +fun rule f k = + bra (rule_name -- seps id) --| $$ ":" -- sep arg #-> + with_context (pair (f k) oo add_proof_step k) + +fun decl st = (this "decl" |-- sep name --| sep (this "::") -- sep sort :|-- + with_context (pair NONE oo add_decl)) st + +fun def st = (id --| sep (this ":=")) st + +fun node st = st |> ( + decl || + def :|-- (fn k => sep (expr k) || sep (rule (K NONE) k)) || + rule SOME ~1) + + +(** overall parser **) + +(* + Currently, terms are parsed bottom-up 
(i.e., along with parsing the proof + text line by line), but proofs are reconstructed top-down (i.e. by an + in-order top-down traversal of the proof tree/graph). The latter approach + was taken because some proof texts comprise irrelevant proof steps which + will thus not be reconstructed. This approach might also be beneficial + for constructing terms, but it would also increase the complexity of the + (otherwise rather modular) code. +*) + +fun parse ctxt typs terms proof_text = + make_context ctxt typs terms + |> with_info (fold (parse_line node) proof_text) + |> finish + +end diff --git a/src/test-gen/Old_SMT/old_z3_proof_reconstruction.ML b/src/test-gen/Old_SMT/old_z3_proof_reconstruction.ML new file mode 100644 index 0000000..e2302cd --- /dev/null +++ b/src/test-gen/Old_SMT/old_z3_proof_reconstruction.ML @@ -0,0 +1,891 @@ +(* Title: HOL/Library/Old_SMT/old_z3_proof_reconstruction.ML + Author: Sascha Boehme, TU Muenchen + +Proof reconstruction for proofs found by Z3. +*) + +signature OLD_Z3_PROOF_RECONSTRUCTION = +sig + val add_z3_rule: thm -> Context.generic -> Context.generic + val reconstruct: Proof.context -> Old_SMT_Translate.recon -> string list -> int list * thm +end + +structure Old_Z3_Proof_Reconstruction: OLD_Z3_PROOF_RECONSTRUCTION = +struct + + +fun z3_exn msg = raise Old_SMT_Failure.SMT (Old_SMT_Failure.Other_Failure + ("Z3 proof reconstruction: " ^ msg)) + + + +(* net of schematic rules *) + +local + val description = "declaration of Z3 proof rules" + + val eq = Thm.eq_thm + + structure Old_Z3_Rules = Generic_Data + ( + type T = thm Net.net + val empty = Net.empty + val extend = I + val merge = Net.merge eq + ) + + fun prep context = + `Thm.prop_of o rewrite_rule (Context.proof_of context) [Old_Z3_Proof_Literals.rewrite_true] + + fun ins thm context = + context |> Old_Z3_Rules.map (fn net => Net.insert_term eq (prep context thm) net handle Net.INSERT => net) + fun rem thm context = + context |> Old_Z3_Rules.map (fn net => Net.delete_term eq (prep context thm) net handle Net.DELETE => net) + + val add = Thm.declaration_attribute ins + val del = Thm.declaration_attribute rem +in + +val add_z3_rule = ins + +fun by_schematic_rule ctxt ct = + the (Old_Z3_Proof_Tools.net_instance (Old_Z3_Rules.get (Context.Proof ctxt)) ct) + +val _ = Theory.setup + (Attrib.setup @{binding old_z3_rule} (Attrib.add_del add del) description #> + Global_Theory.add_thms_dynamic (@{binding old_z3_rule}, Net.content o Old_Z3_Rules.get)) + +end + + + +(* proof tools *) + +fun named ctxt name prover ct = + let val _ = Old_SMT_Config.trace_msg ctxt I ("Z3: trying " ^ name ^ " ...") + in prover ct end + +fun NAMED ctxt name tac i st = + let val _ = Old_SMT_Config.trace_msg ctxt I ("Z3: trying " ^ name ^ " ...") + in tac i st end + +fun pretty_goal ctxt thms t = + [Pretty.block [Pretty.str "proposition: ", Syntax.pretty_term ctxt t]] + |> not (null thms) ? cons (Pretty.big_list "assumptions:" + (map (Thm.pretty_thm ctxt) thms)) + +fun try_apply ctxt thms = + let + fun try_apply_err ct = Pretty.string_of (Pretty.chunks [ + Pretty.big_list ("Z3 found a proof," ^ + " but proof reconstruction failed at the following subgoal:") + (pretty_goal ctxt thms (Thm.term_of ct)), + Pretty.str ("Declaring a rule as [old_z3_rule] might solve this problem.")]) + + fun apply [] ct = error (try_apply_err ct) + | apply (prover :: provers) ct = + (case try prover ct of + SOME thm => (Old_SMT_Config.trace_msg ctxt I "Z3: succeeded"; thm) + | NONE => apply provers ct) + + fun schematic_label full = "schematic rules" |> full ? 
suffix " (full)" + fun schematic ctxt full ct = + ct + |> full ? fold_rev (curry Drule.mk_implies o Thm.cprop_of) thms + |> named ctxt (schematic_label full) (by_schematic_rule ctxt) + |> fold Thm.elim_implies thms + + in apply o cons (schematic ctxt false) o cons (schematic ctxt true) end + +local + val rewr_if = + @{lemma "(if P then Q1 else Q2) = ((P --> Q1) & (~P --> Q2))" by simp} +in + +fun HOL_fast_tac ctxt = + Classical.fast_tac (put_claset HOL_cs ctxt) + +fun simp_fast_tac ctxt = + Simplifier.simp_tac (put_simpset HOL_ss ctxt addsimps [rewr_if]) + THEN_ALL_NEW HOL_fast_tac ctxt + +end + + + +(* theorems and proofs *) + +(** theorem incarnations **) + +datatype theorem = + Thm of thm | (* theorem without special features *) + MetaEq of thm | (* meta equality "t == s" *) + Literals of thm * Old_Z3_Proof_Literals.littab + (* "P1 & ... & Pn" and table of all literals P1, ..., Pn *) + +fun thm_of (Thm thm) = thm + | thm_of (MetaEq thm) = thm COMP @{thm meta_eq_to_obj_eq} + | thm_of (Literals (thm, _)) = thm + +fun meta_eq_of (MetaEq thm) = thm + | meta_eq_of p = mk_meta_eq (thm_of p) + +fun literals_of (Literals (_, lits)) = lits + | literals_of p = Old_Z3_Proof_Literals.make_littab [thm_of p] + + + +(** core proof rules **) + +(* assumption *) + +local + val remove_trigger = mk_meta_eq @{thm trigger_def} + val remove_weight = mk_meta_eq @{thm weight_def} + val remove_fun_app = mk_meta_eq @{thm fun_app_def} + + fun rewrite_conv _ [] = Conv.all_conv + | rewrite_conv ctxt eqs = Simplifier.full_rewrite (empty_simpset ctxt addsimps eqs) + + val prep_rules = [@{thm Let_def}, remove_trigger, remove_weight, + remove_fun_app, Old_Z3_Proof_Literals.rewrite_true] + + fun rewrite _ [] = I + | rewrite ctxt eqs = Conv.fconv_rule (rewrite_conv ctxt eqs) + + fun lookup_assm assms_net ct = + Old_Z3_Proof_Tools.net_instances assms_net ct + |> map (fn ithm as (_, thm) => (ithm, Thm.cprop_of thm aconvc ct)) +in + +fun add_asserted outer_ctxt rewrite_rules assms asserted ctxt = + let + val eqs = map (rewrite ctxt [Old_Z3_Proof_Literals.rewrite_true]) rewrite_rules + val eqs' = union Thm.eq_thm eqs prep_rules + + val assms_net = + assms + |> map (apsnd (rewrite ctxt eqs')) + |> map (apsnd (Conv.fconv_rule Thm.eta_conversion)) + |> Old_Z3_Proof_Tools.thm_net_of snd + + fun revert_conv ctxt = rewrite_conv ctxt eqs' then_conv Thm.eta_conversion + + fun assume thm ctxt = + let + val ct = Thm.cprem_of thm 1 + val (thm', ctxt') = yield_singleton Assumption.add_assumes ct ctxt + in (Thm.implies_elim thm thm', ctxt') end + + fun add1 idx thm1 ((i, th), exact) ((is, thms), (ctxt, ptab)) = + let + val (thm, ctxt') = + if exact then (Thm.implies_elim thm1 th, ctxt) + else assume thm1 ctxt + val thms' = if exact then thms else th :: thms + in + ((insert (op =) i is, thms'), + (ctxt', Inttab.update (idx, Thm thm) ptab)) + end + + fun add (idx, ct) (cx as ((is, thms), (ctxt, ptab))) = + let + val thm1 = + Thm.trivial ct + |> Conv.fconv_rule (Conv.arg1_conv (revert_conv outer_ctxt)) + val thm2 = singleton (Variable.export ctxt outer_ctxt) thm1 + in + (case lookup_assm assms_net (Thm.cprem_of thm2 1) of + [] => + let val (thm, ctxt') = assume thm1 ctxt + in ((is, thms), (ctxt', Inttab.update (idx, Thm thm) ptab)) end + | ithms => fold (add1 idx thm1) ithms cx) + end + in fold add asserted (([], []), (ctxt, Inttab.empty)) end + +end + + +(* P = Q ==> P ==> Q or P --> Q ==> P ==> Q *) +local + val precomp = Old_Z3_Proof_Tools.precompose2 + val comp = Old_Z3_Proof_Tools.compose + + val meta_iffD1 = @{lemma "P == Q ==> P ==> 
(Q::bool)" by simp} + val meta_iffD1_c = precomp Thm.dest_binop meta_iffD1 + + val iffD1_c = precomp (Thm.dest_binop o Thm.dest_arg) @{thm iffD1} + val mp_c = precomp (Thm.dest_binop o Thm.dest_arg) @{thm mp} +in +fun mp (MetaEq thm) p = Thm (Thm.implies_elim (comp meta_iffD1_c thm) p) + | mp p_q p = + let + val pq = thm_of p_q + val thm = comp iffD1_c pq handle THM _ => comp mp_c pq + in Thm (Thm.implies_elim thm p) end +end + + +(* and_elim: P1 & ... & Pn ==> Pi *) +(* not_or_elim: ~(P1 | ... | Pn) ==> ~Pi *) +local + fun is_sublit conj t = Old_Z3_Proof_Literals.exists_lit conj (fn u => u aconv t) + + fun derive conj t lits idx ptab = + let + val lit = the (Old_Z3_Proof_Literals.get_first_lit (is_sublit conj t) lits) + val ls = Old_Z3_Proof_Literals.explode conj false false [t] lit + val lits' = fold Old_Z3_Proof_Literals.insert_lit ls + (Old_Z3_Proof_Literals.delete_lit lit lits) + + fun upd thm = Literals (thm_of thm, lits') + val ptab' = Inttab.map_entry idx upd ptab + in (the (Old_Z3_Proof_Literals.lookup_lit lits' t), ptab') end + + fun lit_elim conj (p, idx) ct ptab = + let val lits = literals_of p + in + (case Old_Z3_Proof_Literals.lookup_lit lits (Old_SMT_Utils.term_of ct) of + SOME lit => (Thm lit, ptab) + | NONE => apfst Thm (derive conj (Old_SMT_Utils.term_of ct) lits idx ptab)) + end +in +val and_elim = lit_elim true +val not_or_elim = lit_elim false +end + + +(* P1, ..., Pn |- False ==> |- ~P1 | ... | ~Pn *) +local + fun step lit thm = + Thm.implies_elim (Thm.implies_intr (Thm.cprop_of lit) thm) lit + val explode_disj = Old_Z3_Proof_Literals.explode false false false + fun intro hyps thm th = fold step (explode_disj hyps th) thm + + fun dest_ccontr ct = [Thm.dest_arg (Thm.dest_arg (Thm.dest_arg1 ct))] + val ccontr = Old_Z3_Proof_Tools.precompose dest_ccontr @{thm ccontr} +in +fun lemma thm ct = + let + val cu = Old_Z3_Proof_Literals.negate (Thm.dest_arg ct) + val hyps = map_filter (try HOLogic.dest_Trueprop) (Thm.hyps_of thm) + val th = Old_Z3_Proof_Tools.under_assumption (intro hyps thm) cu + in Thm (Old_Z3_Proof_Tools.compose ccontr th) end +end + + +(* \/{P1, ..., Pn, Q1, ..., Qn}, ~P1, ..., ~Pn ==> \/{Q1, ..., Qn} *) +local + val explode_disj = Old_Z3_Proof_Literals.explode false true false + val join_disj = Old_Z3_Proof_Literals.join false + fun unit thm thms th = + let + val t = @{const Not} $ Old_SMT_Utils.prop_of thm + val ts = map Old_SMT_Utils.prop_of thms + in + join_disj (Old_Z3_Proof_Literals.make_littab (thms @ explode_disj ts th)) t + end + + fun dest_arg2 ct = Thm.dest_arg (Thm.dest_arg ct) + fun dest ct = apply2 dest_arg2 (Thm.dest_binop ct) + val contrapos = + Old_Z3_Proof_Tools.precompose2 dest @{lemma "(~P ==> ~Q) ==> Q ==> P" by fast} +in +fun unit_resolution thm thms ct = + Old_Z3_Proof_Literals.negate (Thm.dest_arg ct) + |> Old_Z3_Proof_Tools.under_assumption (unit thm thms) + |> Thm o Old_Z3_Proof_Tools.discharge thm o Old_Z3_Proof_Tools.compose contrapos +end + + +(* P ==> P == True or P ==> P == False *) +local + val iff1 = @{lemma "P ==> P == (~ False)" by simp} + val iff2 = @{lemma "~P ==> P == False" by simp} +in +fun iff_true thm = MetaEq (thm COMP iff1) +fun iff_false thm = MetaEq (thm COMP iff2) +end + + +(* distributivity of | over & *) +fun distributivity ctxt = Thm o try_apply ctxt [] [ + named ctxt "fast" (Old_Z3_Proof_Tools.by_tac ctxt (HOL_fast_tac ctxt))] + (* FIXME: not very well tested *) + + +(* Tseitin-like axioms *) +local + val disjI1 = @{lemma "(P ==> Q) ==> ~P | Q" by fast} + val disjI2 = @{lemma "(~P ==> Q) ==> P | Q" by fast} 
+ val disjI3 = @{lemma "(~Q ==> P) ==> P | Q" by fast} + val disjI4 = @{lemma "(Q ==> P) ==> P | ~Q" by fast} + + fun prove' conj1 conj2 ct2 thm = + let + val littab = + Old_Z3_Proof_Literals.explode conj1 true (conj1 <> conj2) [] thm + |> cons Old_Z3_Proof_Literals.true_thm + |> Old_Z3_Proof_Literals.make_littab + in Old_Z3_Proof_Literals.join conj2 littab (Thm.term_of ct2) end + + fun prove rule (ct1, conj1) (ct2, conj2) = + Old_Z3_Proof_Tools.under_assumption (prove' conj1 conj2 ct2) ct1 COMP rule + + fun prove_def_axiom ct = + let val (ct1, ct2) = Thm.dest_binop (Thm.dest_arg ct) + in + (case Thm.term_of ct1 of + @{const Not} $ (@{const HOL.conj} $ _ $ _) => + prove disjI1 (Thm.dest_arg ct1, true) (ct2, true) + | @{const HOL.conj} $ _ $ _ => + prove disjI3 (Old_Z3_Proof_Literals.negate ct2, false) (ct1, true) + | @{const Not} $ (@{const HOL.disj} $ _ $ _) => + prove disjI3 (Old_Z3_Proof_Literals.negate ct2, false) (ct1, false) + | @{const HOL.disj} $ _ $ _ => + prove disjI2 (Old_Z3_Proof_Literals.negate ct1, false) (ct2, true) + | Const (@{const_name distinct}, _) $ _ => + let + fun dis_conv cv = Conv.arg_conv (Conv.arg1_conv cv) + val unfold_dis_conv = dis_conv Old_Z3_Proof_Tools.unfold_distinct_conv + fun prv cu = + let val (cu1, cu2) = Thm.dest_binop (Thm.dest_arg cu) + in prove disjI4 (Thm.dest_arg cu2, true) (cu1, true) end + in Old_Z3_Proof_Tools.with_conv unfold_dis_conv prv ct end + | @{const Not} $ (Const (@{const_name distinct}, _) $ _) => + let + fun dis_conv cv = Conv.arg_conv (Conv.arg1_conv (Conv.arg_conv cv)) + val unfold_dis_conv = dis_conv Old_Z3_Proof_Tools.unfold_distinct_conv + fun prv cu = + let val (cu1, cu2) = Thm.dest_binop (Thm.dest_arg cu) + in prove disjI1 (Thm.dest_arg cu1, true) (cu2, true) end + in Old_Z3_Proof_Tools.with_conv unfold_dis_conv prv ct end + | _ => raise CTERM ("prove_def_axiom", [ct])) + end +in +fun def_axiom ctxt = Thm o try_apply ctxt [] [ + named ctxt "conj/disj/distinct" prove_def_axiom, + Old_Z3_Proof_Tools.by_abstraction 0 (true, false) ctxt [] (fn ctxt' => + named ctxt' "simp+fast" (Old_Z3_Proof_Tools.by_tac ctxt (simp_fast_tac ctxt')))] +end + + +(* local definitions *) +local + val intro_rules = [ + @{lemma "n == P ==> (~n | P) & (n | ~P)" by simp}, + @{lemma "n == (if P then s else t) ==> (~P | n = s) & (P | n = t)" + by simp}, + @{lemma "n == P ==> n = P" by (rule meta_eq_to_obj_eq)} ] + + val apply_rules = [ + @{lemma "(~n | P) & (n | ~P) ==> P == n" by (atomize(full)) fast}, + @{lemma "(~P | n = s) & (P | n = t) ==> (if P then s else t) == n" + by (atomize(full)) fastforce} ] + + val inst_rule = Old_Z3_Proof_Tools.match_instantiate Thm.dest_arg + + fun apply_rule ct = + (case get_first (try (inst_rule ct)) intro_rules of + SOME thm => thm + | NONE => raise CTERM ("intro_def", [ct])) +in +fun intro_def ct = Old_Z3_Proof_Tools.make_hyp_def (apply_rule ct) #>> Thm + +fun apply_def thm = + get_first (try (fn rule => MetaEq (thm COMP rule))) apply_rules + |> the_default (Thm thm) +end + + +(* negation normal form *) +local + val quant_rules1 = ([ + @{lemma "(!!x. P x == Q) ==> ALL x. P x == Q" by simp}, + @{lemma "(!!x. P x == Q) ==> EX x. P x == Q" by simp}], [ + @{lemma "(!!x. P x == Q x) ==> ALL x. P x == ALL x. Q x" by simp}, + @{lemma "(!!x. P x == Q x) ==> EX x. P x == EX x. Q x" by simp}]) + + val quant_rules2 = ([ + @{lemma "(!!x. ~P x == Q) ==> ~(ALL x. P x) == Q" by simp}, + @{lemma "(!!x. ~P x == Q) ==> ~(EX x. P x) == Q" by simp}], [ + @{lemma "(!!x. ~P x == Q x) ==> ~(ALL x. P x) == EX x. Q x" by simp}, + @{lemma "(!!x. 
~P x == Q x) ==> ~(EX x. P x) == ALL x. Q x" by simp}]) + + fun nnf_quant_tac ctxt thm (qs as (qs1, qs2)) i st = ( + resolve_tac ctxt [thm] ORELSE' + (match_tac ctxt qs1 THEN' nnf_quant_tac ctxt thm qs) ORELSE' + (match_tac ctxt qs2 THEN' nnf_quant_tac ctxt thm qs)) i st + + fun nnf_quant_tac_varified ctxt vars eq = + nnf_quant_tac ctxt (Old_Z3_Proof_Tools.varify vars eq) + + fun nnf_quant ctxt vars qs p ct = + Old_Z3_Proof_Tools.as_meta_eq ct + |> Old_Z3_Proof_Tools.by_tac ctxt (nnf_quant_tac_varified ctxt vars (meta_eq_of p) qs) + + fun prove_nnf ctxt = try_apply ctxt [] [ + named ctxt "conj/disj" Old_Z3_Proof_Literals.prove_conj_disj_eq, + Old_Z3_Proof_Tools.by_abstraction 0 (true, false) ctxt [] (fn ctxt' => + named ctxt' "simp+fast" (Old_Z3_Proof_Tools.by_tac ctxt' (simp_fast_tac ctxt')))] +in +fun nnf ctxt vars ps ct = + (case Old_SMT_Utils.term_of ct of + _ $ (l as Const _ $ Abs _) $ (r as Const _ $ Abs _) => + if l aconv r + then MetaEq (Thm.reflexive (Thm.dest_arg (Thm.dest_arg ct))) + else MetaEq (nnf_quant ctxt vars quant_rules1 (hd ps) ct) + | _ $ (@{const Not} $ (Const _ $ Abs _)) $ (Const _ $ Abs _) => + MetaEq (nnf_quant ctxt vars quant_rules2 (hd ps) ct) + | _ => + let + val nnf_rewr_conv = Conv.arg_conv (Conv.arg_conv + (Old_Z3_Proof_Tools.unfold_eqs ctxt + (map (Thm.symmetric o meta_eq_of) ps))) + in Thm (Old_Z3_Proof_Tools.with_conv nnf_rewr_conv (prove_nnf ctxt) ct) end) +end + + + +(** equality proof rules **) + +(* |- t = t *) +fun refl ct = MetaEq (Thm.reflexive (Thm.dest_arg (Thm.dest_arg ct))) + + +(* s = t ==> t = s *) +local + val symm_rule = @{lemma "s = t ==> t == s" by simp} +in +fun symm (MetaEq thm) = MetaEq (Thm.symmetric thm) + | symm p = MetaEq (thm_of p COMP symm_rule) +end + + +(* s = t ==> t = u ==> s = u *) +local + val trans1 = @{lemma "s == t ==> t = u ==> s == u" by simp} + val trans2 = @{lemma "s = t ==> t == u ==> s == u" by simp} + val trans3 = @{lemma "s = t ==> t = u ==> s == u" by simp} +in +fun trans (MetaEq thm1) (MetaEq thm2) = MetaEq (Thm.transitive thm1 thm2) + | trans (MetaEq thm) q = MetaEq (thm_of q COMP (thm COMP trans1)) + | trans p (MetaEq thm) = MetaEq (thm COMP (thm_of p COMP trans2)) + | trans p q = MetaEq (thm_of q COMP (thm_of p COMP trans3)) +end + + +(* t1 = s1 ==> ... ==> tn = sn ==> f t1 ... tn = f s1 .. 
sn + (reflexive antecendents are droppped) *) +local + exception MONO + + fun prove_refl (ct, _) = Thm.reflexive ct + fun prove_comb f g cp = + let val ((ct1, ct2), (cu1, cu2)) = apply2 Thm.dest_comb cp + in Thm.combination (f (ct1, cu1)) (g (ct2, cu2)) end + fun prove_arg f = prove_comb prove_refl f + + fun prove f cp = prove_comb (prove f) f cp handle CTERM _ => prove_refl cp + + fun prove_nary is_comb f = + let + fun prove (cp as (ct, _)) = f cp handle MONO => + if is_comb (Thm.term_of ct) + then prove_comb (prove_arg prove) prove cp + else prove_refl cp + in prove end + + fun prove_list f n cp = + if n = 0 then prove_refl cp + else prove_comb (prove_arg f) (prove_list f (n-1)) cp + + fun with_length f (cp as (cl, _)) = + f (length (HOLogic.dest_list (Thm.term_of cl))) cp + + fun prove_distinct f = prove_arg (with_length (prove_list f)) + + fun prove_eq exn lookup cp = + (case lookup (Logic.mk_equals (apply2 Thm.term_of cp)) of + SOME eq => eq + | NONE => if exn then raise MONO else prove_refl cp) + + val prove_exn = prove_eq true + and prove_safe = prove_eq false + + fun mono f (cp as (cl, _)) = + (case Term.head_of (Thm.term_of cl) of + @{const HOL.conj} => prove_nary Old_Z3_Proof_Literals.is_conj (prove_exn f) + | @{const HOL.disj} => prove_nary Old_Z3_Proof_Literals.is_disj (prove_exn f) + | Const (@{const_name distinct}, _) => prove_distinct (prove_safe f) + | _ => prove (prove_safe f)) cp +in +fun monotonicity eqs ct = + let + fun and_symmetric (t, thm) = [(t, thm), (t, Thm.symmetric thm)] + val teqs = maps (and_symmetric o `Thm.prop_of o meta_eq_of) eqs + val lookup = AList.lookup (op aconv) teqs + val cp = Thm.dest_binop (Thm.dest_arg ct) + in MetaEq (prove_exn lookup cp handle MONO => mono lookup cp) end +end + + +(* |- f a b = f b a (where f is equality) *) +local + val rule = @{lemma "a = b == b = a" by (atomize(full)) (rule eq_commute)} +in +fun commutativity ct = + MetaEq (Old_Z3_Proof_Tools.match_instantiate I + (Old_Z3_Proof_Tools.as_meta_eq ct) rule) +end + + + +(** quantifier proof rules **) + +(* P ?x = Q ?x ==> (ALL x. P x) = (ALL x. Q x) + P ?x = Q ?x ==> (EX x. P x) = (EX x. Q x) *) +local + val rules = [ + @{lemma "(!!x. P x == Q x) ==> (ALL x. P x) == (ALL x. Q x)" by simp}, + @{lemma "(!!x. P x == Q x) ==> (EX x. P x) == (EX x. Q x)" by simp}] +in +fun quant_intro ctxt vars p ct = + let + val thm = meta_eq_of p + val rules' = Old_Z3_Proof_Tools.varify vars thm :: rules + val cu = Old_Z3_Proof_Tools.as_meta_eq ct + val tac = REPEAT_ALL_NEW (match_tac ctxt rules') + in MetaEq (Old_Z3_Proof_Tools.by_tac ctxt tac cu) end +end + + +(* |- ((ALL x. P x) | Q) = (ALL x. P x | Q) *) +fun pull_quant ctxt = Thm o try_apply ctxt [] [ + named ctxt "fast" (Old_Z3_Proof_Tools.by_tac ctxt (HOL_fast_tac ctxt))] + (* FIXME: not very well tested *) + + +(* |- (ALL x. P x & Q x) = ((ALL x. P x) & (ALL x. Q x)) *) +fun push_quant ctxt = Thm o try_apply ctxt [] [ + named ctxt "fast" (Old_Z3_Proof_Tools.by_tac ctxt (HOL_fast_tac ctxt))] + (* FIXME: not very well tested *) + + +(* |- (ALL x1 ... xn y1 ... yn. P x1 ... xn) = (ALL x1 ... xn. P x1 ... xn) *) +local + val elim_all = @{lemma "P = Q ==> (ALL x. P) = Q" by fast} + val elim_ex = @{lemma "P = Q ==> (EX x. 
P) = Q" by fast} + + fun elim_unused_tac ctxt i st = ( + match_tac ctxt [@{thm refl}] + ORELSE' (match_tac ctxt [elim_all, elim_ex] THEN' elim_unused_tac ctxt) + ORELSE' ( + match_tac ctxt [@{thm iff_allI}, @{thm iff_exI}] + THEN' elim_unused_tac ctxt)) i st +in + +fun elim_unused_vars ctxt = Thm o Old_Z3_Proof_Tools.by_tac ctxt (elim_unused_tac ctxt) + +end + + +(* |- (ALL x1 ... xn. ~(x1 = t1 & ... xn = tn) | P x1 ... xn) = P t1 ... tn *) +fun dest_eq_res ctxt = Thm o try_apply ctxt [] [ + named ctxt "fast" (Old_Z3_Proof_Tools.by_tac ctxt (HOL_fast_tac ctxt))] + (* FIXME: not very well tested *) + + +(* |- ~(ALL x1...xn. P x1...xn) | P a1...an *) +local + val rule = @{lemma "~ P x | Q ==> ~(ALL x. P x) | Q" by fast} +in +fun quant_inst ctxt = Thm o Old_Z3_Proof_Tools.by_tac ctxt ( + REPEAT_ALL_NEW (match_tac ctxt [rule]) + THEN' resolve_tac ctxt @{thms excluded_middle}) +end + + +(* |- (EX x. P x) = P c |- ~(ALL x. P x) = ~ P c *) +local + val forall = + Old_SMT_Utils.mk_const_pat @{theory} @{const_name Pure.all} + (Old_SMT_Utils.destT1 o Old_SMT_Utils.destT1) + fun mk_forall cv ct = + Thm.apply (Old_SMT_Utils.instT' cv forall) (Thm.lambda cv ct) + + fun get_vars f mk pred ctxt t = + Term.fold_aterms f t [] + |> map_filter (fn v => + if pred v then SOME (Thm.cterm_of ctxt (mk v)) else NONE) + + fun close vars f ct ctxt = + let + val frees_of = get_vars Term.add_frees Free (member (op =) vars o fst) + val vs = frees_of ctxt (Thm.term_of ct) + val (thm, ctxt') = f (fold_rev mk_forall vs ct) ctxt + val vars_of = get_vars Term.add_vars Var (K true) ctxt' + in + (Thm.instantiate ([], map (dest_Var o Thm.term_of) (vars_of (Thm.prop_of thm)) ~~ vs) thm, + ctxt') + end + + val sk_rules = @{lemma + "c = (SOME x. P x) ==> (EX x. P x) = P c" + "c = (SOME x. ~P x) ==> (~(ALL x. P x)) = (~P c)" + by (metis someI_ex)+} +in + +fun skolemize vars = + apfst Thm oo close vars (yield_singleton Assumption.add_assumes) + +fun discharge_sk_tac ctxt i st = ( + resolve_tac ctxt @{thms trans} i + THEN resolve_tac ctxt sk_rules i + THEN (resolve_tac ctxt @{thms refl} ORELSE' discharge_sk_tac ctxt) (i+1) + THEN resolve_tac ctxt @{thms refl} i) st + +end + + + +(** theory proof rules **) + +(* theory lemmas: linear arithmetic, arrays *) +fun th_lemma ctxt simpset thms = Thm o try_apply ctxt thms [ + Old_Z3_Proof_Tools.by_abstraction 0 (false, true) ctxt thms (fn ctxt' => + Old_Z3_Proof_Tools.by_tac ctxt' ( + NAMED ctxt' "arith" (Arith_Data.arith_tac ctxt') + ORELSE' NAMED ctxt' "simp+arith" ( + Simplifier.asm_full_simp_tac (put_simpset simpset ctxt') + THEN_ALL_NEW Arith_Data.arith_tac ctxt')))] + + +(* rewriting: prove equalities: + * ACI of conjunction/disjunction + * contradiction, excluded middle + * logical rewriting rules (for negation, implication, equivalence, + distinct) + * normal forms for polynoms (integer/real arithmetic) + * quantifier elimination over linear arithmetic + * ... ? 
**) +local + fun spec_meta_eq_of thm = + (case try (fn th => th RS @{thm spec}) thm of + SOME thm' => spec_meta_eq_of thm' + | NONE => mk_meta_eq thm) + + fun prep (Thm thm) = spec_meta_eq_of thm + | prep (MetaEq thm) = thm + | prep (Literals (thm, _)) = spec_meta_eq_of thm + + fun unfold_conv ctxt ths = + Conv.arg_conv (Conv.binop_conv (Old_Z3_Proof_Tools.unfold_eqs ctxt + (map prep ths))) + + fun with_conv _ [] prv = prv + | with_conv ctxt ths prv = + Old_Z3_Proof_Tools.with_conv (unfold_conv ctxt ths) prv + + val unfold_conv = + Conv.arg_conv (Conv.binop_conv + (Conv.try_conv Old_Z3_Proof_Tools.unfold_distinct_conv)) + val prove_conj_disj_eq = + Old_Z3_Proof_Tools.with_conv unfold_conv Old_Z3_Proof_Literals.prove_conj_disj_eq + + fun declare_hyps ctxt thm = + (thm, snd (Assumption.add_assumes (Thm.chyps_of thm) ctxt)) +in + +val abstraction_depth = 3 + (* + This value was chosen large enough to potentially catch exceptions, + yet small enough to not cause too much harm. The value might be + increased in the future, if reconstructing 'rewrite' fails on problems + that get too much abstracted to be reconstructable. + *) + +fun rewrite simpset ths ct ctxt = + apfst Thm (declare_hyps ctxt (with_conv ctxt ths (try_apply ctxt [] [ + named ctxt "conj/disj/distinct" prove_conj_disj_eq, + named ctxt "pull-ite" Old_Z3_Proof_Methods.prove_ite ctxt, + Old_Z3_Proof_Tools.by_abstraction 0 (true, false) ctxt [] (fn ctxt' => + Old_Z3_Proof_Tools.by_tac ctxt' ( + NAMED ctxt' "simp (logic)" (Simplifier.simp_tac (put_simpset simpset ctxt')) + THEN_ALL_NEW NAMED ctxt' "fast (logic)" (fast_tac ctxt'))), + Old_Z3_Proof_Tools.by_abstraction 0 (false, true) ctxt [] (fn ctxt' => + Old_Z3_Proof_Tools.by_tac ctxt' ( + (resolve_tac ctxt' @{thms iff_allI} ORELSE' K all_tac) + THEN' NAMED ctxt' "simp (theory)" (Simplifier.simp_tac (put_simpset simpset ctxt')) + THEN_ALL_NEW ( + NAMED ctxt' "fast (theory)" (HOL_fast_tac ctxt') + ORELSE' NAMED ctxt' "arith (theory)" (Arith_Data.arith_tac ctxt')))), + Old_Z3_Proof_Tools.by_abstraction 0 (true, true) ctxt [] (fn ctxt' => + Old_Z3_Proof_Tools.by_tac ctxt' ( + (resolve_tac ctxt' @{thms iff_allI} ORELSE' K all_tac) + THEN' NAMED ctxt' "simp (full)" (Simplifier.simp_tac (put_simpset simpset ctxt')) + THEN_ALL_NEW ( + NAMED ctxt' "fast (full)" (HOL_fast_tac ctxt') + ORELSE' NAMED ctxt' "arith (full)" (Arith_Data.arith_tac ctxt')))), + named ctxt "injectivity" (Old_Z3_Proof_Methods.prove_injectivity ctxt), + Old_Z3_Proof_Tools.by_abstraction abstraction_depth (true, true) ctxt [] + (fn ctxt' => + Old_Z3_Proof_Tools.by_tac ctxt' ( + (resolve_tac ctxt' @{thms iff_allI} ORELSE' K all_tac) + THEN' NAMED ctxt' "simp (deepen)" (Simplifier.simp_tac (put_simpset simpset ctxt')) + THEN_ALL_NEW ( + NAMED ctxt' "fast (deepen)" (HOL_fast_tac ctxt') + ORELSE' NAMED ctxt' "arith (deepen)" (Arith_Data.arith_tac + ctxt'))))]) ct)) + +end + + + +(* proof reconstruction *) + +(** tracing and checking **) + +fun trace_before ctxt idx = Old_SMT_Config.trace_msg ctxt (fn r => + "Z3: #" ^ string_of_int idx ^ ": " ^ Old_Z3_Proof_Parser.string_of_rule r) + +fun check_after idx r ps ct (p, (ctxt, _)) = + if not (Config.get ctxt Old_SMT_Config.trace) then () + else + let val thm = thm_of p |> tap (Thm.join_proofs o single) + in + if (Thm.cprop_of thm) aconvc ct then () + else + z3_exn (Pretty.string_of (Pretty.big_list + ("proof step failed: " ^ quote (Old_Z3_Proof_Parser.string_of_rule r) ^ + " (#" ^ string_of_int idx ^ ")") + (pretty_goal ctxt (map (thm_of o fst) ps) (Thm.prop_of thm) @ + 
[Pretty.block [Pretty.str "expected: ", + Syntax.pretty_term ctxt (Thm.term_of ct)]]))) + end + + +(** overall reconstruction procedure **) + +local + fun not_supported r = raise Fail ("Z3: proof rule not implemented: " ^ + quote (Old_Z3_Proof_Parser.string_of_rule r)) + + fun prove_step simpset vars r ps ct (cxp as (cx, ptab)) = + (case (r, ps) of + (* core rules *) + (Old_Z3_Proof_Parser.True_Axiom, _) => (Thm Old_Z3_Proof_Literals.true_thm, cxp) + | (Old_Z3_Proof_Parser.Asserted, _) => raise Fail "bad assertion" + | (Old_Z3_Proof_Parser.Goal, _) => raise Fail "bad assertion" + | (Old_Z3_Proof_Parser.Modus_Ponens, [(p, _), (q, _)]) => + (mp q (thm_of p), cxp) + | (Old_Z3_Proof_Parser.Modus_Ponens_Oeq, [(p, _), (q, _)]) => + (mp q (thm_of p), cxp) + | (Old_Z3_Proof_Parser.And_Elim, [(p, i)]) => + and_elim (p, i) ct ptab ||> pair cx + | (Old_Z3_Proof_Parser.Not_Or_Elim, [(p, i)]) => + not_or_elim (p, i) ct ptab ||> pair cx + | (Old_Z3_Proof_Parser.Hypothesis, _) => (Thm (Thm.assume ct), cxp) + | (Old_Z3_Proof_Parser.Lemma, [(p, _)]) => (lemma (thm_of p) ct, cxp) + | (Old_Z3_Proof_Parser.Unit_Resolution, (p, _) :: ps) => + (unit_resolution (thm_of p) (map (thm_of o fst) ps) ct, cxp) + | (Old_Z3_Proof_Parser.Iff_True, [(p, _)]) => (iff_true (thm_of p), cxp) + | (Old_Z3_Proof_Parser.Iff_False, [(p, _)]) => (iff_false (thm_of p), cxp) + | (Old_Z3_Proof_Parser.Distributivity, _) => (distributivity cx ct, cxp) + | (Old_Z3_Proof_Parser.Def_Axiom, _) => (def_axiom cx ct, cxp) + | (Old_Z3_Proof_Parser.Intro_Def, _) => intro_def ct cx ||> rpair ptab + | (Old_Z3_Proof_Parser.Apply_Def, [(p, _)]) => (apply_def (thm_of p), cxp) + | (Old_Z3_Proof_Parser.Iff_Oeq, [(p, _)]) => (p, cxp) + | (Old_Z3_Proof_Parser.Nnf_Pos, _) => (nnf cx vars (map fst ps) ct, cxp) + | (Old_Z3_Proof_Parser.Nnf_Neg, _) => (nnf cx vars (map fst ps) ct, cxp) + + (* equality rules *) + | (Old_Z3_Proof_Parser.Reflexivity, _) => (refl ct, cxp) + | (Old_Z3_Proof_Parser.Symmetry, [(p, _)]) => (symm p, cxp) + | (Old_Z3_Proof_Parser.Transitivity, [(p, _), (q, _)]) => (trans p q, cxp) + | (Old_Z3_Proof_Parser.Monotonicity, _) => (monotonicity (map fst ps) ct, cxp) + | (Old_Z3_Proof_Parser.Commutativity, _) => (commutativity ct, cxp) + + (* quantifier rules *) + | (Old_Z3_Proof_Parser.Quant_Intro, [(p, _)]) => (quant_intro cx vars p ct, cxp) + | (Old_Z3_Proof_Parser.Pull_Quant, _) => (pull_quant cx ct, cxp) + | (Old_Z3_Proof_Parser.Push_Quant, _) => (push_quant cx ct, cxp) + | (Old_Z3_Proof_Parser.Elim_Unused_Vars, _) => (elim_unused_vars cx ct, cxp) + | (Old_Z3_Proof_Parser.Dest_Eq_Res, _) => (dest_eq_res cx ct, cxp) + | (Old_Z3_Proof_Parser.Quant_Inst, _) => (quant_inst cx ct, cxp) + | (Old_Z3_Proof_Parser.Skolemize, _) => skolemize vars ct cx ||> rpair ptab + + (* theory rules *) + | (Old_Z3_Proof_Parser.Th_Lemma _, _) => (* FIXME: use arguments *) + (th_lemma cx simpset (map (thm_of o fst) ps) ct, cxp) + | (Old_Z3_Proof_Parser.Rewrite, _) => rewrite simpset [] ct cx ||> rpair ptab + | (Old_Z3_Proof_Parser.Rewrite_Star, ps) => + rewrite simpset (map fst ps) ct cx ||> rpair ptab + + | (Old_Z3_Proof_Parser.Nnf_Star, _) => not_supported r + | (Old_Z3_Proof_Parser.Cnf_Star, _) => not_supported r + | (Old_Z3_Proof_Parser.Transitivity_Star, _) => not_supported r + | (Old_Z3_Proof_Parser.Pull_Quant_Star, _) => not_supported r + + | _ => raise Fail ("Z3: proof rule " ^ + quote (Old_Z3_Proof_Parser.string_of_rule r) ^ + " has an unexpected number of arguments.")) + + fun lookup_proof ptab idx = + (case Inttab.lookup ptab idx of + SOME p => (p, 
idx) + | NONE => z3_exn ("unknown proof id: " ^ quote (string_of_int idx))) + + fun prove simpset vars (idx, step) (_, cxp as (ctxt, ptab)) = + let + val Old_Z3_Proof_Parser.Proof_Step {rule=r, prems, prop, ...} = step + val ps = map (lookup_proof ptab) prems + val _ = trace_before ctxt idx r + val (thm, (ctxt', ptab')) = + cxp + |> prove_step simpset vars r ps prop + |> tap (check_after idx r ps prop) + in (thm, (ctxt', Inttab.update (idx, thm) ptab')) end + + fun make_discharge_rules rules = rules @ [@{thm allI}, @{thm refl}, + @{thm reflexive}, Old_Z3_Proof_Literals.true_thm] + + fun discharge_assms_tac ctxt rules = + REPEAT (HEADGOAL (resolve_tac ctxt rules ORELSE' SOLVED' (discharge_sk_tac ctxt))) + + fun discharge_assms ctxt rules thm = + if Thm.nprems_of thm = 0 then Goal.norm_result ctxt thm + else + (case Seq.pull (discharge_assms_tac ctxt rules thm) of + SOME (thm', _) => Goal.norm_result ctxt thm' + | NONE => raise THM ("failed to discharge premise", 1, [thm])) + + fun discharge rules outer_ctxt (p, (inner_ctxt, _)) = + thm_of p + |> singleton (Proof_Context.export inner_ctxt outer_ctxt) + |> discharge_assms outer_ctxt (make_discharge_rules rules) +in + +fun reconstruct outer_ctxt recon output = + let + val {context=ctxt, typs, terms, rewrite_rules, assms} = recon + val (asserted, steps, vars, ctxt1) = + Old_Z3_Proof_Parser.parse ctxt typs terms output + + val simpset = + Old_Z3_Proof_Tools.make_simpset ctxt1 (Named_Theorems.get ctxt1 @{named_theorems old_z3_simp}) + + val ((is, rules), cxp as (ctxt2, _)) = + add_asserted outer_ctxt rewrite_rules assms asserted ctxt1 + in + if Config.get ctxt2 Old_SMT_Config.filter_only_facts then (is, @{thm TrueI}) + else + (Thm @{thm TrueI}, cxp) + |> fold (prove simpset vars) steps + |> discharge rules outer_ctxt + |> pair [] + end + +end + +end diff --git a/src/test-gen/Old_SMT/old_z3_proof_tools.ML b/src/test-gen/Old_SMT/old_z3_proof_tools.ML new file mode 100644 index 0000000..8fc65ba --- /dev/null +++ b/src/test-gen/Old_SMT/old_z3_proof_tools.ML @@ -0,0 +1,374 @@ +(* Title: HOL/Library/Old_SMT/old_z3_proof_tools.ML + Author: Sascha Boehme, TU Muenchen + +Helper functions required for Z3 proof reconstruction. 
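+They comprise theorem nets, proof combinators, bounded term abstraction, a
+faster variant of COMP, unfolding of 'distinct', and the simpset used when
+replaying Z3's 'rewrite' and 'th-lemma' steps.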
+*) + +signature OLD_Z3_PROOF_TOOLS = +sig + (*modifying terms*) + val as_meta_eq: cterm -> cterm + + (*theorem nets*) + val thm_net_of: ('a -> thm) -> 'a list -> 'a Net.net + val net_instances: (int * thm) Net.net -> cterm -> (int * thm) list + val net_instance: thm Net.net -> cterm -> thm option + + (*proof combinators*) + val under_assumption: (thm -> thm) -> cterm -> thm + val with_conv: conv -> (cterm -> thm) -> cterm -> thm + val discharge: thm -> thm -> thm + val varify: string list -> thm -> thm + val unfold_eqs: Proof.context -> thm list -> conv + val match_instantiate: (cterm -> cterm) -> cterm -> thm -> thm + val by_tac: Proof.context -> (int -> tactic) -> cterm -> thm + val make_hyp_def: thm -> Proof.context -> thm * Proof.context + val by_abstraction: int -> bool * bool -> Proof.context -> thm list -> + (Proof.context -> cterm -> thm) -> cterm -> thm + + (*a faster COMP*) + type compose_data = cterm list * (cterm -> cterm list) * thm + val precompose: (cterm -> cterm list) -> thm -> compose_data + val precompose2: (cterm -> cterm * cterm) -> thm -> compose_data + val compose: compose_data -> thm -> thm + + (*unfolding of 'distinct'*) + val unfold_distinct_conv: conv + + (*simpset*) + val add_simproc: Simplifier.simproc -> Context.generic -> Context.generic + val make_simpset: Proof.context -> thm list -> simpset +end + +structure Old_Z3_Proof_Tools: OLD_Z3_PROOF_TOOLS = +struct + + + +(* modifying terms *) + +fun as_meta_eq ct = + uncurry Old_SMT_Utils.mk_cequals (Thm.dest_binop (Old_SMT_Utils.dest_cprop ct)) + + + +(* theorem nets *) + +fun thm_net_of f xthms = + let fun insert xthm = Net.insert_term (K false) (Thm.prop_of (f xthm), xthm) + in fold insert xthms Net.empty end + +fun maybe_instantiate ct thm = + try Thm.first_order_match (Thm.cprop_of thm, ct) + |> Option.map (fn inst => Thm.instantiate inst thm) + +local + fun instances_from_net match f net ct = + let + val lookup = if match then Net.match_term else Net.unify_term + val xthms = lookup net (Thm.term_of ct) + fun select ct = map_filter (f (maybe_instantiate ct)) xthms + fun select' ct = + let val thm = Thm.trivial ct + in map_filter (f (try (fn rule => rule COMP thm))) xthms end + in (case select ct of [] => select' ct | xthms' => xthms') end +in + +fun net_instances net = + instances_from_net false (fn f => fn (i, thm) => Option.map (pair i) (f thm)) + net + +fun net_instance net = try hd o instances_from_net true I net + +end + + + +(* proof combinators *) + +fun under_assumption f ct = + let val ct' = Old_SMT_Utils.mk_cprop ct + in Thm.implies_intr ct' (f (Thm.assume ct')) end + +fun with_conv conv prove ct = + let val eq = Thm.symmetric (conv ct) + in Thm.equal_elim eq (prove (Thm.lhs_of eq)) end + +fun discharge p pq = Thm.implies_elim pq p + +fun varify vars = Drule.generalize ([], vars) + +fun unfold_eqs _ [] = Conv.all_conv + | unfold_eqs ctxt eqs = + Conv.top_sweep_conv (K (Conv.rewrs_conv eqs)) ctxt + +fun match_instantiate f ct thm = + Thm.instantiate (Thm.match (f (Thm.cprop_of thm), ct)) thm + +fun by_tac ctxt tac ct = Goal.norm_result ctxt (Goal.prove_internal ctxt [] ct (K (tac 1))) + +(* + |- c x == t x ==> P (c x) + --------------------------- + c == t |- P (c x) +*) +fun make_hyp_def thm ctxt = + let + val (lhs, rhs) = Thm.dest_binop (Thm.cprem_of thm 1) + val (cf, cvs) = Drule.strip_comb lhs + val eq = Old_SMT_Utils.mk_cequals cf (fold_rev Thm.lambda cvs rhs) + fun apply cv th = + Thm.combination th (Thm.reflexive cv) + |> Conv.fconv_rule (Conv.arg_conv (Thm.beta_conversion false)) + in + 
yield_singleton Assumption.add_assumes eq ctxt + |>> Thm.implies_elim thm o fold apply cvs + end + + + +(* abstraction *) + +local + +fun abs_context ctxt = (ctxt, Termtab.empty, 1, false) + +fun context_of (ctxt, _, _, _) = ctxt + +fun replace (_, (cv, ct)) = Thm.forall_elim ct o Thm.forall_intr cv + +fun abs_instantiate (_, tab, _, beta_norm) = + fold replace (Termtab.dest tab) #> + beta_norm ? Conv.fconv_rule (Thm.beta_conversion true) + +fun lambda_abstract cvs t = + let + val frees = map Free (Term.add_frees t []) + val cvs' = filter (fn cv => member (op aconv) frees (Thm.term_of cv)) cvs + val vs = map (Term.dest_Free o Thm.term_of) cvs' + in (fold_rev absfree vs t, cvs') end + +fun fresh_abstraction (_, cvs) ct (cx as (ctxt, tab, idx, beta_norm)) = + let val (t, cvs') = lambda_abstract cvs (Thm.term_of ct) + in + (case Termtab.lookup tab t of + SOME (cv, _) => (Drule.list_comb (cv, cvs'), cx) + | NONE => + let + val (n, ctxt') = yield_singleton Variable.variant_fixes "x" ctxt + val cv = Thm.cterm_of ctxt' + (Free (n, map Thm.typ_of_cterm cvs' ---> Thm.typ_of_cterm ct)) + val cu = Drule.list_comb (cv, cvs') + val e = (t, (cv, fold_rev Thm.lambda cvs' ct)) + val beta_norm' = beta_norm orelse not (null cvs') + in (cu, (ctxt', Termtab.update e tab, idx + 1, beta_norm')) end) + end + +fun abs_comb f g dcvs ct = + let val (cf, cu) = Thm.dest_comb ct + in f dcvs cf ##>> g dcvs cu #>> uncurry Thm.apply end + +fun abs_arg f = abs_comb (K pair) f + +fun abs_args f dcvs ct = + (case Thm.term_of ct of + _ $ _ => abs_comb (abs_args f) f dcvs ct + | _ => pair ct) + +fun abs_list f g dcvs ct = + (case Thm.term_of ct of + Const (@{const_name Nil}, _) => pair ct + | Const (@{const_name Cons}, _) $ _ $ _ => + abs_comb (abs_arg f) (abs_list f g) dcvs ct + | _ => g dcvs ct) + +fun abs_abs f (depth, cvs) ct = + let val (cv, cu) = Thm.dest_abs NONE ct + in f (depth, cv :: cvs) cu #>> Thm.lambda cv end + +val is_atomic = + (fn Free _ => true | Var _ => true | Bound _ => true | _ => false) + +fun abstract depth (ext_logic, with_theories) = + let + fun abstr1 cvs ct = abs_arg abstr cvs ct + and abstr2 cvs ct = abs_comb abstr1 abstr cvs ct + and abstr3 cvs ct = abs_comb abstr2 abstr cvs ct + and abstr_abs cvs ct = abs_arg (abs_abs abstr) cvs ct + + and abstr (dcvs as (d, cvs)) ct = + (case Thm.term_of ct of + @{const Trueprop} $ _ => abstr1 dcvs ct + | @{const Pure.imp} $ _ $ _ => abstr2 dcvs ct + | @{const True} => pair ct + | @{const False} => pair ct + | @{const Not} $ _ => abstr1 dcvs ct + | @{const HOL.conj} $ _ $ _ => abstr2 dcvs ct + | @{const HOL.disj} $ _ $ _ => abstr2 dcvs ct + | @{const HOL.implies} $ _ $ _ => abstr2 dcvs ct + | Const (@{const_name HOL.eq}, _) $ _ $ _ => abstr2 dcvs ct + | Const (@{const_name distinct}, _) $ _ => + if ext_logic then abs_arg (abs_list abstr fresh_abstraction) dcvs ct + else fresh_abstraction dcvs ct + | Const (@{const_name If}, _) $ _ $ _ $ _ => + if ext_logic then abstr3 dcvs ct else fresh_abstraction dcvs ct + | Const (@{const_name All}, _) $ _ => + if ext_logic then abstr_abs dcvs ct else fresh_abstraction dcvs ct + | Const (@{const_name Ex}, _) $ _ => + if ext_logic then abstr_abs dcvs ct else fresh_abstraction dcvs ct + | t => (fn cx => + if is_atomic t orelse can HOLogic.dest_number t then (ct, cx) + else if with_theories andalso + Old_Z3_Interface.is_builtin_theory_term (context_of cx) t + then abs_args abstr dcvs ct cx + else if d = 0 then fresh_abstraction dcvs ct cx + else + (case Term.strip_comb t of + (Const _, _) => abs_args abstr (d-1, cvs) ct cx + | 
(Free _, _) => abs_args abstr (d-1, cvs) ct cx + | _ => fresh_abstraction dcvs ct cx))) + in abstr (depth, []) end + +val cimp = Thm.cterm_of @{context} @{const Pure.imp} + +fun deepen depth f x = + if depth = 0 then f depth x + else (case try (f depth) x of SOME y => y | NONE => deepen (depth - 1) f x) + +fun with_prems depth thms f ct = + fold_rev (Thm.mk_binop cimp o Thm.cprop_of) thms ct + |> deepen depth f + |> fold (fn prem => fn th => Thm.implies_elim th prem) thms + +in + +fun by_abstraction depth mode ctxt thms prove = + with_prems depth thms (fn d => fn ct => + let val (cu, cx) = abstract d mode ct (abs_context ctxt) + in abs_instantiate cx (prove (context_of cx) cu) end) + +end + + + +(* a faster COMP *) + +type compose_data = cterm list * (cterm -> cterm list) * thm + +fun list2 (x, y) = [x, y] + +fun precompose f rule : compose_data = (f (Thm.cprem_of rule 1), f, rule) +fun precompose2 f rule : compose_data = precompose (list2 o f) rule + +fun compose (cvs, f, rule) thm = + discharge thm (Thm.instantiate ([], map (dest_Var o Thm.term_of) cvs ~~ f (Thm.cprop_of thm)) rule) + + + +(* unfolding of 'distinct' *) + +local + val set1 = @{lemma "x ~: set [] == ~False" by simp} + val set2 = @{lemma "x ~: set [x] == False" by simp} + val set3 = @{lemma "x ~: set [y] == x ~= y" by simp} + val set4 = @{lemma "x ~: set (x # ys) == False" by simp} + val set5 = @{lemma "x ~: set (y # ys) == x ~= y & x ~: set ys" by simp} + + fun set_conv ct = + (Conv.rewrs_conv [set1, set2, set3, set4] else_conv + (Conv.rewr_conv set5 then_conv Conv.arg_conv set_conv)) ct + + val dist1 = @{lemma "distinct [] == ~False" by (simp add: distinct_def)} + val dist2 = @{lemma "distinct [x] == ~False" by (simp add: distinct_def)} + val dist3 = @{lemma "distinct (x # xs) == x ~: set xs & distinct xs" + by (simp add: distinct_def)} + + fun binop_conv cv1 cv2 = Conv.combination_conv (Conv.arg_conv cv1) cv2 +in +fun unfold_distinct_conv ct = + (Conv.rewrs_conv [dist1, dist2] else_conv + (Conv.rewr_conv dist3 then_conv binop_conv set_conv unfold_distinct_conv)) ct +end + + + +(* simpset *) + +local + val antisym_le1 = mk_meta_eq @{thm order_class.antisym_conv} + val antisym_le2 = mk_meta_eq @{thm linorder_class.antisym_conv2} + val antisym_less1 = mk_meta_eq @{thm linorder_class.antisym_conv1} + val antisym_less2 = mk_meta_eq @{thm linorder_class.antisym_conv3} + + fun eq_prop t thm = HOLogic.mk_Trueprop t aconv Thm.prop_of thm + fun dest_binop ((c as Const _) $ t $ u) = (c, t, u) + | dest_binop t = raise TERM ("dest_binop", [t]) + + fun prove_antisym_le ctxt ct = + let + val (le, r, s) = dest_binop (Thm.term_of ct) + val less = Const (@{const_name less}, Term.fastype_of le) + val prems = Simplifier.prems_of ctxt + in + (case find_first (eq_prop (le $ s $ r)) prems of + NONE => + find_first (eq_prop (HOLogic.mk_not (less $ r $ s))) prems + |> Option.map (fn thm => thm RS antisym_less1) + | SOME thm => SOME (thm RS antisym_le1)) + end + handle THM _ => NONE + + fun prove_antisym_less ctxt ct = + let + val (less, r, s) = dest_binop (HOLogic.dest_not (Thm.term_of ct)) + val le = Const (@{const_name less_eq}, Term.fastype_of less) + val prems = Simplifier.prems_of ctxt + in + (case find_first (eq_prop (le $ r $ s)) prems of + NONE => + find_first (eq_prop (HOLogic.mk_not (less $ s $ r))) prems + |> Option.map (fn thm => thm RS antisym_less2) + | SOME thm => SOME (thm RS antisym_le2)) + end + handle THM _ => NONE + + val basic_simpset = + simpset_of (put_simpset HOL_ss @{context} + addsimps @{thms field_simps} + addsimps 
[@{thm times_divide_eq_right}, @{thm times_divide_eq_left}] + addsimps @{thms arith_special} addsimps @{thms arith_simps} + addsimps @{thms rel_simps} + addsimps @{thms array_rules} + addsimps @{thms term_true_def} addsimps @{thms term_false_def} + addsimps @{thms z3div_def} addsimps @{thms z3mod_def} + addsimprocs [@{simproc numeral_divmod}] + addsimprocs [ + Simplifier.make_simproc @{context} "fast_int_arith" + {lhss = [@{term "(m::int) < n"}, @{term "(m::int) \ n"}, @{term "(m::int) = n"}], + proc = K Lin_Arith.simproc}, + Simplifier.make_simproc @{context} "antisym_le" + {lhss = [@{term "(x::'a::order) \ y"}], + proc = K prove_antisym_le}, + Simplifier.make_simproc @{context} "antisym_less" + {lhss = [@{term "\ (x::'a::linorder) < y"}], + proc = K prove_antisym_less}]) + + structure Simpset = Generic_Data + ( + type T = simpset + val empty = basic_simpset + val extend = I + val merge = Simplifier.merge_ss + ) +in + +fun add_simproc simproc context = + Simpset.map (simpset_map (Context.proof_of context) + (fn ctxt => ctxt addsimprocs [simproc])) context + +fun make_simpset ctxt rules = + simpset_of (put_simpset (Simpset.get (Context.Proof ctxt)) ctxt addsimps rules) + +end + +end From fa790a8ebb91b0a2033f37d2e016dec9b2bfb260 Mon Sep 17 00:00:00 2001 From: Peyman Derafshkavian Date: Sat, 5 Feb 2022 15:39:29 -0500 Subject: [PATCH 3/8] SMT added from 2016 (cherry picked from commit 85ca6078c2e8a5d09285888781abdca5a4075d29) --- src/test-gen/SMT/conj_disj_perm.ML | 127 +++++ src/test-gen/SMT/cvc4_interface.ML | 31 ++ src/test-gen/SMT/cvc4_proof_parse.ML | 46 ++ src/test-gen/SMT/smt_builtin.ML | 222 +++++++++ src/test-gen/SMT/smt_config.ML | 265 ++++++++++ src/test-gen/SMT/smt_datatypes.ML | 152 ++++++ src/test-gen/SMT/smt_failure.ML | 40 ++ src/test-gen/SMT/smt_normalize.ML | 444 +++++++++++++++++ src/test-gen/SMT/smt_real.ML | 115 +++++ src/test-gen/SMT/smt_solver.ML | 307 ++++++++++++ src/test-gen/SMT/smt_systems.ML | 154 ++++++ src/test-gen/SMT/smt_translate.ML | 527 ++++++++++++++++++++ src/test-gen/SMT/smt_util.ML | 240 +++++++++ src/test-gen/SMT/smtlib.ML | 191 +++++++ src/test-gen/SMT/smtlib_interface.ML | 171 +++++++ src/test-gen/SMT/smtlib_isar.ML | 75 +++ src/test-gen/SMT/smtlib_proof.ML | 298 +++++++++++ src/test-gen/SMT/verit_isar.ML | 60 +++ src/test-gen/SMT/verit_proof.ML | 324 ++++++++++++ src/test-gen/SMT/verit_proof_parse.ML | 78 +++ src/test-gen/SMT/z3_interface.ML | 192 ++++++++ src/test-gen/SMT/z3_isar.ML | 120 +++++ src/test-gen/SMT/z3_proof.ML | 303 ++++++++++++ src/test-gen/SMT/z3_real.ML | 32 ++ src/test-gen/SMT/z3_replay.ML | 262 ++++++++++ src/test-gen/SMT/z3_replay_methods.ML | 685 ++++++++++++++++++++++++++ src/test-gen/SMT/z3_replay_rules.ML | 54 ++ src/test-gen/SMT/z3_replay_util.ML | 155 ++++++ 28 files changed, 5670 insertions(+) create mode 100644 src/test-gen/SMT/conj_disj_perm.ML create mode 100644 src/test-gen/SMT/cvc4_interface.ML create mode 100644 src/test-gen/SMT/cvc4_proof_parse.ML create mode 100644 src/test-gen/SMT/smt_builtin.ML create mode 100644 src/test-gen/SMT/smt_config.ML create mode 100644 src/test-gen/SMT/smt_datatypes.ML create mode 100644 src/test-gen/SMT/smt_failure.ML create mode 100644 src/test-gen/SMT/smt_normalize.ML create mode 100644 src/test-gen/SMT/smt_real.ML create mode 100644 src/test-gen/SMT/smt_solver.ML create mode 100644 src/test-gen/SMT/smt_systems.ML create mode 100644 src/test-gen/SMT/smt_translate.ML create mode 100644 src/test-gen/SMT/smt_util.ML create mode 100644 src/test-gen/SMT/smtlib.ML create mode 100644 
src/test-gen/SMT/smtlib_interface.ML create mode 100644 src/test-gen/SMT/smtlib_isar.ML create mode 100644 src/test-gen/SMT/smtlib_proof.ML create mode 100644 src/test-gen/SMT/verit_isar.ML create mode 100644 src/test-gen/SMT/verit_proof.ML create mode 100644 src/test-gen/SMT/verit_proof_parse.ML create mode 100644 src/test-gen/SMT/z3_interface.ML create mode 100644 src/test-gen/SMT/z3_isar.ML create mode 100644 src/test-gen/SMT/z3_proof.ML create mode 100644 src/test-gen/SMT/z3_real.ML create mode 100644 src/test-gen/SMT/z3_replay.ML create mode 100644 src/test-gen/SMT/z3_replay_methods.ML create mode 100644 src/test-gen/SMT/z3_replay_rules.ML create mode 100644 src/test-gen/SMT/z3_replay_util.ML diff --git a/src/test-gen/SMT/conj_disj_perm.ML b/src/test-gen/SMT/conj_disj_perm.ML new file mode 100644 index 0000000..30b85d6 --- /dev/null +++ b/src/test-gen/SMT/conj_disj_perm.ML @@ -0,0 +1,127 @@ +(* Title: HOL/Tools/SMT/conj_disj_perm.ML + Author: Sascha Boehme, TU Muenchen + +Tactic to prove equivalence of permutations of conjunctions and disjunctions. +*) + +signature CONJ_DISJ_PERM = +sig + val conj_disj_perm_tac: Proof.context -> int -> tactic +end + +structure Conj_Disj_Perm: CONJ_DISJ_PERM = +struct + +fun with_assumption ct f = + let val ct' = Thm.apply @{cterm HOL.Trueprop} ct + in Thm.implies_intr ct' (f (Thm.assume ct')) end + +fun eq_from_impls thm1 thm2 = thm2 INCR_COMP (thm1 INCR_COMP @{thm iffI}) + +fun add_lit thm = Termtab.update (HOLogic.dest_Trueprop (Thm.prop_of thm), thm) + +val ndisj1_rule = @{lemma "~(P | Q) ==> ~P" by auto} +val ndisj2_rule = @{lemma "~(P | Q) ==> ~Q" by auto} + +fun explode_thm thm = + (case HOLogic.dest_Trueprop (Thm.prop_of thm) of + @{const HOL.conj} $ _ $ _ => explode_conj_thm @{thm conjunct1} @{thm conjunct2} thm + | @{const HOL.Not} $ (@{const HOL.disj} $ _ $ _) => explode_conj_thm ndisj1_rule ndisj2_rule thm + | @{const HOL.Not} $ (@{const HOL.Not} $ _) => explode_thm (thm RS @{thm notnotD}) + | _ => add_lit thm) + +and explode_conj_thm rule1 rule2 thm lits = + explode_thm (thm RS rule1) (explode_thm (thm RS rule2) (add_lit thm lits)) + +val not_false_rule = @{lemma "~False" by auto} +fun explode thm = explode_thm thm (add_lit not_false_rule (add_lit @{thm TrueI} Termtab.empty)) + +fun find_dual_lit lits (@{const HOL.Not} $ t, thm) = Termtab.lookup lits t |> Option.map (pair thm) + | find_dual_lit _ _ = NONE + +fun find_dual_lits lits = Termtab.get_first (find_dual_lit lits) lits + +val not_not_rule = @{lemma "P ==> ~~P" by auto} +val ndisj_rule = @{lemma "~P ==> ~Q ==> ~(P | Q)" by auto} + +fun join lits t = + (case Termtab.lookup lits t of + SOME thm => thm + | NONE => join_term lits t) + +and join_term lits (@{const HOL.conj} $ t $ u) = @{thm conjI} OF (map (join lits) [t, u]) + | join_term lits (@{const HOL.Not} $ (@{const HOL.disj} $ t $ u)) = + ndisj_rule OF (map (join lits o HOLogic.mk_not) [t, u]) + | join_term lits (@{const HOL.Not} $ (@{const HOL.Not} $ t)) = join lits t RS not_not_rule + | join_term _ t = raise TERM ("join_term", [t]) + +fun prove_conj_disj_imp ct cu = with_assumption ct (fn thm => join (explode thm) (Thm.term_of cu)) + +fun prove_conj_disj_eq (clhs, crhs) = + let + val thm1 = prove_conj_disj_imp clhs crhs + val thm2 = prove_conj_disj_imp crhs clhs + in eq_from_impls thm1 thm2 end + +val not_not_false_rule = @{lemma "~~False ==> P" by auto} +val not_true_rule = @{lemma "~True ==> P" by auto} + +fun prove_any_imp ct = + (case Thm.term_of ct of + @{const HOL.False} => @{thm FalseE} + | @{const HOL.Not} $ (@{const 
HOL.Not} $ @{const HOL.False}) => not_not_false_rule + | @{const HOL.Not} $ @{const HOL.True} => not_true_rule + | _ => raise CTERM ("prove_any_imp", [ct])) + +fun prove_contradiction_imp ct = + with_assumption ct (fn thm => + let val lits = explode thm + in + (case Termtab.lookup lits @{const HOL.False} of + SOME thm' => thm' RS @{thm FalseE} + | NONE => + (case Termtab.lookup lits (@{const HOL.Not} $ @{const HOL.True}) of + SOME thm' => thm' RS not_true_rule + | NONE => + (case find_dual_lits lits of + SOME (not_lit_thm, lit_thm) => @{thm notE} OF [not_lit_thm, lit_thm] + | NONE => raise CTERM ("prove_contradiction", [ct])))) + end) + +fun prove_contradiction_eq to_right (clhs, crhs) = + let + val thm1 = if to_right then prove_contradiction_imp clhs else prove_any_imp clhs + val thm2 = if to_right then prove_any_imp crhs else prove_contradiction_imp crhs + in eq_from_impls thm1 thm2 end + +val contrapos_rule = @{lemma "(~P) = (~Q) ==> P = Q" by auto} +fun contrapos prove cp = contrapos_rule OF [prove (apply2 (Thm.apply @{cterm HOL.Not}) cp)] + +datatype kind = True | False | Conj | Disj | Other + +fun choose t _ _ _ @{const HOL.True} = t + | choose _ f _ _ @{const HOL.False} = f + | choose _ _ c _ (@{const HOL.conj} $ _ $ _) = c + | choose _ _ _ d (@{const HOL.disj} $ _ $ _) = d + | choose _ _ _ _ _ = Other + +fun kind_of (@{const HOL.Not} $ t) = choose False True Disj Conj t + | kind_of t = choose True False Conj Disj t + +fun prove_conj_disj_perm ct cp = + (case apply2 (kind_of o Thm.term_of) cp of + (Conj, Conj) => prove_conj_disj_eq cp + | (Disj, Disj) => contrapos prove_conj_disj_eq cp + | (Conj, False) => prove_contradiction_eq true cp + | (False, Conj) => prove_contradiction_eq false cp + | (Disj, True) => contrapos (prove_contradiction_eq true) cp + | (True, Disj) => contrapos (prove_contradiction_eq false) cp + | _ => raise CTERM ("prove_conj_disj_perm", [ct])) + +fun conj_disj_perm_tac ctxt = CSUBGOAL (fn (ct, i) => + (case Thm.term_of ct of + @{const HOL.Trueprop} $ (@{const HOL.eq(bool)} $ _ $ _) => + resolve_tac ctxt [prove_conj_disj_perm ct (Thm.dest_binop (Thm.dest_arg ct))] i + | _ => no_tac)) + +end diff --git a/src/test-gen/SMT/cvc4_interface.ML b/src/test-gen/SMT/cvc4_interface.ML new file mode 100644 index 0000000..68cad31 --- /dev/null +++ b/src/test-gen/SMT/cvc4_interface.ML @@ -0,0 +1,31 @@ +(* Title: HOL/Tools/SMT/cvc4_interface.ML + Author: Jasmin Blanchette, TU Muenchen + +Interface to CVC4 based on an extended version of SMT-LIB. +*) + +signature CVC4_INTERFACE = +sig + val smtlib_cvc4C: SMT_Util.class +end; + +structure CVC4_Interface: CVC4_INTERFACE = +struct + +val smtlib_cvc4C = SMTLIB_Interface.smtlibC @ ["cvc4"] + + +(* interface *) + +local + fun translate_config ctxt = + {logic = K "(set-logic ALL_SUPPORTED)\n", fp_kinds = [BNF_Util.Least_FP, BNF_Util.Greatest_FP], + serialize = #serialize (SMTLIB_Interface.translate_config ctxt)} +in + +val _ = + Theory.setup (Context.theory_map (SMT_Translate.add_config (smtlib_cvc4C, translate_config))) + +end + +end; diff --git a/src/test-gen/SMT/cvc4_proof_parse.ML b/src/test-gen/SMT/cvc4_proof_parse.ML new file mode 100644 index 0000000..2807164 --- /dev/null +++ b/src/test-gen/SMT/cvc4_proof_parse.ML @@ -0,0 +1,46 @@ +(* Title: HOL/Tools/SMT/cvc4_proof_parse.ML + Author: Jasmin Blanchette, TU Muenchen + +CVC4 proof (actually, unsat core) parsing. 
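Note that, as the code below makes explicit, this parser only recovers which asserted facts occur in CVC4's unsat core (the fact_ids field); atp_proof is always the empty list, so no proof reconstruction is attempted and an unsat answer rests on the solver being trusted as an oracle.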
+*) + +signature CVC4_PROOF_PARSE = +sig + val parse_proof: SMT_Translate.replay_data -> + ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> + SMT_Solver.parsed_proof +end; + +structure CVC4_Proof_Parse: CVC4_PROOF_PARSE = +struct + +fun parse_proof ({ll_defs, assms, ...} : SMT_Translate.replay_data) xfacts prems _ output = + if exists (String.isPrefix "(error \"This build of CVC4 doesn't have proof support") output then + {outcome = NONE, fact_ids = NONE, atp_proof = K []} + else + let + val num_ll_defs = length ll_defs + + val id_of_index = Integer.add num_ll_defs + val index_of_id = Integer.add (~ num_ll_defs) + + val used_assert_ids = map_filter (try SMTLIB_Interface.assert_index_of_name) output + val used_assm_js = + map_filter (fn id => let val i = index_of_id id in if i >= 0 then SOME i else NONE end) + used_assert_ids + + val conjecture_i = 0 + val prems_i = conjecture_i + 1 + val num_prems = length prems + val facts_i = prems_i + num_prems + + val fact_ids' = + map_filter (fn j => + let val (i, _) = nth assms j in + try (apsnd (nth xfacts)) (id_of_index j, i - facts_i) + end) used_assm_js + in + {outcome = NONE, fact_ids = SOME fact_ids', atp_proof = K []} + end + +end; diff --git a/src/test-gen/SMT/smt_builtin.ML b/src/test-gen/SMT/smt_builtin.ML new file mode 100644 index 0000000..a5955c7 --- /dev/null +++ b/src/test-gen/SMT/smt_builtin.ML @@ -0,0 +1,222 @@ +(* Title: HOL/Tools/SMT/smt_builtin.ML + Author: Sascha Boehme, TU Muenchen + +Tables of types and terms directly supported by SMT solvers. +*) + +signature SMT_BUILTIN = +sig + (*for experiments*) + val filter_builtins: (typ -> bool) -> Proof.context -> Proof.context + + (*built-in types*) + val add_builtin_typ: SMT_Util.class -> + typ * (typ -> string option) * (typ -> int -> string option) -> Context.generic -> + Context.generic + val add_builtin_typ_ext: typ * (typ -> bool) -> Context.generic -> + Context.generic + val dest_builtin_typ: Proof.context -> typ -> string option + val is_builtin_typ_ext: Proof.context -> typ -> bool + + (*built-in numbers*) + val dest_builtin_num: Proof.context -> term -> (string * typ) option + val is_builtin_num: Proof.context -> term -> bool + val is_builtin_num_ext: Proof.context -> term -> bool + + (*built-in functions*) + type 'a bfun = Proof.context -> typ -> term list -> 'a + type bfunr = string * int * term list * (term list -> term) + val add_builtin_fun: SMT_Util.class -> (string * typ) * bfunr option bfun -> Context.generic -> + Context.generic + val add_builtin_fun': SMT_Util.class -> term * string -> Context.generic -> Context.generic + val add_builtin_fun_ext: (string * typ) * term list bfun -> Context.generic -> Context.generic + val add_builtin_fun_ext': string * typ -> Context.generic -> Context.generic + val add_builtin_fun_ext'': string -> Context.generic -> Context.generic + val dest_builtin_fun: Proof.context -> string * typ -> term list -> bfunr option + val dest_builtin_eq: Proof.context -> term -> term -> bfunr option + val dest_builtin_pred: Proof.context -> string * typ -> term list -> bfunr option + val dest_builtin_conn: Proof.context -> string * typ -> term list -> bfunr option + val dest_builtin: Proof.context -> string * typ -> term list -> bfunr option + val dest_builtin_ext: Proof.context -> string * typ -> term list -> term list option + val is_builtin_fun: Proof.context -> string * typ -> term list -> bool + val is_builtin_fun_ext: Proof.context -> string * typ -> term list -> bool +end; + +structure SMT_Builtin: 
SMT_BUILTIN = +struct + + +(* built-in tables *) + +datatype ('a, 'b) kind = Ext of 'a | Int of 'b + +type ('a, 'b) ttab = ((typ * ('a, 'b) kind) Ord_List.T) SMT_Util.dict + +fun typ_ord ((T, _), (U, _)) = + let + fun tord (TVar _, Type _) = GREATER + | tord (Type _, TVar _) = LESS + | tord (Type (n, Ts), Type (m, Us)) = + if n = m then list_ord tord (Ts, Us) + else Term_Ord.typ_ord (T, U) + | tord TU = Term_Ord.typ_ord TU + in tord (T, U) end + +fun insert_ttab cs T f = + SMT_Util.dict_map_default (cs, []) + (Ord_List.insert typ_ord (perhaps (try Logic.varifyT_global) T, f)) + +fun merge_ttab ttabp = SMT_Util.dict_merge (Ord_List.merge typ_ord) ttabp + +fun lookup_ttab ctxt ttab T = + let fun match (U, _) = Sign.typ_instance (Proof_Context.theory_of ctxt) (T, U) + in + get_first (find_first match) (SMT_Util.dict_lookup ttab (SMT_Config.solver_class_of ctxt)) + end + +type ('a, 'b) btab = ('a, 'b) ttab Symtab.table + +fun insert_btab cs n T f = + Symtab.map_default (n, []) (insert_ttab cs T f) + +fun merge_btab btabp = Symtab.join (K merge_ttab) btabp + +fun lookup_btab ctxt btab (n, T) = + (case Symtab.lookup btab n of + NONE => NONE + | SOME ttab => lookup_ttab ctxt ttab T) + +type 'a bfun = Proof.context -> typ -> term list -> 'a + +type bfunr = string * int * term list * (term list -> term) + +structure Builtins = Generic_Data +( + type T = + (typ -> bool, (typ -> string option) * (typ -> int -> string option)) ttab * + (term list bfun, bfunr option bfun) btab + val empty = ([], Symtab.empty) + val extend = I + fun merge ((t1, b1), (t2, b2)) = (merge_ttab (t1, t2), merge_btab (b1, b2)) +) + +fun filter_ttab keep_T = map (apsnd (filter (keep_T o fst))) + +fun filter_builtins keep_T = + Context.proof_map (Builtins.map (fn (ttab, btab) => + (filter_ttab keep_T ttab, Symtab.map (K (filter_ttab keep_T)) btab))) + + +(* built-in types *) + +fun add_builtin_typ cs (T, f, g) = + Builtins.map (apfst (insert_ttab cs T (Int (f, g)))) + +fun add_builtin_typ_ext (T, f) = Builtins.map (apfst (insert_ttab SMT_Util.basicC T (Ext f))) + +fun lookup_builtin_typ ctxt = + lookup_ttab ctxt (fst (Builtins.get (Context.Proof ctxt))) + +fun dest_builtin_typ ctxt T = + (case lookup_builtin_typ ctxt T of + SOME (_, Int (f, _)) => f T + | _ => NONE) + +fun is_builtin_typ_ext ctxt T = + (case lookup_builtin_typ ctxt T of + SOME (_, Int (f, _)) => is_some (f T) + | SOME (_, Ext f) => f T + | NONE => false) + + +(* built-in numbers *) + +fun dest_builtin_num ctxt t = + (case try HOLogic.dest_number t of + NONE => NONE + | SOME (T, i) => + if i < 0 then NONE else + (case lookup_builtin_typ ctxt T of + SOME (_, Int (_, g)) => g T i |> Option.map (rpair T) + | _ => NONE)) + +val is_builtin_num = is_some oo dest_builtin_num + +fun is_builtin_num_ext ctxt t = + (case try HOLogic.dest_number t of + NONE => false + | SOME (T, _) => is_builtin_typ_ext ctxt T) + + +(* built-in functions *) + +fun add_builtin_fun cs ((n, T), f) = + Builtins.map (apsnd (insert_btab cs n T (Int f))) + +fun add_builtin_fun' cs (t, n) = + let + val c as (m, T) = Term.dest_Const t + fun app U ts = Term.list_comb (Const (m, U), ts) + fun bfun _ U ts = SOME (n, length (Term.binder_types T), ts, app U) + in add_builtin_fun cs (c, bfun) end + +fun add_builtin_fun_ext ((n, T), f) = + Builtins.map (apsnd (insert_btab SMT_Util.basicC n T (Ext f))) + +fun add_builtin_fun_ext' c = add_builtin_fun_ext (c, fn _ => fn _ => I) + +fun add_builtin_fun_ext'' n context = + let val thy = Context.theory_of context + in add_builtin_fun_ext' (n, Sign.the_const_type 
thy n) context end + +fun lookup_builtin_fun ctxt = + lookup_btab ctxt (snd (Builtins.get (Context.Proof ctxt))) + +fun dest_builtin_fun ctxt (c as (_, T)) ts = + (case lookup_builtin_fun ctxt c of + SOME (_, Int f) => f ctxt T ts + | _ => NONE) + +fun dest_builtin_eq ctxt t u = + let + val aT = TFree (Name.aT, @{sort type}) + val c = (@{const_name HOL.eq}, aT --> aT --> @{typ bool}) + fun mk ts = Term.list_comb (HOLogic.eq_const (Term.fastype_of (hd ts)), ts) + in + dest_builtin_fun ctxt c [] + |> Option.map (fn (n, i, _, _) => (n, i, [t, u], mk)) + end + +fun special_builtin_fun pred ctxt (c as (_, T)) ts = + if pred (Term.body_type T, Term.binder_types T) then + dest_builtin_fun ctxt c ts + else NONE + +fun dest_builtin_pred ctxt = special_builtin_fun (equal @{typ bool} o fst) ctxt + +fun dest_builtin_conn ctxt = + special_builtin_fun (forall (equal @{typ bool}) o (op ::)) ctxt + +fun dest_builtin ctxt c ts = + let val t = Term.list_comb (Const c, ts) + in + (case dest_builtin_num ctxt t of + SOME (n, _) => SOME (n, 0, [], K t) + | NONE => dest_builtin_fun ctxt c ts) + end + +fun dest_builtin_fun_ext ctxt (c as (_, T)) ts = + (case lookup_builtin_fun ctxt c of + SOME (_, Int f) => f ctxt T ts |> Option.map (fn (_, _, us, _) => us) + | SOME (_, Ext f) => SOME (f ctxt T ts) + | NONE => NONE) + +fun dest_builtin_ext ctxt c ts = + if is_builtin_num_ext ctxt (Term.list_comb (Const c, ts)) then SOME [] + else dest_builtin_fun_ext ctxt c ts + +fun is_builtin_fun ctxt c ts = is_some (dest_builtin_fun ctxt c ts) + +fun is_builtin_fun_ext ctxt c ts = is_some (dest_builtin_fun_ext ctxt c ts) + +end; diff --git a/src/test-gen/SMT/smt_config.ML b/src/test-gen/SMT/smt_config.ML new file mode 100644 index 0000000..8b8d029 --- /dev/null +++ b/src/test-gen/SMT/smt_config.ML @@ -0,0 +1,265 @@ +(* Title: HOL/Tools/SMT/smt_config.ML + Author: Sascha Boehme, TU Muenchen + +Configuration options and diagnostic tools for SMT. 
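The attributes, configuration options and the smt_status command declared in this file are driven from Isar theory text; a minimal usage sketch (assuming the usual HOL-SMT setup and an installed solver such as Z3) is:

  declare [[ smt_solver = z3, smt_timeout = 20, smt_trace = true ]]
  smt_status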
+*) + +signature SMT_CONFIG = +sig + (*solver*) + type solver_info = { + name: string, + class: Proof.context -> SMT_Util.class, + avail: unit -> bool, + options: Proof.context -> string list } + val add_solver: solver_info -> Context.generic -> Context.generic + val set_solver_options: string * string -> Context.generic -> Context.generic + val is_available: Proof.context -> string -> bool + val available_solvers_of: Proof.context -> string list + val select_solver: string -> Context.generic -> Context.generic + val solver_of: Proof.context -> string + val solver_class_of: Proof.context -> SMT_Util.class + val solver_options_of: Proof.context -> string list + + (*options*) + val oracle: bool Config.T + val timeout: real Config.T + val reconstruction_step_timeout: real Config.T + val random_seed: int Config.T + val read_only_certificates: bool Config.T + val verbose: bool Config.T + val trace: bool Config.T + val statistics: bool Config.T + val monomorph_limit: int Config.T + val monomorph_instances: int Config.T + val infer_triggers: bool Config.T + val debug_files: string Config.T + val sat_solver: string Config.T + + (*tools*) + val with_time_limit: Proof.context -> real Config.T -> ('a -> 'b) -> 'a -> 'b + val with_timeout: Proof.context -> ('a -> 'b) -> 'a -> 'b + + (*diagnostics*) + val trace_msg: Proof.context -> ('a -> string) -> 'a -> unit + val verbose_msg: Proof.context -> ('a -> string) -> 'a -> unit + val statistics_msg: Proof.context -> ('a -> string) -> 'a -> unit + + (*certificates*) + val select_certificates: string -> Context.generic -> Context.generic + val certificates_of: Proof.context -> Cache_IO.cache option + + (*setup*) + val print_setup: Proof.context -> unit +end; + +structure SMT_Config: SMT_CONFIG = +struct + +(* solver *) + +type solver_info = { + name: string, + class: Proof.context -> SMT_Util.class, + avail: unit -> bool, + options: Proof.context -> string list} + +type data = { + solvers: (solver_info * string list) Symtab.table, + solver: string option, + certs: Cache_IO.cache option} + +fun mk_data solvers solver certs: data = {solvers=solvers, solver=solver, certs=certs} + +val empty_data = mk_data Symtab.empty NONE NONE + +fun solvers_of ({solvers, ...}: data) = solvers +fun solver_of ({solver, ...}: data) = solver +fun certs_of ({certs, ...}: data) = certs + +fun map_solvers f ({solvers, solver, certs}: data) = mk_data (f solvers) solver certs +fun map_solver f ({solvers, solver, certs}: data) = mk_data solvers (f solver) certs +fun put_certs c ({solvers, solver, ...}: data) = mk_data solvers solver c + +fun merge_data ({solvers=ss1,solver=s1,certs=c1}: data, {solvers=ss2,solver=s2,certs=c2}: data) = + mk_data (Symtab.merge (K true) (ss1, ss2)) (merge_options (s1, s2)) (merge_options (c1, c2)) + +structure Data = Generic_Data +( + type T = data + val empty = empty_data + val extend = I + val merge = merge_data +) + +fun set_solver_options (name, options) = + let val opts = String.tokens (Symbol.is_ascii_blank o str) options + in Data.map (map_solvers (Symtab.map_entry name (apsnd (K opts)))) end + +fun add_solver (info as {name, ...} : solver_info) context = + if Symtab.defined (solvers_of (Data.get context)) name then + error ("Solver already registered: " ^ quote name) + else + context + |> Data.map (map_solvers (Symtab.update (name, (info, [])))) + |> Context.map_theory (Attrib.setup (Binding.name (name ^ "_options")) + (Scan.lift (@{keyword "="} |-- Args.name) >> + (Thm.declaration_attribute o K o set_solver_options o pair name)) + ("additional 
command line options for SMT solver " ^ quote name)) + +fun all_solvers_of ctxt = Symtab.keys (solvers_of (Data.get (Context.Proof ctxt))) + +fun solver_name_of ctxt = solver_of (Data.get (Context.Proof ctxt)) + +fun is_available ctxt name = + (case Symtab.lookup (solvers_of (Data.get (Context.Proof ctxt))) name of + SOME ({avail, ...}, _) => avail () + | NONE => false) + +fun available_solvers_of ctxt = + filter (is_available ctxt) (all_solvers_of ctxt) + +fun warn_solver (Context.Proof ctxt) name = + if Context_Position.is_visible ctxt then + warning ("The SMT solver " ^ quote name ^ " is not installed") + else () + | warn_solver _ _ = () + +fun select_solver name context = + let + val ctxt = Context.proof_of context + val upd = Data.map (map_solver (K (SOME name))) + in + if not (member (op =) (all_solvers_of ctxt) name) then + error ("Trying to select unknown solver: " ^ quote name) + else if not (is_available ctxt name) then + (warn_solver context name; upd context) + else upd context + end + +fun no_solver_err () = error "No SMT solver selected" + +fun solver_of ctxt = + (case solver_name_of ctxt of + SOME name => name + | NONE => no_solver_err ()) + +fun solver_info_of default select ctxt = + (case solver_name_of ctxt of + NONE => default () + | SOME name => select (Symtab.lookup (solvers_of (Data.get (Context.Proof ctxt))) name)) + +fun solver_class_of ctxt = + let fun class_of ({class, ...}: solver_info, _) = class ctxt + in solver_info_of no_solver_err (class_of o the) ctxt end + +fun solver_options_of ctxt = + let + fun all_options NONE = [] + | all_options (SOME ({options, ...} : solver_info, opts)) = + opts @ options ctxt + in solver_info_of (K []) all_options ctxt end + +val setup_solver = + Attrib.setup @{binding smt_solver} + (Scan.lift (@{keyword "="} |-- Args.name) >> + (Thm.declaration_attribute o K o select_solver)) + "SMT solver configuration" + + +(* options *) + +val oracle = Attrib.setup_config_bool @{binding smt_oracle} (K true) +val timeout = Attrib.setup_config_real @{binding smt_timeout} (K 30.0) +val reconstruction_step_timeout = Attrib.setup_config_real @{binding smt_reconstruction_step_timeout} (K 10.0) +val random_seed = Attrib.setup_config_int @{binding smt_random_seed} (K 1) +val read_only_certificates = Attrib.setup_config_bool @{binding smt_read_only_certificates} (K false) +val verbose = Attrib.setup_config_bool @{binding smt_verbose} (K true) +val trace = Attrib.setup_config_bool @{binding smt_trace} (K false) +val statistics = Attrib.setup_config_bool @{binding smt_statistics} (K false) +val monomorph_limit = Attrib.setup_config_int @{binding smt_monomorph_limit} (K 10) +val monomorph_instances = Attrib.setup_config_int @{binding smt_monomorph_instances} (K 500) +val infer_triggers = Attrib.setup_config_bool @{binding smt_infer_triggers} (K false) +val debug_files = Attrib.setup_config_string @{binding smt_debug_files} (K "") +val sat_solver = Attrib.setup_config_string @{binding smt_sat_solver} (K "cdclite") + + +(* diagnostics *) + +fun cond_trace flag f x = if flag then tracing ("SMT: " ^ f x) else () + +fun verbose_msg ctxt = cond_trace (Config.get ctxt verbose) +fun trace_msg ctxt = cond_trace (Config.get ctxt trace) +fun statistics_msg ctxt = cond_trace (Config.get ctxt statistics) + + +(* tools *) + +fun with_time_limit ctxt timeout_config f x = + Timeout.apply (seconds (Config.get ctxt timeout_config)) f x + handle Timeout.TIMEOUT _ => raise SMT_Failure.SMT SMT_Failure.Time_Out + +fun with_timeout ctxt = with_time_limit ctxt timeout + + +(* 
certificates *) + +val certificates_of = certs_of o Data.get o Context.Proof + +val get_certificates_path = Option.map (Cache_IO.cache_path_of) o certificates_of + +fun select_certificates name context = context |> Data.map (put_certs ( + if name = "" then NONE + else + Path.explode name + |> Path.append (Resources.master_directory (Context.theory_of context)) + |> SOME o Cache_IO.unsynchronized_init)) + +val setup_certificates = + Attrib.setup @{binding smt_certificates} + (Scan.lift (@{keyword "="} |-- Args.name) >> + (Thm.declaration_attribute o K o select_certificates)) + "SMT certificates configuration" + + +(* setup *) + +val _ = Theory.setup ( + setup_solver #> + setup_certificates) + +fun print_setup ctxt = + let + fun string_of_bool b = if b then "true" else "false" + + val names = available_solvers_of ctxt + val ns = if null names then ["(none)"] else sort_strings names + val n = the_default "(none)" (solver_name_of ctxt) + val opts = solver_options_of ctxt + + val t = string_of_real (Config.get ctxt timeout) + + val certs_filename = + (case get_certificates_path ctxt of + SOME path => Path.print path + | NONE => "(disabled)") + in + Pretty.writeln (Pretty.big_list "SMT setup:" [ + Pretty.str ("Current SMT solver: " ^ n), + Pretty.str ("Current SMT solver options: " ^ space_implode " " opts), + Pretty.str_list "Available SMT solvers: " "" ns, + Pretty.str ("Current timeout: " ^ t ^ " seconds"), + Pretty.str ("With proofs: " ^ + string_of_bool (not (Config.get ctxt oracle))), + Pretty.str ("Certificates cache: " ^ certs_filename), + Pretty.str ("Fixed certificates: " ^ + string_of_bool (Config.get ctxt read_only_certificates))]) + end + +val _ = + Outer_Syntax.command @{command_keyword smt_status} + "show the available SMT solvers, the currently selected SMT solver, \ + \and the values of SMT configuration options" + (Scan.succeed (Toplevel.keep (print_setup o Toplevel.context_of))) + +end; diff --git a/src/test-gen/SMT/smt_datatypes.ML b/src/test-gen/SMT/smt_datatypes.ML new file mode 100644 index 0000000..2467cab --- /dev/null +++ b/src/test-gen/SMT/smt_datatypes.ML @@ -0,0 +1,152 @@ +(* Title: HOL/Tools/SMT/smt_datatypes.ML + Author: Sascha Boehme, TU Muenchen + +Collector functions for common type declarations and their representation +as (co)algebraic datatypes. 
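A rough sketch of how the collector below is meant to be invoked; the concrete type and the empty accumulator are placeholders chosen only for illustration:

  ML \<open>
    val (declss, ctxt') =
      SMT_Datatypes.add_decls [BNF_Util.Least_FP] @{typ "nat list"} ([], @{context})
  \<close>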
+*) + +signature SMT_DATATYPES = +sig + val add_decls: BNF_Util.fp_kind list -> typ -> + (BNF_Util.fp_kind * (typ * (term * term list) list)) list list * Proof.context -> + (BNF_Util.fp_kind * (typ * (term * term list) list)) list list * Proof.context +end; + +structure SMT_Datatypes: SMT_DATATYPES = +struct + +fun mk_selectors T Ts sels = + if null sels then + Variable.variant_fixes (replicate (length Ts) "select") + #>> map2 (fn U => fn n => Free (n, T --> U)) Ts + else + pair sels + + +(* free constructor type declarations *) + +fun get_ctr_sugar_decl ({ctrs = ctrs0, selss = selss0, ...} : Ctr_Sugar.ctr_sugar) T Ts ctxt = + let + val selss = map (map (Ctr_Sugar.mk_disc_or_sel Ts)) selss0 + val ctrs = map (Ctr_Sugar.mk_ctr Ts) ctrs0 + + fun mk_constr ctr sels = + mk_selectors T (binder_types (fastype_of ctr)) sels #>> pair ctr + + val selss' = + (if has_duplicates (op aconv) (flat selss) orelse + exists (exists (can (dest_funT o range_type o fastype_of))) selss then + [] + else + selss) + |> Ctr_Sugar_Util.pad_list [] (length ctrs) + in + @{fold_map 2} mk_constr ctrs selss' ctxt + |>> (pair T #> single) + end + + +(* typedef declarations *) + +fun get_typedef_decl (({Abs_name, Rep_name, abs_type, rep_type, ...}, {Abs_inverse, ...}) + : Typedef.info) T Ts = + if can (curry (op RS) @{thm UNIV_I}) Abs_inverse then + let + val env = snd (Term.dest_Type abs_type) ~~ Ts + val instT = Term.map_atyps (perhaps (AList.lookup (op =) env)) + + val constr = Const (Abs_name, instT (rep_type --> abs_type)) + val select = Const (Rep_name, instT (abs_type --> rep_type)) + in [(T, [(constr, [select])])] end + else + [] + + +(* collection of declarations *) + +val extN = "_ext" (* cf. "HOL/Tools/typedef.ML" *) + +fun get_decls fps T n Ts ctxt = + let + fun maybe_typedef () = + (case Typedef.get_info ctxt n of + [] => ([], ctxt) + | info :: _ => (map (pair (hd fps)) (get_typedef_decl info T Ts), ctxt)) + in + (case BNF_FP_Def_Sugar.fp_sugar_of ctxt n of + SOME {fp, fp_res = {Ts = fp_Ts, ...}, fp_ctr_sugar = {ctr_sugar, ...}, ...} => + if member (op =) fps fp then + let + val ns = map (fst o dest_Type) fp_Ts + val mutual_fp_sugars = map_filter (BNF_FP_Def_Sugar.fp_sugar_of ctxt) ns + val Xs = map #X mutual_fp_sugars + val ctrXs_Tsss = map (#ctrXs_Tss o #fp_ctr_sugar) mutual_fp_sugars + + (* Datatypes nested through datatypes and codatatypes nested through codatatypes are + allowed. So are mutually (co)recursive (co)datatypes. *) + fun is_same_fp s = + (case BNF_FP_Def_Sugar.fp_sugar_of ctxt s of + SOME {fp = fp', ...} => fp' = fp + | NONE => false) + fun is_homogenously_nested_co_recursive (Type (s, Ts)) = + forall (if is_same_fp s then is_homogenously_nested_co_recursive + else not o BNF_FP_Rec_Sugar_Util.exists_subtype_in Xs) Ts + | is_homogenously_nested_co_recursive _ = true + + val Type (_, As) :: _ = fp_Ts + val substAs = Term.typ_subst_atomic (As ~~ Ts); + in + (* TODO/FIXME: The "bool" check is there to work around a CVC4 bug + (http://church.cims.nyu.edu/bugzilla3/show_bug.cgi?id=597). It should be removed once + the bug is fixed. 
*) + if forall (forall (forall (is_homogenously_nested_co_recursive))) ctrXs_Tsss andalso + forall (forall (forall (curry (op <>) @{typ bool}))) + (map (map (map substAs)) ctrXs_Tsss) then + get_ctr_sugar_decl ctr_sugar T Ts ctxt |>> map (pair fp) + else + maybe_typedef () + end + else + ([], ctxt) + | NONE => + if String.isSuffix extN n then + (* for records (FIXME: hack) *) + (case Ctr_Sugar.ctr_sugar_of ctxt n of + SOME ctr_sugar => + get_ctr_sugar_decl ctr_sugar T Ts ctxt |>> map (pair (hd fps)) + | NONE => maybe_typedef ()) + else + maybe_typedef ()) + end + +fun add_decls fps T (declss, ctxt) = + let + fun declared T = exists (exists (equal T o fst o snd)) + fun declared' T = exists (exists (equal T o fst o snd) o snd) + fun depends ds = exists (member (op =) (map (fst o snd) ds)) + + fun add (TFree _) = I + | add (TVar _) = I + | add (T as Type (@{type_name fun}, _)) = + fold add (Term.body_type T :: Term.binder_types T) + | add @{typ bool} = I + | add (T as Type (n, Ts)) = (fn (dss, ctxt1) => + if declared T declss orelse declared' T dss then + (dss, ctxt1) + else if SMT_Builtin.is_builtin_typ_ext ctxt1 T then + (dss, ctxt1) + else + (case get_decls fps T n Ts ctxt1 of + ([], _) => (dss, ctxt1) + | (ds, ctxt2) => + let + val constrTs = maps (map (snd o Term.dest_Const o fst) o snd o snd) ds + val Us = fold (union (op =) o Term.binder_types) constrTs [] + + fun ins [] = [(Us, ds)] + | ins ((Uds as (Us', _)) :: Udss) = + if depends ds Us' then (Us, ds) :: Uds :: Udss else Uds :: ins Udss + in fold add Us (ins dss, ctxt2) end)) + in add T ([], ctxt) |>> append declss o map snd end + +end; diff --git a/src/test-gen/SMT/smt_failure.ML b/src/test-gen/SMT/smt_failure.ML new file mode 100644 index 0000000..ba892ae --- /dev/null +++ b/src/test-gen/SMT/smt_failure.ML @@ -0,0 +1,40 @@ +(* Title: HOL/Tools/SMT/smt_failure.ML + Author: Sascha Boehme, TU Muenchen + +Failures and exception of SMT. +*) + +signature SMT_FAILURE = +sig + datatype failure = + Counterexample of bool | + Time_Out | + Out_Of_Memory | + Abnormal_Termination of int | + Other_Failure of string + val string_of_failure: failure -> string + exception SMT of failure +end; + +structure SMT_Failure: SMT_FAILURE = +struct + +datatype failure = + Counterexample of bool | + Time_Out | + Out_Of_Memory | + Abnormal_Termination of int | + Other_Failure of string + +fun string_of_failure (Counterexample genuine) = + if genuine then "Counterexample found (possibly spurious)" + else "Potential counterexample found" + | string_of_failure Time_Out = "Timed out" + | string_of_failure Out_Of_Memory = "Ran out of memory" + | string_of_failure (Abnormal_Termination err) = + "Solver terminated abnormally with error code " ^ string_of_int err + | string_of_failure (Other_Failure msg) = msg + +exception SMT of failure + +end; diff --git a/src/test-gen/SMT/smt_normalize.ML b/src/test-gen/SMT/smt_normalize.ML new file mode 100644 index 0000000..98e820b --- /dev/null +++ b/src/test-gen/SMT/smt_normalize.ML @@ -0,0 +1,444 @@ +(* Title: HOL/Tools/SMT/smt_normalize.ML + Author: Sascha Boehme, TU Muenchen + +Normalization steps on theorems required by SMT solvers. 
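The entry point is the normalize function from the signature below; a small sketch of calling it directly, in the same way smt_solver.ML does before translation (the chosen facts are arbitrary examples):

  ML \<open>
    (* yields indexed copies of the facts, atomized, monomorphised and rewritten *)
    val ithms = SMT_Normalize.normalize @{context} @{thms conj_commute disj_commute}
  \<close>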
+*) + +signature SMT_NORMALIZE = +sig + val drop_fact_warning: Proof.context -> thm -> unit + val atomize_conv: Proof.context -> conv + + val special_quant_table: (string * thm) list + val case_bool_entry: string * thm + val abs_min_max_table: (string * thm) list + + type extra_norm = Proof.context -> thm list * thm list -> thm list * thm list + val add_extra_norm: SMT_Util.class * extra_norm -> Context.generic -> Context.generic + val normalize: Proof.context -> thm list -> (int * thm) list +end; + +structure SMT_Normalize: SMT_NORMALIZE = +struct + +fun drop_fact_warning ctxt = + SMT_Config.verbose_msg ctxt (prefix "Warning: dropping assumption: " o + Thm.string_of_thm ctxt) + + +(* general theorem normalizations *) + +(** instantiate elimination rules **) + +local + val (cpfalse, cfalse) = `SMT_Util.mk_cprop (Thm.cterm_of @{context} @{const False}) + + fun inst f ct thm = + let val cv = f (Drule.strip_imp_concl (Thm.cprop_of thm)) + in Thm.instantiate ([], [(dest_Var (Thm.term_of cv), ct)]) thm end +in + +fun instantiate_elim thm = + (case Thm.concl_of thm of + @{const Trueprop} $ Var (_, @{typ bool}) => inst Thm.dest_arg cfalse thm + | Var _ => inst I cpfalse thm + | _ => thm) + +end + + +(** normalize definitions **) + +fun norm_def thm = + (case Thm.prop_of thm of + @{const Trueprop} $ (Const (@{const_name HOL.eq}, _) $ _ $ Abs _) => + norm_def (thm RS @{thm fun_cong}) + | Const (@{const_name Pure.eq}, _) $ _ $ Abs _ => norm_def (thm RS @{thm meta_eq_to_obj_eq}) + | _ => thm) + + +(** atomization **) + +fun atomize_conv ctxt ct = + (case Thm.term_of ct of + @{const Pure.imp} $ _ $ _ => + Conv.binop_conv (atomize_conv ctxt) then_conv Conv.rewr_conv @{thm atomize_imp} + | Const (@{const_name Pure.eq}, _) $ _ $ _ => + Conv.binop_conv (atomize_conv ctxt) then_conv Conv.rewr_conv @{thm atomize_eq} + | Const (@{const_name Pure.all}, _) $ Abs _ => + Conv.binder_conv (atomize_conv o snd) ctxt then_conv Conv.rewr_conv @{thm atomize_all} + | _ => Conv.all_conv) ct + handle CTERM _ => Conv.all_conv ct + +val setup_atomize = + fold SMT_Builtin.add_builtin_fun_ext'' [@{const_name Pure.imp}, @{const_name Pure.eq}, + @{const_name Pure.all}, @{const_name Trueprop}] + + +(** unfold special quantifiers **) + +val special_quant_table = [ + (@{const_name Ex1}, @{thm Ex1_def_raw}), + (@{const_name Ball}, @{thm Ball_def_raw}), + (@{const_name Bex}, @{thm Bex_def_raw})] + +local + fun special_quant (Const (n, _)) = AList.lookup (op =) special_quant_table n + | special_quant _ = NONE + + fun special_quant_conv _ ct = + (case special_quant (Thm.term_of ct) of + SOME thm => Conv.rewr_conv thm + | NONE => Conv.all_conv) ct +in + +fun unfold_special_quants_conv ctxt = + SMT_Util.if_exists_conv (is_some o special_quant) (Conv.top_conv special_quant_conv ctxt) + +val setup_unfolded_quants = fold (SMT_Builtin.add_builtin_fun_ext'' o fst) special_quant_table + +end + + +(** trigger inference **) + +local + (*** check trigger syntax ***) + + fun dest_trigger (Const (@{const_name pat}, _) $ _) = SOME true + | dest_trigger (Const (@{const_name nopat}, _) $ _) = SOME false + | dest_trigger _ = NONE + + fun eq_list [] = false + | eq_list (b :: bs) = forall (equal b) bs + + fun proper_trigger t = + t + |> these o try SMT_Util.dest_symb_list + |> map (map_filter dest_trigger o these o try SMT_Util.dest_symb_list) + |> (fn [] => false | bss => forall eq_list bss) + + fun proper_quant inside f t = + (case t of + Const (@{const_name All}, _) $ Abs (_, _, u) => proper_quant true f u + | Const (@{const_name Ex}, _) $ Abs (_, 
_, u) => proper_quant true f u + | @{const trigger} $ p $ u => + (if inside then f p else false) andalso proper_quant false f u + | Abs (_, _, u) => proper_quant false f u + | u1 $ u2 => proper_quant false f u1 andalso proper_quant false f u2 + | _ => true) + + fun check_trigger_error ctxt t = + error ("SMT triggers must only occur under quantifier and multipatterns " ^ + "must have the same kind: " ^ Syntax.string_of_term ctxt t) + + fun check_trigger_conv ctxt ct = + if proper_quant false proper_trigger (SMT_Util.term_of ct) then Conv.all_conv ct + else check_trigger_error ctxt (Thm.term_of ct) + + + (*** infer simple triggers ***) + + fun dest_cond_eq ct = + (case Thm.term_of ct of + Const (@{const_name HOL.eq}, _) $ _ $ _ => Thm.dest_binop ct + | @{const HOL.implies} $ _ $ _ => dest_cond_eq (Thm.dest_arg ct) + | _ => raise CTERM ("no equation", [ct])) + + fun get_constrs thy (Type (n, _)) = these (BNF_LFP_Compat.get_constrs thy n) + | get_constrs _ _ = [] + + fun is_constr thy (n, T) = + let fun match (m, U) = m = n andalso Sign.typ_instance thy (T, U) + in can (the o find_first match o get_constrs thy o Term.body_type) T end + + fun is_constr_pat thy t = + (case Term.strip_comb t of + (Free _, []) => true + | (Const c, ts) => is_constr thy c andalso forall (is_constr_pat thy) ts + | _ => false) + + fun is_simp_lhs ctxt t = + (case Term.strip_comb t of + (Const c, ts as _ :: _) => + not (SMT_Builtin.is_builtin_fun_ext ctxt c ts) andalso + forall (is_constr_pat (Proof_Context.theory_of ctxt)) ts + | _ => false) + + fun has_all_vars vs t = + subset (op aconv) (vs, map Free (Term.add_frees t [])) + + fun minimal_pats vs ct = + if has_all_vars vs (Thm.term_of ct) then + (case Thm.term_of ct of + _ $ _ => + (case apply2 (minimal_pats vs) (Thm.dest_comb ct) of + ([], []) => [[ct]] + | (ctss, ctss') => union (eq_set (op aconvc)) ctss ctss') + | _ => []) + else [] + + fun proper_mpat _ _ _ [] = false + | proper_mpat thy gen u cts = + let + val tps = (op ~~) (`gen (map Thm.term_of cts)) + fun some_match u = tps |> exists (fn (t', t) => + Pattern.matches thy (t', u) andalso not (t aconv u)) + in not (Term.exists_subterm some_match u) end + + val pat = SMT_Util.mk_const_pat @{theory} @{const_name pat} SMT_Util.destT1 + fun mk_pat ct = Thm.apply (SMT_Util.instT' ct pat) ct + + fun mk_clist T = + apply2 (Thm.cterm_of @{context}) (SMT_Util.symb_cons_const T, SMT_Util.symb_nil_const T) + fun mk_list (ccons, cnil) f cts = fold_rev (Thm.mk_binop ccons o f) cts cnil + val mk_pat_list = mk_list (mk_clist @{typ pattern}) + val mk_mpat_list = mk_list (mk_clist @{typ "pattern symb_list"}) + fun mk_trigger ctss = mk_mpat_list (mk_pat_list mk_pat) ctss + + val trigger_eq = mk_meta_eq @{lemma "p = trigger t p" by (simp add: trigger_def)} + + fun insert_trigger_conv [] ct = Conv.all_conv ct + | insert_trigger_conv ctss ct = + let + val (ctr, cp) = Thm.dest_binop (Thm.rhs_of trigger_eq) ||> rpair ct + val inst = map (apfst (dest_Var o Thm.term_of)) [cp, (ctr, mk_trigger ctss)] + in Thm.instantiate ([], inst) trigger_eq end + + fun infer_trigger_eq_conv outer_ctxt (ctxt, cvs) ct = + let + val (lhs, rhs) = dest_cond_eq ct + + val vs = map Thm.term_of cvs + val thy = Proof_Context.theory_of ctxt + + fun get_mpats ct = + if is_simp_lhs ctxt (Thm.term_of ct) then minimal_pats vs ct + else [] + val gen = Variable.export_terms ctxt outer_ctxt + val filter_mpats = filter (proper_mpat thy gen (Thm.term_of rhs)) + + in insert_trigger_conv (filter_mpats (get_mpats lhs)) ct end + + fun has_trigger (@{const trigger} $ _ $ _) 
= true + | has_trigger _ = false + + fun try_trigger_conv cv ct = + if SMT_Util.under_quant has_trigger (SMT_Util.term_of ct) then Conv.all_conv ct + else Conv.try_conv cv ct + + fun infer_trigger_conv ctxt = + if Config.get ctxt SMT_Config.infer_triggers then + try_trigger_conv (SMT_Util.under_quant_conv (infer_trigger_eq_conv ctxt) ctxt) + else Conv.all_conv +in + +fun trigger_conv ctxt = + SMT_Util.prop_conv (check_trigger_conv ctxt then_conv infer_trigger_conv ctxt) + +val setup_trigger = + fold SMT_Builtin.add_builtin_fun_ext'' + [@{const_name pat}, @{const_name nopat}, @{const_name trigger}] + +end + + +(** combined general normalizations **) + +fun gen_normalize1_conv ctxt = + atomize_conv ctxt then_conv + unfold_special_quants_conv ctxt then_conv + Thm.beta_conversion true then_conv + trigger_conv ctxt + +fun gen_normalize1 ctxt = + instantiate_elim #> + norm_def #> + Conv.fconv_rule (Thm.beta_conversion true then_conv Thm.eta_conversion) #> + Drule.forall_intr_vars #> + Conv.fconv_rule (gen_normalize1_conv ctxt) #> + (* Z3 4.3.1 silently normalizes "P --> Q --> R" to "P & Q --> R" *) + Raw_Simplifier.rewrite_rule ctxt @{thms HOL.imp_conjL[symmetric, THEN eq_reflection]} + +fun gen_norm1_safe ctxt (i, thm) = + (case try (gen_normalize1 ctxt) thm of + SOME thm' => SOME (i, thm') + | NONE => (drop_fact_warning ctxt thm; NONE)) + +fun gen_normalize ctxt iwthms = map_filter (gen_norm1_safe ctxt) iwthms + + +(* unfolding of definitions and theory-specific rewritings *) + +fun expand_head_conv cv ct = + (case Thm.term_of ct of + _ $ _ => + Conv.fun_conv (expand_head_conv cv) then_conv + Conv.try_conv (Thm.beta_conversion false) + | _ => cv) ct + + +(** rewrite bool case expressions as if expressions **) + +val case_bool_entry = (@{const_name "bool.case_bool"}, @{thm case_bool_if}) + +local + fun is_case_bool (Const (@{const_name "bool.case_bool"}, _)) = true + | is_case_bool _ = false + + fun unfold_conv _ = + SMT_Util.if_true_conv (is_case_bool o Term.head_of) + (expand_head_conv (Conv.rewr_conv @{thm case_bool_if})) +in + +fun rewrite_case_bool_conv ctxt = + SMT_Util.if_exists_conv is_case_bool (Conv.top_conv unfold_conv ctxt) + +val setup_case_bool = SMT_Builtin.add_builtin_fun_ext'' @{const_name "bool.case_bool"} + +end + + +(** unfold abs, min and max **) + +val abs_min_max_table = [ + (@{const_name min}, @{thm min_def_raw}), + (@{const_name max}, @{thm max_def_raw}), + (@{const_name abs}, @{thm abs_if_raw})] + +local + fun abs_min_max ctxt (Const (n, Type (@{type_name fun}, [T, _]))) = + (case AList.lookup (op =) abs_min_max_table n of + NONE => NONE + | SOME thm => if SMT_Builtin.is_builtin_typ_ext ctxt T then SOME thm else NONE) + | abs_min_max _ _ = NONE + + fun unfold_amm_conv ctxt ct = + (case abs_min_max ctxt (Term.head_of (Thm.term_of ct)) of + SOME thm => expand_head_conv (Conv.rewr_conv thm) + | NONE => Conv.all_conv) ct +in + +fun unfold_abs_min_max_conv ctxt = + SMT_Util.if_exists_conv (is_some o abs_min_max ctxt) (Conv.top_conv unfold_amm_conv ctxt) + +val setup_abs_min_max = fold (SMT_Builtin.add_builtin_fun_ext'' o fst) abs_min_max_table + +end + + +(** normalize numerals **) + +local + (* + rewrite Numeral1 into 1 + rewrite - 0 into 0 + *) + + fun is_irregular_number (Const (@{const_name numeral}, _) $ Const (@{const_name num.One}, _)) = + true + | is_irregular_number (Const (@{const_name uminus}, _) $ Const (@{const_name Groups.zero}, _)) = + true + | is_irregular_number _ = false + + fun is_strange_number ctxt t = is_irregular_number t andalso 
SMT_Builtin.is_builtin_num ctxt t + + val proper_num_ss = + simpset_of (put_simpset HOL_ss @{context} addsimps @{thms Num.numeral_One minus_zero}) + + fun norm_num_conv ctxt = + SMT_Util.if_conv (is_strange_number ctxt) (Simplifier.rewrite (put_simpset proper_num_ss ctxt)) + Conv.no_conv +in + +fun normalize_numerals_conv ctxt = + SMT_Util.if_exists_conv (is_strange_number ctxt) (Conv.top_sweep_conv norm_num_conv ctxt) + +end + + +(** combined unfoldings and rewritings **) + +fun unfold_conv ctxt = + rewrite_case_bool_conv ctxt then_conv + unfold_abs_min_max_conv ctxt then_conv + Thm.beta_conversion true + +fun unfold_polymorph ctxt = map (apsnd (Conv.fconv_rule (unfold_conv ctxt))) +fun unfold_monomorph ctxt = map (apsnd (Conv.fconv_rule (normalize_numerals_conv ctxt))) + + +(* overall normalization *) + +fun burrow_ids f ithms = + let + val (is, thms) = split_list ithms + val (thms', extra_thms) = f thms + in (is ~~ thms') @ map (pair ~1) extra_thms end + +type extra_norm = Proof.context -> thm list * thm list -> thm list * thm list + +structure Extra_Norms = Generic_Data +( + type T = extra_norm SMT_Util.dict + val empty = [] + val extend = I + fun merge data = SMT_Util.dict_merge fst data +) + +fun add_extra_norm (cs, norm) = Extra_Norms.map (SMT_Util.dict_update (cs, norm)) + +fun apply_extra_norms ctxt ithms = + let + val cs = SMT_Config.solver_class_of ctxt + val es = SMT_Util.dict_lookup (Extra_Norms.get (Context.Proof ctxt)) cs + in burrow_ids (fold (fn e => e ctxt) es o rpair []) ithms end + +local + val ignored = member (op =) [@{const_name All}, @{const_name Ex}, + @{const_name Let}, @{const_name If}, @{const_name HOL.eq}] + + val schematic_consts_of = + let + fun collect (@{const trigger} $ p $ t) = collect_trigger p #> collect t + | collect (t $ u) = collect t #> collect u + | collect (Abs (_, _, t)) = collect t + | collect (t as Const (n, _)) = + if not (ignored n) then Monomorph.add_schematic_consts_of t else I + | collect _ = I + and collect_trigger t = + let val dest = these o try SMT_Util.dest_symb_list + in fold (fold collect_pat o dest) (dest t) end + and collect_pat (Const (@{const_name pat}, _) $ t) = collect t + | collect_pat (Const (@{const_name nopat}, _) $ t) = collect t + | collect_pat _ = I + in (fn t => collect t Symtab.empty) end +in + +fun monomorph ctxt xthms = + let val (xs, thms) = split_list xthms + in + map (pair 1) thms + |> Monomorph.monomorph schematic_consts_of ctxt + |> maps (uncurry (map o pair)) o map2 pair xs o map (map snd) + end + +end + +fun normalize ctxt wthms = + wthms + |> map_index I + |> gen_normalize ctxt + |> unfold_polymorph ctxt + |> monomorph ctxt + |> unfold_monomorph ctxt + |> apply_extra_norms ctxt + +val _ = Theory.setup (Context.theory_map ( + setup_atomize #> + setup_unfolded_quants #> + setup_trigger #> + setup_case_bool #> + setup_abs_min_max)) + +end; diff --git a/src/test-gen/SMT/smt_real.ML b/src/test-gen/SMT/smt_real.ML new file mode 100644 index 0000000..8e08d4c --- /dev/null +++ b/src/test-gen/SMT/smt_real.ML @@ -0,0 +1,115 @@ +(* Title: HOL/Tools/SMT/smt_real.ML + Author: Sascha Boehme, TU Muenchen + +SMT setup for reals. 
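With this setup, real-valued goals are mapped onto the solvers' linear real arithmetic; a tiny sketch of the effect at the Isar level (assuming a configured solver such as Z3):

  lemma "(a::real) + b = b + a" by smt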
+*) + +structure SMT_Real: sig end = +struct + + +(* SMT-LIB logic *) + +fun smtlib_logic ts = + if exists (Term.exists_type (Term.exists_subtype (equal @{typ real}))) ts + then SOME "AUFLIRA" + else NONE + + +(* SMT-LIB and Z3 built-ins *) + +local + fun real_num _ i = SOME (string_of_int i ^ ".0") + + fun is_linear [t] = SMT_Util.is_number t + | is_linear [t, u] = SMT_Util.is_number t orelse SMT_Util.is_number u + | is_linear _ = false + + fun mk_times ts = Term.list_comb (@{const times (real)}, ts) + + fun times _ _ ts = if is_linear ts then SOME ("*", 2, ts, mk_times) else NONE +in + +val setup_builtins = + SMT_Builtin.add_builtin_typ SMTLIB_Interface.smtlibC + (@{typ real}, K (SOME "Real"), real_num) #> + fold (SMT_Builtin.add_builtin_fun' SMTLIB_Interface.smtlibC) [ + (@{const less (real)}, "<"), + (@{const less_eq (real)}, "<="), + (@{const uminus (real)}, "-"), + (@{const plus (real)}, "+"), + (@{const minus (real)}, "-") ] #> + SMT_Builtin.add_builtin_fun SMTLIB_Interface.smtlibC + (Term.dest_Const @{const times (real)}, times) #> + SMT_Builtin.add_builtin_fun' Z3_Interface.smtlib_z3C + (@{const times (real)}, "*") #> + SMT_Builtin.add_builtin_fun' Z3_Interface.smtlib_z3C + (@{const divide (real)}, "/") + +end + + +(* Z3 constructors *) + +local + fun z3_mk_builtin_typ (Z3_Interface.Sym ("Real", _)) = SOME @{typ real} + | z3_mk_builtin_typ (Z3_Interface.Sym ("real", _)) = SOME @{typ real} + (*FIXME: delete*) + | z3_mk_builtin_typ _ = NONE + + fun z3_mk_builtin_num _ i T = + if T = @{typ real} then SOME (Numeral.mk_cnumber @{ctyp real} i) + else NONE + + fun mk_nary _ cu [] = cu + | mk_nary ct _ cts = uncurry (fold_rev (Thm.mk_binop ct)) (split_last cts) + + val mk_uminus = Thm.apply (Thm.cterm_of @{context} @{const uminus (real)}) + val add = Thm.cterm_of @{context} @{const plus (real)} + val real0 = Numeral.mk_cnumber @{ctyp real} 0 + val mk_sub = Thm.mk_binop (Thm.cterm_of @{context} @{const minus (real)}) + val mk_mul = Thm.mk_binop (Thm.cterm_of @{context} @{const times (real)}) + val mk_div = Thm.mk_binop (Thm.cterm_of @{context} @{const divide (real)}) + val mk_lt = Thm.mk_binop (Thm.cterm_of @{context} @{const less (real)}) + val mk_le = Thm.mk_binop (Thm.cterm_of @{context} @{const less_eq (real)}) + + fun z3_mk_builtin_fun (Z3_Interface.Sym ("-", _)) [ct] = SOME (mk_uminus ct) + | z3_mk_builtin_fun (Z3_Interface.Sym ("+", _)) cts = SOME (mk_nary add real0 cts) + | z3_mk_builtin_fun (Z3_Interface.Sym ("-", _)) [ct, cu] = SOME (mk_sub ct cu) + | z3_mk_builtin_fun (Z3_Interface.Sym ("*", _)) [ct, cu] = SOME (mk_mul ct cu) + | z3_mk_builtin_fun (Z3_Interface.Sym ("/", _)) [ct, cu] = SOME (mk_div ct cu) + | z3_mk_builtin_fun (Z3_Interface.Sym ("<", _)) [ct, cu] = SOME (mk_lt ct cu) + | z3_mk_builtin_fun (Z3_Interface.Sym ("<=", _)) [ct, cu] = SOME (mk_le ct cu) + | z3_mk_builtin_fun (Z3_Interface.Sym (">", _)) [ct, cu] = SOME (mk_lt cu ct) + | z3_mk_builtin_fun (Z3_Interface.Sym (">=", _)) [ct, cu] = SOME (mk_le cu ct) + | z3_mk_builtin_fun _ _ = NONE +in + +val z3_mk_builtins = { + mk_builtin_typ = z3_mk_builtin_typ, + mk_builtin_num = z3_mk_builtin_num, + mk_builtin_fun = (fn _ => fn sym => fn cts => + (case try (Thm.typ_of_cterm o hd) cts of + SOME @{typ real} => z3_mk_builtin_fun sym cts + | _ => NONE)) } + +end + + +(* Z3 proof replay *) + +val real_linarith_proc = + Simplifier.make_simproc @{context} "fast_real_arith" + {lhss = [@{term "(m::real) < n"}, @{term "(m::real) \ n"}, @{term "(m::real) = n"}], + proc = K Lin_Arith.simproc} + + +(* setup *) + +val _ = 
Theory.setup (Context.theory_map ( + SMTLIB_Interface.add_logic (10, smtlib_logic) #> + setup_builtins #> + Z3_Interface.add_mk_builtins z3_mk_builtins #> + Z3_Replay_Util.add_simproc real_linarith_proc)) + +end; diff --git a/src/test-gen/SMT/smt_solver.ML b/src/test-gen/SMT/smt_solver.ML new file mode 100644 index 0000000..7ff85dd --- /dev/null +++ b/src/test-gen/SMT/smt_solver.ML @@ -0,0 +1,307 @@ +(* Title: HOL/Tools/SMT/smt_solver.ML + Author: Sascha Boehme, TU Muenchen + +SMT solvers registry and SMT tactic. +*) + +signature SMT_SOLVER = +sig + (*configuration*) + datatype outcome = Unsat | Sat | Unknown + + type parsed_proof = + {outcome: SMT_Failure.failure option, + fact_ids: (int * ((string * ATP_Problem_Generate.stature) * thm)) list option, + atp_proof: unit -> (term, string) ATP_Proof.atp_step list} + + type solver_config = + {name: string, + class: Proof.context -> SMT_Util.class, + avail: unit -> bool, + command: unit -> string list, + options: Proof.context -> string list, + smt_options: (string * string) list, + default_max_relevant: int, + outcome: string -> string list -> outcome * string list, + parse_proof: (Proof.context -> SMT_Translate.replay_data -> + ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> + parsed_proof) option, + replay: (Proof.context -> SMT_Translate.replay_data -> string list -> thm) option} + + (*registry*) + val add_solver: solver_config -> theory -> theory + val default_max_relevant: Proof.context -> string -> int + + (*filter*) + val smt_filter: Proof.context -> thm -> ((string * ATP_Problem_Generate.stature) * thm) list -> + int -> Time.time -> parsed_proof + + (*tactic*) + val smt_tac: Proof.context -> thm list -> int -> tactic + val smt_tac': Proof.context -> thm list -> int -> tactic +end; + +structure SMT_Solver: SMT_SOLVER = +struct + +(* interface to external solvers *) + +local + +fun make_command command options problem_path proof_path = + "(exec 2>&1;" :: map Bash.string (command () @ options) @ + [File.bash_path problem_path, ")", ">", File.bash_path proof_path] + |> space_implode " " + +fun with_trace ctxt msg f x = + let val _ = SMT_Config.trace_msg ctxt (fn () => msg) () + in f x end + +fun run ctxt name mk_cmd input = + (case SMT_Config.certificates_of ctxt of + NONE => + if not (SMT_Config.is_available ctxt name) then + error ("The SMT solver " ^ quote name ^ " is not installed") + else if Config.get ctxt SMT_Config.debug_files = "" then + with_trace ctxt ("Invoking SMT solver " ^ quote name ^ " ...") (Cache_IO.run mk_cmd) input + else + let + val base_path = Path.explode (Config.get ctxt SMT_Config.debug_files) + val in_path = Path.ext "smt_in" base_path + val out_path = Path.ext "smt_out" base_path + in Cache_IO.raw_run mk_cmd input in_path out_path end + | SOME certs => + (case Cache_IO.lookup certs input of + (NONE, key) => + if Config.get ctxt SMT_Config.read_only_certificates then + error ("Bad certificate cache: missing certificate") + else + Cache_IO.run_and_cache certs key mk_cmd input + | (SOME output, _) => + with_trace ctxt ("Using cached certificate from " ^ + Path.print (Cache_IO.cache_path_of certs) ^ " ...") I output)) + +(* Z3 returns 1 if "get-proof" or "get-model" fails. veriT returns 255. 
*) +val normal_return_codes = [0, 1, 255] + +fun run_solver ctxt name mk_cmd input = + let + fun pretty tag lines = Pretty.string_of (Pretty.big_list tag (map Pretty.str lines)) + + val _ = SMT_Config.trace_msg ctxt (pretty "Problem:" o split_lines) input + + val {redirected_output = res, output = err, return_code} = + SMT_Config.with_timeout ctxt (run ctxt name mk_cmd) input + val _ = SMT_Config.trace_msg ctxt (pretty "Solver:") err + + val output = fst (take_suffix (equal "") res) + val _ = SMT_Config.trace_msg ctxt (pretty "Result:") output + + val _ = member (op =) normal_return_codes return_code orelse + raise SMT_Failure.SMT (SMT_Failure.Abnormal_Termination return_code) + in output end + +fun trace_assms ctxt = + SMT_Config.trace_msg ctxt (Pretty.string_of o + Pretty.big_list "Assertions:" o map (Thm.pretty_thm ctxt o snd)) + +fun trace_replay_data ({context = ctxt, typs, terms, ...} : SMT_Translate.replay_data) = + let + fun pretty_eq n p = Pretty.block [Pretty.str n, Pretty.str " = ", p] + fun p_typ (n, T) = pretty_eq n (Syntax.pretty_typ ctxt T) + fun p_term (n, t) = pretty_eq n (Syntax.pretty_term ctxt t) + in + SMT_Config.trace_msg ctxt (fn () => + Pretty.string_of (Pretty.big_list "Names:" [ + Pretty.big_list "sorts:" (map p_typ (Symtab.dest typs)), + Pretty.big_list "functions:" (map p_term (Symtab.dest terms))])) () + end + +in + +fun invoke name command smt_options ithms ctxt = + let + val options = SMT_Config.solver_options_of ctxt + val comments = [space_implode " " options] + + val (str, replay_data as {context = ctxt', ...}) = + ithms + |> tap (trace_assms ctxt) + |> SMT_Translate.translate ctxt smt_options comments + ||> tap trace_replay_data + in (run_solver ctxt' name (make_command command options) str, replay_data) end + +end + + +(* configuration *) + +datatype outcome = Unsat | Sat | Unknown + +type parsed_proof = + {outcome: SMT_Failure.failure option, + fact_ids: (int * ((string * ATP_Problem_Generate.stature) * thm)) list option, + atp_proof: unit -> (term, string) ATP_Proof.atp_step list} + +type solver_config = + {name: string, + class: Proof.context -> SMT_Util.class, + avail: unit -> bool, + command: unit -> string list, + options: Proof.context -> string list, + smt_options: (string * string) list, + default_max_relevant: int, + outcome: string -> string list -> outcome * string list, + parse_proof: (Proof.context -> SMT_Translate.replay_data -> + ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> + parsed_proof) option, + replay: (Proof.context -> SMT_Translate.replay_data -> string list -> thm) option} + + +(* check well-sortedness *) + +val has_topsort = Term.exists_type (Term.exists_subtype (fn + TFree (_, []) => true + | TVar (_, []) => true + | _ => false)) + +(* top sorts cause problems with atomization *) +fun check_topsort ctxt thm = + if has_topsort (Thm.prop_of thm) then (SMT_Normalize.drop_fact_warning ctxt thm; TrueI) else thm + + +(* registry *) + +type solver_info = { + command: unit -> string list, + smt_options: (string * string) list, + default_max_relevant: int, + parse_proof: Proof.context -> SMT_Translate.replay_data -> + ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> + parsed_proof, + replay: Proof.context -> SMT_Translate.replay_data -> string list -> thm} + +structure Solvers = Generic_Data +( + type T = solver_info Symtab.table + val empty = Symtab.empty + val extend = I + fun merge data = Symtab.merge (K true) data +) + +local + fun parse_proof 
outcome parse_proof0 outer_ctxt replay_data xfacts prems concl output = + (case outcome output of + (Unsat, lines) => + (case parse_proof0 of + SOME pp => pp outer_ctxt replay_data xfacts prems concl lines + | NONE => {outcome = NONE, fact_ids = NONE, atp_proof = K []}) + | (result, _) => raise SMT_Failure.SMT (SMT_Failure.Counterexample (result = Sat))) + + fun replay outcome replay0 oracle outer_ctxt + (replay_data as {context = ctxt, ...} : SMT_Translate.replay_data) output = + (case outcome output of + (Unsat, lines) => + if not (Config.get ctxt SMT_Config.oracle) andalso is_some replay0 + then the replay0 outer_ctxt replay_data lines + else oracle () + | (result, _) => raise SMT_Failure.SMT (SMT_Failure.Counterexample (result = Sat))) + + val cfalse = Thm.cterm_of @{context} @{prop False} +in + +fun add_solver ({name, class, avail, command, options, smt_options, default_max_relevant, outcome, + parse_proof = parse_proof0, replay = replay0} : solver_config) = + let + fun solver oracle = { + command = command, + smt_options = smt_options, + default_max_relevant = default_max_relevant, + parse_proof = parse_proof (outcome name) parse_proof0, + replay = replay (outcome name) replay0 oracle} + + val info = {name = name, class = class, avail = avail, options = options} + in + Thm.add_oracle (Binding.name name, K cfalse) #-> (fn (_, oracle) => + Context.theory_map (Solvers.map (Symtab.update_new (name, solver oracle)))) #> + Context.theory_map (SMT_Config.add_solver info) + end + +end + +fun get_info ctxt name = the (Symtab.lookup (Solvers.get (Context.Proof ctxt)) name) + +fun name_and_info_of ctxt = + let val name = SMT_Config.solver_of ctxt + in (name, get_info ctxt name) end + +val default_max_relevant = #default_max_relevant oo get_info + +fun apply_solver_and_replay ctxt thms0 = + let + val thms = map (check_topsort ctxt) thms0 + val (name, {command, smt_options, replay, ...}) = name_and_info_of ctxt + val (output, replay_data) = + invoke name command smt_options (SMT_Normalize.normalize ctxt thms) ctxt + in replay ctxt replay_data output end + + +(* filter *) + +fun smt_filter ctxt0 goal xfacts i time_limit = + let + val ctxt = ctxt0 |> Config.put SMT_Config.timeout (Time.toReal time_limit) + + val ({context = ctxt, prems, concl, ...}, _) = Subgoal.focus ctxt i NONE goal + fun negate ct = Thm.dest_comb ct ||> Thm.apply @{cterm Not} |-> Thm.apply + val cprop = + (case try negate (Thm.rhs_of (SMT_Normalize.atomize_conv ctxt concl)) of + SOME ct => ct + | NONE => raise SMT_Failure.SMT (SMT_Failure.Other_Failure "cannot atomize goal")) + + val conjecture = Thm.assume cprop + val facts = map snd xfacts + val thms = conjecture :: prems @ facts + val thms' = map (check_topsort ctxt) thms + + val (name, {command, smt_options, parse_proof, ...}) = name_and_info_of ctxt + val (output, replay_data) = + invoke name command smt_options (SMT_Normalize.normalize ctxt thms') ctxt + in + parse_proof ctxt replay_data xfacts (map Thm.prop_of prems) (Thm.term_of concl) output + end + handle SMT_Failure.SMT fail => {outcome = SOME fail, fact_ids = NONE, atp_proof = K []} + + +(* SMT tactic *) + +local + fun str_of ctxt fail = + "Solver " ^ SMT_Config.solver_of ctxt ^ ": " ^ SMT_Failure.string_of_failure fail + + fun safe_solve ctxt facts = SOME (apply_solver_and_replay ctxt facts) + handle + SMT_Failure.SMT (fail as SMT_Failure.Counterexample _) => + (SMT_Config.verbose_msg ctxt (str_of ctxt) fail; NONE) + | SMT_Failure.SMT (fail as SMT_Failure.Time_Out) => + error ("SMT: Solver " ^ quote 
(SMT_Config.solver_of ctxt) ^ ": " ^ + SMT_Failure.string_of_failure fail ^ " (setting the " ^ + "configuration option " ^ quote (Config.name_of SMT_Config.timeout) ^ " might help)") + | SMT_Failure.SMT fail => error (str_of ctxt fail) + + fun resolve ctxt (SOME thm) = resolve_tac ctxt [thm] 1 + | resolve _ NONE = no_tac + + fun tac prove ctxt rules = + CONVERSION (SMT_Normalize.atomize_conv ctxt) + THEN' resolve_tac ctxt @{thms ccontr} + THEN' SUBPROOF (fn {context = ctxt', prems, ...} => + resolve ctxt' (prove ctxt' (rules @ prems))) ctxt +in + +val smt_tac = tac safe_solve +val smt_tac' = tac (SOME oo apply_solver_and_replay) + +end + +end; diff --git a/src/test-gen/SMT/smt_systems.ML b/src/test-gen/SMT/smt_systems.ML new file mode 100644 index 0000000..b7581cb --- /dev/null +++ b/src/test-gen/SMT/smt_systems.ML @@ -0,0 +1,154 @@ +(* Title: HOL/Tools/SMT/smt_systems.ML + Author: Sascha Boehme, TU Muenchen + +Setup SMT solvers. +*) + +signature SMT_SYSTEMS = +sig + val cvc4_extensions: bool Config.T + val z3_extensions: bool Config.T +end; + +structure SMT_Systems: SMT_SYSTEMS = +struct + +(* helper functions *) + +fun make_avail name () = getenv (name ^ "_SOLVER") <> "" + +fun make_command name () = [getenv (name ^ "_SOLVER")] + +fun outcome_of unsat sat unknown solver_name line = + if String.isPrefix unsat line then SMT_Solver.Unsat + else if String.isPrefix sat line then SMT_Solver.Sat + else if String.isPrefix unknown line then SMT_Solver.Unknown + else raise SMT_Failure.SMT (SMT_Failure.Other_Failure ("Solver " ^ quote solver_name ^ + " failed -- enable tracing using the " ^ quote (Config.name_of SMT_Config.trace) ^ + " option for details")) + +fun is_blank_or_error_line "" = true + | is_blank_or_error_line s = String.isPrefix "(error " s + +fun on_first_line test_outcome solver_name lines = + let + val split_first = (fn [] => ("", []) | l :: ls => (l, ls)) + val (l, ls) = split_first (snd (take_prefix is_blank_or_error_line lines)) + in (test_outcome solver_name l, ls) end + +fun on_first_non_unsupported_line test_outcome solver_name lines = + on_first_line test_outcome solver_name (filter (curry (op <>) "unsupported") lines) + +(* CVC3 *) + +local + fun cvc3_options ctxt = [ + "-seed", string_of_int (Config.get ctxt SMT_Config.random_seed), + "-lang", "smt2", + "-timeout", string_of_int (Real.ceil (Config.get ctxt SMT_Config.timeout))] +in + +val cvc3: SMT_Solver.solver_config = { + name = "cvc3", + class = K SMTLIB_Interface.smtlibC, + avail = make_avail "CVC3", + command = make_command "CVC3", + options = cvc3_options, + smt_options = [], + default_max_relevant = 400 (* FUDGE *), + outcome = on_first_line (outcome_of "unsat" "sat" "unknown"), + parse_proof = NONE, + replay = NONE } + +end + +(* CVC4 *) + +val cvc4_extensions = Attrib.setup_config_bool @{binding cvc4_extensions} (K false) + +local + fun cvc4_options ctxt = [ + "--no-statistics", + "--random-seed=" ^ string_of_int (Config.get ctxt SMT_Config.random_seed), + "--lang=smt2", + "--continued-execution", + "--tlimit", string_of_int (Real.ceil (1000.0 * Config.get ctxt SMT_Config.timeout))] + + fun select_class ctxt = + if Config.get ctxt cvc4_extensions then CVC4_Interface.smtlib_cvc4C + else SMTLIB_Interface.smtlibC +in + +val cvc4: SMT_Solver.solver_config = { + name = "cvc4", + class = select_class, + avail = make_avail "CVC4", + command = make_command "CVC4", + options = cvc4_options, + smt_options = [(":produce-unsat-cores", "true")], + default_max_relevant = 400 (* FUDGE *), + outcome = on_first_line (outcome_of 
"unsat" "sat" "unknown"), + parse_proof = SOME (K CVC4_Proof_Parse.parse_proof), + replay = NONE } + +end + +(* veriT *) + +val veriT: SMT_Solver.solver_config = { + name = "verit", + class = K SMTLIB_Interface.smtlibC, + avail = make_avail "VERIT", + command = make_command "VERIT", + options = (fn ctxt => [ + "--proof-version=1", + "--proof-prune", + "--proof-merge", + "--disable-print-success", + "--disable-banner", + "--max-time=" ^ string_of_int (Real.ceil (Config.get ctxt SMT_Config.timeout))]), + smt_options = [(":produce-proofs", "true")], + default_max_relevant = 200 (* FUDGE *), + outcome = on_first_non_unsupported_line (outcome_of "unsat" "sat" "unknown"), + parse_proof = SOME (K VeriT_Proof_Parse.parse_proof), + replay = NONE } + +(* Z3 *) + +val z3_extensions = Attrib.setup_config_bool @{binding z3_extensions} (K false) + +local + fun z3_options ctxt = + ["smt.random_seed=" ^ string_of_int (Config.get ctxt SMT_Config.random_seed), + "smt.refine_inj_axioms=false", + "-T:" ^ string_of_int (Real.ceil (Config.get ctxt SMT_Config.timeout)), + "-smt2"] + + fun select_class ctxt = + if Config.get ctxt z3_extensions then Z3_Interface.smtlib_z3C else SMTLIB_Interface.smtlibC +in + +val z3: SMT_Solver.solver_config = { + name = "z3", + class = select_class, + avail = make_avail "Z3", + command = make_command "Z3", + options = z3_options, + smt_options = [(":produce-proofs", "true")], + default_max_relevant = 350 (* FUDGE *), + outcome = on_first_line (outcome_of "unsat" "sat" "unknown"), + parse_proof = SOME Z3_Replay.parse_proof, + replay = SOME Z3_Replay.replay } + +end + + +(* overall setup *) + +val _ = Theory.setup ( + SMT_Solver.add_solver cvc3 #> + SMT_Solver.add_solver cvc4 #> + SMT_Solver.add_solver veriT #> + SMT_Solver.add_solver z3) + +end; diff --git a/src/test-gen/SMT/smt_translate.ML b/src/test-gen/SMT/smt_translate.ML new file mode 100644 index 0000000..9e9bb6a --- /dev/null +++ b/src/test-gen/SMT/smt_translate.ML @@ -0,0 +1,527 @@ +(* Title: HOL/Tools/SMT/smt_translate.ML + Author: Sascha Boehme, TU Muenchen + +Translate theorems into an SMT intermediate format and serialize them. 
+*) + +signature SMT_TRANSLATE = +sig + (*intermediate term structure*) + datatype squant = SForall | SExists + datatype 'a spattern = SPat of 'a list | SNoPat of 'a list + datatype sterm = + SVar of int | + SApp of string * sterm list | + SLet of string * sterm * sterm | + SQua of squant * string list * sterm spattern list * sterm + + (*translation configuration*) + type sign = { + logic: string, + sorts: string list, + dtyps: (BNF_Util.fp_kind * (string * (string * (string * string) list) list)) list, + funcs: (string * (string list * string)) list } + type config = { + logic: term list -> string, + fp_kinds: BNF_Util.fp_kind list, + serialize: (string * string) list -> string list -> sign -> sterm list -> string } + type replay_data = { + context: Proof.context, + typs: typ Symtab.table, + terms: term Symtab.table, + ll_defs: term list, + rewrite_rules: thm list, + assms: (int * thm) list } + + (*translation*) + val add_config: SMT_Util.class * (Proof.context -> config) -> Context.generic -> Context.generic + val translate: Proof.context -> (string * string) list -> string list -> (int * thm) list -> + string * replay_data +end; + +structure SMT_Translate: SMT_TRANSLATE = +struct + + +(* intermediate term structure *) + +datatype squant = SForall | SExists + +datatype 'a spattern = SPat of 'a list | SNoPat of 'a list + +datatype sterm = + SVar of int | + SApp of string * sterm list | + SLet of string * sterm * sterm | + SQua of squant * string list * sterm spattern list * sterm + + +(* translation configuration *) + +type sign = { + logic: string, + sorts: string list, + dtyps: (BNF_Util.fp_kind * (string * (string * (string * string) list) list)) list, + funcs: (string * (string list * string)) list } + +type config = { + logic: term list -> string, + fp_kinds: BNF_Util.fp_kind list, + serialize: (string * string) list -> string list -> sign -> sterm list -> string } + +type replay_data = { + context: Proof.context, + typs: typ Symtab.table, + terms: term Symtab.table, + ll_defs: term list, + rewrite_rules: thm list, + assms: (int * thm) list } + + +(* translation context *) + +fun add_components_of_typ (Type (s, Ts)) = + cons (Long_Name.base_name s) #> fold_rev add_components_of_typ Ts + | add_components_of_typ (TFree (s, _)) = cons (perhaps (try (unprefix "'")) s) + | add_components_of_typ _ = I; + +fun suggested_name_of_typ T = space_implode "_" (add_components_of_typ T []); + +fun suggested_name_of_term (Const (s, _)) = Long_Name.base_name s + | suggested_name_of_term (Free (s, _)) = s + | suggested_name_of_term _ = Name.uu + +val empty_tr_context = (Name.context, Typtab.empty, Termtab.empty) +val safe_suffix = "$" + +fun add_typ T proper (cx as (names, typs, terms)) = + (case Typtab.lookup typs T of + SOME (name, _) => (name, cx) + | NONE => + let + val sugg = Name.desymbolize (SOME true) (suggested_name_of_typ T) ^ safe_suffix + val (name, names') = Name.variant sugg names + val typs' = Typtab.update (T, (name, proper)) typs + in (name, (names', typs', terms)) end) + +fun add_fun t sort (cx as (names, typs, terms)) = + (case Termtab.lookup terms t of + SOME (name, _) => (name, cx) + | NONE => + let + val sugg = Name.desymbolize (SOME false) (suggested_name_of_term t) ^ safe_suffix + val (name, names') = Name.variant sugg names + val terms' = Termtab.update (t, (name, sort)) terms + in (name, (names', typs, terms')) end) + +fun sign_of logic dtyps (_, typs, terms) = { + logic = logic, + sorts = Typtab.fold (fn (_, (n, true)) => cons n | _ => I) typs [], + dtyps = dtyps, + funcs = 
Termtab.fold (fn (_, (n, SOME ss)) => cons (n,ss) | _ => I) terms []} + +fun replay_data_of ctxt ll_defs rules assms (_, typs, terms) = + let + fun add_typ (T, (n, _)) = Symtab.update (n, T) + val typs' = Typtab.fold add_typ typs Symtab.empty + + fun add_fun (t, (n, _)) = Symtab.update (n, t) + val terms' = Termtab.fold add_fun terms Symtab.empty + in + {context = ctxt, typs = typs', terms = terms', ll_defs = ll_defs, rewrite_rules = rules, + assms = assms} + end + + +(* preprocessing *) + +(** (co)datatype declarations **) + +fun collect_co_datatypes fp_kinds (tr_context, ctxt) ts = + let + val (fp_decls, ctxt') = + ([], ctxt) + |> fold (Term.fold_types (SMT_Datatypes.add_decls fp_kinds)) ts + |>> flat + + fun is_decl_typ T = exists (equal T o fst o snd) fp_decls + + fun add_typ' T proper = + (case SMT_Builtin.dest_builtin_typ ctxt' T of + SOME n => pair n + | NONE => add_typ T proper) + + fun tr_select sel = + let val T = Term.range_type (Term.fastype_of sel) + in add_fun sel NONE ##>> add_typ' T (not (is_decl_typ T)) end + fun tr_constr (constr, selects) = + add_fun constr NONE ##>> fold_map tr_select selects + fun tr_typ (fp, (T, cases)) = + add_typ' T false ##>> fold_map tr_constr cases #>> pair fp + + val (fp_decls', tr_context') = fold_map tr_typ fp_decls tr_context + + fun add (constr, selects) = + Termtab.update (constr, length selects) #> + fold (Termtab.update o rpair 1) selects + + val funcs = fold (fold add o snd o snd) fp_decls Termtab.empty + + in ((funcs, fp_decls', tr_context', ctxt'), ts) end + (* FIXME: also return necessary (co)datatype theorems *) + + +(** eta-expand quantifiers, let expressions and built-ins *) + +local + fun eta f T t = Abs (Name.uu, T, f (Term.incr_boundvars 1 t $ Bound 0)) + + fun exp f T = eta f (Term.domain_type (Term.domain_type T)) + + fun exp2 T q = + let val U = Term.domain_type T + in Abs (Name.uu, U, q $ eta I (Term.domain_type U) (Bound 0)) end + + fun expf k i T t = + let val Ts = drop i (fst (SMT_Util.dest_funT k T)) + in + Term.incr_boundvars (length Ts) t + |> fold_rev (fn i => fn u => u $ Bound i) (0 upto length Ts - 1) + |> fold_rev (fn T => fn u => Abs (Name.uu, T, u)) Ts + end +in + +fun eta_expand ctxt funcs = + let + fun exp_func t T ts = + (case Termtab.lookup funcs t of + SOME k => Term.list_comb (t, ts) |> k <> length ts ? 
expf k (length ts) T + | NONE => Term.list_comb (t, ts)) + + fun expand ((q as Const (@{const_name All}, _)) $ Abs a) = q $ abs_expand a + | expand ((q as Const (@{const_name All}, T)) $ t) = q $ exp expand T t + | expand (q as Const (@{const_name All}, T)) = exp2 T q + | expand ((q as Const (@{const_name Ex}, _)) $ Abs a) = q $ abs_expand a + | expand ((q as Const (@{const_name Ex}, T)) $ t) = q $ exp expand T t + | expand (q as Const (@{const_name Ex}, T)) = exp2 T q + | expand (Const (@{const_name Let}, _) $ t $ u) = expand (Term.betapply (u, t)) + | expand (Const (@{const_name Let}, T) $ t) = + let val U = Term.domain_type (Term.range_type T) + in Abs (Name.uu, U, Bound 0 $ Term.incr_boundvars 1 t) end + | expand (Const (@{const_name Let}, T)) = + let val U = Term.domain_type (Term.range_type T) + in Abs (Name.uu, Term.domain_type T, Abs (Name.uu, U, Bound 0 $ Bound 1)) end + | expand t = + (case Term.strip_comb t of + (u as Const (c as (_, T)), ts) => + (case SMT_Builtin.dest_builtin ctxt c ts of + SOME (_, k, us, mk) => + if k = length us then mk (map expand us) + else if k < length us then chop k (map expand us) |>> mk |> Term.list_comb + else expf k (length ts) T (mk (map expand us)) + | NONE => exp_func u T (map expand ts)) + | (u as Free (_, T), ts) => exp_func u T (map expand ts) + | (Abs a, ts) => Term.list_comb (abs_expand a, map expand ts) + | (u, ts) => Term.list_comb (u, map expand ts)) + + and abs_expand (n, T, t) = Abs (n, T, expand t) + + in map expand end + +end + + +(** introduce explicit applications **) + +local + (* + Make application explicit for functions with varying number of arguments. + *) + + fun add t i = apfst (Termtab.map_default (t, i) (Integer.min i)) + fun add_type T = apsnd (Typtab.update (T, ())) + + fun min_arities t = + (case Term.strip_comb t of + (u as Const _, ts) => add u (length ts) #> fold min_arities ts + | (u as Free _, ts) => add u (length ts) #> fold min_arities ts + | (Abs (_, T, u), ts) => (can dest_funT T ? 
add_type T) #> min_arities u #> fold min_arities ts + | (_, ts) => fold min_arities ts) + + fun minimize types t i = + let + fun find_min j [] _ = j + | find_min j (U :: Us) T = + if Typtab.defined types T then j else find_min (j + 1) Us (U --> T) + + val (Ts, T) = Term.strip_type (Term.type_of t) + in find_min 0 (take i (rev Ts)) T end + + fun app u (t, T) = (Const (@{const_name fun_app}, T --> T) $ t $ u, Term.range_type T) + + fun apply i t T ts = + let + val (ts1, ts2) = chop i ts + val (_, U) = SMT_Util.dest_funT i T + in fst (fold app ts2 (Term.list_comb (t, ts1), U)) end +in + +fun intro_explicit_application ctxt funcs ts = + let + val (arities, types) = fold min_arities ts (Termtab.empty, Typtab.empty) + val arities' = Termtab.map (minimize types) arities (* FIXME: highly suspicious *) + + fun app_func t T ts = + if is_some (Termtab.lookup funcs t) then Term.list_comb (t, ts) + else apply (the (Termtab.lookup arities' t)) t T ts + + fun in_list T f t = SMT_Util.mk_symb_list T (map f (SMT_Util.dest_symb_list t)) + + fun traverse Ts t = + (case Term.strip_comb t of + (q as Const (@{const_name All}, _), [Abs (x, T, u)]) => + q $ Abs (x, T, in_trigger (T :: Ts) u) + | (q as Const (@{const_name Ex}, _), [Abs (x, T, u)]) => + q $ Abs (x, T, in_trigger (T :: Ts) u) + | (q as Const (@{const_name Let}, _), [u1, u2 as Abs _]) => + q $ traverse Ts u1 $ traverse Ts u2 + | (u as Const (c as (_, T)), ts) => + (case SMT_Builtin.dest_builtin ctxt c ts of + SOME (_, k, us, mk) => + let + val (ts1, ts2) = chop k (map (traverse Ts) us) + val U = Term.strip_type T |>> snd o chop k |> (op --->) + in apply 0 (mk ts1) U ts2 end + | NONE => app_func u T (map (traverse Ts) ts)) + | (u as Free (_, T), ts) => app_func u T (map (traverse Ts) ts) + | (u as Bound i, ts) => apply 0 u (nth Ts i) (map (traverse Ts) ts) + | (Abs (n, T, u), ts) => traverses Ts (Abs (n, T, traverse (T::Ts) u)) ts + | (u, ts) => traverses Ts u ts) + and in_trigger Ts ((c as @{const trigger}) $ p $ t) = c $ in_pats Ts p $ traverse Ts t + | in_trigger Ts t = traverse Ts t + and in_pats Ts ps = + in_list @{typ "pattern symb_list"} (in_list @{typ pattern} (in_pat Ts)) ps + and in_pat Ts ((p as Const (@{const_name pat}, _)) $ t) = p $ traverse Ts t + | in_pat Ts ((p as Const (@{const_name nopat}, _)) $ t) = p $ traverse Ts t + | in_pat _ t = raise TERM ("bad pattern", [t]) + and traverses Ts t ts = Term.list_comb (t, map (traverse Ts) ts) + in map (traverse []) ts end + +val fun_app_eq = mk_meta_eq @{thm fun_app_def} + +end + + +(** map HOL formulas to FOL formulas (i.e., separate formulas froms terms) **) + +local + val is_quant = member (op =) [@{const_name All}, @{const_name Ex}] + + val fol_rules = [ + Let_def, + @{lemma "P = True == P" by (rule eq_reflection) simp}] + + exception BAD_PATTERN of unit + + fun is_builtin_conn_or_pred ctxt c ts = + is_some (SMT_Builtin.dest_builtin_conn ctxt c ts) orelse + is_some (SMT_Builtin.dest_builtin_pred ctxt c ts) +in + +fun folify ctxt = + let + fun in_list T f t = SMT_Util.mk_symb_list T (map_filter f (SMT_Util.dest_symb_list t)) + + fun in_term pat t = + (case Term.strip_comb t of + (@{const True}, []) => t + | (@{const False}, []) => t + | (u as Const (@{const_name If}, _), [t1, t2, t3]) => + if pat then raise BAD_PATTERN () else u $ in_form t1 $ in_term pat t2 $ in_term pat t3 + | (Const (c as (n, _)), ts) => + if is_builtin_conn_or_pred ctxt c ts orelse is_quant n then + if pat then raise BAD_PATTERN () else in_form t + else + Term.list_comb (Const c, map (in_term pat) ts) + | (Free c, ts) 
=> Term.list_comb (Free c, map (in_term pat) ts) + | _ => t) + + and in_pat ((p as Const (@{const_name pat}, _)) $ t) = + p $ in_term true t + | in_pat ((p as Const (@{const_name nopat}, _)) $ t) = + p $ in_term true t + | in_pat t = raise TERM ("bad pattern", [t]) + + and in_pats ps = + in_list @{typ "pattern symb_list"} (SOME o in_list @{typ pattern} (try in_pat)) ps + + and in_trigger ((c as @{const trigger}) $ p $ t) = c $ in_pats p $ in_form t + | in_trigger t = in_form t + + and in_form t = + (case Term.strip_comb t of + (q as Const (qn, _), [Abs (n, T, u)]) => + if is_quant qn then q $ Abs (n, T, in_trigger u) + else in_term false t + | (Const c, ts) => + (case SMT_Builtin.dest_builtin_conn ctxt c ts of + SOME (_, _, us, mk) => mk (map in_form us) + | NONE => + (case SMT_Builtin.dest_builtin_pred ctxt c ts of + SOME (_, _, us, mk) => mk (map (in_term false) us) + | NONE => in_term false t)) + | _ => in_term false t) + in + map in_form #> + pair (fol_rules, I) + end + +end + + +(* translation into intermediate format *) + +(** utility functions **) + +val quantifier = (fn + @{const_name All} => SOME SForall + | @{const_name Ex} => SOME SExists + | _ => NONE) + +fun group_quant qname Ts (t as Const (q, _) $ Abs (_, T, u)) = + if q = qname then group_quant qname (T :: Ts) u else (Ts, t) + | group_quant _ Ts t = (Ts, t) + +fun dest_pat (Const (@{const_name pat}, _) $ t) = (t, true) + | dest_pat (Const (@{const_name nopat}, _) $ t) = (t, false) + | dest_pat t = raise TERM ("bad pattern", [t]) + +fun dest_pats [] = I + | dest_pats ts = + (case map dest_pat ts |> split_list ||> distinct (op =) of + (ps, [true]) => cons (SPat ps) + | (ps, [false]) => cons (SNoPat ps) + | _ => raise TERM ("bad multi-pattern", ts)) + +fun dest_trigger (@{const trigger} $ tl $ t) = + (rev (fold (dest_pats o SMT_Util.dest_symb_list) (SMT_Util.dest_symb_list tl) []), t) + | dest_trigger t = ([], t) + +fun dest_quant qn T t = quantifier qn |> Option.map (fn q => + let + val (Ts, u) = group_quant qn [T] t + val (ps, p) = dest_trigger u + in (q, rev Ts, ps, p) end) + +fun fold_map_pat f (SPat ts) = fold_map f ts #>> SPat + | fold_map_pat f (SNoPat ts) = fold_map f ts #>> SNoPat + + +(** translation from Isabelle terms into SMT intermediate terms **) + +fun intermediate logic dtyps builtin ctxt ts trx = + let + fun transT (T as TFree _) = add_typ T true + | transT (T as TVar _) = (fn _ => raise TYPE ("bad SMT type", [T], [])) + | transT (T as Type _) = + (case SMT_Builtin.dest_builtin_typ ctxt T of + SOME n => pair n + | NONE => add_typ T true) + + fun app n ts = SApp (n, ts) + + fun trans t = + (case Term.strip_comb t of + (Const (qn, _), [Abs (_, T, t1)]) => + (case dest_quant qn T t1 of + SOME (q, Ts, ps, b) => + fold_map transT Ts ##>> fold_map (fold_map_pat trans) ps ##>> + trans b #>> (fn ((Ts', ps'), b') => SQua (q, Ts', ps', b')) + | NONE => raise TERM ("unsupported quantifier", [t])) + | (Const (@{const_name Let}, _), [t1, Abs (_, T, t2)]) => + transT T ##>> trans t1 ##>> trans t2 #>> (fn ((U, u1), u2) => SLet (U, u1, u2)) + | (u as Const (c as (_, T)), ts) => + (case builtin ctxt c ts of + SOME (n, _, us, _) => fold_map trans us #>> app n + | NONE => transs u T ts) + | (u as Free (_, T), ts) => transs u T ts + | (Bound i, []) => pair (SVar i) + | _ => raise TERM ("bad SMT term", [t])) + + and transs t T ts = + let val (Us, U) = SMT_Util.dest_funT (length ts) T + in + fold_map transT Us ##>> transT U #-> (fn Up => + add_fun t (SOME Up) ##>> fold_map trans ts #>> SApp) + end + + val (us, trx') = fold_map 
trans ts trx + in ((sign_of (logic ts) dtyps trx', us), trx') end + + +(* translation *) + +structure Configs = Generic_Data +( + type T = (Proof.context -> config) SMT_Util.dict + val empty = [] + val extend = I + fun merge data = SMT_Util.dict_merge fst data +) + +fun add_config (cs, cfg) = Configs.map (SMT_Util.dict_update (cs, cfg)) + +fun get_config ctxt = + let val cs = SMT_Config.solver_class_of ctxt + in + (case SMT_Util.dict_get (Configs.get (Context.Proof ctxt)) cs of + SOME cfg => cfg ctxt + | NONE => error ("SMT: no translation configuration found " ^ + "for solver class " ^ quote (SMT_Util.string_of_class cs))) + end + +fun translate ctxt smt_options comments ithms = + let + val {logic, fp_kinds, serialize} = get_config ctxt + + fun no_dtyps (tr_context, ctxt) ts = + ((Termtab.empty, [], tr_context, ctxt), ts) + + val ts1 = map (Envir.beta_eta_contract o SMT_Util.prop_of o snd) ithms + + val ((funcs, dtyps, tr_context, ctxt1), ts2) = + ((empty_tr_context, ctxt), ts1) + |-> (if null fp_kinds then no_dtyps else collect_co_datatypes fp_kinds) + + fun is_binder (Const (@{const_name Let}, _) $ _) = true + | is_binder t = Lambda_Lifting.is_quantifier t + + fun mk_trigger ((q as Const (@{const_name All}, _)) $ Abs (n, T, t)) = + q $ Abs (n, T, mk_trigger t) + | mk_trigger (eq as (Const (@{const_name HOL.eq}, T) $ lhs $ _)) = + Term.domain_type T --> @{typ pattern} + |> (fn T => Const (@{const_name pat}, T) $ lhs) + |> SMT_Util.mk_symb_list @{typ pattern} o single + |> SMT_Util.mk_symb_list @{typ "pattern symb_list"} o single + |> (fn t => @{const trigger} $ t $ eq) + | mk_trigger t = t + + val (ctxt2, (ts3, ll_defs)) = + ts2 + |> eta_expand ctxt1 funcs + |> rpair ctxt1 + |-> Lambda_Lifting.lift_lambdas NONE is_binder + |-> (fn (ts', ll_defs) => fn ctxt' => + (ctxt', (intro_explicit_application ctxt' funcs (map mk_trigger ll_defs @ ts'), ll_defs))) + + val ((rewrite_rules, builtin), ts4) = folify ctxt2 ts3 + |>> apfst (cons fun_app_eq) +val _ = dtyps : (BNF_Util.fp_kind * (string * (string * (string * string) list) list)) list (*###*) + in + (ts4, tr_context) + |-> intermediate logic dtyps (builtin SMT_Builtin.dest_builtin) ctxt2 + |>> uncurry (serialize smt_options comments) + ||> replay_data_of ctxt2 ll_defs rewrite_rules ithms + end + +end; diff --git a/src/test-gen/SMT/smt_util.ML b/src/test-gen/SMT/smt_util.ML new file mode 100644 index 0000000..387c204 --- /dev/null +++ b/src/test-gen/SMT/smt_util.ML @@ -0,0 +1,240 @@ +(* Title: HOL/Tools/SMT/smt_util.ML + Author: Sascha Boehme, TU Muenchen + +General utility functions. 
+*) + +signature SMT_UTIL = +sig + (*basic combinators*) + val repeat: ('a -> 'a option) -> 'a -> 'a + val repeat_yield: ('a -> 'b -> ('a * 'b) option) -> 'a -> 'b -> 'a * 'b + + (*class dictionaries*) + type class = string list + val basicC: class + val string_of_class: class -> string + type 'a dict = (class * 'a) Ord_List.T + val dict_map_default: class * 'a -> ('a -> 'a) -> 'a dict -> 'a dict + val dict_update: class * 'a -> 'a dict -> 'a dict + val dict_merge: ('a * 'a -> 'a) -> 'a dict * 'a dict -> 'a dict + val dict_lookup: 'a dict -> class -> 'a list + val dict_get: 'a dict -> class -> 'a option + + (*types*) + val dest_funT: int -> typ -> typ list * typ + + (*terms*) + val dest_conj: term -> term * term + val dest_disj: term -> term * term + val under_quant: (term -> 'a) -> term -> 'a + val is_number: term -> bool + + (*symbolic lists*) + val symb_nil_const: typ -> term + val symb_cons_const: typ -> term + val mk_symb_list: typ -> term list -> term + val dest_symb_list: term -> term list + + (*patterns and instantiations*) + val mk_const_pat: theory -> string -> (ctyp -> 'a) -> 'a * cterm + val destT1: ctyp -> ctyp + val destT2: ctyp -> ctyp + val instTs: ctyp list -> ctyp list * cterm -> cterm + val instT: ctyp -> ctyp * cterm -> cterm + val instT': cterm -> ctyp * cterm -> cterm + + (*certified terms*) + val dest_cabs: cterm -> Proof.context -> cterm * Proof.context + val dest_all_cabs: cterm -> Proof.context -> cterm * Proof.context + val dest_cbinder: cterm -> Proof.context -> cterm * Proof.context + val dest_all_cbinders: cterm -> Proof.context -> cterm * Proof.context + val mk_cprop: cterm -> cterm + val dest_cprop: cterm -> cterm + val mk_cequals: cterm -> cterm -> cterm + val term_of: cterm -> term + val prop_of: thm -> term + + (*conversions*) + val if_conv: (term -> bool) -> conv -> conv -> conv + val if_true_conv: (term -> bool) -> conv -> conv + val if_exists_conv: (term -> bool) -> conv -> conv + val binders_conv: (Proof.context -> conv) -> Proof.context -> conv + val under_quant_conv: (Proof.context * cterm list -> conv) -> + Proof.context -> conv + val prop_conv: conv -> conv +end; + +structure SMT_Util: SMT_UTIL = +struct + +(* basic combinators *) + +fun repeat f = + let fun rep x = (case f x of SOME y => rep y | NONE => x) + in rep end + +fun repeat_yield f = + let fun rep x y = (case f x y of SOME (x', y') => rep x' y' | NONE => (x, y)) + in rep end + + +(* class dictionaries *) + +type class = string list + +val basicC = [] + +fun string_of_class [] = "basic" + | string_of_class cs = "basic." ^ space_implode "." 
cs + +type 'a dict = (class * 'a) Ord_List.T + +fun class_ord ((cs1, _), (cs2, _)) = + rev_order (list_ord fast_string_ord (cs1, cs2)) + +fun dict_insert (cs, x) d = + if AList.defined (op =) d cs then d + else Ord_List.insert class_ord (cs, x) d + +fun dict_map_default (cs, x) f = + dict_insert (cs, x) #> AList.map_entry (op =) cs f + +fun dict_update (e as (_, x)) = dict_map_default e (K x) + +fun dict_merge val_merge = sort class_ord o AList.join (op =) (K val_merge) + +fun dict_lookup d cs = + let fun match (cs', x) = if is_prefix (op =) cs' cs then SOME x else NONE + in map_filter match d end + +fun dict_get d cs = + (case AList.lookup (op =) d cs of + NONE => (case cs of [] => NONE | _ => dict_get d (take (length cs - 1) cs)) + | SOME x => SOME x) + + +(* types *) + +val dest_funT = + let + fun dest Ts 0 T = (rev Ts, T) + | dest Ts i (Type ("fun", [T, U])) = dest (T::Ts) (i-1) U + | dest _ _ T = raise TYPE ("not a function type", [T], []) + in dest [] end + + +(* terms *) + +fun dest_conj (@{const HOL.conj} $ t $ u) = (t, u) + | dest_conj t = raise TERM ("not a conjunction", [t]) + +fun dest_disj (@{const HOL.disj} $ t $ u) = (t, u) + | dest_disj t = raise TERM ("not a disjunction", [t]) + +fun under_quant f t = + (case t of + Const (@{const_name All}, _) $ Abs (_, _, u) => under_quant f u + | Const (@{const_name Ex}, _) $ Abs (_, _, u) => under_quant f u + | _ => f t) + +val is_number = + let + fun is_num env (Const (@{const_name Let}, _) $ t $ Abs (_, _, u)) = is_num (t :: env) u + | is_num env (Bound i) = i < length env andalso is_num env (nth env i) + | is_num _ t = can HOLogic.dest_number t + in is_num [] end + + +(* symbolic lists *) + +fun symb_listT T = Type (@{type_name symb_list}, [T]) + +fun symb_nil_const T = Const (@{const_name Symb_Nil}, symb_listT T) + +fun symb_cons_const T = + let val listT = symb_listT T in Const (@{const_name Symb_Cons}, T --> listT --> listT) end + +fun mk_symb_list T ts = + fold_rev (fn t => fn u => symb_cons_const T $ t $ u) ts (symb_nil_const T) + +fun dest_symb_list (Const (@{const_name Symb_Nil}, _)) = [] + | dest_symb_list (Const (@{const_name Symb_Cons}, _) $ t $ u) = t :: dest_symb_list u + + +(* patterns and instantiations *) + +fun mk_const_pat thy name destT = + let val cpat = Thm.global_cterm_of thy (Const (name, Sign.the_const_type thy name)) + in (destT (Thm.ctyp_of_cterm cpat), cpat) end + +val destT1 = hd o Thm.dest_ctyp +val destT2 = hd o tl o Thm.dest_ctyp + +fun instTs cUs (cTs, ct) = Thm.instantiate_cterm (map (dest_TVar o Thm.typ_of) cTs ~~ cUs, []) ct +fun instT cU (cT, ct) = instTs [cU] ([cT], ct) +fun instT' ct = instT (Thm.ctyp_of_cterm ct) + + +(* certified terms *) + +fun dest_cabs ct ctxt = + (case Thm.term_of ct of + Abs _ => + let val (n, ctxt') = yield_singleton Variable.variant_fixes Name.uu ctxt + in (snd (Thm.dest_abs (SOME n) ct), ctxt') end + | _ => raise CTERM ("no abstraction", [ct])) + +val dest_all_cabs = repeat_yield (try o dest_cabs) + +fun dest_cbinder ct ctxt = + (case Thm.term_of ct of + Const _ $ Abs _ => dest_cabs (Thm.dest_arg ct) ctxt + | _ => raise CTERM ("not a binder", [ct])) + +val dest_all_cbinders = repeat_yield (try o dest_cbinder) + +val mk_cprop = Thm.apply (Thm.cterm_of @{context} @{const Trueprop}) + +fun dest_cprop ct = + (case Thm.term_of ct of + @{const Trueprop} $ _ => Thm.dest_arg ct + | _ => raise CTERM ("not a property", [ct])) + +val equals = mk_const_pat @{theory} @{const_name Pure.eq} destT1 +fun mk_cequals ct cu = Thm.mk_binop (instT' ct equals) ct cu + +val dest_prop = (fn 
@{const Trueprop} $ t => t | t => t) +fun term_of ct = dest_prop (Thm.term_of ct) +fun prop_of thm = dest_prop (Thm.prop_of thm) + + +(* conversions *) + +fun if_conv pred cv1 cv2 ct = if pred (Thm.term_of ct) then cv1 ct else cv2 ct + +fun if_true_conv pred cv = if_conv pred cv Conv.all_conv + +fun if_exists_conv pred = if_true_conv (Term.exists_subterm pred) + +fun binders_conv cv ctxt = + Conv.binder_conv (binders_conv cv o snd) ctxt else_conv cv ctxt + +fun under_quant_conv cv ctxt = + let + fun quant_conv inside ctxt cvs ct = + (case Thm.term_of ct of + Const (@{const_name All}, _) $ Abs _ => + Conv.binder_conv (under_conv cvs) ctxt + | Const (@{const_name Ex}, _) $ Abs _ => + Conv.binder_conv (under_conv cvs) ctxt + | _ => if inside then cv (ctxt, cvs) else Conv.all_conv) ct + and under_conv cvs (cv, ctxt) = quant_conv true ctxt (cv :: cvs) + in quant_conv false ctxt [] end + +fun prop_conv cv ct = + (case Thm.term_of ct of + @{const Trueprop} $ _ => Conv.arg_conv cv ct + | _ => raise CTERM ("not a property", [ct])) + +end; diff --git a/src/test-gen/SMT/smtlib.ML b/src/test-gen/SMT/smtlib.ML new file mode 100644 index 0000000..e20b0ba --- /dev/null +++ b/src/test-gen/SMT/smtlib.ML @@ -0,0 +1,191 @@ +(* Title: HOL/Tools/SMT/smtlib.ML + Author: Sascha Boehme, TU Muenchen + +Parsing and generating SMT-LIB 2. +*) + +signature SMTLIB = +sig + exception PARSE of int * string + datatype tree = + Num of int | + Dec of int * int | + Str of string | + Sym of string | + Key of string | + S of tree list + val parse: string list -> tree + val pretty_tree: tree -> Pretty.T + val str_of: tree -> string +end; + +structure SMTLIB: SMTLIB = +struct + +(* data structures *) + +exception PARSE of int * string + +datatype tree = + Num of int | + Dec of int * int | + Str of string | + Sym of string | + Key of string | + S of tree list + +datatype unfinished = None | String of string | Symbol of string + + +(* utilities *) + +fun read_raw pred l cs = + (case take_prefix pred cs of + ([], []) => raise PARSE (l, "empty token") + | ([], c :: _) => raise PARSE (l, "unexpected character " ^ quote c) + | x => x) + + +(* numerals and decimals *) + +fun int_of cs = fst (read_int cs) + +fun read_num l cs = + (case read_raw Symbol.is_ascii_digit l cs of + (cs1, "." 
:: cs') => + let val (cs2, cs'') = read_raw Symbol.is_ascii_digit l cs' + in (Dec (int_of cs1, int_of cs2), cs'') end + | (cs1, cs2) => (Num (int_of cs1), cs2)) + + +(* binary numbers *) + +fun is_bin c = (c = "0" orelse c = "1") + +fun read_bin l cs = read_raw is_bin l cs |>> Num o fst o read_radix_int 2 + + +(* hex numbers *) + +val is_hex = member (op =) (raw_explode "0123456789abcdefABCDEF") + +fun within c1 c2 c = (ord c1 <= ord c andalso ord c <= ord c2) + +fun unhex i [] = i + | unhex i (c :: cs) = + if within "0" "9" c then unhex (i * 16 + (ord c - ord "0")) cs + else if within "a" "f" c then unhex (i * 16 + (ord c - ord "a" + 10)) cs + else if within "A" "F" c then unhex (i * 16 + (ord c - ord "A" + 10)) cs + else raise Fail ("bad hex character " ^ quote c) + +fun read_hex l cs = read_raw is_hex l cs |>> Num o unhex 0 + + +(* symbols *) + +val symbol_chars = raw_explode "~!@$%^&*_+=<>.?/-" + +fun is_sym c = + Symbol.is_ascii_letter c orelse + Symbol.is_ascii_digit c orelse + member (op =) symbol_chars c + +fun read_sym f l cs = read_raw is_sym l cs |>> f o implode + + +(* quoted tokens *) + +fun read_quoted stop (escape, replacement) cs = + let + fun read _ [] = NONE + | read rs (cs as (c :: cs')) = + if is_prefix (op =) stop cs then + SOME (implode (rev rs), drop (length stop) cs) + else if not (null escape) andalso is_prefix (op =) escape cs then + read (replacement :: rs) (drop (length escape) cs) + else read (c :: rs) cs' + in read [] cs end + +fun read_string cs = read_quoted ["\\", "\""] (["\\", "\\"], "\\") cs +fun read_symbol cs = read_quoted ["|"] ([], "") cs + + +(* core parser *) + +fun read _ [] rest tss = (rest, tss) + | read l ("(" :: cs) None tss = read l cs None ([] :: tss) + | read l (")" :: cs) None (ts1 :: ts2 :: tss) = + read l cs None ((S (rev ts1) :: ts2) :: tss) + | read l ("#" :: "x" :: cs) None (ts :: tss) = + token read_hex l cs ts tss + | read l ("#" :: "b" :: cs) None (ts :: tss) = + token read_bin l cs ts tss + | read l (":" :: cs) None (ts :: tss) = + token (read_sym Key) l cs ts tss + | read l ("\"" :: cs) None (ts :: tss) = + quoted read_string String Str l "" cs ts tss + | read l ("|" :: cs) None (ts :: tss) = + quoted read_symbol Symbol Sym l "" cs ts tss + | read l ((c as "!") :: cs) None (ts :: tss) = + token (fn _ => pair (Sym c)) l cs ts tss + | read l (c :: cs) None (ts :: tss) = + if Symbol.is_ascii_blank c then read l cs None (ts :: tss) + else if Symbol.is_digit c then token read_num l (c :: cs) ts tss + else token (read_sym Sym) l (c :: cs) ts tss + | read l cs (String s) (ts :: tss) = + quoted read_string String Str l s cs ts tss + | read l cs (Symbol s) (ts :: tss) = + quoted read_symbol Symbol Sym l s cs ts tss + | read l _ _ [] = raise PARSE (l, "bad parser state") + +and token f l cs ts tss = + let val (t, cs') = f l cs + in read l cs' None ((t :: ts) :: tss) end + +and quoted r f g l s cs ts tss = + (case r cs of + NONE => (f (s ^ implode cs), ts :: tss) + | SOME (s', cs') => read l cs' None ((g (s ^ s') :: ts) :: tss)) + + + +(* overall parser *) + +fun read_line l line = read l (raw_explode line) + +fun add_line line (l, (None, tss)) = + if size line = 0 orelse nth_string line 0 = ";" then (l + 1, (None, tss)) + else (l + 1, read_line l line None tss) + | add_line line (l, (unfinished, tss)) = + (l + 1, read_line l line unfinished tss) + +fun finish (_, (None, [[t]])) = t + | finish (l, _) = raise PARSE (l, "bad nesting") + +fun parse lines = finish (fold add_line lines (1, (None, [[]]))) + + +(* pretty printer *) + +fun 
pretty_tree (Num i) = Pretty.str (string_of_int i) + | pretty_tree (Dec (i, j)) = + Pretty.str (string_of_int i ^ "." ^ string_of_int j) + | pretty_tree (Str s) = + raw_explode s + |> maps (fn "\"" => ["\\", "\""] | "\\" => ["\\", "\\"] | c => [c]) + |> implode + |> enclose "\"" "\"" + |> Pretty.str + | pretty_tree (Sym s) = + if String.isPrefix "(" s (* for bit vector functions *) orelse + forall is_sym (raw_explode s) then + Pretty.str s + else + Pretty.str ("|" ^ s ^ "|") + | pretty_tree (Key s) = Pretty.str (":" ^ s) + | pretty_tree (S trees) = + Pretty.enclose "(" ")" (Pretty.separate "" (map pretty_tree trees)) + +val str_of = Pretty.unformatted_string_of o pretty_tree + +end; diff --git a/src/test-gen/SMT/smtlib_interface.ML b/src/test-gen/SMT/smtlib_interface.ML new file mode 100644 index 0000000..37ffb50 --- /dev/null +++ b/src/test-gen/SMT/smtlib_interface.ML @@ -0,0 +1,171 @@ +(* Title: HOL/Tools/SMT/smtlib_interface.ML + Author: Sascha Boehme, TU Muenchen + Author: Jasmin Blanchette, TU Muenchen + +Interface to SMT solvers based on the SMT-LIB 2 format. +*) + +signature SMTLIB_INTERFACE = +sig + val smtlibC: SMT_Util.class + val add_logic: int * (term list -> string option) -> Context.generic -> Context.generic + val translate_config: Proof.context -> SMT_Translate.config + val assert_name_of_index: int -> string + val assert_index_of_name: string -> int + val assert_prefix : string +end; + +structure SMTLIB_Interface: SMTLIB_INTERFACE = +struct + +val smtlibC = ["smtlib"] + + +(* builtins *) + +local + fun int_num _ i = SOME (string_of_int i) + + fun is_linear [t] = SMT_Util.is_number t + | is_linear [t, u] = SMT_Util.is_number t orelse SMT_Util.is_number u + | is_linear _ = false + + fun times _ _ ts = + let val mk = Term.list_comb o pair @{const times (int)} + in if is_linear ts then SOME ("*", 2, ts, mk) else NONE end +in + +val setup_builtins = + fold (SMT_Builtin.add_builtin_typ smtlibC) [ + (@{typ bool}, K (SOME "Bool"), K (K NONE)), + (@{typ int}, K (SOME "Int"), int_num)] #> + fold (SMT_Builtin.add_builtin_fun' smtlibC) [ + (@{const True}, "true"), + (@{const False}, "false"), + (@{const Not}, "not"), + (@{const HOL.conj}, "and"), + (@{const HOL.disj}, "or"), + (@{const HOL.implies}, "=>"), + (@{const HOL.eq ('a)}, "="), + (@{const If ('a)}, "ite"), + (@{const less (int)}, "<"), + (@{const less_eq (int)}, "<="), + (@{const uminus (int)}, "-"), + (@{const plus (int)}, "+"), + (@{const minus (int)}, "-")] #> + SMT_Builtin.add_builtin_fun smtlibC + (Term.dest_Const @{const times (int)}, times) + +end + + +(* serialization *) + +(** logic **) + +fun fst_int_ord ((i1, _), (i2, _)) = int_ord (i1, i2) + +structure Logics = Generic_Data +( + type T = (int * (term list -> string option)) list + val empty = [] + val extend = I + fun merge data = Ord_List.merge fst_int_ord data +) + +fun add_logic pf = Logics.map (Ord_List.insert fst_int_ord pf) + +fun choose_logic ctxt ts = + let + fun choose [] = "AUFLIA" + | choose ((_, f) :: fs) = (case f ts of SOME s => s | NONE => choose fs) + in + (case choose (Logics.get (Context.Proof ctxt)) of + "" => "" (* for default Z3 logic, a subset of everything *) + | logic => "(set-logic " ^ logic ^ ")\n") + end + + +(** serialization **) + +fun var i = "?v" ^ string_of_int i + +fun tree_of_sterm l (SMT_Translate.SVar i) = SMTLIB.Sym (var (l - i - 1)) + | tree_of_sterm _ (SMT_Translate.SApp (n, [])) = SMTLIB.Sym n + | tree_of_sterm l (SMT_Translate.SApp (n, ts)) = + SMTLIB.S (SMTLIB.Sym n :: map (tree_of_sterm l) ts) + | tree_of_sterm _ 
(SMT_Translate.SLet _) = + raise Fail "SMT-LIB: unsupported let expression" + | tree_of_sterm l (SMT_Translate.SQua (q, ss, pats, t)) = + let + val l' = l + length ss + + fun quant_name SMT_Translate.SForall = "forall" + | quant_name SMT_Translate.SExists = "exists" + + fun gen_trees_of_pat keyword ps = + [SMTLIB.Key keyword, SMTLIB.S (map (tree_of_sterm l') ps)] + fun trees_of_pat (SMT_Translate.SPat ps) = gen_trees_of_pat "pattern" ps + | trees_of_pat (SMT_Translate.SNoPat ps) = gen_trees_of_pat "no-pattern" ps + fun tree_of_pats [] t = t + | tree_of_pats pats t = SMTLIB.S (SMTLIB.Sym "!" :: t :: maps trees_of_pat pats) + + val vs = map_index (fn (i, ty) => + SMTLIB.S [SMTLIB.Sym (var (l + i)), SMTLIB.Sym ty]) ss + + val body = t + |> tree_of_sterm l' + |> tree_of_pats pats + in + SMTLIB.S [SMTLIB.Sym (quant_name q), SMTLIB.S vs, body] + end + + +fun sctrarg (sel, typ) = "(" ^ sel ^ " " ^ typ ^ ")" +fun sctr (name, args) = enclose "(" ")" (space_implode " " (name :: map sctrarg args)) +fun sdatatype (name, ctrs) = enclose "(" ")" (space_implode " " (name :: map sctr ctrs)) + +fun string_of_fun (f, (ss, s)) = f ^ " (" ^ space_implode " " ss ^ ") " ^ s + +fun named_sterm s t = SMTLIB.S [SMTLIB.Sym "!", t, SMTLIB.Key "named", SMTLIB.Sym s] + +val assert_prefix = "a" + +fun assert_name_of_index i = assert_prefix ^ string_of_int i +fun assert_index_of_name s = the_default ~1 (Int.fromString (unprefix assert_prefix s)) + +fun sdtyp (fp, dtyps) = + Buffer.add (enclose ("(declare-" ^ BNF_FP_Util.co_prefix fp ^ "datatypes () (") "))\n" + (space_implode "\n " (map sdatatype dtyps))) + +fun serialize smt_options comments {logic, sorts, dtyps, funcs} ts = + let + val unsat_core = member (op =) smt_options (":produce-unsat-cores", "true") + in + Buffer.empty + |> fold (Buffer.add o enclose "; " "\n") comments + |> fold (fn (k, v) => Buffer.add ("(set-option " ^ k ^ " " ^ v ^ ")\n")) smt_options + |> Buffer.add logic + |> fold (Buffer.add o enclose "(declare-sort " " 0)\n") (sort fast_string_ord sorts) + |> fold sdtyp (AList.coalesce (op =) dtyps) + |> fold (Buffer.add o enclose "(declare-fun " ")\n" o string_of_fun) + (sort (fast_string_ord o apply2 fst) funcs) + |> fold (fn (i, t) => Buffer.add (enclose "(assert " ")\n" + (SMTLIB.str_of (named_sterm (assert_name_of_index i) (tree_of_sterm 0 t))))) (map_index I ts) + |> Buffer.add "(check-sat)\n" + |> Buffer.add (if unsat_core then "(get-unsat-core)\n" else "(get-proof)\n") + |> Buffer.content + end + +(* interface *) + +fun translate_config ctxt = { + logic = choose_logic ctxt, + fp_kinds = [], + serialize = serialize} + +val _ = Theory.setup (Context.theory_map + (setup_builtins #> + SMT_Translate.add_config (smtlibC, translate_config))) + +end; diff --git a/src/test-gen/SMT/smtlib_isar.ML b/src/test-gen/SMT/smtlib_isar.ML new file mode 100644 index 0000000..2f7ae04 --- /dev/null +++ b/src/test-gen/SMT/smtlib_isar.ML @@ -0,0 +1,75 @@ +(* Title: HOL/Tools/SMT/smtlib_isar.ML + Author: Jasmin Blanchette, TU Muenchen + Author: Mathias Fleury, ENS Rennes + +General tools for Isar proof reconstruction. 
+*) + +signature SMTLIB_ISAR = +sig + val unlift_term: term list -> term -> term + val postprocess_step_conclusion: Proof.context -> thm list -> term list -> term -> term + val normalizing_prems : Proof.context -> term -> (string * string list) list + val distinguish_conjecture_and_hypothesis : ''a list -> ''b -> ''b -> ''b list -> + (''a * term) list -> term list -> term -> (ATP_Problem.atp_formula_role * term) option + val unskolemize_names: Proof.context -> term -> term +end; + +structure SMTLIB_Isar: SMTLIB_ISAR = +struct + +open ATP_Util +open ATP_Problem +open ATP_Proof_Reconstruct + +fun unlift_term ll_defs = + let + val lifted = map (ATP_Util.extract_lambda_def dest_Free o ATP_Util.hol_open_form I) ll_defs + + fun un_free (t as Free (s, _)) = + (case AList.lookup (op =) lifted s of + SOME t => un_term t + | NONE => t) + | un_free t = t + and un_term t = map_aterms un_free t + in un_term end + +(* Remove the "__" suffix for newly introduced variables (Skolems). It is not clear why "__" is + generated also for abstraction variables, but this is repaired here. *) +fun unskolemize_names ctxt = + Term.map_abs_vars (perhaps (try Name.dest_skolem)) + #> Term.map_aterms (perhaps (try (fn Free (s, T) => + Free (s |> not (Variable.is_fixed ctxt s) ? Name.dest_skolem, T)))) + +fun postprocess_step_conclusion ctxt rewrite_rules ll_defs = + let val thy = Proof_Context.theory_of ctxt in + Raw_Simplifier.rewrite_term thy rewrite_rules [] + #> Object_Logic.atomize_term ctxt + #> not (null ll_defs) ? unlift_term ll_defs + #> simplify_bool + #> unskolemize_names ctxt + #> HOLogic.mk_Trueprop + end + +fun normalizing_prems ctxt concl0 = + SMT_Normalize.case_bool_entry :: SMT_Normalize.special_quant_table @ + SMT_Normalize.abs_min_max_table + |> map_filter (fn (c, th) => + if exists_Const (curry (op =) c o fst) concl0 then + let val s = short_thm_name ctxt th in SOME (s, [s]) end + else + NONE) + +fun distinguish_conjecture_and_hypothesis ss id conjecture_id prem_ids fact_helper_ts hyp_ts + concl_t = + (case ss of + [s] => SOME (Axiom, the (AList.lookup (op =) fact_helper_ts s)) + | _ => + if id = conjecture_id then + SOME (Conjecture, concl_t) + else + (case find_index (curry (op =) id) prem_ids of + ~1 => NONE (* lambda-lifting definition *) + | i => SOME (Hypothesis, close_form (nth hyp_ts i)))) + +end; diff --git a/src/test-gen/SMT/smtlib_proof.ML b/src/test-gen/SMT/smtlib_proof.ML new file mode 100644 index 0000000..909b7a5 --- /dev/null +++ b/src/test-gen/SMT/smtlib_proof.ML @@ -0,0 +1,298 @@ +(* Title: HOL/Tools/SMT/smtlib_proof.ML + Author: Sascha Boehme, TU Muenchen + Author: Mathias Fleury, ENS Rennes + Author: Jasmin Blanchette, TU Muenchen + +SMT-LIB-2-style proofs: parsing and abstract syntax tree. 
+*) + +signature SMTLIB_PROOF = +sig + datatype 'b shared = Tree of SMTLIB.tree | Term of term | Proof of 'b | None + type ('a, 'b) context + + val mk_context: Proof.context -> int -> 'b shared Symtab.table -> typ Symtab.table -> + term Symtab.table -> 'a -> ('a, 'b) context + val empty_context: Proof.context -> typ Symtab.table -> term Symtab.table -> ('a list, 'b) context + val ctxt_of: ('a, 'b) context -> Proof.context + val lookup_binding: ('a, 'b) context -> string -> 'b shared + val update_binding: string * 'b shared -> ('a, 'b) context -> ('a, 'b) context + val with_bindings: (string * 'b shared) list -> (('a, 'b) context -> 'c * ('d, 'b) context) -> + ('a, 'b) context -> 'c * ('d, 'b) context + val next_id: ('a, 'b) context -> int * ('a, 'b) context + val with_fresh_names: (('a list, 'b) context -> + term * ((string * (string * typ)) list, 'b) context) -> ('c, 'b) context -> (term * string list) + + (*type and term parsers*) + type type_parser = SMTLIB.tree * typ list -> typ option + type term_parser = SMTLIB.tree * term list -> term option + val add_type_parser: type_parser -> Context.generic -> Context.generic + val add_term_parser: term_parser -> Context.generic -> Context.generic + + exception SMTLIB_PARSE of string * SMTLIB.tree + + val declare_fun: string -> typ -> ((string * typ) list, 'a) context -> + ((string * typ) list, 'a) context + val dest_binding: SMTLIB.tree -> string * 'a shared + val type_of: ('a, 'b) context -> SMTLIB.tree -> typ + val term_of: SMTLIB.tree -> ((string * (string * typ)) list, 'a) context -> + term * ((string * (string * typ)) list, 'a) context +end; + +structure SMTLIB_Proof: SMTLIB_PROOF = +struct + +(* proof parser context *) + +datatype 'b shared = Tree of SMTLIB.tree | Term of term | Proof of 'b | None + +type ('a, 'b) context = { + ctxt: Proof.context, + id: int, + syms: 'b shared Symtab.table, + typs: typ Symtab.table, + funs: term Symtab.table, + extra: 'a} + +fun mk_context ctxt id syms typs funs extra: ('a, 'b) context = + {ctxt = ctxt, id = id, syms = syms, typs = typs, funs = funs, extra = extra} + +fun empty_context ctxt typs funs = mk_context ctxt 1 Symtab.empty typs funs [] + +fun ctxt_of ({ctxt, ...}: ('a, 'b) context) = ctxt + +fun lookup_binding ({syms, ...}: ('a, 'b) context) = + the_default None o Symtab.lookup syms + +fun map_syms f ({ctxt, id, syms, typs, funs, extra}: ('a, 'b) context) = + mk_context ctxt id (f syms) typs funs extra + +fun update_binding b = map_syms (Symtab.update b) + +fun with_bindings bs f cx = + let val bs' = map (lookup_binding cx o fst) bs + in + cx + |> fold update_binding bs + |> f + ||> fold2 (fn (name, _) => update_binding o pair name) bs bs' + end + +fun next_id ({ctxt, id, syms, typs, funs, extra}: ('a, 'b) context) = + (id, mk_context ctxt (id + 1) syms typs funs extra) + +fun with_fresh_names f ({ctxt, id, syms, typs, funs, ...}: ('a, 'b) context) = + let + fun bind (_, v as (_, T)) t = Logic.all_const T $ Term.absfree v t + + val needs_inferT = equal Term.dummyT orf Term.is_TVar + val needs_infer = Term.exists_type (Term.exists_subtype needs_inferT) + fun infer_types ctxt = + singleton (Type_Infer_Context.infer_types ctxt) #> + singleton (Proof_Context.standard_term_check_finish ctxt) + fun infer ctxt t = if needs_infer t then infer_types ctxt t else t + + val (t, {ctxt = ctxt', extra = names, ...}: ((string * (string * typ)) list, 'b) context) = + f (mk_context ctxt id syms typs funs []) + val t' = infer ctxt' (fold_rev bind names (HOLogic.mk_Trueprop t)) + in + (t', map fst names) + end + 
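+(* A minimal usage sketch of the parser context above (illustration only, not part of the
+   original interface): the context threads a Proof.context, a counter for fresh step ids and
+   tables of bound symbols, declared types and declared functions.  A hypothetical caller
+   could build and extend it like this:
+
+     val cx = empty_context @{context} Symtab.empty Symtab.empty
+     val (i, cx') = next_id cx                        (* i = 1 for a fresh context *)
+     val cx'' = update_binding ("x", Term @{term "y :: int"}) cx'
+
+   where "x" and "y" are invented names used only for this example. *)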
+fun lookup_typ ({typs, ...}: ('a, 'b) context) = Symtab.lookup typs +fun lookup_fun ({funs, ...}: ('a, 'b) context) = Symtab.lookup funs + + +(* core type and term parser *) + +fun core_type_parser (SMTLIB.Sym "Bool", []) = SOME @{typ HOL.bool} + | core_type_parser (SMTLIB.Sym "Int", []) = SOME @{typ Int.int} + | core_type_parser _ = NONE + +fun mk_unary n t = + let val T = fastype_of t + in Const (n, T --> T) $ t end + +fun mk_binary' n T U t1 t2 = Const (n, [T, T] ---> U) $ t1 $ t2 + +fun mk_binary n t1 t2 = + let val T = fastype_of t1 + in mk_binary' n T T t1 t2 end + +fun mk_rassoc f t ts = + let val us = rev (t :: ts) + in fold f (tl us) (hd us) end + +fun mk_lassoc f t ts = fold (fn u1 => fn u2 => f u2 u1) ts t + +fun mk_lassoc' n = mk_lassoc (mk_binary n) + +fun mk_binary_pred n S t1 t2 = + let + val T1 = fastype_of t1 + val T2 = fastype_of t2 + val T = + if T1 <> Term.dummyT then T1 + else if T2 <> Term.dummyT then T2 + else TVar (("?a", serial ()), S) + in mk_binary' n T @{typ HOL.bool} t1 t2 end + +fun mk_less t1 t2 = mk_binary_pred @{const_name ord_class.less} @{sort linorder} t1 t2 +fun mk_less_eq t1 t2 = mk_binary_pred @{const_name ord_class.less_eq} @{sort linorder} t1 t2 + +fun core_term_parser (SMTLIB.Sym "true", _) = SOME @{const HOL.True} + | core_term_parser (SMTLIB.Sym "false", _) = SOME @{const HOL.False} + | core_term_parser (SMTLIB.Sym "not", [t]) = SOME (HOLogic.mk_not t) + | core_term_parser (SMTLIB.Sym "and", t :: ts) = SOME (mk_rassoc (curry HOLogic.mk_conj) t ts) + | core_term_parser (SMTLIB.Sym "or", t :: ts) = SOME (mk_rassoc (curry HOLogic.mk_disj) t ts) + | core_term_parser (SMTLIB.Sym "=>", [t1, t2]) = SOME (HOLogic.mk_imp (t1, t2)) + | core_term_parser (SMTLIB.Sym "implies", [t1, t2]) = SOME (HOLogic.mk_imp (t1, t2)) + | core_term_parser (SMTLIB.Sym "=", [t1, t2]) = SOME (HOLogic.mk_eq (t1, t2)) + | core_term_parser (SMTLIB.Sym "~", [t1, t2]) = SOME (HOLogic.mk_eq (t1, t2)) + | core_term_parser (SMTLIB.Sym "ite", [t1, t2, t3]) = + let + val T = fastype_of t2 + val c = Const (@{const_name HOL.If}, [@{typ HOL.bool}, T, T] ---> T) + in SOME (c $ t1 $ t2 $ t3) end + | core_term_parser (SMTLIB.Num i, []) = SOME (HOLogic.mk_number @{typ Int.int} i) + | core_term_parser (SMTLIB.Sym "-", [t]) = SOME (mk_unary @{const_name uminus_class.uminus} t) + | core_term_parser (SMTLIB.Sym "~", [t]) = SOME (mk_unary @{const_name uminus_class.uminus} t) + | core_term_parser (SMTLIB.Sym "+", t :: ts) = + SOME (mk_lassoc' @{const_name plus_class.plus} t ts) + | core_term_parser (SMTLIB.Sym "-", t :: ts) = + SOME (mk_lassoc' @{const_name minus_class.minus} t ts) + | core_term_parser (SMTLIB.Sym "*", t :: ts) = + SOME (mk_lassoc' @{const_name times_class.times} t ts) + | core_term_parser (SMTLIB.Sym "div", [t1, t2]) = SOME (mk_binary @{const_name z3div} t1 t2) + | core_term_parser (SMTLIB.Sym "mod", [t1, t2]) = SOME (mk_binary @{const_name z3mod} t1 t2) + | core_term_parser (SMTLIB.Sym "<", [t1, t2]) = SOME (mk_less t1 t2) + | core_term_parser (SMTLIB.Sym ">", [t1, t2]) = SOME (mk_less t2 t1) + | core_term_parser (SMTLIB.Sym "<=", [t1, t2]) = SOME (mk_less_eq t1 t2) + | core_term_parser (SMTLIB.Sym ">=", [t1, t2]) = SOME (mk_less_eq t2 t1) + | core_term_parser _ = NONE + + +(* custom type and term parsers *) + +type type_parser = SMTLIB.tree * typ list -> typ option + +type term_parser = SMTLIB.tree * term list -> term option + +fun id_ord ((id1, _), (id2, _)) = int_ord (id1, id2) + +structure Parsers = Generic_Data +( + type T = (int * type_parser) list * (int * term_parser) 
list + val empty : T = ([(serial (), core_type_parser)], [(serial (), core_term_parser)]) + val extend = I + fun merge ((tys1, ts1), (tys2, ts2)) = + (Ord_List.merge id_ord (tys1, tys2), Ord_List.merge id_ord (ts1, ts2)) +) + +fun add_type_parser type_parser = + Parsers.map (apfst (Ord_List.insert id_ord (serial (), type_parser))) + +fun add_term_parser term_parser = + Parsers.map (apsnd (Ord_List.insert id_ord (serial (), term_parser))) + +fun get_type_parsers ctxt = map snd (fst (Parsers.get (Context.Proof ctxt))) +fun get_term_parsers ctxt = map snd (snd (Parsers.get (Context.Proof ctxt))) + +fun apply_parsers parsers x = + let + fun apply [] = NONE + | apply (parser :: parsers) = + (case parser x of + SOME y => SOME y + | NONE => apply parsers) + in apply parsers end + + +(* type and term parsing *) + +exception SMTLIB_PARSE of string * SMTLIB.tree + +val desymbolize = Name.desymbolize (SOME false) o perhaps (try (unprefix "?")) + +fun fresh_fun add name n T ({ctxt, id, syms, typs, funs, extra}: ('a, 'b) context) = + let + val (n', ctxt') = yield_singleton Variable.variant_fixes n ctxt + val t = Free (n', T) + val funs' = Symtab.update (name, t) funs + in (t, mk_context ctxt' id syms typs funs' (add (n', T) extra)) end + +fun declare_fun name = snd oo fresh_fun cons name (desymbolize name) +fun declare_free name = fresh_fun (cons o pair name) name (desymbolize name) + +fun parse_type cx ty Ts = + (case apply_parsers (get_type_parsers (ctxt_of cx)) (ty, Ts) of + SOME T => T + | NONE => + (case ty of + SMTLIB.Sym name => + (case lookup_typ cx name of + SOME T => T + | NONE => raise SMTLIB_PARSE ("unknown SMT type", ty)) + | _ => raise SMTLIB_PARSE ("bad SMT type format", ty))) + +fun parse_term t ts cx = + (case apply_parsers (get_term_parsers (ctxt_of cx)) (t, ts) of + SOME u => (u, cx) + | NONE => + (case t of + SMTLIB.Sym name => + (case lookup_fun cx name of + SOME u => (Term.list_comb (u, ts), cx) + | NONE => + if null ts then declare_free name Term.dummyT cx + else raise SMTLIB_PARSE ("bad SMT term", t)) + | _ => raise SMTLIB_PARSE ("bad SMT term format", t))) + +fun type_of cx ty = + (case try (parse_type cx ty) [] of + SOME T => T + | NONE => + (case ty of + SMTLIB.S (ty' :: tys) => parse_type cx ty' (map (type_of cx) tys) + | _ => raise SMTLIB_PARSE ("bad SMT type", ty))) + +fun dest_var cx (SMTLIB.S [SMTLIB.Sym name, ty]) = (name, (desymbolize name, type_of cx ty)) + | dest_var _ v = raise SMTLIB_PARSE ("bad SMT quantifier variable format", v) + +fun dest_body (SMTLIB.S (SMTLIB.Sym "!" :: body :: _)) = dest_body body + | dest_body body = body + +fun dest_binding (SMTLIB.S [SMTLIB.Sym name, t]) = (name, Tree t) + | dest_binding b = raise SMTLIB_PARSE ("bad SMT let binding format", b) + +fun term_of t cx = + (case t of + SMTLIB.S [SMTLIB.Sym "forall", SMTLIB.S vars, body] => quant HOLogic.mk_all vars body cx + | SMTLIB.S [SMTLIB.Sym "exists", SMTLIB.S vars, body] => quant HOLogic.mk_exists vars body cx + | SMTLIB.S [SMTLIB.Sym "let", SMTLIB.S bindings, body] => + with_bindings (map dest_binding bindings) (term_of body) cx + | SMTLIB.S (SMTLIB.Sym "!" 
:: t :: _) => term_of t cx + | SMTLIB.S (f :: args) => + cx + |> fold_map term_of args + |-> parse_term f + | SMTLIB.Sym name => + (case lookup_binding cx name of + Tree u => + cx + |> term_of u + |-> (fn u' => pair u' o update_binding (name, Term u')) + | Term u => (u, cx) + | None => parse_term t [] cx + | _ => raise SMTLIB_PARSE ("bad SMT term format", t)) + | _ => parse_term t [] cx) + +and quant q vars body cx = + let val vs = map (dest_var cx) vars + in + cx + |> with_bindings (map (apsnd (Term o Free)) vs) (term_of (dest_body body)) + |>> fold_rev (fn (_, (n, T)) => fn t => q (n, T, t)) vs + end + +end; diff --git a/src/test-gen/SMT/verit_isar.ML b/src/test-gen/SMT/verit_isar.ML new file mode 100644 index 0000000..28ee6d9 --- /dev/null +++ b/src/test-gen/SMT/verit_isar.ML @@ -0,0 +1,60 @@ +(* Title: HOL/Tools/SMT/verit_isar.ML + Author: Mathias Fleury, TU Muenchen + Author: Jasmin Blanchette, TU Muenchen + +VeriT proofs as generic ATP proofs for Isar proof reconstruction. +*) + +signature VERIT_ISAR = +sig + type ('a, 'b) atp_step = ('a, 'b) ATP_Proof.atp_step + val atp_proof_of_veriT_proof: Proof.context -> term list -> thm list -> term list -> term -> + (string * term) list -> int list -> int -> (int * string) list -> VeriT_Proof.veriT_step list -> + (term, string) ATP_Proof.atp_step list +end; + +structure VeriT_Isar: VERIT_ISAR = +struct + +open ATP_Util +open ATP_Problem +open ATP_Proof +open ATP_Proof_Reconstruct +open SMTLIB_Interface +open SMTLIB_Isar +open VeriT_Proof + +fun atp_proof_of_veriT_proof ctxt ll_defs rewrite_rules hyp_ts concl_t fact_helper_ts prem_ids + conjecture_id fact_helper_ids = + let + fun steps_of (VeriT_Proof.VeriT_Step {id, rule, prems, concl, ...}) = + let + val concl' = postprocess_step_conclusion ctxt rewrite_rules ll_defs concl + fun standard_step role = ((id, []), role, concl', rule, map (rpair []) prems) + in + if rule = veriT_input_rule then + let + val id_num = the (Int.fromString (unprefix assert_prefix id)) + val ss = the_list (AList.lookup (op =) fact_helper_ids id_num) + in + (case distinguish_conjecture_and_hypothesis ss id_num conjecture_id prem_ids + fact_helper_ts hyp_ts concl_t of + NONE => [] + | SOME (role0, concl00) => + let + val name0 = (id ^ "a", ss) + val concl0 = unskolemize_names ctxt concl00 + in + [(name0, role0, concl0, rule, []), + ((id, []), Plain, concl', veriT_rewrite_rule, + name0 :: normalizing_prems ctxt concl0)] + end) + end + else + [standard_step (if null prems then Lemma else Plain)] + end + in + maps steps_of + end + +end; diff --git a/src/test-gen/SMT/verit_proof.ML b/src/test-gen/SMT/verit_proof.ML new file mode 100644 index 0000000..1dab112 --- /dev/null +++ b/src/test-gen/SMT/verit_proof.ML @@ -0,0 +1,324 @@ +(* Title: HOL/Tools/SMT/verit_proof.ML + Author: Mathias Fleury, ENS Rennes + Author: Sascha Boehme, TU Muenchen + +VeriT proofs: parsing and abstract syntax tree. 
+*) + +signature VERIT_PROOF = +sig + (*proofs*) + datatype veriT_step = VeriT_Step of { + id: string, + rule: string, + prems: string list, + concl: term, + fixes: string list} + + (*proof parser*) + val parse: typ Symtab.table -> term Symtab.table -> string list -> + Proof.context -> veriT_step list * Proof.context + + val veriT_step_prefix : string + val veriT_input_rule: string + val veriT_la_generic_rule : string + val veriT_rewrite_rule : string + val veriT_simp_arith_rule : string + val veriT_tmp_ite_elim_rule : string + val veriT_tmp_skolemize_rule : string +end; + +structure VeriT_Proof: VERIT_PROOF = +struct + +open SMTLIB_Proof + +datatype veriT_node = VeriT_Node of { + id: string, + rule: string, + prems: string list, + concl: term, + bounds: string list} + +fun mk_node id rule prems concl bounds = + VeriT_Node {id = id, rule = rule, prems = prems, concl = concl, bounds = bounds} + +datatype veriT_step = VeriT_Step of { + id: string, + rule: string, + prems: string list, + concl: term, + fixes: string list} + +fun mk_step id rule prems concl fixes = + VeriT_Step {id = id, rule = rule, prems = prems, concl = concl, fixes = fixes} + +val veriT_step_prefix = ".c" +val veriT_input_rule = "input" +val veriT_la_generic_rule = "la_generic" +val veriT_rewrite_rule = "__rewrite" (* arbitrary *) +val veriT_simp_arith_rule = "simp_arith" +val veriT_tmp_alphaconv_rule = "tmp_alphaconv" +val veriT_tmp_ite_elim_rule = "tmp_ite_elim" +val veriT_tmp_skolemize_rule = "tmp_skolemize" + +(* proof parser *) + +fun node_of p cx = + ([], cx) + ||>> `(with_fresh_names (term_of p)) + |>> snd + +(*in order to get Z3-style quantification*) +fun repair_quantification (SMTLIB.S (SMTLIB.Sym "forall" :: l)) = + let val (quantified_vars, t) = split_last (map repair_quantification l) + in + SMTLIB.S (SMTLIB.Sym "forall" :: SMTLIB.S quantified_vars :: t :: []) + end + | repair_quantification (SMTLIB.S (SMTLIB.Sym "exists" :: l)) = + let val (quantified_vars, t) = split_last (map repair_quantification l) + in + SMTLIB.S (SMTLIB.Sym "exists" :: SMTLIB.S quantified_vars :: t :: []) + end + | repair_quantification (SMTLIB.S l) = SMTLIB.S (map repair_quantification l) + | repair_quantification x = x + +fun replace_bound_var_by_free_var (q $ Abs (var, ty, u)) free_var = + (case List.find (fn v => String.isPrefix v var) free_var of + NONE => q $ Abs (var, ty, replace_bound_var_by_free_var u free_var) + | SOME _ => replace_bound_var_by_free_var (Term.subst_bound (Free (var, ty), u)) free_var) + | replace_bound_var_by_free_var (u $ v) free_vars = replace_bound_var_by_free_var u free_vars $ + replace_bound_var_by_free_var v free_vars + | replace_bound_var_by_free_var u _ = u + +fun find_type_in_formula (Abs (v, T, u)) var_name = + if String.isPrefix var_name v then SOME T else find_type_in_formula u var_name + | find_type_in_formula (u $ v) var_name = + (case find_type_in_formula u var_name of + NONE => find_type_in_formula v var_name + | some_T => some_T) + | find_type_in_formula _ _ = NONE + +fun add_bound_variables_to_ctxt concl = + fold (update_binding o + (fn s => (s, Term (Free (s, the_default dummyT (find_type_in_formula concl s)))))) + +fun update_step_and_cx (node as VeriT_Node {id, rule, prems, concl, bounds}) cx = + if rule = veriT_tmp_ite_elim_rule then + (mk_node id rule prems concl bounds, add_bound_variables_to_ctxt concl bounds cx) + else if rule = veriT_tmp_skolemize_rule then + let val concl' = replace_bound_var_by_free_var concl bounds in + (mk_node id rule prems concl' [], add_bound_variables_to_ctxt 
concl bounds cx) + end + else + (node, cx) + +fun fix_subproof_steps ((((id_of_father_step, rule), prems), subproof), ((step_concl, bounds), + cx)) = + let + fun mk_prop_of_term concl = + concl |> fastype_of concl = @{typ bool} ? curry (op $) @{term Trueprop} + fun update_prems assumption_id prems = + map (fn prem => id_of_father_step ^ prem) + (filter_out (curry (op =) assumption_id) prems) + fun inline_assumption assumption assumption_id + (VeriT_Node {id, rule, prems, concl, bounds}) = + mk_node id rule (update_prems assumption_id prems) + (@{const Pure.imp} $ mk_prop_of_term assumption $ mk_prop_of_term concl) bounds + fun find_input_steps_and_inline [] last_step = ([], last_step) + | find_input_steps_and_inline (VeriT_Node {id = id', rule, prems, concl, bounds} :: steps) + last_step = + if rule = veriT_input_rule then + find_input_steps_and_inline (map (inline_assumption concl id') steps) last_step + else + apfst (cons (mk_node (id_of_father_step ^ id') rule prems concl bounds)) + (find_input_steps_and_inline steps (id_of_father_step ^ id')) + val (subproof', last_step_id) = find_input_steps_and_inline subproof "" + val prems' = + if last_step_id = "" then + prems + else + (case prems of + NONE => SOME [last_step_id] + | SOME l => SOME (last_step_id :: l)) + in + (subproof', (((((id_of_father_step, rule), prems'), step_concl), bounds), cx)) + end + +(* +(set id rule :clauses(...) :args(..) :conclusion (...)). +or +(set id subproof (set ...) :conclusion (...)). +*) + +fun parse_proof_step cx = + let + fun rotate_pair (a, (b, c)) = ((a, b), c) + fun get_id (SMTLIB.S [SMTLIB.Sym "set", SMTLIB.Sym id, SMTLIB.S l]) = (id, l) + | get_id t = raise Fail ("unrecognized VeriT proof " ^ @{make_string} t) + fun parse_rule (SMTLIB.Sym rule :: l) = (rule, l) + fun parse_source (SMTLIB.Key "clauses" :: SMTLIB.S source ::l) = + (SOME (map (fn (SMTLIB.Sym id) => id) source), l) + | parse_source l = (NONE, l) + fun parse_subproof cx id_of_father_step ((subproof_step as SMTLIB.S (SMTLIB.Sym "set" :: _)) :: l) = + let val (subproof_steps, cx') = parse_proof_step cx subproof_step in + apfst (apfst (curry (op @) subproof_steps)) (parse_subproof cx' id_of_father_step l) + end + | parse_subproof cx _ l = (([], cx), l) + fun skip_args (SMTLIB.Key "args" :: SMTLIB.S _ :: l) = l + | skip_args l = l + fun parse_conclusion (SMTLIB.Key "conclusion" :: SMTLIB.S concl :: []) = concl + fun make_or_from_clausification l = + foldl1 (fn ((concl1, bounds1), (concl2, bounds2)) => + (HOLogic.mk_disj (perhaps (try HOLogic.dest_Trueprop) concl1, + perhaps (try HOLogic.dest_Trueprop) concl2), bounds1 @ bounds2)) l + fun to_node (((((id, rule), prems), concl), bounds), cx) = + (mk_node id rule (the_default [] prems) concl bounds, cx) + in + get_id + ##> parse_rule + #> rotate_pair + ##> parse_source + #> rotate_pair + ##> skip_args + #> (fn (((id, rule), prems), sub) => (((id, rule), prems), parse_subproof cx id sub)) + #> rotate_pair + ##> parse_conclusion + ##> map repair_quantification + #> (fn ((((id, rule), prems), (subproof, cx)), terms) => + (((((id, rule), prems), subproof), fold_map (fn t => fn cx => node_of t cx) terms cx))) + ##> apfst (fn [] => (@{const False}, []) | concls => make_or_from_clausification concls) + #> fix_subproof_steps + ##> to_node + #> (fn (subproof, (step, cx)) => (subproof @ [step], cx)) + #-> fold_map update_step_and_cx + end + +(*subproofs are written on multiple lines: SMTLIB can not parse then, because parentheses are +unbalanced on each line*) +fun seperate_into_steps lines = + let + fun 
count ("(" :: l) n = count l (n + 1) + | count (")" :: l) n = count l (n - 1) + | count (_ :: l) n = count l n + | count [] n = n + fun seperate (line :: l) actual_lines m = + let val n = count (raw_explode line) 0 in + if m + n = 0 then + [actual_lines ^ line] :: seperate l "" 0 + else + seperate l (actual_lines ^ line) (m + n) + end + | seperate [] _ 0 = [] + in + seperate lines "" 0 + end + +(* VeriT adds "@" before every variable. *) +fun remove_all_at (SMTLIB.Sym v :: l) = + SMTLIB.Sym (perhaps (try (unprefix "@")) v) :: remove_all_at l + | remove_all_at (SMTLIB.S l :: l') = SMTLIB.S (remove_all_at l) :: remove_all_at l' + | remove_all_at (SMTLIB.Key v :: l) = SMTLIB.Key v :: remove_all_at l + | remove_all_at (v :: l) = v :: remove_all_at l + | remove_all_at [] = [] + +fun find_in_which_step_defined var (VeriT_Node {id, bounds, ...} :: l) = + (case List.find (fn v => String.isPrefix v var) bounds of + NONE => find_in_which_step_defined var l + | SOME _ => id) + | find_in_which_step_defined var _ = raise Fail ("undefined " ^ var) + +(*Yes every case is possible: the introduced var is not on a special size of the equality sign.*) +fun find_ite_var_in_term (Const (@{const_name If}, _) $ _ $ + (Const (@{const_name HOL.eq}, _) $ Free (var1, _) $ Free (var2, _) ) $ + (Const (@{const_name HOL.eq}, _) $ Free (var3, _) $ Free (var4, _) )) = + let + fun get_number_of_ite_transformed_var var = + perhaps (try (unprefix "ite")) var + |> Int.fromString + fun is_equal_and_has_correct_substring var var' var'' = + if var = var' andalso String.isPrefix "ite" var then SOME var' + else if var = var'' andalso String.isPrefix "ite" var then SOME var'' else NONE + val var1_introduced_var = is_equal_and_has_correct_substring var1 var3 var4 + val var2_introduced_var = is_equal_and_has_correct_substring var3 var1 var2 + in + (case (var1_introduced_var, var2_introduced_var) of + (SOME a, SOME b) => + (*ill-generated case, might be possible when applying the rule to max a a. Only if the + variable have been introduced before. Probably an impossible edge case*) + (case (get_number_of_ite_transformed_var a, get_number_of_ite_transformed_var b) of + (SOME a, SOME b) => if a < b then var2_introduced_var else var1_introduced_var + (*Otherwise, it is a name clase between a parameter name and the introduced variable. 
+ Or the name convention has been changed.*) + | (NONE, SOME _) => var2_introduced_var + | (SOME _, NONE) => var2_introduced_var) + | (_, SOME _) => var2_introduced_var + | (SOME _, _) => var1_introduced_var) + end + | find_ite_var_in_term (Const (@{const_name If}, _) $ _ $ + (Const (@{const_name HOL.eq}, _) $ Free (var, _) $ _ ) $ + (Const (@{const_name HOL.eq}, _) $ Free (var', _) $ _ )) = + if var = var' then SOME var else NONE + | find_ite_var_in_term (Const (@{const_name If}, _) $ _ $ + (Const (@{const_name HOL.eq}, _) $ _ $ Free (var, _)) $ + (Const (@{const_name HOL.eq}, _) $ _ $ Free (var', _))) = + if var = var' then SOME var else NONE + | find_ite_var_in_term (p $ q) = + (case find_ite_var_in_term p of + NONE => find_ite_var_in_term q + | x => x) + | find_ite_var_in_term (Abs (_, _, body)) = find_ite_var_in_term body + | find_ite_var_in_term _ = NONE + +fun correct_veriT_step steps (node as VeriT_Node {id, rule, prems, concl, bounds}) = + if rule = veriT_tmp_ite_elim_rule then + if bounds = [] then + (*if the introduced var has already been defined, adding the definition as a dependency*) + let + val new_prems = prems + |> (case find_ite_var_in_term concl of + NONE => I + | SOME var => cons (find_in_which_step_defined var steps)) + in + VeriT_Node {id = id, rule = rule, prems = new_prems, concl = concl, bounds = bounds} + end + else + (*some new variables are created*) + let val concl' = replace_bound_var_by_free_var concl bounds in + mk_node id rule prems concl' [] + end + else + node + +fun remove_alpha_conversion _ [] = [] + | remove_alpha_conversion replace_table (VeriT_Node {id, rule, prems, concl, bounds} :: steps) = + let + val correct_dependency = map (perhaps (Symtab.lookup replace_table)) + val find_predecessor = perhaps (Symtab.lookup replace_table) + in + if rule = veriT_tmp_alphaconv_rule then + remove_alpha_conversion (Symtab.update (id, find_predecessor (hd prems)) + replace_table) steps + else + VeriT_Node {id = id, rule = rule, prems = correct_dependency prems, + concl = concl, bounds = bounds} :: remove_alpha_conversion replace_table steps + end + +fun correct_veriT_steps steps = + steps + |> map (correct_veriT_step steps) + |> remove_alpha_conversion Symtab.empty + +fun parse typs funs lines ctxt = + let + val smtlib_lines_without_at = remove_all_at (map SMTLIB.parse (seperate_into_steps lines)) + val (u, env) = apfst flat (fold_map (fn l => fn cx => parse_proof_step cx l) + smtlib_lines_without_at (empty_context ctxt typs funs)) + val t = correct_veriT_steps u + fun node_to_step (VeriT_Node {id, rule, prems, concl, bounds, ...}) = + mk_step id rule prems concl bounds + in + (map node_to_step t, ctxt_of env) + end + +end; diff --git a/src/test-gen/SMT/verit_proof_parse.ML b/src/test-gen/SMT/verit_proof_parse.ML new file mode 100644 index 0000000..cddc609 --- /dev/null +++ b/src/test-gen/SMT/verit_proof_parse.ML @@ -0,0 +1,78 @@ +(* Title: HOL/Tools/SMT/verit_proof_parse.ML + Author: Mathias Fleury, TU Muenchen + Author: Jasmin Blanchette, TU Muenchen + +VeriT proof parsing. 
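+
+Roughly, the parsed veriT steps are combined with steps for the asserted
+premises and facts that the proof actually uses, and the result is packaged
+as an ATP-style proof for Isar reconstruction (see parse_proof below).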
+*) + +signature VERIT_PROOF_PARSE = +sig + type ('a, 'b) atp_step = ('a, 'b) ATP_Proof.atp_step + val parse_proof: SMT_Translate.replay_data -> + ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> + SMT_Solver.parsed_proof +end; + +structure VeriT_Proof_Parse: VERIT_PROOF_PARSE = +struct + +open ATP_Util +open ATP_Problem +open ATP_Proof +open ATP_Proof_Reconstruct +open VeriT_Isar +open VeriT_Proof + +fun add_used_asserts_in_step (VeriT_Proof.VeriT_Step {prems, ...}) = + union (op =) (map_filter (try SMTLIB_Interface.assert_index_of_name) prems) + +fun parse_proof + ({context = ctxt, typs, terms, ll_defs, rewrite_rules, assms} : SMT_Translate.replay_data) + xfacts prems concl output = + let + val num_ll_defs = length ll_defs + + val id_of_index = Integer.add num_ll_defs + val index_of_id = Integer.add (~ num_ll_defs) + + fun step_of_assume j (_, th) = + VeriT_Proof.VeriT_Step {id = SMTLIB_Interface.assert_name_of_index (id_of_index j), + rule = veriT_input_rule, prems = [], concl = Thm.prop_of th, fixes = []} + + val (actual_steps, _) = VeriT_Proof.parse typs terms output ctxt + val used_assert_ids = fold add_used_asserts_in_step actual_steps [] + val used_assm_js = + map_filter (fn id => let val i = index_of_id id in if i >= 0 then SOME i else NONE end) + used_assert_ids + val used_assms = map (nth assms) used_assm_js + val assm_steps = map2 step_of_assume used_assm_js used_assms + val steps = assm_steps @ actual_steps + + val conjecture_i = 0 + val prems_i = conjecture_i + 1 + val num_prems = length prems + val facts_i = prems_i + num_prems + val num_facts = length xfacts + val helpers_i = facts_i + num_facts + + val conjecture_id = id_of_index conjecture_i + val prem_ids = map id_of_index (prems_i upto prems_i + num_prems - 1) + val fact_ids' = + map_filter (fn j => + let val (i, _) = nth assms j in + try (apsnd (nth xfacts)) (id_of_index j, i - facts_i) + end) used_assm_js + val helper_ids' = filter (fn (i, _) => i >= helpers_i) used_assms + + val fact_helper_ts = + map (fn (_, th) => (ATP_Util.short_thm_name ctxt th, Thm.prop_of th)) helper_ids' @ + map (fn (_, ((s, _), th)) => (s, Thm.prop_of th)) fact_ids' + val fact_helper_ids' = + map (apsnd (ATP_Util.short_thm_name ctxt)) helper_ids' @ map (apsnd (fst o fst)) fact_ids' + in + {outcome = NONE, fact_ids = SOME fact_ids', + atp_proof = fn () => atp_proof_of_veriT_proof ctxt ll_defs rewrite_rules prems concl + fact_helper_ts prem_ids conjecture_id fact_helper_ids' steps} + end + +end; diff --git a/src/test-gen/SMT/z3_interface.ML b/src/test-gen/SMT/z3_interface.ML new file mode 100644 index 0000000..588458a --- /dev/null +++ b/src/test-gen/SMT/z3_interface.ML @@ -0,0 +1,192 @@ +(* Title: HOL/Tools/SMT/z3_interface.ML + Author: Sascha Boehme, TU Muenchen + +Interface to Z3 based on a relaxed version of SMT-LIB. 
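+
+The structure registers the SMT solver class for Z3, adds an extra
+normalization step that brings in the div_as_z3div/mod_as_z3mod rewrites
+whenever integer div/mod occurs, and provides constructors that turn Z3's
+builtin symbols back into certified terms during proof parsing.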
+*) + +signature Z3_INTERFACE = +sig + val smtlib_z3C: SMT_Util.class + + datatype sym = Sym of string * sym list + type mk_builtins = { + mk_builtin_typ: sym -> typ option, + mk_builtin_num: theory -> int -> typ -> cterm option, + mk_builtin_fun: theory -> sym -> cterm list -> cterm option } + val add_mk_builtins: mk_builtins -> Context.generic -> Context.generic + val mk_builtin_typ: Proof.context -> sym -> typ option + val mk_builtin_num: Proof.context -> int -> typ -> cterm option + val mk_builtin_fun: Proof.context -> sym -> cterm list -> cterm option + + val is_builtin_theory_term: Proof.context -> term -> bool +end; + +structure Z3_Interface: Z3_INTERFACE = +struct + +val smtlib_z3C = SMTLIB_Interface.smtlibC @ ["z3"] + + +(* interface *) + +local + fun translate_config ctxt = + {logic = K "", fp_kinds = [BNF_Util.Least_FP], + serialize = #serialize (SMTLIB_Interface.translate_config ctxt)} + + fun is_div_mod @{const divide (int)} = true + | is_div_mod @{const modulo (int)} = true + | is_div_mod _ = false + + val have_int_div_mod = exists (Term.exists_subterm is_div_mod o Thm.prop_of) + + fun add_div_mod _ (thms, extra_thms) = + if have_int_div_mod thms orelse have_int_div_mod extra_thms then + (thms, @{thms div_as_z3div mod_as_z3mod} @ extra_thms) + else (thms, extra_thms) + + val setup_builtins = + SMT_Builtin.add_builtin_fun' smtlib_z3C (@{const times (int)}, "*") #> + SMT_Builtin.add_builtin_fun' smtlib_z3C (@{const z3div}, "div") #> + SMT_Builtin.add_builtin_fun' smtlib_z3C (@{const z3mod}, "mod") +in + +val _ = Theory.setup (Context.theory_map ( + setup_builtins #> + SMT_Normalize.add_extra_norm (smtlib_z3C, add_div_mod) #> + SMT_Translate.add_config (smtlib_z3C, translate_config))) + +end + + +(* constructors *) + +datatype sym = Sym of string * sym list + + +(** additional constructors **) + +type mk_builtins = { + mk_builtin_typ: sym -> typ option, + mk_builtin_num: theory -> int -> typ -> cterm option, + mk_builtin_fun: theory -> sym -> cterm list -> cterm option } + +fun chained _ [] = NONE + | chained f (b :: bs) = (case f b of SOME y => SOME y | NONE => chained f bs) + +fun chained_mk_builtin_typ bs sym = + chained (fn {mk_builtin_typ=mk, ...} : mk_builtins => mk sym) bs + +fun chained_mk_builtin_num ctxt bs i T = + let val thy = Proof_Context.theory_of ctxt + in chained (fn {mk_builtin_num=mk, ...} : mk_builtins => mk thy i T) bs end + +fun chained_mk_builtin_fun ctxt bs s cts = + let val thy = Proof_Context.theory_of ctxt + in chained (fn {mk_builtin_fun=mk, ...} : mk_builtins => mk thy s cts) bs end + +fun fst_int_ord ((i1, _), (i2, _)) = int_ord (i1, i2) + +structure Mk_Builtins = Generic_Data +( + type T = (int * mk_builtins) list + val empty = [] + val extend = I + fun merge data = Ord_List.merge fst_int_ord data +) + +fun add_mk_builtins mk = Mk_Builtins.map (Ord_List.insert fst_int_ord (serial (), mk)) + +fun get_mk_builtins ctxt = map snd (Mk_Builtins.get (Context.Proof ctxt)) + + +(** basic and additional constructors **) + +fun mk_builtin_typ _ (Sym ("Bool", _)) = SOME @{typ bool} + | mk_builtin_typ _ (Sym ("Int", _)) = SOME @{typ int} + | mk_builtin_typ _ (Sym ("bool", _)) = SOME @{typ bool} (*FIXME: legacy*) + | mk_builtin_typ _ (Sym ("int", _)) = SOME @{typ int} (*FIXME: legacy*) + | mk_builtin_typ ctxt sym = chained_mk_builtin_typ (get_mk_builtins ctxt) sym + +fun mk_builtin_num _ i @{typ int} = SOME (Numeral.mk_cnumber @{ctyp int} i) + | mk_builtin_num ctxt i T = + chained_mk_builtin_num ctxt (get_mk_builtins ctxt) i T + +val mk_true = Thm.cterm_of 
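+(* certified-term constructors for Z3's builtin symbols; note that "true" is
+   encoded as ~ False below, and n-ary conjunctions/disjunctions are rebuilt
+   by folding the binary connective (see mk_nary) *)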
@{context} (@{const Not} $ @{const False}) +val mk_false = Thm.cterm_of @{context} @{const False} +val mk_not = Thm.apply (Thm.cterm_of @{context} @{const Not}) +val mk_implies = Thm.mk_binop (Thm.cterm_of @{context} @{const HOL.implies}) +val mk_iff = Thm.mk_binop (Thm.cterm_of @{context} @{const HOL.eq (bool)}) +val conj = Thm.cterm_of @{context} @{const HOL.conj} +val disj = Thm.cterm_of @{context} @{const HOL.disj} + +fun mk_nary _ cu [] = cu + | mk_nary ct _ cts = uncurry (fold_rev (Thm.mk_binop ct)) (split_last cts) + +val eq = SMT_Util.mk_const_pat @{theory} @{const_name HOL.eq} SMT_Util.destT1 +fun mk_eq ct cu = Thm.mk_binop (SMT_Util.instT' ct eq) ct cu + +val if_term = + SMT_Util.mk_const_pat @{theory} @{const_name If} (SMT_Util.destT1 o SMT_Util.destT2) +fun mk_if cc ct = Thm.mk_binop (Thm.apply (SMT_Util.instT' ct if_term) cc) ct + +val access = SMT_Util.mk_const_pat @{theory} @{const_name fun_app} SMT_Util.destT1 +fun mk_access array = Thm.apply (SMT_Util.instT' array access) array + +val update = + SMT_Util.mk_const_pat @{theory} @{const_name fun_upd} (Thm.dest_ctyp o SMT_Util.destT1) +fun mk_update array index value = + let val cTs = Thm.dest_ctyp (Thm.ctyp_of_cterm array) + in Thm.apply (Thm.mk_binop (SMT_Util.instTs cTs update) array index) value end + +val mk_uminus = Thm.apply (Thm.cterm_of @{context} @{const uminus (int)}) +val add = Thm.cterm_of @{context} @{const plus (int)} +val int0 = Numeral.mk_cnumber @{ctyp int} 0 +val mk_sub = Thm.mk_binop (Thm.cterm_of @{context} @{const minus (int)}) +val mk_mul = Thm.mk_binop (Thm.cterm_of @{context} @{const times (int)}) +val mk_div = Thm.mk_binop (Thm.cterm_of @{context} @{const z3div}) +val mk_mod = Thm.mk_binop (Thm.cterm_of @{context} @{const z3mod}) +val mk_lt = Thm.mk_binop (Thm.cterm_of @{context} @{const less (int)}) +val mk_le = Thm.mk_binop (Thm.cterm_of @{context} @{const less_eq (int)}) + +fun mk_builtin_fun ctxt sym cts = + (case (sym, cts) of + (Sym ("true", _), []) => SOME mk_true + | (Sym ("false", _), []) => SOME mk_false + | (Sym ("not", _), [ct]) => SOME (mk_not ct) + | (Sym ("and", _), _) => SOME (mk_nary conj mk_true cts) + | (Sym ("or", _), _) => SOME (mk_nary disj mk_false cts) + | (Sym ("implies", _), [ct, cu]) => SOME (mk_implies ct cu) + | (Sym ("iff", _), [ct, cu]) => SOME (mk_iff ct cu) + | (Sym ("~", _), [ct, cu]) => SOME (mk_iff ct cu) + | (Sym ("xor", _), [ct, cu]) => SOME (mk_not (mk_iff ct cu)) + | (Sym ("if", _), [ct1, ct2, ct3]) => SOME (mk_if ct1 ct2 ct3) + | (Sym ("ite", _), [ct1, ct2, ct3]) => SOME (mk_if ct1 ct2 ct3) (* FIXME: remove *) + | (Sym ("=", _), [ct, cu]) => SOME (mk_eq ct cu) + | (Sym ("select", _), [ca, ck]) => SOME (Thm.apply (mk_access ca) ck) + | (Sym ("store", _), [ca, ck, cv]) => SOME (mk_update ca ck cv) + | _ => + (case (sym, try (Thm.typ_of_cterm o hd) cts, cts) of + (Sym ("+", _), SOME @{typ int}, _) => SOME (mk_nary add int0 cts) + | (Sym ("-", _), SOME @{typ int}, [ct]) => SOME (mk_uminus ct) + | (Sym ("-", _), SOME @{typ int}, [ct, cu]) => SOME (mk_sub ct cu) + | (Sym ("*", _), SOME @{typ int}, [ct, cu]) => SOME (mk_mul ct cu) + | (Sym ("div", _), SOME @{typ int}, [ct, cu]) => SOME (mk_div ct cu) + | (Sym ("mod", _), SOME @{typ int}, [ct, cu]) => SOME (mk_mod ct cu) + | (Sym ("<", _), SOME @{typ int}, [ct, cu]) => SOME (mk_lt ct cu) + | (Sym ("<=", _), SOME @{typ int}, [ct, cu]) => SOME (mk_le ct cu) + | (Sym (">", _), SOME @{typ int}, [ct, cu]) => SOME (mk_lt cu ct) + | (Sym (">=", _), SOME @{typ int}, [ct, cu]) => SOME (mk_le cu ct) + | _ => 
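+      (* anything not handled above is delegated to the additional
+         constructors registered via add_mk_builtins *)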
chained_mk_builtin_fun ctxt (get_mk_builtins ctxt) sym cts)) + + +(* abstraction *) + +fun is_builtin_theory_term ctxt t = + if SMT_Builtin.is_builtin_num ctxt t then true + else + (case Term.strip_comb t of + (Const c, ts) => SMT_Builtin.is_builtin_fun ctxt c ts + | _ => false) + +end; diff --git a/src/test-gen/SMT/z3_isar.ML b/src/test-gen/SMT/z3_isar.ML new file mode 100644 index 0000000..5b73931 --- /dev/null +++ b/src/test-gen/SMT/z3_isar.ML @@ -0,0 +1,120 @@ +(* Title: HOL/Tools/SMT/z3_isar.ML + Author: Jasmin Blanchette, TU Muenchen + +Z3 proofs as generic ATP proofs for Isar proof reconstruction. +*) + +signature Z3_ISAR = +sig + val atp_proof_of_z3_proof: Proof.context -> term list -> thm list -> term list -> term -> + (string * term) list -> int list -> int -> (int * string) list -> Z3_Proof.z3_step list -> + (term, string) ATP_Proof.atp_step list +end; + +structure Z3_Isar: Z3_ISAR = +struct + +open ATP_Util +open ATP_Problem +open ATP_Proof +open ATP_Proof_Reconstruct +open SMTLIB_Isar + +val z3_apply_def_rule = Z3_Proof.string_of_rule Z3_Proof.Apply_Def +val z3_hypothesis_rule = Z3_Proof.string_of_rule Z3_Proof.Hypothesis +val z3_intro_def_rule = Z3_Proof.string_of_rule Z3_Proof.Intro_Def +val z3_lemma_rule = Z3_Proof.string_of_rule Z3_Proof.Lemma + +fun inline_z3_defs _ [] = [] + | inline_z3_defs defs ((name, role, t, rule, deps) :: lines) = + if rule = z3_intro_def_rule then + let val def = t |> HOLogic.dest_Trueprop |> HOLogic.dest_eq |> swap in + inline_z3_defs (insert (op =) def defs) + (map (replace_dependencies_in_line (name, [])) lines) + end + else if rule = z3_apply_def_rule then + inline_z3_defs defs (map (replace_dependencies_in_line (name, [])) lines) + else + (name, role, Term.subst_atomic defs t, rule, deps) :: inline_z3_defs defs lines + +fun add_z3_hypotheses [] = I + | add_z3_hypotheses hyps = + HOLogic.dest_Trueprop + #> curry s_imp (Library.foldr1 s_conj (map HOLogic.dest_Trueprop hyps)) + #> HOLogic.mk_Trueprop + +fun inline_z3_hypotheses _ _ [] = [] + | inline_z3_hypotheses hyp_names hyps ((name, role, t, rule, deps) :: lines) = + if rule = z3_hypothesis_rule then + inline_z3_hypotheses (name :: hyp_names) (AList.map_default (op =) (t, []) (cons name) hyps) + lines + else + let val deps' = subtract (op =) hyp_names deps in + if rule = z3_lemma_rule then + (name, role, t, rule, deps') :: inline_z3_hypotheses hyp_names hyps lines + else + let + val add_hyps = filter_out (null o inter (op =) deps o snd) hyps + val t' = add_z3_hypotheses (map fst add_hyps) t + val hyps' = fold (AList.update (op =) o apsnd (insert (op =) name)) add_hyps hyps + in + (name, role, t', rule, deps') :: inline_z3_hypotheses hyp_names hyps' lines + end + end + +fun dest_alls (Const (@{const_name Pure.all}, _) $ Abs (abs as (_, T, _))) = + let val (s', t') = Term.dest_abs abs in + dest_alls t' |>> cons (s', T) + end + | dest_alls t = ([], t) + +val reorder_foralls = + dest_alls + #>> sort_by fst + #-> fold_rev (Logic.all o Free); + +fun atp_proof_of_z3_proof ctxt ll_defs rewrite_rules hyp_ts concl_t fact_helper_ts prem_ids + conjecture_id fact_helper_ids proof = + let + fun steps_of (Z3_Proof.Z3_Step {id, rule, prems, concl, ...}) = + let + val sid = string_of_int id + + val concl' = concl + |> reorder_foralls (* crucial for skolemization steps *) + |> postprocess_step_conclusion ctxt rewrite_rules ll_defs + fun standard_step role = + ((sid, []), role, concl', Z3_Proof.string_of_rule rule, + map (fn id => (string_of_int id, [])) prems) + in + (case rule of + Z3_Proof.Asserted => + let 
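+            (* an "asserted" step is expanded into at most two lines: the
+               original hypothesis or conjecture as stated by the user (unless
+               it is a plain fact), followed by a Rewrite step relating it to
+               the normalized form the solver actually used *)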
val ss = the_list (AList.lookup (op =) fact_helper_ids id) in + (case distinguish_conjecture_and_hypothesis ss id conjecture_id prem_ids fact_helper_ts + hyp_ts concl_t of + NONE => [] + | SOME (role0, concl00) => + let + val name0 = (sid ^ "a", ss) + val concl0 = unskolemize_names ctxt concl00 + in + (if role0 = Axiom then [] + else [(name0, role0, concl0, Z3_Proof.string_of_rule rule, [])]) @ + [((sid, []), Plain, concl', Z3_Proof.string_of_rule Z3_Proof.Rewrite, + name0 :: normalizing_prems ctxt concl0)] + end) + end + | Z3_Proof.Rewrite => [standard_step Lemma] + | Z3_Proof.Rewrite_Star => [standard_step Lemma] + | Z3_Proof.Skolemize => [standard_step Lemma] + | Z3_Proof.Th_Lemma _ => [standard_step Lemma] + | _ => [standard_step Plain]) + end + in + proof + |> maps steps_of + |> inline_z3_defs [] + |> inline_z3_hypotheses [] [] + end + +end; diff --git a/src/test-gen/SMT/z3_proof.ML b/src/test-gen/SMT/z3_proof.ML new file mode 100644 index 0000000..2c3ab4e --- /dev/null +++ b/src/test-gen/SMT/z3_proof.ML @@ -0,0 +1,303 @@ +(* Title: HOL/Tools/SMT/z3_proof.ML + Author: Sascha Boehme, TU Muenchen + +Z3 proofs: parsing and abstract syntax tree. +*) + +signature Z3_PROOF = +sig + (*proof rules*) + datatype z3_rule = + True_Axiom | Asserted | Goal | Modus_Ponens | Reflexivity | Symmetry | Transitivity | + Transitivity_Star | Monotonicity | Quant_Intro | Distributivity | And_Elim | Not_Or_Elim | + Rewrite | Rewrite_Star | Pull_Quant | Pull_Quant_Star | Push_Quant | Elim_Unused_Vars | + Dest_Eq_Res | Quant_Inst | Hypothesis | Lemma | Unit_Resolution | Iff_True | Iff_False | + Commutativity | Def_Axiom | Intro_Def | Apply_Def | Iff_Oeq | Nnf_Pos | Nnf_Neg | Nnf_Star | + Cnf_Star | Skolemize | Modus_Ponens_Oeq | Th_Lemma of string + + val is_assumption: z3_rule -> bool + val string_of_rule: z3_rule -> string + + (*proofs*) + datatype z3_step = Z3_Step of { + id: int, + rule: z3_rule, + prems: int list, + concl: term, + fixes: string list, + is_fix_step: bool} + + (*proof parser*) + val parse: typ Symtab.table -> term Symtab.table -> string list -> + Proof.context -> z3_step list * Proof.context +end; + +structure Z3_Proof: Z3_PROOF = +struct + +open SMTLIB_Proof + + +(* proof rules *) + +datatype z3_rule = + True_Axiom | Asserted | Goal | Modus_Ponens | Reflexivity | Symmetry | Transitivity | + Transitivity_Star | Monotonicity | Quant_Intro | Distributivity | And_Elim | Not_Or_Elim | + Rewrite | Rewrite_Star | Pull_Quant | Pull_Quant_Star | Push_Quant | Elim_Unused_Vars | + Dest_Eq_Res | Quant_Inst | Hypothesis | Lemma | Unit_Resolution | Iff_True | Iff_False | + Commutativity | Def_Axiom | Intro_Def | Apply_Def | Iff_Oeq | Nnf_Pos | Nnf_Neg | Nnf_Star | + Cnf_Star | Skolemize | Modus_Ponens_Oeq | Th_Lemma of string + (* some proof rules include further information that is currently dropped by the parser *) + +val rule_names = Symtab.make [ + ("true-axiom", True_Axiom), + ("asserted", Asserted), + ("goal", Goal), + ("mp", Modus_Ponens), + ("refl", Reflexivity), + ("symm", Symmetry), + ("trans", Transitivity), + ("trans*", Transitivity_Star), + ("monotonicity", Monotonicity), + ("quant-intro", Quant_Intro), + ("distributivity", Distributivity), + ("and-elim", And_Elim), + ("not-or-elim", Not_Or_Elim), + ("rewrite", Rewrite), + ("rewrite*", Rewrite_Star), + ("pull-quant", Pull_Quant), + ("pull-quant*", Pull_Quant_Star), + ("push-quant", Push_Quant), + ("elim-unused", Elim_Unused_Vars), + ("der", Dest_Eq_Res), + ("quant-inst", Quant_Inst), + ("hypothesis", Hypothesis), + ("lemma", Lemma), + 
("unit-resolution", Unit_Resolution), + ("iff-true", Iff_True), + ("iff-false", Iff_False), + ("commutativity", Commutativity), + ("def-axiom", Def_Axiom), + ("intro-def", Intro_Def), + ("apply-def", Apply_Def), + ("iff~", Iff_Oeq), + ("nnf-pos", Nnf_Pos), + ("nnf-neg", Nnf_Neg), + ("nnf*", Nnf_Star), + ("cnf*", Cnf_Star), + ("sk", Skolemize), + ("mp~", Modus_Ponens_Oeq)] + +fun is_assumption Asserted = true + | is_assumption Goal = true + | is_assumption Hypothesis = true + | is_assumption Intro_Def = true + | is_assumption Skolemize = true + | is_assumption _ = false + +fun rule_of_string name = + (case Symtab.lookup rule_names name of + SOME rule => rule + | NONE => error ("unknown Z3 proof rule " ^ quote name)) + +fun string_of_rule (Th_Lemma kind) = "th-lemma" ^ (if kind = "" then "" else " " ^ kind) + | string_of_rule r = + let fun eq_rule (s, r') = if r = r' then SOME s else NONE + in the (Symtab.get_first eq_rule rule_names) end + + +(* proofs *) + +datatype z3_node = Z3_Node of { + id: int, + rule: z3_rule, + prems: z3_node list, + concl: term, + bounds: string list} + +fun mk_node id rule prems concl bounds = + Z3_Node {id = id, rule = rule, prems = prems, concl = concl, bounds = bounds} + +datatype z3_step = Z3_Step of { + id: int, + rule: z3_rule, + prems: int list, + concl: term, + fixes: string list, + is_fix_step: bool} + +fun mk_step id rule prems concl fixes is_fix_step = + Z3_Step {id = id, rule = rule, prems = prems, concl = concl, fixes = fixes, + is_fix_step = is_fix_step} + + +(* proof parser *) + +fun rule_of (SMTLIB.Sym name) = rule_of_string name + | rule_of (SMTLIB.S (SMTLIB.Sym "_" :: SMTLIB.Sym name :: args)) = + (case (name, args) of + ("th-lemma", SMTLIB.Sym kind :: _) => Th_Lemma kind + | _ => rule_of_string name) + | rule_of r = raise SMTLIB_PARSE ("bad Z3 proof rule format", r) + +fun node_of p cx = + (case p of + SMTLIB.Sym name => + (case lookup_binding cx name of + Proof node => (node, cx) + | Tree p' => + cx + |> node_of p' + |-> (fn node => pair node o update_binding (name, Proof node)) + | _ => raise SMTLIB_PARSE ("bad Z3 proof format", p)) + | SMTLIB.S [SMTLIB.Sym "let", SMTLIB.S bindings, p] => + with_bindings (map dest_binding bindings) (node_of p) cx + | SMTLIB.S (name :: parts) => + let + val (ps, p) = split_last parts + val r = rule_of name + in + cx + |> fold_map node_of ps + ||>> `(with_fresh_names (term_of p)) + ||>> next_id + |>> (fn ((prems, (t, ns)), id) => mk_node id r prems t ns) + end + | _ => raise SMTLIB_PARSE ("bad Z3 proof format", p)) + +fun dest_name (SMTLIB.Sym name) = name + | dest_name t = raise SMTLIB_PARSE ("bad name", t) + +fun dest_seq (SMTLIB.S ts) = ts + | dest_seq t = raise SMTLIB_PARSE ("bad Z3 proof format", t) + +fun parse' (SMTLIB.S (SMTLIB.Sym "set-logic" :: _) :: ts) cx = parse' ts cx + | parse' (SMTLIB.S [SMTLIB.Sym "declare-fun", n, tys, ty] :: ts) cx = + let + val name = dest_name n + val Ts = map (type_of cx) (dest_seq tys) + val T = type_of cx ty + in parse' ts (declare_fun name (Ts ---> T) cx) end + | parse' (SMTLIB.S [SMTLIB.Sym "proof", p] :: _) cx = node_of p cx + | parse' ts _ = raise SMTLIB_PARSE ("bad Z3 proof declarations", SMTLIB.S ts) + +fun parse_proof typs funs lines ctxt = + let + val ts = dest_seq (SMTLIB.parse lines) + val (node, cx) = parse' ts (empty_context ctxt typs funs) + in (node, ctxt_of cx) end + handle SMTLIB.PARSE (l, msg) => error ("parsing error at line " ^ string_of_int l ^ ": " ^ msg) + | SMTLIB_PARSE (msg, t) => error (msg ^ ": " ^ SMTLIB.str_of t) + + +(* handling of bound 
variables *) + +fun subst_of tyenv = + let fun add (ix, (S, T)) = cons (TVar (ix, S), T) + in Vartab.fold add tyenv [] end + +fun substTs_same subst = + let val applyT = Same.function (AList.lookup (op =) subst) + in Term_Subst.map_atypsT_same applyT end + +fun subst_types ctxt env bounds t = + let + val match = Sign.typ_match (Proof_Context.theory_of ctxt) + + val t' = singleton (Variable.polymorphic ctxt) t + val patTs = map snd (Term.strip_qnt_vars @{const_name Pure.all} t') + val objTs = map (the o Symtab.lookup env) bounds + val subst = subst_of (fold match (patTs ~~ objTs) Vartab.empty) + in Same.commit (Term_Subst.map_types_same (substTs_same subst)) t' end + +fun eq_quant (@{const_name HOL.All}, _) (@{const_name HOL.All}, _) = true + | eq_quant (@{const_name HOL.Ex}, _) (@{const_name HOL.Ex}, _) = true + | eq_quant _ _ = false + +fun opp_quant (@{const_name HOL.All}, _) (@{const_name HOL.Ex}, _) = true + | opp_quant (@{const_name HOL.Ex}, _) (@{const_name HOL.All}, _) = true + | opp_quant _ _ = false + +fun with_quant pred i (Const q1 $ Abs (_, T1, t1), Const q2 $ Abs (_, T2, t2)) = + if pred q1 q2 andalso T1 = T2 then + let val t = Var (("", i), T1) + in SOME (apply2 Term.subst_bound ((t, t1), (t, t2))) end + else NONE + | with_quant _ _ _ = NONE + +fun dest_quant_pair i (@{term HOL.Not} $ t1, t2) = + Option.map (apfst HOLogic.mk_not) (with_quant opp_quant i (t1, t2)) + | dest_quant_pair i (t1, t2) = with_quant eq_quant i (t1, t2) + +fun dest_quant i t = + (case dest_quant_pair i (HOLogic.dest_eq (HOLogic.dest_Trueprop t)) of + SOME (t1, t2) => HOLogic.mk_Trueprop (HOLogic.mk_eq (t1, t2)) + | NONE => raise TERM ("lift_quant", [t])) + +fun match_types ctxt pat obj = + (Vartab.empty, Vartab.empty) + |> Pattern.first_order_match (Proof_Context.theory_of ctxt) (pat, obj) + +fun strip_match ctxt pat i obj = + (case try (match_types ctxt pat) obj of + SOME (tyenv, _) => subst_of tyenv + | NONE => strip_match ctxt pat (i + 1) (dest_quant i obj)) + +fun dest_all i (Const (@{const_name Pure.all}, _) $ (a as Abs (_, T, _))) = + dest_all (i + 1) (Term.betapply (a, Var (("", i), T))) + | dest_all i t = (i, t) + +fun dest_alls t = dest_all (Term.maxidx_of_term t + 1) t + +fun match_rule ctxt env (Z3_Node {bounds = bs', concl = t', ...}) bs t = + let + val t'' = singleton (Variable.polymorphic ctxt) t' + val (i, obj) = dest_alls (subst_types ctxt env bs t) + in + (case try (strip_match ctxt (snd (dest_alls t'')) i) obj of + NONE => NONE + | SOME subst => + let + val applyT = Same.commit (substTs_same subst) + val patTs = map snd (Term.strip_qnt_vars @{const_name Pure.all} t'') + in SOME (Symtab.make (bs' ~~ map applyT patTs)) end) + end + + +(* linearizing proofs and resolving types of bound variables *) + +fun has_step (tab, _) = Inttab.defined tab + +fun add_step id rule bounds concl is_fix_step ids (tab, sts) = + let val step = mk_step id rule ids concl bounds is_fix_step + in (id, (Inttab.update (id, ()) tab, step :: sts)) end + +fun is_fix_rule rule prems = + member (op =) [Quant_Intro, Nnf_Pos, Nnf_Neg] rule andalso length prems = 1 + +fun lin_proof ctxt env (Z3_Node {id, rule, prems, concl, bounds}) steps = + if has_step steps id then (id, steps) + else + let + val t = subst_types ctxt env bounds concl + val add = add_step id rule bounds t + fun rec_apply e b = fold_map (lin_proof ctxt e) prems #-> add b + in + if is_fix_rule rule prems then + (case match_rule ctxt env (hd prems) bounds t of + NONE => rec_apply env false steps + | SOME env' => rec_apply env' true steps) + else rec_apply 
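+      (* not a single-premise Quant_Intro/Nnf_Pos/Nnf_Neg step, so the type
+         environment for bound variables is kept unchanged and the step is
+         not marked as a fix step *)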
env false steps + end + +fun linearize ctxt node = + rev (snd (snd (lin_proof ctxt Symtab.empty node (Inttab.empty, [])))) + + +(* overall proof parser *) + +fun parse typs funs lines ctxt = + let val (node, ctxt') = parse_proof typs funs lines ctxt + in (linearize ctxt' node, ctxt') end + +end; diff --git a/src/test-gen/SMT/z3_real.ML b/src/test-gen/SMT/z3_real.ML new file mode 100644 index 0000000..15ef469 --- /dev/null +++ b/src/test-gen/SMT/z3_real.ML @@ -0,0 +1,32 @@ +(* Title: HOL/Tools/SMT/z3_real.ML + Author: Sascha Boehme, TU Muenchen + +Z3 setup for reals. +*) + +structure Z3_Real: sig end = +struct + +fun real_type_parser (SMTLIB.Sym "Real", []) = SOME @{typ Real.real} + | real_type_parser _ = NONE + +fun real_term_parser (SMTLIB.Dec (i, 0), []) = SOME (HOLogic.mk_number @{typ Real.real} i) + | real_term_parser (SMTLIB.Sym "/", [t1, t2]) = + SOME (@{term "Rings.divide :: real => _"} $ t1 $ t2) + | real_term_parser (SMTLIB.Sym "to_real", [t]) = SOME (@{term "Int.of_int :: int => _"} $ t) + | real_term_parser _ = NONE + +fun abstract abs t = + (case t of + (c as @{term "Rings.divide :: real => _"}) $ t1 $ t2 => + abs t1 ##>> abs t2 #>> (fn (u1, u2) => SOME (c $ u1 $ u2)) + | (c as @{term "Int.of_int :: int => _"}) $ t => + abs t #>> (fn u => SOME (c $ u)) + | _ => pair NONE) + +val _ = Theory.setup (Context.theory_map ( + SMTLIB_Proof.add_type_parser real_type_parser #> + SMTLIB_Proof.add_term_parser real_term_parser #> + Z3_Replay_Methods.add_arith_abstracter abstract)) + +end; diff --git a/src/test-gen/SMT/z3_replay.ML b/src/test-gen/SMT/z3_replay.ML new file mode 100644 index 0000000..b9ecce8 --- /dev/null +++ b/src/test-gen/SMT/z3_replay.ML @@ -0,0 +1,262 @@ +(* Title: HOL/Tools/SMT/z3_replay.ML + Author: Sascha Boehme, TU Muenchen + Author: Jasmin Blanchette, TU Muenchen + +Z3 proof parsing and replay. 
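+
+parse_proof extracts which of the given facts the Z3 proof actually uses and
+sets up an ATP-style proof for Isar reconstruction; replay re-proves every
+step with the methods from Z3_Replay_Methods and, when statistics output is
+enabled, reports per-rule reconstruction times.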
+*) + +signature Z3_REPLAY = +sig + val parse_proof: Proof.context -> SMT_Translate.replay_data -> + ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> + SMT_Solver.parsed_proof + val replay: Proof.context -> SMT_Translate.replay_data -> string list -> thm +end; + +structure Z3_Replay: Z3_REPLAY = +struct + +fun params_of t = Term.strip_qnt_vars @{const_name Pure.all} t + +fun varify ctxt thm = + let + val maxidx = Thm.maxidx_of thm + 1 + val vs = params_of (Thm.prop_of thm) + val vars = map_index (fn (i, (n, T)) => Var ((n, i + maxidx), T)) vs + in Drule.forall_elim_list (map (Thm.cterm_of ctxt) vars) thm end + +fun add_paramTs names t = + fold2 (fn n => fn (_, T) => AList.update (op =) (n, T)) names (params_of t) + +fun new_fixes ctxt nTs = + let + val (ns, ctxt') = Variable.variant_fixes (replicate (length nTs) "") ctxt + fun mk (n, T) n' = (n, Thm.cterm_of ctxt' (Free (n', T))) + in (ctxt', Symtab.make (map2 mk nTs ns)) end + +fun forall_elim_term ct (Const (@{const_name Pure.all}, _) $ (a as Abs _)) = + Term.betapply (a, Thm.term_of ct) + | forall_elim_term _ qt = raise TERM ("forall_elim'", [qt]) + +fun apply_fixes elim env = fold (elim o the o Symtab.lookup env) + +val apply_fixes_prem = uncurry o apply_fixes Thm.forall_elim +val apply_fixes_concl = apply_fixes forall_elim_term + +fun export_fixes env names = Drule.forall_intr_list (map (the o Symtab.lookup env) names) + +fun under_fixes f ctxt (prems, nthms) names concl = + let + val thms1 = map (varify ctxt) prems + val (ctxt', env) = + add_paramTs names concl [] + |> fold (uncurry add_paramTs o apsnd Thm.prop_of) nthms + |> new_fixes ctxt + val thms2 = map (apply_fixes_prem env) nthms + val t = apply_fixes_concl env names concl + in export_fixes env names (f ctxt' (thms1 @ thms2) t) end + +fun replay_thm ctxt assumed nthms (Z3_Proof.Z3_Step {id, rule, concl, fixes, is_fix_step, ...}) = + if Z3_Proof.is_assumption rule then + (case Inttab.lookup assumed id of + SOME (_, thm) => thm + | NONE => Thm.assume (Thm.cterm_of ctxt concl)) + else + under_fixes (Z3_Replay_Methods.method_for rule) ctxt + (if is_fix_step then (map snd nthms, []) else ([], nthms)) fixes concl + +fun replay_step ctxt assumed (step as Z3_Proof.Z3_Step {id, rule, prems, fixes, ...}) state = + let + val (proofs, stats) = state + val nthms = map (the o Inttab.lookup proofs) prems + val replay = Timing.timing (replay_thm ctxt assumed nthms) + val ({elapsed, ...}, thm) = + SMT_Config.with_time_limit ctxt SMT_Config.reconstruction_step_timeout replay step + handle Timeout.TIMEOUT _ => raise SMT_Failure.SMT SMT_Failure.Time_Out + val stats' = Symtab.cons_list (Z3_Proof.string_of_rule rule, Time.toMilliseconds elapsed) stats + in (Inttab.update (id, (fixes, thm)) proofs, stats') end + +local + val remove_trigger = mk_meta_eq @{thm trigger_def} + val remove_fun_app = mk_meta_eq @{thm fun_app_def} + + fun rewrite_conv _ [] = Conv.all_conv + | rewrite_conv ctxt eqs = Simplifier.full_rewrite (empty_simpset ctxt addsimps eqs) + + val rewrite_true_rule = @{lemma "True == ~ False" by simp} + val prep_rules = [@{thm Let_def}, remove_trigger, remove_fun_app, rewrite_true_rule] + + fun rewrite _ [] = I + | rewrite ctxt eqs = Conv.fconv_rule (rewrite_conv ctxt eqs) + + fun lookup_assm assms_net ct = + Z3_Replay_Util.net_instances assms_net ct + |> map (fn ithm as (_, thm) => (ithm, Thm.cprop_of thm aconvc ct)) +in + +fun add_asserted outer_ctxt rewrite_rules assms steps ctxt = + let + val eqs = map (rewrite ctxt [rewrite_true_rule]) 
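+    (* the rewritten and eta-normalized assumptions are stored in a term net
+       so that each asserted conclusion can either be discharged against a
+       matching assumption or re-assumed locally *)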
rewrite_rules + val eqs' = union Thm.eq_thm eqs prep_rules + + val assms_net = + assms + |> map (apsnd (rewrite ctxt eqs')) + |> map (apsnd (Conv.fconv_rule Thm.eta_conversion)) + |> Z3_Replay_Util.thm_net_of snd + + fun revert_conv ctxt = rewrite_conv ctxt eqs' then_conv Thm.eta_conversion + + fun assume thm ctxt = + let + val ct = Thm.cprem_of thm 1 + val (thm', ctxt') = yield_singleton Assumption.add_assumes ct ctxt + in (thm' RS thm, ctxt') end + + fun add1 id fixes thm1 ((i, th), exact) ((iidths, thms), (ctxt, ptab)) = + let + val (thm, ctxt') = if exact then (Thm.implies_elim thm1 th, ctxt) else assume thm1 ctxt + val thms' = if exact then thms else th :: thms + in (((i, (id, th)) :: iidths, thms'), (ctxt', Inttab.update (id, (fixes, thm)) ptab)) end + + fun add (Z3_Proof.Z3_Step {id, rule, concl, fixes, ...}) + (cx as ((iidths, thms), (ctxt, ptab))) = + if Z3_Proof.is_assumption rule andalso rule <> Z3_Proof.Hypothesis then + let + val ct = Thm.cterm_of ctxt concl + val thm1 = Thm.trivial ct |> Conv.fconv_rule (Conv.arg1_conv (revert_conv outer_ctxt)) + val thm2 = singleton (Variable.export ctxt outer_ctxt) thm1 + in + (case lookup_assm assms_net (Thm.cprem_of thm2 1) of + [] => + let val (thm, ctxt') = assume thm1 ctxt + in ((iidths, thms), (ctxt', Inttab.update (id, (fixes, thm)) ptab)) end + | ithms => fold (add1 id fixes thm1) ithms cx) + end + else + cx + in fold add steps (([], []), (ctxt, Inttab.empty)) end + +end + +(* |- (EX x. P x) = P c |- ~ (ALL x. P x) = ~ P c *) +local + val sk_rules = @{lemma + "c = (SOME x. P x) ==> (EX x. P x) = P c" + "c = (SOME x. ~ P x) ==> (~ (ALL x. P x)) = (~ P c)" + by (metis someI_ex)+} +in + +fun discharge_sk_tac ctxt i st = + (resolve_tac ctxt @{thms trans} i + THEN resolve_tac ctxt sk_rules i + THEN (resolve_tac ctxt @{thms refl} ORELSE' discharge_sk_tac ctxt) (i+1) + THEN resolve_tac ctxt @{thms refl} i) st + +end + +val true_thm = @{lemma "~False" by simp} +fun make_discharge_rules rules = rules @ [@{thm allI}, @{thm refl}, @{thm reflexive}, true_thm] + +val intro_def_rules = @{lemma + "(~ P | P) & (P | ~ P)" + "(P | ~ P) & (~ P | P)" + by fast+} + +fun discharge_assms_tac ctxt rules = + REPEAT + (HEADGOAL (resolve_tac ctxt (intro_def_rules @ rules) ORELSE' + SOLVED' (discharge_sk_tac ctxt))) + +fun discharge_assms ctxt rules thm = + (if Thm.nprems_of thm = 0 then + thm + else + (case Seq.pull (discharge_assms_tac ctxt rules thm) of + SOME (thm', _) => thm' + | NONE => raise THM ("failed to discharge premise", 1, [thm]))) + |> Goal.norm_result ctxt + +fun discharge rules outer_ctxt inner_ctxt = + singleton (Proof_Context.export inner_ctxt outer_ctxt) + #> discharge_assms outer_ctxt (make_discharge_rules rules) + +fun parse_proof outer_ctxt + ({context = ctxt, typs, terms, ll_defs, rewrite_rules, assms} : SMT_Translate.replay_data) + xfacts prems concl output = + let + val (steps, ctxt2) = Z3_Proof.parse typs terms output ctxt + val ((iidths, _), _) = add_asserted outer_ctxt rewrite_rules assms steps ctxt2 + + fun id_of_index i = the_default ~1 (Option.map fst (AList.lookup (op =) iidths i)) + + val conjecture_i = 0 + val prems_i = 1 + val facts_i = prems_i + length prems + + val conjecture_id = id_of_index conjecture_i + val prem_ids = map id_of_index (prems_i upto facts_i - 1) + val fact_ids' = + map_filter (fn (i, (id, _)) => try (apsnd (nth xfacts)) (id, i - facts_i)) iidths + val helper_ids' = map_filter (try (fn (~1, idth) => idth)) iidths + + val fact_helper_ts = + map (fn (_, th) => (ATP_Util.short_thm_name ctxt th, Thm.prop_of 
th)) helper_ids' @ + map (fn (_, ((s, _), th)) => (s, Thm.prop_of th)) fact_ids' + val fact_helper_ids' = + map (apsnd (ATP_Util.short_thm_name ctxt)) helper_ids' @ map (apsnd (fst o fst)) fact_ids' + in + {outcome = NONE, fact_ids = SOME fact_ids', + atp_proof = fn () => Z3_Isar.atp_proof_of_z3_proof ctxt ll_defs rewrite_rules prems concl + fact_helper_ts prem_ids conjecture_id fact_helper_ids' steps} + end + +fun intermediate_statistics ctxt start total = + SMT_Config.statistics_msg ctxt (fn current => + "Reconstructed " ^ string_of_int current ^ " of " ^ string_of_int total ^ " steps in " ^ + string_of_int (Time.toMilliseconds (#elapsed (Timing.result start))) ^ " ms") + +fun pretty_statistics total stats = + let + fun mean_of is = + let + val len = length is + val mid = len div 2 + in if len mod 2 = 0 then (nth is (mid - 1) + nth is mid) div 2 else nth is mid end + fun pretty_item name p = Pretty.item (Pretty.separate ":" [Pretty.str name, p]) + fun pretty (name, milliseconds) = pretty_item name (Pretty.block (Pretty.separate "," [ + Pretty.str (string_of_int (length milliseconds) ^ " occurrences") , + Pretty.str (string_of_int (mean_of milliseconds) ^ " ms mean time"), + Pretty.str (string_of_int (fold Integer.max milliseconds 0) ^ " ms maximum time"), + Pretty.str (string_of_int (fold Integer.add milliseconds 0) ^ " ms total time")])) + in + Pretty.big_list "Z3 proof reconstruction statistics:" ( + pretty_item "total time" (Pretty.str (string_of_int total ^ " ms")) :: + map pretty (Symtab.dest stats)) + end + +fun replay outer_ctxt + ({context = ctxt, typs, terms, rewrite_rules, assms, ...} : SMT_Translate.replay_data) output = + let + val (steps, ctxt2) = Z3_Proof.parse typs terms output ctxt + val ((_, rules), (ctxt3, assumed)) = add_asserted outer_ctxt rewrite_rules assms steps ctxt2 + val ctxt4 = + ctxt3 + |> put_simpset (Z3_Replay_Util.make_simpset ctxt3 []) + |> Config.put SAT.solver (Config.get ctxt3 SMT_Config.sat_solver) + val len = length steps + val start = Timing.start () + val print_runtime_statistics = intermediate_statistics ctxt4 start len + fun blockwise f (i, x) y = + (if i > 0 andalso i mod 100 = 0 then print_runtime_statistics i else (); f x y) + val (proofs, stats) = + fold_index (blockwise (replay_step ctxt4 assumed)) steps (assumed, Symtab.empty) + val _ = print_runtime_statistics len + val total = Time.toMilliseconds (#elapsed (Timing.result start)) + val (_, Z3_Proof.Z3_Step {id, ...}) = split_last steps + val _ = SMT_Config.statistics_msg ctxt4 (Pretty.string_of o pretty_statistics total) stats + in + Inttab.lookup proofs id |> the |> snd |> discharge rules outer_ctxt ctxt4 + end + +end; diff --git a/src/test-gen/SMT/z3_replay_methods.ML b/src/test-gen/SMT/z3_replay_methods.ML new file mode 100644 index 0000000..e4fbb73 --- /dev/null +++ b/src/test-gen/SMT/z3_replay_methods.ML @@ -0,0 +1,685 @@ +(* Title: HOL/Tools/SMT/z3_replay_methods.ML + Author: Sascha Boehme, TU Muenchen + Author: Jasmin Blanchette, TU Muenchen + +Proof methods for replaying Z3 proofs. 
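+
+Each Z3 inference rule is mapped to a small tactic-based method; goals are
+typically abstracted to their propositional or arithmetic skeleton, proved in
+abstracted form, and then instantiated back to the concrete proposition.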
+*) + +signature Z3_REPLAY_METHODS = +sig + (*abstraction*) + type abs_context = int * term Termtab.table + type 'a abstracter = term -> abs_context -> 'a * abs_context + val add_arith_abstracter: (term abstracter -> term option abstracter) -> + Context.generic -> Context.generic + + (*theory lemma methods*) + type th_lemma_method = Proof.context -> thm list -> term -> thm + val add_th_lemma_method: string * th_lemma_method -> Context.generic -> + Context.generic + + (*methods for Z3 proof rules*) + type z3_method = Proof.context -> thm list -> term -> thm + val true_axiom: z3_method + val mp: z3_method + val refl: z3_method + val symm: z3_method + val trans: z3_method + val cong: z3_method + val quant_intro: z3_method + val distrib: z3_method + val and_elim: z3_method + val not_or_elim: z3_method + val rewrite: z3_method + val rewrite_star: z3_method + val pull_quant: z3_method + val push_quant: z3_method + val elim_unused: z3_method + val dest_eq_res: z3_method + val quant_inst: z3_method + val lemma: z3_method + val unit_res: z3_method + val iff_true: z3_method + val iff_false: z3_method + val comm: z3_method + val def_axiom: z3_method + val apply_def: z3_method + val iff_oeq: z3_method + val nnf_pos: z3_method + val nnf_neg: z3_method + val mp_oeq: z3_method + val th_lemma: string -> z3_method + val method_for: Z3_Proof.z3_rule -> z3_method +end; + +structure Z3_Replay_Methods: Z3_REPLAY_METHODS = +struct + +type z3_method = Proof.context -> thm list -> term -> thm + + +(* utility functions *) + +fun trace ctxt f = SMT_Config.trace_msg ctxt f () + +fun pretty_thm ctxt thm = Syntax.pretty_term ctxt (Thm.concl_of thm) + +fun pretty_goal ctxt msg rule thms t = + let + val full_msg = msg ^ ": " ^ quote (Z3_Proof.string_of_rule rule) + val assms = + if null thms then [] + else [Pretty.big_list "assumptions:" (map (pretty_thm ctxt) thms)] + val concl = Pretty.big_list "proposition:" [Syntax.pretty_term ctxt t] + in Pretty.big_list full_msg (assms @ [concl]) end + +fun replay_error ctxt msg rule thms t = error (Pretty.string_of (pretty_goal ctxt msg rule thms t)) + +fun replay_rule_error ctxt = replay_error ctxt "Failed to replay Z3 proof step" + +fun trace_goal ctxt rule thms t = + trace ctxt (fn () => Pretty.string_of (pretty_goal ctxt "Goal" rule thms t)) + +fun as_prop (t as Const (@{const_name Trueprop}, _) $ _) = t + | as_prop t = HOLogic.mk_Trueprop t + +fun dest_prop (Const (@{const_name Trueprop}, _) $ t) = t + | dest_prop t = t + +fun dest_thm thm = dest_prop (Thm.concl_of thm) + +fun certify_prop ctxt t = Thm.cterm_of ctxt (as_prop t) + +fun try_provers ctxt rule [] thms t = replay_rule_error ctxt rule thms t + | try_provers ctxt rule ((name, prover) :: named_provers) thms t = + (case (trace ctxt (K ("Trying prover " ^ quote name)); try prover t) of + SOME thm => thm + | NONE => try_provers ctxt rule named_provers thms t) + +fun match ctxt pat t = + (Vartab.empty, Vartab.empty) + |> Pattern.first_order_match (Proof_Context.theory_of ctxt) (pat, t) + +fun gen_certify_inst sel cert ctxt thm t = + let + val inst = match ctxt (dest_thm thm) (dest_prop t) + fun cert_inst (ix, (a, b)) = ((ix, a), cert b) + in Vartab.fold (cons o cert_inst) (sel inst) [] end + +fun match_instantiateT ctxt t thm = + if Term.exists_type (Term.exists_subtype Term.is_TVar) (dest_thm thm) then + Thm.instantiate (gen_certify_inst fst (Thm.ctyp_of ctxt) ctxt thm t, []) thm + else thm + +fun match_instantiate ctxt t thm = + let val thm' = match_instantiateT ctxt t thm in + Thm.instantiate ([], gen_certify_inst 
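+    (* the term variables of thm' are instantiated by first-order matching of
+       its conclusion against the goal t (type variables were handled just
+       above by match_instantiateT) *)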
snd (Thm.cterm_of ctxt) ctxt thm' t) thm' + end + +fun apply_rule ctxt t = + (case Z3_Replay_Rules.apply ctxt (certify_prop ctxt t) of + SOME thm => thm + | NONE => raise Fail "apply_rule") + +fun discharge _ [] thm = thm + | discharge i (rule :: rules) thm = discharge (i + Thm.nprems_of rule) rules (rule RSN (i, thm)) + +fun by_tac ctxt thms ns ts t tac = + Goal.prove ctxt [] (map as_prop ts) (as_prop t) + (fn {context, prems} => HEADGOAL (tac context prems)) + |> Drule.generalize ([], ns) + |> discharge 1 thms + +fun prove ctxt t tac = by_tac ctxt [] [] [] t (K o tac) + +fun prop_tac ctxt prems = + Method.insert_tac ctxt prems + THEN' SUBGOAL (fn (prop, i) => + if Term.size_of_term prop > 100 then SAT.satx_tac ctxt i + else (Classical.fast_tac ctxt ORELSE' Clasimp.force_tac ctxt) i) + +fun quant_tac ctxt = Blast.blast_tac ctxt + + +(* plug-ins *) + +type abs_context = int * term Termtab.table + +type 'a abstracter = term -> abs_context -> 'a * abs_context + +type th_lemma_method = Proof.context -> thm list -> term -> thm + +fun id_ord ((id1, _), (id2, _)) = int_ord (id1, id2) + +structure Plugins = Generic_Data +( + type T = + (int * (term abstracter -> term option abstracter)) list * + th_lemma_method Symtab.table + val empty = ([], Symtab.empty) + val extend = I + fun merge ((abss1, ths1), (abss2, ths2)) = ( + Ord_List.merge id_ord (abss1, abss2), + Symtab.merge (K true) (ths1, ths2)) +) + +fun add_arith_abstracter abs = Plugins.map (apfst (Ord_List.insert id_ord (serial (), abs))) +fun get_arith_abstracters ctxt = map snd (fst (Plugins.get (Context.Proof ctxt))) + +fun add_th_lemma_method method = Plugins.map (apsnd (Symtab.update_new method)) +fun get_th_lemma_method ctxt = snd (Plugins.get (Context.Proof ctxt)) + + +(* abstraction *) + +fun prove_abstract ctxt thms t tac f = + let + val ((prems, concl), (_, ts)) = f (1, Termtab.empty) + val ns = Termtab.fold (fn (_, v) => cons (fst (Term.dest_Free v))) ts [] + in + by_tac ctxt [] ns prems concl tac + |> match_instantiate ctxt t + |> discharge 1 thms + end + +fun prove_abstract' ctxt t tac f = + prove_abstract ctxt [] t tac (f #>> pair []) + +fun lookup_term (_, terms) t = Termtab.lookup terms t + +fun abstract_sub t f cx = + (case lookup_term cx t of + SOME v => (v, cx) + | NONE => f cx) + +fun mk_fresh_free t (i, terms) = + let val v = Free ("t" ^ string_of_int i, fastype_of t) + in (v, (i + 1, Termtab.update (t, v) terms)) end + +fun apply_abstracters _ [] _ cx = (NONE, cx) + | apply_abstracters abs (abstracter :: abstracters) t cx = + (case abstracter abs t cx of + (NONE, _) => apply_abstracters abs abstracters t cx + | x as (SOME _, _) => x) + +fun abstract_term (t as _ $ _) = abstract_sub t (mk_fresh_free t) + | abstract_term (t as Abs _) = abstract_sub t (mk_fresh_free t) + | abstract_term t = pair t + +fun abstract_bin abs f t t1 t2 = abstract_sub t (abs t1 ##>> abs t2 #>> f) + +fun abstract_ter abs f t t1 t2 t3 = + abstract_sub t (abs t1 ##>> abs t2 ##>> abs t3 #>> (Scan.triple1 #> f)) + +fun abstract_lit (@{const HOL.Not} $ t) = abstract_term t #>> HOLogic.mk_not + | abstract_lit t = abstract_term t + +fun abstract_not abs (t as @{const HOL.Not} $ t1) = + abstract_sub t (abs t1 #>> HOLogic.mk_not) + | abstract_not _ t = abstract_lit t + +fun abstract_conj (t as @{const HOL.conj} $ t1 $ t2) = + abstract_bin abstract_conj HOLogic.mk_conj t t1 t2 + | abstract_conj t = abstract_lit t + +fun abstract_disj (t as @{const HOL.disj} $ t1 $ t2) = + abstract_bin abstract_disj HOLogic.mk_disj t t1 t2 + | abstract_disj t = abstract_lit 
t + +fun abstract_prop (t as (c as @{const If (bool)}) $ t1 $ t2 $ t3) = + abstract_ter abstract_prop (fn (t1, t2, t3) => c $ t1 $ t2 $ t3) t t1 t2 t3 + | abstract_prop (t as @{const HOL.disj} $ t1 $ t2) = + abstract_bin abstract_prop HOLogic.mk_disj t t1 t2 + | abstract_prop (t as @{const HOL.conj} $ t1 $ t2) = + abstract_bin abstract_prop HOLogic.mk_conj t t1 t2 + | abstract_prop (t as @{const HOL.implies} $ t1 $ t2) = + abstract_bin abstract_prop HOLogic.mk_imp t t1 t2 + | abstract_prop (t as @{term "HOL.eq :: bool => _"} $ t1 $ t2) = + abstract_bin abstract_prop HOLogic.mk_eq t t1 t2 + | abstract_prop t = abstract_not abstract_prop t + +fun abstract_arith ctxt u = + let + fun abs (t as (c as Const _) $ Abs (s, T, t')) = + abstract_sub t (abs t' #>> (fn u' => c $ Abs (s, T, u'))) + | abs (t as (c as Const (@{const_name If}, _)) $ t1 $ t2 $ t3) = + abstract_ter abs (fn (t1, t2, t3) => c $ t1 $ t2 $ t3) t t1 t2 t3 + | abs (t as @{const HOL.Not} $ t1) = abstract_sub t (abs t1 #>> HOLogic.mk_not) + | abs (t as @{const HOL.disj} $ t1 $ t2) = + abstract_sub t (abs t1 ##>> abs t2 #>> HOLogic.mk_disj) + | abs (t as (c as Const (@{const_name uminus_class.uminus}, _)) $ t1) = + abstract_sub t (abs t1 #>> (fn u => c $ u)) + | abs (t as (c as Const (@{const_name plus_class.plus}, _)) $ t1 $ t2) = + abstract_sub t (abs t1 ##>> abs t2 #>> (fn (u1, u2) => c $ u1 $ u2)) + | abs (t as (c as Const (@{const_name minus_class.minus}, _)) $ t1 $ t2) = + abstract_sub t (abs t1 ##>> abs t2 #>> (fn (u1, u2) => c $ u1 $ u2)) + | abs (t as (c as Const (@{const_name times_class.times}, _)) $ t1 $ t2) = + abstract_sub t (abs t1 ##>> abs t2 #>> (fn (u1, u2) => c $ u1 $ u2)) + | abs (t as (c as Const (@{const_name z3div}, _)) $ t1 $ t2) = + abstract_sub t (abs t1 ##>> abs t2 #>> (fn (u1, u2) => c $ u1 $ u2)) + | abs (t as (c as Const (@{const_name z3mod}, _)) $ t1 $ t2) = + abstract_sub t (abs t1 ##>> abs t2 #>> (fn (u1, u2) => c $ u1 $ u2)) + | abs (t as (c as Const (@{const_name HOL.eq}, _)) $ t1 $ t2) = + abstract_sub t (abs t1 ##>> abs t2 #>> (fn (u1, u2) => c $ u1 $ u2)) + | abs (t as (c as Const (@{const_name ord_class.less}, _)) $ t1 $ t2) = + abstract_sub t (abs t1 ##>> abs t2 #>> (fn (u1, u2) => c $ u1 $ u2)) + | abs (t as (c as Const (@{const_name ord_class.less_eq}, _)) $ t1 $ t2) = + abstract_sub t (abs t1 ##>> abs t2 #>> (fn (u1, u2) => c $ u1 $ u2)) + | abs t = abstract_sub t (fn cx => + if can HOLogic.dest_number t then (t, cx) + else + (case apply_abstracters abs (get_arith_abstracters ctxt) t cx of + (SOME u, cx') => (u, cx') + | (NONE, _) => abstract_term t cx)) + in abs u end + + +(* truth axiom *) + +fun true_axiom _ _ _ = @{thm TrueI} + + +(* modus ponens *) + +fun mp _ [p, p_eq_q] _ = discharge 1 [p_eq_q, p] iffD1 + | mp ctxt thms t = replay_rule_error ctxt Z3_Proof.Modus_Ponens thms t + +val mp_oeq = mp + + +(* reflexivity *) + +fun refl ctxt _ t = match_instantiate ctxt t @{thm refl} + + +(* symmetry *) + +fun symm _ [thm] _ = thm RS @{thm sym} + | symm ctxt thms t = replay_rule_error ctxt Z3_Proof.Reflexivity thms t + + +(* transitivity *) + +fun trans _ [thm1, thm2] _ = thm1 RSN (1, thm2 RSN (2, @{thm trans})) + | trans ctxt thms t = replay_rule_error ctxt Z3_Proof.Transitivity thms t + + +(* congruence *) + +fun ctac ctxt prems i st = st |> ( + resolve_tac ctxt (@{thm refl} :: prems) i + ORELSE (cong_tac ctxt i THEN ctac ctxt prems (i + 1) THEN ctac ctxt prems i)) + +fun cong_basic ctxt thms t = + let val st = Thm.trivial (certify_prop ctxt t) + in + (case Seq.pull (ctac ctxt thms 1 st) of 
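+    (* take the first result of the congruence tactic, if any; cong_full
+       below serves as the fall-back when this structural approach fails *)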
+ SOME (thm, _) => thm + | NONE => raise THM ("cong", 0, thms @ [st])) + end + +val cong_dest_rules = @{lemma + "(~ P | Q) & (P | ~ Q) ==> P = Q" + "(P | ~ Q) & (~ P | Q) ==> P = Q" + by fast+} + +fun cong_full ctxt thms t = prove ctxt t (fn ctxt' => + Method.insert_tac ctxt thms + THEN' (Classical.fast_tac ctxt' + ORELSE' dresolve_tac ctxt cong_dest_rules + THEN' Classical.fast_tac ctxt')) + +fun cong ctxt thms = try_provers ctxt Z3_Proof.Monotonicity [ + ("basic", cong_basic ctxt thms), + ("full", cong_full ctxt thms)] thms + + +(* quantifier introduction *) + +val quant_intro_rules = @{lemma + "(!!x. P x = Q x) ==> (ALL x. P x) = (ALL x. Q x)" + "(!!x. P x = Q x) ==> (EX x. P x) = (EX x. Q x)" + "(!!x. (~ P x) = Q x) ==> (~ (EX x. P x)) = (ALL x. Q x)" + "(!!x. (~ P x) = Q x) ==> (~ (ALL x. P x)) = (EX x. Q x)" + by fast+} + +fun quant_intro ctxt [thm] t = + prove ctxt t (K (REPEAT_ALL_NEW (resolve_tac ctxt (thm :: quant_intro_rules)))) + | quant_intro ctxt thms t = replay_rule_error ctxt Z3_Proof.Quant_Intro thms t + + +(* distributivity of conjunctions and disjunctions *) + +(* TODO: there are no tests with this proof rule *) +fun distrib ctxt _ t = + prove_abstract' ctxt t prop_tac (abstract_prop (dest_prop t)) + + +(* elimination of conjunctions *) + +fun and_elim ctxt [thm] t = + prove_abstract ctxt [thm] t prop_tac ( + abstract_lit (dest_prop t) ##>> + abstract_conj (dest_thm thm) #>> + apfst single o swap) + | and_elim ctxt thms t = replay_rule_error ctxt Z3_Proof.And_Elim thms t + + +(* elimination of negated disjunctions *) + +fun not_or_elim ctxt [thm] t = + prove_abstract ctxt [thm] t prop_tac ( + abstract_lit (dest_prop t) ##>> + abstract_not abstract_disj (dest_thm thm) #>> + apfst single o swap) + | not_or_elim ctxt thms t = + replay_rule_error ctxt Z3_Proof.Not_Or_Elim thms t + + +(* rewriting *) + +local + +fun dest_all (Const (@{const_name HOL.All}, _) $ Abs (_, T, t)) nctxt = + let + val (n, nctxt') = Name.variant "" nctxt + val f = Free (n, T) + val t' = Term.subst_bound (f, t) + in dest_all t' nctxt' |>> cons f end + | dest_all t _ = ([], t) + +fun dest_alls t = + let + val nctxt = Name.make_context (Term.add_free_names t []) + val (lhs, rhs) = HOLogic.dest_eq (dest_prop t) + val (ls, lhs') = dest_all lhs nctxt + val (rs, rhs') = dest_all rhs nctxt + in + if eq_list (op aconv) (ls, rs) then SOME (ls, (HOLogic.mk_eq (lhs', rhs'))) + else NONE + end + +fun forall_intr ctxt t thm = + let val ct = Thm.cterm_of ctxt t + in Thm.forall_intr ct thm COMP_INCR @{thm iff_allI} end + +in + +fun focus_eq f ctxt t = + (case dest_alls t of + NONE => f ctxt t + | SOME (vs, t') => fold (forall_intr ctxt) vs (f ctxt t')) + +end + +fun abstract_eq f (Const (@{const_name HOL.eq}, _) $ t1 $ t2) = + f t1 ##>> f t2 #>> HOLogic.mk_eq + | abstract_eq _ t = abstract_term t + +fun prove_prop_rewrite ctxt t = + prove_abstract' ctxt t prop_tac ( + abstract_eq abstract_prop (dest_prop t)) + +fun arith_rewrite_tac ctxt _ = + TRY o Simplifier.simp_tac ctxt + THEN_ALL_NEW (Arith_Data.arith_tac ctxt ORELSE' Clasimp.force_tac ctxt) + +fun prove_arith_rewrite ctxt t = + prove_abstract' ctxt t arith_rewrite_tac ( + abstract_eq (abstract_arith ctxt) (dest_prop t)) + +val lift_ite_thm = @{thm HOL.if_distrib} RS @{thm eq_reflection} + +fun ternary_conv cv = Conv.combination_conv (Conv.binop_conv cv) cv + +fun if_context_conv ctxt ct = + (case Thm.term_of ct of + Const (@{const_name HOL.If}, _) $ _ $ _ $ _ => + ternary_conv (if_context_conv ctxt) + | _ $ (Const (@{const_name HOL.If}, _) $ _ $ _ $ _) => + 
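+      (* an if-then-else in argument position is first lifted out via
+         if_distrib and the conversion then continues on its three
+         arguments *)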
Conv.rewr_conv lift_ite_thm then_conv ternary_conv (if_context_conv ctxt) + | _ => Conv.sub_conv (Conv.top_sweep_conv if_context_conv) ctxt) ct + +fun lift_ite_rewrite ctxt t = + prove ctxt t (fn ctxt => + CONVERSION (HOLogic.Trueprop_conv (Conv.binop_conv (if_context_conv ctxt))) + THEN' resolve_tac ctxt @{thms refl}) + +fun prove_conj_disj_perm ctxt t = prove ctxt t Conj_Disj_Perm.conj_disj_perm_tac + +fun rewrite ctxt _ = try_provers ctxt Z3_Proof.Rewrite [ + ("rules", apply_rule ctxt), + ("conj_disj_perm", prove_conj_disj_perm ctxt), + ("prop_rewrite", prove_prop_rewrite ctxt), + ("arith_rewrite", focus_eq prove_arith_rewrite ctxt), + ("if_rewrite", lift_ite_rewrite ctxt)] [] + +fun rewrite_star ctxt = rewrite ctxt + + +(* pulling quantifiers *) + +fun pull_quant ctxt _ t = prove ctxt t quant_tac + + +(* pushing quantifiers *) + +fun push_quant _ _ _ = raise Fail "unsupported" (* FIXME *) + + +(* elimination of unused bound variables *) + +val elim_all = @{lemma "P = Q ==> (ALL x. P) = Q" by fast} +val elim_ex = @{lemma "P = Q ==> (EX x. P) = Q" by fast} + +fun elim_unused_tac ctxt i st = ( + match_tac ctxt [@{thm refl}] + ORELSE' (match_tac ctxt [elim_all, elim_ex] THEN' elim_unused_tac ctxt) + ORELSE' ( + match_tac ctxt [@{thm iff_allI}, @{thm iff_exI}] + THEN' elim_unused_tac ctxt)) i st + +fun elim_unused ctxt _ t = prove ctxt t elim_unused_tac + + +(* destructive equality resolution *) + +fun dest_eq_res _ _ _ = raise Fail "dest_eq_res" (* FIXME *) + + +(* quantifier instantiation *) + +val quant_inst_rule = @{lemma "~P x | Q ==> ~(ALL x. P x) | Q" by fast} + +fun quant_inst ctxt _ t = prove ctxt t (fn _ => + REPEAT_ALL_NEW (resolve_tac ctxt [quant_inst_rule]) + THEN' resolve_tac ctxt @{thms excluded_middle}) + + +(* propositional lemma *) + +exception LEMMA of unit + +val intro_hyp_rule1 = @{lemma "(~P ==> Q) ==> P | Q" by fast} +val intro_hyp_rule2 = @{lemma "(P ==> Q) ==> ~P | Q" by fast} + +fun norm_lemma thm = + (thm COMP_INCR intro_hyp_rule1) + handle THM _ => thm COMP_INCR intro_hyp_rule2 + +fun negated_prop (@{const HOL.Not} $ t) = HOLogic.mk_Trueprop t + | negated_prop t = HOLogic.mk_Trueprop (HOLogic.mk_not t) + +fun intro_hyps tab (t as @{const HOL.disj} $ t1 $ t2) cx = + lookup_intro_hyps tab t (fold (intro_hyps tab) [t1, t2]) cx + | intro_hyps tab t cx = + lookup_intro_hyps tab t (fn _ => raise LEMMA ()) cx + +and lookup_intro_hyps tab t f (cx as (thm, terms)) = + (case Termtab.lookup tab (negated_prop t) of + NONE => f cx + | SOME hyp => (norm_lemma (Thm.implies_intr hyp thm), t :: terms)) + +fun lemma ctxt (thms as [thm]) t = + (let + val tab = Termtab.make (map (`Thm.term_of) (Thm.chyps_of thm)) + val (thm', terms) = intro_hyps tab (dest_prop t) (thm, []) + in + prove_abstract ctxt [thm'] t prop_tac ( + fold (snd oo abstract_lit) terms #> + abstract_disj (dest_thm thm') #>> single ##>> + abstract_disj (dest_prop t)) + end + handle LEMMA () => replay_error ctxt "Bad proof state" Z3_Proof.Lemma thms t) + | lemma ctxt thms t = replay_rule_error ctxt Z3_Proof.Lemma thms t + + +(* unit resolution *) + +fun abstract_unit (t as (@{const HOL.Not} $ (@{const HOL.disj} $ t1 $ t2))) = + abstract_sub t (abstract_unit t1 ##>> abstract_unit t2 #>> + HOLogic.mk_not o HOLogic.mk_disj) + | abstract_unit (t as (@{const HOL.disj} $ t1 $ t2)) = + abstract_sub t (abstract_unit t1 ##>> abstract_unit t2 #>> + HOLogic.mk_disj) + | abstract_unit t = abstract_lit t + +fun unit_res ctxt thms t = + prove_abstract ctxt thms t prop_tac ( + fold_map (abstract_unit o dest_thm) thms ##>> + 
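+    (* Abstract the premise clauses and the conclusion down to propositional literals;
+       prop_tac then discharges the abstracted unit-resolution step. *)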
abstract_unit (dest_prop t) #>> + (fn (prems, concl) => (prems, concl))) + + +(* iff-true *) + +val iff_true_rule = @{lemma "P ==> P = True" by fast} + +fun iff_true _ [thm] _ = thm RS iff_true_rule + | iff_true ctxt thms t = replay_rule_error ctxt Z3_Proof.Iff_True thms t + + +(* iff-false *) + +val iff_false_rule = @{lemma "~P ==> P = False" by fast} + +fun iff_false _ [thm] _ = thm RS iff_false_rule + | iff_false ctxt thms t = replay_rule_error ctxt Z3_Proof.Iff_False thms t + + +(* commutativity *) + +fun comm ctxt _ t = match_instantiate ctxt t @{thm eq_commute} + + +(* definitional axioms *) + +fun def_axiom_disj ctxt t = + (case dest_prop t of + @{const HOL.disj} $ u1 $ u2 => + prove_abstract' ctxt t prop_tac ( + abstract_prop u2 ##>> abstract_prop u1 #>> HOLogic.mk_disj o swap) + | u => prove_abstract' ctxt t prop_tac (abstract_prop u)) + +fun def_axiom ctxt _ = try_provers ctxt Z3_Proof.Def_Axiom [ + ("rules", apply_rule ctxt), + ("disj", def_axiom_disj ctxt)] [] + + +(* application of definitions *) + +fun apply_def _ [thm] _ = thm (* TODO: cover also the missing cases *) + | apply_def ctxt thms t = replay_rule_error ctxt Z3_Proof.Apply_Def thms t + + +(* iff-oeq *) + +fun iff_oeq _ _ _ = raise Fail "iff_oeq" (* FIXME *) + + +(* negation normal form *) + +fun nnf_prop ctxt thms t = + prove_abstract ctxt thms t prop_tac ( + fold_map (abstract_prop o dest_thm) thms ##>> + abstract_prop (dest_prop t)) + +fun nnf ctxt rule thms = try_provers ctxt rule [ + ("prop", nnf_prop ctxt thms), + ("quant", quant_intro ctxt [hd thms])] thms + +fun nnf_pos ctxt = nnf ctxt Z3_Proof.Nnf_Pos +fun nnf_neg ctxt = nnf ctxt Z3_Proof.Nnf_Neg + + +(* theory lemmas *) + +fun arith_th_lemma_tac ctxt prems = + Method.insert_tac ctxt prems + THEN' SELECT_GOAL (Local_Defs.unfold0_tac ctxt @{thms z3div_def z3mod_def}) + THEN' Arith_Data.arith_tac ctxt + +fun arith_th_lemma ctxt thms t = + prove_abstract ctxt thms t arith_th_lemma_tac ( + fold_map (abstract_arith ctxt o dest_thm) thms ##>> + abstract_arith ctxt (dest_prop t)) + +val _ = Theory.setup (Context.theory_map (add_th_lemma_method ("arith", arith_th_lemma))) + +fun th_lemma name ctxt thms = + (case Symtab.lookup (get_th_lemma_method ctxt) name of + SOME method => method ctxt thms + | NONE => replay_error ctxt "Bad theory" (Z3_Proof.Th_Lemma name) thms) + + +(* mapping of rules to methods *) + +fun unsupported rule ctxt = replay_error ctxt "Unsupported" rule +fun assumed rule ctxt = replay_error ctxt "Assumed" rule + +fun choose Z3_Proof.True_Axiom = true_axiom + | choose (r as Z3_Proof.Asserted) = assumed r + | choose (r as Z3_Proof.Goal) = assumed r + | choose Z3_Proof.Modus_Ponens = mp + | choose Z3_Proof.Reflexivity = refl + | choose Z3_Proof.Symmetry = symm + | choose Z3_Proof.Transitivity = trans + | choose (r as Z3_Proof.Transitivity_Star) = unsupported r + | choose Z3_Proof.Monotonicity = cong + | choose Z3_Proof.Quant_Intro = quant_intro + | choose Z3_Proof.Distributivity = distrib + | choose Z3_Proof.And_Elim = and_elim + | choose Z3_Proof.Not_Or_Elim = not_or_elim + | choose Z3_Proof.Rewrite = rewrite + | choose Z3_Proof.Rewrite_Star = rewrite_star + | choose Z3_Proof.Pull_Quant = pull_quant + | choose (r as Z3_Proof.Pull_Quant_Star) = unsupported r + | choose Z3_Proof.Push_Quant = push_quant + | choose Z3_Proof.Elim_Unused_Vars = elim_unused + | choose Z3_Proof.Dest_Eq_Res = dest_eq_res + | choose Z3_Proof.Quant_Inst = quant_inst + | choose (r as Z3_Proof.Hypothesis) = assumed r + | choose Z3_Proof.Lemma = lemma + | choose 
Z3_Proof.Unit_Resolution = unit_res + | choose Z3_Proof.Iff_True = iff_true + | choose Z3_Proof.Iff_False = iff_false + | choose Z3_Proof.Commutativity = comm + | choose Z3_Proof.Def_Axiom = def_axiom + | choose (r as Z3_Proof.Intro_Def) = assumed r + | choose Z3_Proof.Apply_Def = apply_def + | choose Z3_Proof.Iff_Oeq = iff_oeq + | choose Z3_Proof.Nnf_Pos = nnf_pos + | choose Z3_Proof.Nnf_Neg = nnf_neg + | choose (r as Z3_Proof.Nnf_Star) = unsupported r + | choose (r as Z3_Proof.Cnf_Star) = unsupported r + | choose (r as Z3_Proof.Skolemize) = assumed r + | choose Z3_Proof.Modus_Ponens_Oeq = mp_oeq + | choose (Z3_Proof.Th_Lemma name) = th_lemma name + +fun with_tracing rule method ctxt thms t = + let val _ = trace_goal ctxt rule thms t + in method ctxt thms t end + +fun method_for rule = with_tracing rule (choose rule) + +end; diff --git a/src/test-gen/SMT/z3_replay_rules.ML b/src/test-gen/SMT/z3_replay_rules.ML new file mode 100644 index 0000000..966100b --- /dev/null +++ b/src/test-gen/SMT/z3_replay_rules.ML @@ -0,0 +1,54 @@ +(* Title: HOL/Tools/SMT/z3_replay_rules.ML + Author: Sascha Boehme, TU Muenchen + +Custom rules for Z3 proof replay. +*) + +signature Z3_REPLAY_RULES = +sig + val apply: Proof.context -> cterm -> thm option +end; + +structure Z3_Replay_Rules: Z3_REPLAY_RULES = +struct + +structure Data = Generic_Data +( + type T = thm Net.net + val empty = Net.empty + val extend = I + val merge = Net.merge Thm.eq_thm +) + +fun maybe_instantiate ct thm = + try Thm.first_order_match (Thm.cprop_of thm, ct) + |> Option.map (fn inst => Thm.instantiate inst thm) + +fun apply ctxt ct = + let + val net = Data.get (Context.Proof ctxt) + val xthms = Net.match_term net (Thm.term_of ct) + + fun select ct = map_filter (maybe_instantiate ct) xthms + fun select' ct = + let val thm = Thm.trivial ct + in map_filter (try (fn rule => rule COMP thm)) xthms end + + in try hd (case select ct of [] => select' ct | xthms' => xthms') end + +val prep = `Thm.prop_of + +fun ins thm net = Net.insert_term Thm.eq_thm (prep thm) net handle Net.INSERT => net +fun del thm net = Net.delete_term Thm.eq_thm (prep thm) net handle Net.DELETE => net + +val add = Thm.declaration_attribute (Data.map o ins) +val del = Thm.declaration_attribute (Data.map o del) + +val name = Binding.name "z3_rule" + +val description = "declaration of Z3 proof rules" + +val _ = Theory.setup (Attrib.setup name (Attrib.add_del add del) description #> + Global_Theory.add_thms_dynamic (name, Net.content o Data.get)) + +end; diff --git a/src/test-gen/SMT/z3_replay_util.ML b/src/test-gen/SMT/z3_replay_util.ML new file mode 100644 index 0000000..34419ec --- /dev/null +++ b/src/test-gen/SMT/z3_replay_util.ML @@ -0,0 +1,155 @@ +(* Title: HOL/Tools/SMT/z3_replay_util.ML + Author: Sascha Boehme, TU Muenchen + +Helper functions required for Z3 proof replay. 
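+It provides theorem nets, the under_assumption/discharge combinators, a faster
+variant of COMP (precompose/compose), and the simpset infrastructure used during replay.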
+*) + +signature Z3_REPLAY_UTIL = +sig + (*theorem nets*) + val thm_net_of: ('a -> thm) -> 'a list -> 'a Net.net + val net_instances: (int * thm) Net.net -> cterm -> (int * thm) list + + (*proof combinators*) + val under_assumption: (thm -> thm) -> cterm -> thm + val discharge: thm -> thm -> thm + + (*a faster COMP*) + type compose_data = cterm list * (cterm -> cterm list) * thm + val precompose: (cterm -> cterm list) -> thm -> compose_data + val precompose2: (cterm -> cterm * cterm) -> thm -> compose_data + val compose: compose_data -> thm -> thm + + (*simpset*) + val add_simproc: Simplifier.simproc -> Context.generic -> Context.generic + val make_simpset: Proof.context -> thm list -> simpset +end; + +structure Z3_Replay_Util: Z3_REPLAY_UTIL = +struct + +(* theorem nets *) + +fun thm_net_of f xthms = + let fun insert xthm = Net.insert_term (K false) (Thm.prop_of (f xthm), xthm) + in fold insert xthms Net.empty end + +fun maybe_instantiate ct thm = + try Thm.first_order_match (Thm.cprop_of thm, ct) + |> Option.map (fn inst => Thm.instantiate inst thm) + +local + fun instances_from_net match f net ct = + let + val lookup = if match then Net.match_term else Net.unify_term + val xthms = lookup net (Thm.term_of ct) + fun select ct = map_filter (f (maybe_instantiate ct)) xthms + fun select' ct = + let val thm = Thm.trivial ct + in map_filter (f (try (fn rule => rule COMP thm))) xthms end + in (case select ct of [] => select' ct | xthms' => xthms') end +in + +fun net_instances net = + instances_from_net false (fn f => fn (i, thm) => Option.map (pair i) (f thm)) + net + +end + + +(* proof combinators *) + +fun under_assumption f ct = + let val ct' = SMT_Util.mk_cprop ct in Thm.implies_intr ct' (f (Thm.assume ct')) end + +fun discharge p pq = Thm.implies_elim pq p + + +(* a faster COMP *) + +type compose_data = cterm list * (cterm -> cterm list) * thm + +fun list2 (x, y) = [x, y] + +fun precompose f rule : compose_data = (f (Thm.cprem_of rule 1), f, rule) +fun precompose2 f rule : compose_data = precompose (list2 o f) rule + +fun compose (cvs, f, rule) thm = + discharge thm + (Thm.instantiate ([], map (dest_Var o Thm.term_of) cvs ~~ f (Thm.cprop_of thm)) rule) + + +(* simpset *) + +local + val antisym_le1 = mk_meta_eq @{thm order_class.antisym_conv} + val antisym_le2 = mk_meta_eq @{thm linorder_class.antisym_conv2} + val antisym_less1 = mk_meta_eq @{thm linorder_class.antisym_conv1} + val antisym_less2 = mk_meta_eq @{thm linorder_class.antisym_conv3} + + fun eq_prop t thm = HOLogic.mk_Trueprop t aconv Thm.prop_of thm + fun dest_binop ((c as Const _) $ t $ u) = (c, t, u) + | dest_binop t = raise TERM ("dest_binop", [t]) + + fun prove_antisym_le ctxt ct = + let + val (le, r, s) = dest_binop (Thm.term_of ct) + val less = Const (@{const_name less}, Term.fastype_of le) + val prems = Simplifier.prems_of ctxt + in + (case find_first (eq_prop (le $ s $ r)) prems of + NONE => + find_first (eq_prop (HOLogic.mk_not (less $ r $ s))) prems + |> Option.map (fn thm => thm RS antisym_less1) + | SOME thm => SOME (thm RS antisym_le1)) + end + handle THM _ => NONE + + fun prove_antisym_less ctxt ct = + let + val (less, r, s) = dest_binop (HOLogic.dest_not (Thm.term_of ct)) + val le = Const (@{const_name less_eq}, Term.fastype_of less) + val prems = Simplifier.prems_of ctxt + in + (case find_first (eq_prop (le $ r $ s)) prems of + NONE => + find_first (eq_prop (HOLogic.mk_not (less $ s $ r))) prems + |> Option.map (fn thm => thm RS antisym_less2) + | SOME thm => SOME (thm RS antisym_le2)) + end + handle THM _ => NONE 
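+  (* These simprocs close antisymmetry goals during replay: with a premise "y <= x"
+     the atom "x <= y" rewrites to "x = y" (order_class.antisym_conv), and the dual
+     simproc handles negated strict inequalities; basic_simpset below combines them
+     with the usual arithmetic simp rules and the z3div/z3mod unfoldings. *)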
+ + val basic_simpset = + simpset_of (put_simpset HOL_ss @{context} + addsimps @{thms field_simps times_divide_eq_right times_divide_eq_left arith_special + arith_simps rel_simps array_rules z3div_def z3mod_def NO_MATCH_def} + addsimprocs [@{simproc numeral_divmod}, + Simplifier.make_simproc @{context} "fast_int_arith" + {lhss = [@{term "(m::int) < n"}, @{term "(m::int) \ n"}, @{term "(m::int) = n"}], + proc = K Lin_Arith.simproc}, + Simplifier.make_simproc @{context} "antisym_le" + {lhss = [@{term "(x::'a::order) \ y"}], + proc = K prove_antisym_le}, + Simplifier.make_simproc @{context} "antisym_less" + {lhss = [@{term "\ (x::'a::linorder) < y"}], + proc = K prove_antisym_less}]) + + structure Simpset = Generic_Data + ( + type T = simpset + val empty = basic_simpset + val extend = I + val merge = Simplifier.merge_ss + ) +in + +fun add_simproc simproc context = + Simpset.map (simpset_map (Context.proof_of context) + (fn ctxt => ctxt addsimprocs [simproc])) context + +fun make_simpset ctxt rules = + simpset_of (put_simpset (Simpset.get (Context.Proof ctxt)) ctxt addsimps rules) + +end + +end; From cb9aac907f9462b49ec1602a9952899f3e102ace Mon Sep 17 00:00:00 2001 From: peyman Date: Sat, 5 Feb 2022 15:44:43 -0600 Subject: [PATCH 4/8] [test-gen] split main into dir and upate ROOT with Serguei (cherry picked from commit cbe94ffd6cbae6bdf044c5eed4c2369e58399b90) --- src/test-gen/src/ROOT | 6 +- src/test-gen/src/main/Automata.thy | 391 ++++ src/test-gen/src/main/BackendUtils.thy | 93 + src/test-gen/src/main/EFSM_Toolkit.thy | 167 ++ src/test-gen/src/main/IOCO.thy | 126 ++ src/test-gen/src/main/Interleaving.thy | 244 +++ src/test-gen/src/main/Monads.thy | 1256 ++++++++++++ src/test-gen/src/main/Observers.thy | 200 ++ .../src/main/Old_SMT/old_smt_builtin.ML | 231 +++ .../src/main/Old_SMT/old_smt_config.ML | 254 +++ .../src/main/Old_SMT/old_smt_datatypes.ML | 94 + .../src/main/Old_SMT/old_smt_failure.ML | 61 + .../src/main/Old_SMT/old_smt_normalize.ML | 652 +++++++ src/test-gen/src/main/Old_SMT/old_smt_real.ML | 134 ++ .../src/main/Old_SMT/old_smt_setup_solvers.ML | 189 ++ .../src/main/Old_SMT/old_smt_solver.ML | 374 ++++ .../src/main/Old_SMT/old_smt_translate.ML | 589 ++++++ .../src/main/Old_SMT/old_smt_utils.ML | 221 +++ src/test-gen/src/main/Old_SMT/old_smt_word.ML | 146 ++ .../src/main/Old_SMT/old_smtlib_interface.ML | 161 ++ .../src/main/Old_SMT/old_z3_interface.ML | 239 +++ src/test-gen/src/main/Old_SMT/old_z3_model.ML | 337 ++++ .../src/main/Old_SMT/old_z3_proof_literals.ML | 363 ++++ .../src/main/Old_SMT/old_z3_proof_methods.ML | 149 ++ .../src/main/Old_SMT/old_z3_proof_parser.ML | 446 +++++ .../Old_SMT/old_z3_proof_reconstruction.ML | 891 +++++++++ .../src/main/Old_SMT/old_z3_proof_tools.ML | 374 ++++ src/test-gen/src/main/QuickCheckBackend.thy | 113 ++ src/test-gen/src/main/RandomBackend.thy | 243 +++ src/test-gen/src/main/SMT/z3_replay_util.ML | 155 ++ src/test-gen/src/main/SMTBackend.thy | 450 +++++ src/test-gen/src/main/SharedMemory.thy | 1389 ++++++++++++++ src/test-gen/src/main/Term_Tactics.thy | 334 ++++ src/test-gen/src/main/TestEnv.ML | 458 +++++ src/test-gen/src/main/TestEnv.thy | 726 +++++++ src/test-gen/src/main/TestGen.thy | 1704 +++++++++++++++++ src/test-gen/src/main/TestLib.thy | 57 + src/test-gen/src/main/TestRefinements.thy | 248 +++ src/test-gen/src/main/TestScript.thy | 164 ++ src/test-gen/src/main/TestSequence.thy | 1001 ++++++++++ src/test-gen/src/main/Testing.thy | 57 + src/test-gen/src/main/clocks.ML | 352 ++++ src/test-gen/src/main/clocks.thy | 370 ++++ 
.../main/codegen_C_pthread/Code_C_pthread.thy | 86 + .../main/codegen_fsharp/Code_Char_Fsharp.thy | 103 + .../codegen_fsharp/Code_Char_chr_Fsharp.thy | 83 + .../codegen_fsharp/Code_Integer_Fsharp.thy | 101 + .../codegen_fsharp/Code_String_Fsharp.thy | 68 + .../src/main/codegen_fsharp/code_fsharp.ML | 618 ++++++ .../src/main/codegen_fsharp/code_fsharp.thy | 130 ++ .../src/main/codegen_fsharp/examples/AQ.thy | 71 + .../main/codegen_fsharp/examples/SemiG.thy | 92 + .../main/codegen_fsharp/upstream/code_ml.ML | 898 +++++++++ .../src/main/codegen_gdb/Code_gdb_script.thy | 322 ++++ src/test-gen/src/main/config.sml | 93 + .../src/main/debug/profiling_begin.thy | 49 + src/test-gen/src/main/debug/profiling_end.thy | 51 + src/test-gen/src/main/isar_setup.ML | 178 ++ src/test-gen/src/main/log.thy | 132 ++ .../src/main/new_smt_patch/SMT_patch.thy | 435 +++++ .../main/new_smt_patch/smt_config_patch.ML | 265 +++ .../main/new_smt_patch/smt_normalize_patch.ML | 556 ++++++ .../main/new_smt_patch/smt_solver_patch.ML | 346 ++++ .../main/new_smt_patch/smt_systems_patch.ML | 159 ++ .../new_smt_patch/smtlib_interface_patch.ML | 172 ++ .../src/main/new_smt_patch/smtlib_patch.ML | 202 ++ .../src/main/new_smt_patch/z3_model.ML | 113 ++ .../src/main/new_smt_patch/z3_replay_patch.ML | 262 +++ .../src/main/smt_patch/Old_SMT_patch.thy | 431 +++++ .../main/smt_patch/old_smt_config_patch.ML | 254 +++ .../main/smt_patch/old_smt_failure_patch.ML | 61 + .../main/smt_patch/old_smt_normalize_patch.ML | 652 +++++++ .../smt_patch/old_smt_setup_solvers_patch.ML | 189 ++ .../main/smt_patch/old_smt_solver_patch.ML | 378 ++++ .../main/smt_patch/old_smt_translate_patch.ML | 589 ++++++ .../main/smt_patch/old_z3_interface_patch.ML | 239 +++ .../src/main/smt_patch/old_z3_model_patch.ML | 337 ++++ .../smt_patch/old_z3_proof_parser_patch.ML | 446 +++++ .../old_z3_proof_reconstruction_patch.ML | 891 +++++++++ src/test-gen/src/main/version.thy | 91 + 80 files changed, 26350 insertions(+), 2 deletions(-) create mode 100644 src/test-gen/src/main/Automata.thy create mode 100644 src/test-gen/src/main/BackendUtils.thy create mode 100644 src/test-gen/src/main/EFSM_Toolkit.thy create mode 100644 src/test-gen/src/main/IOCO.thy create mode 100644 src/test-gen/src/main/Interleaving.thy create mode 100644 src/test-gen/src/main/Monads.thy create mode 100644 src/test-gen/src/main/Observers.thy create mode 100644 src/test-gen/src/main/Old_SMT/old_smt_builtin.ML create mode 100644 src/test-gen/src/main/Old_SMT/old_smt_config.ML create mode 100644 src/test-gen/src/main/Old_SMT/old_smt_datatypes.ML create mode 100644 src/test-gen/src/main/Old_SMT/old_smt_failure.ML create mode 100644 src/test-gen/src/main/Old_SMT/old_smt_normalize.ML create mode 100644 src/test-gen/src/main/Old_SMT/old_smt_real.ML create mode 100644 src/test-gen/src/main/Old_SMT/old_smt_setup_solvers.ML create mode 100644 src/test-gen/src/main/Old_SMT/old_smt_solver.ML create mode 100644 src/test-gen/src/main/Old_SMT/old_smt_translate.ML create mode 100644 src/test-gen/src/main/Old_SMT/old_smt_utils.ML create mode 100644 src/test-gen/src/main/Old_SMT/old_smt_word.ML create mode 100644 src/test-gen/src/main/Old_SMT/old_smtlib_interface.ML create mode 100644 src/test-gen/src/main/Old_SMT/old_z3_interface.ML create mode 100644 src/test-gen/src/main/Old_SMT/old_z3_model.ML create mode 100644 src/test-gen/src/main/Old_SMT/old_z3_proof_literals.ML create mode 100644 src/test-gen/src/main/Old_SMT/old_z3_proof_methods.ML create mode 100644 src/test-gen/src/main/Old_SMT/old_z3_proof_parser.ML 
create mode 100644 src/test-gen/src/main/Old_SMT/old_z3_proof_reconstruction.ML create mode 100644 src/test-gen/src/main/Old_SMT/old_z3_proof_tools.ML create mode 100644 src/test-gen/src/main/QuickCheckBackend.thy create mode 100644 src/test-gen/src/main/RandomBackend.thy create mode 100644 src/test-gen/src/main/SMT/z3_replay_util.ML create mode 100644 src/test-gen/src/main/SMTBackend.thy create mode 100644 src/test-gen/src/main/SharedMemory.thy create mode 100644 src/test-gen/src/main/Term_Tactics.thy create mode 100644 src/test-gen/src/main/TestEnv.ML create mode 100644 src/test-gen/src/main/TestEnv.thy create mode 100644 src/test-gen/src/main/TestGen.thy create mode 100644 src/test-gen/src/main/TestLib.thy create mode 100644 src/test-gen/src/main/TestRefinements.thy create mode 100644 src/test-gen/src/main/TestScript.thy create mode 100644 src/test-gen/src/main/TestSequence.thy create mode 100644 src/test-gen/src/main/Testing.thy create mode 100644 src/test-gen/src/main/clocks.ML create mode 100644 src/test-gen/src/main/clocks.thy create mode 100644 src/test-gen/src/main/codegen_C_pthread/Code_C_pthread.thy create mode 100644 src/test-gen/src/main/codegen_fsharp/Code_Char_Fsharp.thy create mode 100644 src/test-gen/src/main/codegen_fsharp/Code_Char_chr_Fsharp.thy create mode 100644 src/test-gen/src/main/codegen_fsharp/Code_Integer_Fsharp.thy create mode 100644 src/test-gen/src/main/codegen_fsharp/Code_String_Fsharp.thy create mode 100644 src/test-gen/src/main/codegen_fsharp/code_fsharp.ML create mode 100644 src/test-gen/src/main/codegen_fsharp/code_fsharp.thy create mode 100755 src/test-gen/src/main/codegen_fsharp/examples/AQ.thy create mode 100755 src/test-gen/src/main/codegen_fsharp/examples/SemiG.thy create mode 100644 src/test-gen/src/main/codegen_fsharp/upstream/code_ml.ML create mode 100644 src/test-gen/src/main/codegen_gdb/Code_gdb_script.thy create mode 100644 src/test-gen/src/main/config.sml create mode 100644 src/test-gen/src/main/debug/profiling_begin.thy create mode 100644 src/test-gen/src/main/debug/profiling_end.thy create mode 100644 src/test-gen/src/main/isar_setup.ML create mode 100644 src/test-gen/src/main/log.thy create mode 100644 src/test-gen/src/main/new_smt_patch/SMT_patch.thy create mode 100644 src/test-gen/src/main/new_smt_patch/smt_config_patch.ML create mode 100644 src/test-gen/src/main/new_smt_patch/smt_normalize_patch.ML create mode 100644 src/test-gen/src/main/new_smt_patch/smt_solver_patch.ML create mode 100644 src/test-gen/src/main/new_smt_patch/smt_systems_patch.ML create mode 100644 src/test-gen/src/main/new_smt_patch/smtlib_interface_patch.ML create mode 100644 src/test-gen/src/main/new_smt_patch/smtlib_patch.ML create mode 100644 src/test-gen/src/main/new_smt_patch/z3_model.ML create mode 100644 src/test-gen/src/main/new_smt_patch/z3_replay_patch.ML create mode 100644 src/test-gen/src/main/smt_patch/Old_SMT_patch.thy create mode 100644 src/test-gen/src/main/smt_patch/old_smt_config_patch.ML create mode 100644 src/test-gen/src/main/smt_patch/old_smt_failure_patch.ML create mode 100644 src/test-gen/src/main/smt_patch/old_smt_normalize_patch.ML create mode 100644 src/test-gen/src/main/smt_patch/old_smt_setup_solvers_patch.ML create mode 100644 src/test-gen/src/main/smt_patch/old_smt_solver_patch.ML create mode 100644 src/test-gen/src/main/smt_patch/old_smt_translate_patch.ML create mode 100644 src/test-gen/src/main/smt_patch/old_z3_interface_patch.ML create mode 100644 src/test-gen/src/main/smt_patch/old_z3_model_patch.ML create mode 100644 
src/test-gen/src/main/smt_patch/old_z3_proof_parser_patch.ML create mode 100644 src/test-gen/src/main/smt_patch/old_z3_proof_reconstruction_patch.ML create mode 100644 src/test-gen/src/main/version.thy diff --git a/src/test-gen/src/ROOT b/src/test-gen/src/ROOT index 6896e05..fe0b41f 100644 --- a/src/test-gen/src/ROOT +++ b/src/test-gen/src/ROOT @@ -1,14 +1,16 @@ -session "HOL-TestGenLib" in "test" = HOL + +session "HOL-TestGenLib" in "test" = "HOL-Library" + description {* HOL-TestGen Basic Libraries without Code-Generators and Testgenerator *} (*directories "/cygdrive/c/Program Files (x86)/Isabelle HOL/Isabelle2021/hol-testgen-1.9.1/hol-testgen-1.9.1/src"*) theories "TestLib" -session "HOL-TestGen" (main) = "HOL-TestGenLib" + +session "HOL-TestGen" (main) in "main" = "HOL-TestGenLib" + description {* HOL-TestGen *} (*directories "/cygdrive/c/Program Files (x86)/Isabelle HOL/Isabelle2021/hol-testgen-1.9.1/hol-testgen-1.9.1/src"*) + (*directories "../../../../Isabelle-linux/Isabelle2021-1/src/HOL"*) + directories "codegen_fsharp" "codegen_gdb" theories "codegen_fsharp/Code_String_Fsharp" "codegen_fsharp/Code_Char_chr_Fsharp" diff --git a/src/test-gen/src/main/Automata.thy b/src/test-gen/src/main/Automata.thy new file mode 100644 index 0000000..f3018b8 --- /dev/null +++ b/src/test-gen/src/main/Automata.thy @@ -0,0 +1,391 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * Automata.thy --- the base testing theory for automatas in sequence testing. + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2007 ETH Zurich, Switzerland + * 2009-2017 Univ. Paris-Sud, France + * 2009-2015 Achim D. Brucker, Germany + * 2015-2017 The University of Sheffield, UK + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ ******************************************************************************) +(* $Id:$ *) + +chapter {* Basic Automaton Theory *} + +theory Automata imports Main +begin + +text{* Re-Definition of the following type synonyms from Monad-Theory - +apart from that, these theories are independent. *} +type_synonym ('o, '\) MON\<^sub>S\<^sub>E = "'\ \ ('o \ '\)" (* = '\ \ ('o \ '\)option *) +type_synonym ('o, '\) MON\<^sub>S\<^sub>B = "'\ \ ('o \ '\) set" +type_synonym ('o, '\) MON\<^sub>S\<^sub>B\<^sub>E = "'\ \ (('o \ '\) set) option" + + +subsection{* Deterministic I/O Automaton *} + + +class fin = + assumes finite: "finite(UNIV::'a set)" + + +record ('\, '\) DA = + init :: "'\" + step :: "'\ \ '\ \ '\" + accept :: "'\ set" + + +typedef ('\::fin, '\::fin) DFA = "{x::('\, '\) DA. True}" +proof - + show "\x. x \ {x. True}" by auto +qed + +record ('\, '\) pDA = + init :: "'\" + step :: "'\ \ '\ \ '\" + accept :: "'\ set" + +typ "'a option" +record ('\, '\) NDA = + init :: "'\" + step :: "('\ \ '\ \ '\)set" + accept :: "'\ set" + + +type_synonym ('\, 'o, '\) ioNDA = "('\ \ 'o, '\) NDA" + +record ('\, '\) EFSM = + init :: "'\" + step :: "('\ \ '\ \ '\)set" + accept :: "'\ set" + + +record ('\, 'o, '\) det_io_atm = + init :: "'\" + step :: "'\ \ ('o, '\) MON\<^sub>S\<^sub>E" + + +subsection{* Non-deterministic I/O automaton *} + +text{* + We will use two styles of non-deterministic automaton: Labelled Transition + Systems (LTS), which are intensively used in the literature, but tend to anihilate + the difference between input and output, and non-deterministic automaton, which + make this difference explicit and which have a closer connection to monads used + for the operational aspects of testing. +*} + +text{* + There we are: labelled transition systems. +*} + +record ('\, 'o, '\) lts = + init :: "'\ set" + step :: "('\ \ ('\ \ 'o) \ '\) set" (* false : this must be "('\ \ ('\ + 'o) \ '\) set" *) + +text{* And, equivalently; non-deterministic io automata. *} + +record ('\, 'o, '\) ndet_io_atm = + init :: "'\ set" + step :: "'\ \ ('o, '\) MON\<^sub>S\<^sub>B" + + +text{* + First, we will prove the fundamental equivalence of these two notions. + + We refrain from a formal definition of explicit conversion functions + and leave this internally in this proof (i.e. the existential witnesses). +*} + +definition det2ndet :: "('\, 'o, '\) det_io_atm \ ('\, 'o, '\) ndet_io_atm" +where "det2ndet A = \ndet_io_atm.init = {det_io_atm.init A}, + ndet_io_atm.step = + \ \ \. if \ \ dom(det_io_atm.step A \) + then {the(det_io_atm.step A \ \)} + else {} \" + +text{* + The following theorem establishes the fact that deterministic + automaton can be injectively embedded in non-deterministic ones. +*} +lemma det2ndet_injective : "inj det2ndet" + apply(auto simp: inj_on_def det2ndet_def) + apply(tactic {* Record.split_simp_tac @{context} [] (K ~1) 1*}, simp) + apply(simp (no_asm_simp) add: fun_eq_iff, auto) + apply(drule_tac x=x in fun_cong, drule_tac x=xa in fun_cong) + apply(case_tac "xa \ dom (step x)", simp_all) + apply(case_tac "xa \ dom (stepa x)", + simp_all add: fun_eq_iff[symmetric], auto) + apply(case_tac "xa \ dom (stepa x)", auto simp: fun_eq_iff[symmetric]) + apply(erule contrapos_np, simp) + apply(drule Product_Type.split_paired_All[THEN iffD2])+ + apply(simp only: Option.not_Some_eq) +done + + +text{* + We distinguish two forms of determinism - global determinism, where for each + state and input \emph{at most} one output-successor state + is assigned. 
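+  Formally, this means that the set of initial states is a singleton and that, for every
+  input and state, the step relation yields at most one output/successor pair; this is
+  exactly what the predicate deterministic below requires, and the image of det2ndet
+  always satisfies it.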
+*} +definition deterministic :: "('\, 'o, '\) ndet_io_atm \ bool" +where "deterministic atm = (((\ x. ndet_io_atm.init atm = {x}) \ + (\ \ out. \ p1 \ step atm \ out. + \ p2 \ step atm \ out. + p1 = p2)))" + +text{* In contrast, transition relations *} +definition \deterministic :: "('\, 'o, '\) ndet_io_atm \ bool" +where "\deterministic atm = (\ x. ndet_io_atm.init atm = {x} \ + (\ \ out. + \ p1 \ step atm \ out. + \ p2 \ step atm \ out. + fst p1 = fst p2 \ snd p1 = snd p2))" + + +lemma det2ndet_deterministic: "deterministic (det2ndet atm)" + by(auto simp:deterministic_def det2ndet_def) + +lemma det2ndet_\deterministic: "\deterministic (det2ndet atm)" + by(auto simp: \deterministic_def det2ndet_def) + + +text{* + The following theorem establishes the isomorphism of the two concepts + IO-automata and LTS. We will therefore concentrate in the sequel on IO-Automata, + which have a slightly more realistic operational behaviour: you give the program + under test an input and get a possible set of responses rather than "agreeing + with the program under test" on a set of input-output-pairs. +*} + +definition ndet2lts :: "('\, 'o, '\) ndet_io_atm \ ('\, 'o, '\) lts" +where "ndet2lts A = \lts.init = init A, + lts.step = {(s,io,s').(snd io,s') \ step A (fst io) s}\" + +definition lts2ndet :: " ('\,'o,'\) lts \ ('\, 'o, '\) ndet_io_atm" +where "lts2ndet A = \init = lts.init A, + step = \ i s. {(out,s'). (s, (i,out), s') + \ lts.step A}\" + +lemma ndet_io_atm_isomorph_lts : "bij ndet2lts" + apply (auto simp: bij_def inj_on_def surj_def ndet2lts_def) + apply (tactic {* Record.split_simp_tac @{context} [] (K ~1) 1*}, simp) + apply (rule ext, rule ext, simp add: set_eq_iff) + apply (rule_tac x = "lts2ndet y" in exI) + apply (simp add: lts2ndet_def) + done + + +(* Lemma missing: for deterministic ndet_atm's, det2ndet is even bijective, +i.e the definition above is indeed a characterization. *) + +text{* + The following well-formedness property is important: for every state, + there is a valid transition. Otherwise, some states may never be part of an + (infinite) trace. +*} + +definition is_enabled :: "['\ \ ('o, '\) MON\<^sub>S\<^sub>B, '\ ] \ bool" +where "is_enabled rel \ = (\ \. rel \ \ \ {})" + +definition is_enabled' :: "['\ \ ('o, '\) MON\<^sub>S\<^sub>E, '\ ] \ bool" +where "is_enabled' rel \ = (\ \. \ \ dom(rel \))" + + +definition live_wff:: "('\, 'o, '\) ndet_io_atm \ bool" +where "live_wff atm = (\ \. \ \. step atm \ \ \ {})" + +lemma life_wff_charn: "live_wff atm = (\ \. is_enabled (step atm) \)" + by(auto simp: live_wff_def is_enabled_def) + +text{* + There are essentially two approaches: either we disallow non-enabled transition + systems---via life\_wff\_charn---or we restrict our machinery for traces and prefixed closed + sets of runs over them +*} + +section{* Rich Traces and its Derivatives *} +text{* + The easiest way to define the concept of traces is on LTS. Via the injections described + above, we can define notions like deterministic automata rich trace, and i/o automata rich + trace. Moreover, we can easily project event traces or state traces from rich traces. 
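+  Concretely, a rich trace assigns to every position the pre-state, the input/output
+  event and the post-state; composing with fst projects out the state trace and
+  composing with fst o snd the event trace, as captured by the trace sets defined below.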
*} + +type_synonym ('\, 'o, '\) trace = "nat \ ('\ \ ('\ \ 'o) \ '\)" +type_synonym ('\, 'o) etrace = "nat \ ('\ \ 'o)" (* event traces *) +type_synonym '\ \trace = "nat \ '\" +type_synonym '\ in_trace = "nat \ '\" +type_synonym 'o out_trace = "nat \ 'o" +type_synonym ('\, 'o, '\) run = "('\ \ ('\ \ 'o) \ '\) list" +type_synonym ('\, 'o) erun = "('\ \ 'o) list" +type_synonym '\ \run = "'\ list" +type_synonym '\ in_run = "'\ list" +type_synonym 'o out_run = "'o list" + +definition rtraces ::"('\, 'o, '\) ndet_io_atm \ ('\, 'o, '\) trace set" +where "rtraces atm = { t. fst(t 0) \ init atm \ + (\ n. fst(t (Suc n)) = snd(snd(t n))) \ + (\ n. if is_enabled (step atm) (fst(t n)) + then t n \ {(s,io,s'). (snd io,s') + \ step atm (fst io) s} + else t n = (fst(t n),undefined,fst(t n)))}" + +lemma init_rtraces[elim!]: "t \ rtraces atm \ fst(t 0) \ init atm" +by(auto simp: rtraces_def) + +lemma post_is_pre_state[elim!]: "t \ rtraces atm \ fst(t (Suc n)) = snd(snd(t n))" +by(auto simp: rtraces_def) + + +lemma enabled_transition[elim!]: +"\t \ rtraces atm; is_enabled (step atm) (fst(t n)) \ + \ t n \ {(s,io,s'). (snd io,s') \ step atm (fst io) s}" +apply(simp add: rtraces_def split_def, safe) +apply (erule allE)+ +apply (auto simp add: split: if_split_asm) +done + +lemma nonenabled_transition[elim!]: "\t \ rtraces atm; \ is_enabled (step atm) (fst(t n)) \ + \ t n = (fst(t n),undefined,fst(t n))" +by(simp add: rtraces_def split_def) + + +text{* + The latter definition solves the problem of inherently finite traces, i.e. those that reach + a state in which they are no longer enabled. They are represented by stuttering steps on + the same state. +*} + +definition fin_rtraces :: "('\, 'o, '\) ndet_io_atm \ ('\, 'o, '\) trace set" +where "fin_rtraces atm = { t . t \ rtraces atm \ + (\ n. \ is_enabled (step atm) (fst(t n)))}" + +lemma fin_rtraces_are_rtraces : "fin_rtraces atm \ rtraces atm" +by(auto simp: rtraces_def fin_rtraces_def) + + +definition \traces ::"('\, 'o, '\) ndet_io_atm \ '\ \trace set" +where "\traces atm = {t . \ rt \ rtraces atm. t = fst o rt }" + +definition etraces ::"('\, 'o, '\) ndet_io_atm \ ('\, 'o) etrace set" +where "etraces atm = {t . \ rt \ rtraces atm. t = fst o snd o rt }" + +definition in_trace :: "('\, 'o) etrace \ '\ in_trace" +where "in_trace rt = fst o rt" + +definition out_trace :: "('\, 'o) etrace \ 'o out_trace" +where "out_trace rt = snd o rt" + +definition prefixes :: "(nat \ '\) set \ '\ list set" +where "prefixes ts = {l. \ t \ ts. \ (n::int). l = map (t o nat) [0..n]}" + +definition rprefixes :: "['\ \ ('o, '\) MON\<^sub>S\<^sub>B, + ('\, 'o, '\) trace set] \ ('\, 'o, '\) run set" +where "rprefixes rel ts = {l. \ t \ ts. \ n. (is_enabled rel (fst(t (nat n))) \ + l = map (t o nat) [0..n])}" +definition eprefixes :: "['\ \ ('o, '\) MON\<^sub>S\<^sub>B, + ('\, 'o, '\) trace set] \ ('\, 'o) erun set" +where "eprefixes rel ts = (map (fst o snd)) ` (rprefixes rel ts)" + +definition \prefixes :: "['\ \ ('o, '\) MON\<^sub>S\<^sub>B, + ('\, 'o, '\) trace set] \ '\ \run set" +where "\prefixes rel ts = (map fst) ` (rprefixes rel ts)" + + + +section{* Extensions: Automata with Explicit Final States *} + + +text{* + We model a few widely used variants of automata as record extensions. In particular, we + define automata with final states and internal (output) actions. 
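+  Since these variants are record extensions, an extended automaton still carries all
+  fields of its parent record, and the selectors init and step remain available on it.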
+*} + +record ('\, 'o, '\) det_io_atm' = "('\, 'o, '\) det_io_atm" + + final :: "'\ set" + +text{* + A natural well-formedness property to be required from this type of atm is as follows: whenever + an atm' is in a final state, the transition operation is undefined. +*} + +definition final_wff:: "('\, 'o, '\) det_io_atm' \ bool" +where "final_wff atm' = + (\\ \ final atm'. \\. \ \ dom (det_io_atm.step atm' \))" + +text{* + Another extension provides the concept of internal actions, which are considered as part of + the output alphabet here. If internal actions are also used for synchronization, further + extensions admitting internal input actions will be necessary, too, which we do not model here. +*} + +record ('\, 'o, '\) det_io_atm'' = "('\, 'o, '\) det_io_atm'" + + internal :: "'o set" + +text{* + A natural well-formedness property to be required from this type of atm is as follows: whenever + an atm' is in a final state, the transition operation is required to provide a state that is + again final and an output that is considered internal. +*} + +definition final_wff2:: "('\, 'o, '\) det_io_atm'' \ bool" +where "final_wff2 atm'' = (\\ \ final atm''. + \ \. \ \ dom (det_io_atm.step atm'' \) \ + (let (out, \') = the(det_io_atm.step atm'' \ \) + in out \ internal atm'' \ \' \ final atm''))" + +text{* + Of course, for this type of extended automata, it is also possible to impose the additional + requirement that the step function is total---undefined steps would then be represented as + steps leading to final states. + + The standard extensions on deterministic automata are also redefined for the non-deterministic + (specification) case. +*} + +record ('\, 'o, '\) ndet_io_atm' = "('\, 'o, '\) ndet_io_atm" + + final :: "'\ set" + +definition final_wff_ndet_io_atm2 :: "('\, 'o, '\) ndet_io_atm' \ bool" + where "final_wff_ndet_io_atm2 atm' = (\\ \ final atm'. \\. ndet_io_atm.step atm' \ \ = {})" + +record ('\, 'o, '\) ndet_io_atm'' = "('\, 'o, '\) ndet_io_atm'" + + internal :: "'o set" + +definition final_wff2_ndet_io_atm2 :: "('\, 'o, '\) ndet_io_atm'' \ bool" + where "final_wff2_ndet_io_atm2 atm'' = + (\\ \ final atm''. + \\. step atm'' \ \ \ {} \ step atm'' \ \ \ internal atm'' \ final atm'')" + +end diff --git a/src/test-gen/src/main/BackendUtils.thy b/src/test-gen/src/main/BackendUtils.thy new file mode 100644 index 0000000..6faee28 --- /dev/null +++ b/src/test-gen/src/main/BackendUtils.thy @@ -0,0 +1,93 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * TestGen.thy --- the core of the HOL-TestGen System. + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2010 ETH Zurich, Switzerland + * 2008-2013 Achim D. Brucker, Germany + * 2009-2013 Universite Paris-Sud, France + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. 
+ * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id:$ *) + +chapter {* Utilities for the various backends *} + +theory BackendUtils +imports TestEnv + +begin + +ML{* + +structure BackendUtils = +struct + +fun certify_pairs ctxt l = map (fn (Var(x,t),y) => ((x, t), Thm.cterm_of ctxt y)) l +fun uncertify_pairs l = map (fn (x,(a,t)) => (Thm.term_of x, Var (a,t))) l + +fun solve_by_simp_tac ctxt = SOLVED' (full_simp_tac ctxt) +fun solve_by_simp_or_auto_tac ctxt = + let + val pon = Config.get ctxt TestEnv.pon + val solved' = if (pon = 0) then SOLVED' else (fn x => x) + val full_simp = full_simp_tac ctxt + val clarsimp = clarsimp_tac ctxt + (* val metis = Metis_Tactic.metis_tac [] ATP_Proof_Reconstruct.metis_default_lam_trans ctxt (TestEnv.get_smt_facts ctxt) *) + (* We can use SMT from the standard library, no need for the patch here *) + val smt = SMT_Solver.smt_tac ctxt (TestEnv.get_smt_facts ctxt) + (* val use_metis = Config.get ctxt TestEnv.use_metis *) + val use_smt = Config.get ctxt TestEnv.use_smt + val tactic = if use_smt then ( + fn y => (SOLVE (full_simp y)) + ORELSE (SOLVE (clarsimp y)) + ORELSE (smt y) + ) else ( + fn y => (SOLVE (full_simp y)) + ORELSE (SOLVE (clarsimp y)) + ORELSE (auto_tac ctxt) + ) + in solved' tactic + end + +fun premvars n thm = let + val prem = Logic.nth_prem(n, Thm.prop_of thm) +in + map Var (Term.add_vars prem []) +end + +end + +*} + +end diff --git a/src/test-gen/src/main/EFSM_Toolkit.thy b/src/test-gen/src/main/EFSM_Toolkit.thy new file mode 100644 index 0000000..67205c8 --- /dev/null +++ b/src/test-gen/src/main/EFSM_Toolkit.thy @@ -0,0 +1,167 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * TestRefinements.thy --- for sequence testing. + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2007, ETH Zurich, Switzerland + * 2009 B. Wolff, Univ. Paris-Sud, France + * 2009 Achim D. Brucker, Germany + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id:$ *) + +chapter {* Test-Refinements (IOCO and Friends) *} + +theory EFSM_Toolkit imports Monads Automata +begin + + +definition impl :: "['\\'\\bool, '\ \ ('o,'\)MON\<^sub>S\<^sub>B] \ '\ \ ('o,'\)MON\<^sub>S\<^sub>E" +where "impl pre post \ = (\ \. if pre \ \ + then Some(SOME(out,\'). (out,\') \ post \ \) + else undefined)" + +definition strong_impl :: "['\\'\\bool, '\\('o,'\)MON\<^sub>S\<^sub>B] \ '\\('o, '\)MON\<^sub>S\<^sub>E" +where "strong_impl pre post \ = + (\ \. if pre \ \ + then Some(SOME(out,\'). (out,\') \ post \ \) + else None)" + +definition strong_impl' :: "['\\'\\bool, '\\('o,'\)MON\<^sub>S\<^sub>B] \ '\\('o, '\)MON\<^sub>S\<^sub>E" +where "strong_impl' pre post \ = + (\ \. if pre \ \ + then Some(SOME(out,\'). (out,\') \ post \ \) + else + (if post \ \ \ {} + then Some(SOME(out,\'). (out,\') \ post \ \) + else None))" + + +definition implementable :: "['\ \ '\ \ bool,'\ \ ('o,'\)MON\<^sub>S\<^sub>B] \ bool" +where "implementable pre post =(\ \ \. pre \ \ \(\ out \'. (out,\') \ post \ \ ))" + +definition is_strong_impl :: "['\ \ '\ \ bool, + '\ \ ('o,'\)MON\<^sub>S\<^sub>B, + '\ \ ('o, '\)MON\<^sub>S\<^sub>E] \ bool" +where "is_strong_impl pre post ioprog = + (\ \ \. (\pre \ \ \ ioprog \ \ = None) \ + (pre \ \ \ (\ x. ioprog \ \ = Some x)))" + +lemma is_strong_impl : + "is_strong_impl pre post (strong_impl pre post)" +by(simp add: is_strong_impl_def strong_impl_def) + + +text{* This following characterization of implementable +specifications has actually a quite complicated form due to the fact +that post expects its arguments in curried form - should +be improved \ldots *} +lemma implementable_charn: + "\implementable pre post; pre \ \ \ \ + (the(strong_impl pre post \ \)) \ post \ \" +apply(auto simp: implementable_def strong_impl_def) +apply(erule_tac x=\ in allE) +apply(erule_tac x=\ in allE) +apply(simp add: Eps_case_prod) +apply(rule someI_ex, auto) +done + + +locale efsm_det = + fixes pre :: "'\ \ '\ \ bool" + fixes post :: "'\ \ '\ \ ('o \ '\) set" + fixes efsm :: "'\ \ '\ \ ('o \ '\) option" + fixes in_ev:: "'\" + fixes out_ev:: "'\ \ 'o" + fixes upd :: "'\ \ '\" + fixes E :: "'\ \ bool" + assumes SPEC: "efsm \ (strong_impl pre post)" + assumes pre_red : "\ \. 
pre \ in_ev = E \" + assumes post_red : "\ \. pre \ in_ev \ (SOME(res,\'). (res,\') \ post in_ev \) = (out_ev \,upd \)" +begin + +lemma impl_def:"efsm in_ev = (\\. if E \ then Some(out_ev \, upd \) else None)" + by(rule ext, auto simp: SPEC strong_impl_def pre_red post_red) + +lemma exec_succes: + "(efsm in_ev \ = Some(b,\')) = (E \ \ (b=out_ev \) \ \'= upd \)" + by(auto simp: impl_def split:if_split_asm) + + +lemma exec_failure: + "(efsm in_ev \ = None) = (\ E \)" + by(auto simp: impl_def split:if_split_asm) + + + +lemma exec_mbindFSave_If[simp]: +"(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (in_ev # S) efsm; return (P s))) = + (if E \ + then ((upd \) \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S efsm; return (P (out_ev \ # s)))) + else (\ \ (return (P []))))" +by(auto simp: exec_mbindFSave impl_def) + +lemma exec_mbindFSave_E: +assumes A:"(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (in_ev # S) efsm; return (P s)))" + and B:"E \ \ ((upd \) \ (s\mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S efsm;return(P(out_ev \ # s)))) \ Q" + and C:"\ E \ \\ \ (return (P [])) \ Q" +shows "Q" +apply(insert A, subst (asm) exec_mbindFSave_If, case_tac "E \", simp_all only: if_True if_False) +by(auto elim: B C) + + + +lemma exec_mbindFStop[simp]: +"(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (in_ev#S) efsm; return (P s))) = + (E \ \ ((upd \) \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S efsm; return (P (out_ev \ # s)))))" +apply(rule iffI) +apply(erule Monads.exec_mbindFStop_E,simp add: exec_succes,auto) +apply(subst exec_bind_SE_success[OF exec_succes[THEN iffD2]], auto) +done + + +lemma exec_mbindFStop_E: +assumes A:"(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (in_ev # S) efsm; return (P s)))" + and B:"E \ \ ((upd \) \ (s\mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S efsm;return(P(out_ev \ # s)))) \ Q" +shows "Q" +by(insert A, rule B, simp_all del: mbind'_bind) + +(* +lemmas impl_1'' = exec_failure +lemmas impl_1' = exec_succes +*) + +end + +end diff --git a/src/test-gen/src/main/IOCO.thy b/src/test-gen/src/main/IOCO.thy new file mode 100644 index 0000000..cf2e5d0 --- /dev/null +++ b/src/test-gen/src/main/IOCO.thy @@ -0,0 +1,126 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * IOCO --- formalizing the IOCO theory + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2007 ETH Zurich, Switzerland + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id: IOCO.thy 8455 2009-04-08 07:58:38Z wolff $ *) + +chapter {* Basic Testing Setup *} + +theory IOCO imports Main begin + +section{* A Bit of IOCO Theory *} +text{* See Jan Tretmanns, Axel Belinfante: Automatic Testing with Formal + Methods. We follow more or less the notation here, but are more + detailed wrt. concepts such as ``initial states'' which are part of + the concept of a transition system. *} + + +text{* Transition systems and IO-Transition Systems *} + +record ('\, '\) TS = + init :: "'\ set" + trans :: "('\\'\\'\) set" + +type_synonym ('\,'o,'\) io_lts = "('\ + 'o,'\) TS" + + + +inductive_set mult :: "('\, '\) TS \ ('\ \ '\ list \ '\) set" +for TS :: "('\, '\) TS" +where refl: "(s,[],s) \ mult(TS)" + | step: "\ (s,a,s') \ (trans TS); (s',R,s'') \ mult(TS)\ \ (s,a#R,s'') \ mult(TS)" + +definition Straces :: "('\,'\) TS \ '\ list set" where + "Straces TS \ {l. \ \\<^sub>0 \(init TS). \ s'. (\\<^sub>0,l,s') \ mult(TS)}" +definition after :: "[('\,'\) TS, '\ list] \ '\ set" (infixl "after" 100) where + "TS after l \ {s' . \ \\<^sub>0 \(init TS). (\\<^sub>0,l,s') \ mult(TS)}" + (* again, we make the set of initial states explicit here *) + +definition out :: "[('\,'o ,'\) io_lts,'\ set] \ ('o ) set" where + "out TS ss \ {a. \ s \ ss. \ s'. (s,Inr a,s') \ (trans TS)}" + +definition ioco :: "[('\,'o ,'\)io_lts,('\,'o ,'\)io_lts] \ bool" (infixl "ioco" 200) where + "i ioco s \ (\ t \ Straces(s). out i (i after t) \ out s (s after t))" + + + +(* The following notation is based on a concrete ts. *) +consts ts :: "('\, '\) TS" +(* underspecified *) + + +syntax "_ts" :: "['\,'\,'\] \ bool" ("_ --<_>--> _" [0,0,60] 60) + +syntax (xsymbols) + "_tc" :: "['\,'\,'\] \ bool" ("_ --<_>\ _" [0,0,60] 60) + +syntax "_tsm" :: "['\,'\,'\] \ bool" ("_ ==<_>==> _" [0,0,60] 60) + +syntax (xsymbols) + "_tc," :: "['\,'\,'\] \ bool" ("_ =<_>\ _" [0,0,60] 60) + +translations "s ----> s'" == "(s,c,s') \ CONST ts" + +text{* Purpose: Prove under which conditions Mfold-Test is equivalent + to ioco, i.e. under which conditions do we actually test ioco. + I foresee two problems: + \begin{enumerate} + \item \textbf{Quiescense} IOCO theory assumes in the notion of + output elements "quit actions" $\delta$ which were treated + as "non-observable" conceptually. Actually, in our testing approach, + we will assume that termination of a program under test is + observable, so the test harness will always at least deliver + "None" (representing $\delta$). + \item \textbf{Deep Nondeterminism}. IOCO theory assumes the possibilty + of a branching of the LTS whose concequences can be observed + in terms of output actions much later; i.e. 
there are transitions + such as $(s,a,s') \isasymin (snd TS)$ and $(s,a,s'') \isasymin (snd TS)$ + with $s' \isasymnoteq s''$. + \item \textbf{IO Nondeterminism}. A system under test should always + accept one (possibly trivial) input and produce an output; there + should be no possibility for it to decide non-deterministically + to produce input or output. + \end{enumerate} + + \textbf{Conjecture}: Our Mfold-Testing corresponds exactly to IOCO testing + if the underlying transition systems are deterministic and + quiescence is observable. +*} + + +end diff --git a/src/test-gen/src/main/Interleaving.thy b/src/test-gen/src/main/Interleaving.thy new file mode 100644 index 0000000..f63c0ea --- /dev/null +++ b/src/test-gen/src/main/Interleaving.thy @@ -0,0 +1,244 @@ +theory Interleaving +imports Main +begin + +(* +fun interleave :: "'a list => 'a list => 'a list set" +where "interleave [] [] = {[]}" + |"interleave (A) [] = {A}" + |"interleave [] B = {B}" + |"interleave (a#A)(b#B) = (let S = interleave A (b#B); + S' = interleave (a#A) (B) + in (\ x. a # x) ` S \ (\ x. b # x) ` S')" +*) + +fun interleave :: "'a list => 'a list => 'a list set" +where "interleave [] [] = {[]}" + | "interleave (A) [] = {A}" + | "interleave [] B = {B}" + |"interleave (a # A) (b # B) = + (\x. a # x) ` interleave A (b # B) \ + (\x. b # x) ` interleave (a # A) B" + +find_theorems interleave + +value"interleave [1::int,2,3] [11,12]" + + +fun interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t :: "'a list => 'a list => 'a list list" +where "interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t [] [] = [[]]" + |"interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t (A) [] = [A]" + |"interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t [] B = [B]" + |"interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t (a#A)(b#B) = (let S = interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t A (b#B); + S' = interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t (a#A) (B) + in map (\ x. a # x) S @ map (\ x. b # x) S')" + + +value "interleave' [1::int,2,1,2] [2,1]" +value "interleave [1::int,2] [1]" + +(* Junk --- should be associativ application of interleave *) +fun interleave2 :: "'a list => 'a list => 'a list => 'a list set" +where "interleave2 [] [] [] = {[]}" + |"interleave2 (A) [] [] = {A}" + |"interleave2 [] B [] = {B}" + |"interleave2 [] [] C = {C}" + |"interleave2 (A)(B) [] = interleave A B" + |"interleave2 (A)[] (C) = interleave A C" + |"interleave2 [] B (C) = interleave B C" + |"interleave2 (a#A)(b#B) (c#C) = (let S = interleave2 A (b#B) (c#C) ; + S' = interleave2 (a#A) (B) (c#C) ; + S'' = interleave2 (a#A) (b#B) (C) + in (\ x. a # x) ` ( S) \ (\ x. b # x) ` S' \ (\ x. c # x) ` S'')" + +lemma replace : "interleave2 A B C = ((\x\interleave A B. interleave X C))" +oops + +(* +fun interleave1 :: "'a list => 'a list => 'a list => 'a list => 'a list set" +where "interleave1 [] [] [] [] = {[]}" + |"interleave1 (A) [] [] [] = {A}" + |"interleave1 [] B [] []= {B}" + |"interleave1 [] [] C []= {C}" + |"interleave1 [] [] [] D= {D}" + |"interleave1 (A)(B) (C) []= interleave2 A B C" + |"interleave1 (A)(B) [] D = interleave2 A B D" + |"interleave1 (A)[] (C) D= interleave2 A C D" + |"interleave1 [] B (C) D= interleave2 B C D" + |"interleave1 (a#A)(b#B) (c#C) (d#D) = (let S = interleave1 A (b#B) (c#C) (d#D); + S' = interleave1 (a#A) (B) (c#C) (d#D); + S'' = interleave1 (a#A) (b#B) (C) (d#D); + S'''= interleave1 (a#A) (b#B) (c#C) (D) + in (\ x. a # x) ` S \ (\ x. b # x) ` S' \ (\ x. c # x) ` S'' \ (\ x. 
d # x) ` S''')" + +value "int(card(interleave1 [] [] [2::int,1,2,1] [1,2]))" +*) +fun bus_interleave :: " ('a::order) list \ 'a list \ 'a list set" +where "bus_interleave [] [] = {[]}" + |"bus_interleave (A) [] = {A}" + |"bus_interleave [] B = {B}" + |"bus_interleave (a#A)(b#B) = + (if a \ b then (\ x. a # x) ` bus_interleave A (b#B) + else if b \ a then (\ x. b # x) ` bus_interleave (a#A) (B) + else (let S = bus_interleave A (b#B); + S' = bus_interleave (a#A) (B) + in (\ x. a # x) ` S \ (\ x. b # x) ` S'))" + +fun sync_interleave :: "('a \ bool) \ ('a \ 'a \ bool) \ ('a \ bool) \'a list \ 'a list \ 'a list set" +where "sync_interleave L S R [] [] = {[]}" + |"sync_interleave L S R (A) [] = {A}" + |"sync_interleave L S R [] B = {B}" + |"sync_interleave L S R (a#A)(b#B) = + (if L a \ \ L b then sync_interleave L S R (A) (b#B) + else if \ L a \ L b then sync_interleave L S R (a#A) (B) + else + if L a \ L b \ S a b then (\ x. a # x) ` sync_interleave L S R (A) (B) + else (let IL = sync_interleave L S R A (b#B); + IL' = sync_interleave L S R (a#A) (B) + in (\ x. a # x) ` IL \ (\ x. b # x) ` IL'))" + +fun sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t :: "'a \'a list \ 'a list \ 'a list list" +where "sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n [] [] = [[]]" + |"sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n (A) [] = [A]" + |"sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n [] B = [B]" + |"sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n (a#A)(b#B) = + (if a = n \ b \ n then sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n (A) (b#B) + else if a \ n \ b = n then sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n (a#A) (B) + else + if a = n \ b= n then map (\ x. n # x) (sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n (A) (B) ) + else (let S = sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n A (b#B); + S' = sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n (a#A) (B) + in map (\ x. a # x) S @ map (\ x. b # x) S'))" + +value "((sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t 2 [1::int,3,2 ] [1,2]))" + + + +value "sync_interleave' 2 [1::int,3,2 ] [1,2]" + +fun sync_interleave1 :: "'a list \'a list \ 'a list \ 'a list set" +where + "sync_interleave1 [] [] [] = {[]}" + |"sync_interleave1 n [] [] = {[]}" + |"sync_interleave1 [] A [] = {A}" + |"sync_interleave1 [] [] B = {B}" + |"sync_interleave1 n (A) [] = {A}" + |"sync_interleave1 n [] B = {B}" + |"sync_interleave1 [] A B = interleave A B" + |"sync_interleave1 (n#N) (a#A)(b#B) = + (if a = n \ b \ n then sync_interleave1 (n#N) (A) (b#B) + else if a \ n \ b = n then sync_interleave1 (n#N) (a#A) (B) + else + if a = n \ b= n then (\ x. n # x) ` sync_interleave1 (n#N) (A) (B) + else + (let S = sync_interleave1 (n#N) A (b#B); + S' = sync_interleave1 (n#N) (a#A) (B) + in (\ x. a # x) ` S \ (\ x. 
b # x) ` S'))" + +value "((sync_interleave1 [1,3] [1::int,3,2 ] [1,2]))" +value "((sync_interleave1 [3,1] [1::int,3,2 ] [1,2]))" +value "(sync_interleave1 [3,1] [1::int,2 ] [1,3,2])" + +find_theorems " interleave" +value "int (card (interleave [1::int,2,3,4 ] [8,9,10,11]))" + +term" \ X" + + +lemma "x \ set A \ x \ \(set ` interleave A B)" +proof (induct A B rule: interleave.induct) + case 1 + then show ?case by simp +next + case (2 a A) + then show ?case by simp +next + case (3 b B) + then show ?case by simp +next + case (4 a A b B) + from 4 have "x \ \(set ` interleave (a # A) B)" by simp + then show ?case by auto +qed + + +lemma commute1[simp]: "interleave A [] = interleave [] A" by(case_tac A, simp_all) + + +lemma interleave_commute'[rule_format]: "\B. interleave A B = interleave B A" +proof(induct A) + show "\B. interleave [] B = interleave B []" by(simp) +next + show "\a A. \B. interleave A B = interleave B A \ \B. interleave (a # A) B = interleave B (a # A)" + apply(rule allI,induct_tac B, simp_all) + apply(subst Set.Un_commute) + apply (elim allE) + apply (erule ssubst)+ + apply (simp only: triv_forall_equality) + done +qed + + +lemma interleave1 [simp] : "(x \ interleave [] A) = (x = A)" +by(induct A, simp_all only: interleave.simps, simp_all) + + +lemma interleave2 [simp] : "(x \ interleave A []) = (x = A)" +by(induct A, simp_all only: interleave.simps, simp_all) + + + +lemma length_interleave_lemma: + "\n < k . length A + length B \ n + \ (x::'a list) \ (interleave A B) + \ length x = (length A + length B)" +proof(induct k arbitrary: A B x) + case 0 fix A::"'a list" and B::"'a list" and x::"'a list" + have * : "\A B. length A + length B \ 0 \ length A = 0" by auto + have ** : "\A B. length A + length B \ 0 \ length B = 0" by auto + show "\n<0. length A + length B \ n + \ x \ interleave A B + \ length x = length A + length B" + by auto + case (Suc k) fix A::"'a list" and B::"'a list" and x::"'a list" + assume H: "\(A::'a list) B x. \n n + \ x \ interleave A B + \ length x = length A + length B" + have H': "\(A::'a list) B x n. x \ interleave A B \ n length A + length B \ n + \ length x = length A + length B" + by(erule H[THEN spec, THEN mp, THEN mp, THEN mp], auto) + + show "\n n + \ x \ interleave A B + \ length x = length A + length B" + apply(case_tac "A", simp_all) + apply(case_tac "B", simp_all) + apply(auto) + apply(case_tac "n", simp_all) + apply(case_tac "list",auto) + apply(rule trans, erule H', auto) + apply(rule trans, erule H', auto) + apply(case_tac "n", simp_all) + apply(case_tac "list",auto) + apply(rule trans, erule H', auto) + apply(rule trans, erule H', auto) + done +qed + + +lemma length_interleave: "x \ (interleave A B) \ length x = (length A + length B)" +apply(rule length_interleave_lemma[rule_format, of "length A + length B" "length A + length B + 1"]) +by auto + + +lemma cardinality: "card(interleave A B) = ( length A * length B + 1)" (* really ? *) +oops + +lemma causality : + "\ k i. i < k \ (\ C \ (interleave A B). (\ l m. l < m \ A!i = C!l \ A!k = C!m))" (* really ? *) +oops + + + +end + diff --git a/src/test-gen/src/main/Monads.thy b/src/test-gen/src/main/Monads.thy new file mode 100644 index 0000000..b03d619 --- /dev/null +++ b/src/test-gen/src/main/Monads.thy @@ -0,0 +1,1256 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * Monads.thy --- a base testing theory for sequential computations. 
+ * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2007 ETH Zurich, Switzerland + * 2009-2017 Univ. Paris-Sud, France + * 2009-2012 Achim D. Brucker, Germany + * 2015-2017 University Sheffield, UK + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id:$ *) + +chapter {* Basic Monad Theory for Sequential Computations *} + +theory Monads imports Main +begin + +section{* A Generic (Approximative) Notion of Monad *} + +text {* + A (fake) monad is a structure with a return and a bind satisfying three laws. + Since we cannot use generic type constructors in HOL, + we formulate monads using only one type 'a and its monadic representation 'am. +*} + +locale Monad = +fixes returm :: "'a \ 'am" + and bind :: "'am \ ('a \ 'am) \ 'am" (infixr ">>=" 70) +assumes left_unit : "(returm x) >>= f = f x" + and right_unit: "m >>= returm = m" + and assoc : "(m >>= f) >>= g = m >>= (\ x . f x >>= g)" +begin + +definition pipe :: "'am \ 'am \ 'am" +where "pipe f g \ bind f (\ _. g)" + +lemma pipe_left_unit : "pipe (returm x) f = f" +by(simp add: left_unit pipe_def) + + +lemma pipe_right_unit : "pipe f (returm x) = f" +oops (* Not possible to formulate in this Monad Framework generically ! ! ! + In the concrete SE-Monad, we have the (interesting) special case: + "(m;- (return ())) = m" *) + +lemma pipe_assoc : "pipe f (pipe g h) = pipe (pipe f g) h" +by(simp add: assoc pipe_def) + +primrec Mrun :: "('b \ 'a \ 'am) \ 'b list \ 'a \ 'am" +where "Mrun m [] = returm" + | "Mrun m (b#bs) = (\ a . Mrun m bs a >>= m b)" + +end + +section{*General Framework for Monad-based Sequence-Test *} +text{* As such, Higher-order Logic as a purely functional specification + formalism has no built-in mechanism for state and state-transitions. 
+ Forms of testing involving state require therefore explicit mechanisms + for their treatment inside the logic; a well-known technique to model + states inside purely functional languages are \emph{monads} made popular + by Wadler and Moggi and extensively used in Haskell. \HOL is powerful + enough to represent the most important \emph{instances} of standard monads; + however, it is not possible to represent monads as such due to well-known + limitations of the Hindley-Milner type-system (no type-constructor-classes). *} + +text{* Here is a variant for state-exception monads, that models precisely + transition functions with preconditions. Next, we declare the + state-backtrack-monad. In all of them, our concept of i/o stepping + functions can be formulated; these are functions mapping input + to a given monad. Later on, we will build the usual concepts of: + \begin{enumerate} + \item deterministic i/o automata, + \item non-deterministic i/o automata, and + \item labelled transition systems (LTS) + \end{enumerate} +*} + +subsection{* Definition : Standard State Exception Monads *} + +subsubsection{* Definition : Core Types and Operators *} + +type_synonym ('o, '\) MON\<^sub>S\<^sub>E = "'\ \ ('o \ '\)" (* = '\ \ ('o \ '\)option *) + +definition bind_SE :: "('o,'\)MON\<^sub>S\<^sub>E \ ('o \ ('o','\)MON\<^sub>S\<^sub>E) \ ('o','\)MON\<^sub>S\<^sub>E" +where "bind_SE f g = (\\. case f \ of None \ None + | Some (out, \') \ g out \')" + +notation bind_SE ("bind\<^sub>S\<^sub>E") + +syntax (xsymbols) + "_bind_SE" :: "[pttrn,('o,'\)MON\<^sub>S\<^sub>E,('o','\)MON\<^sub>S\<^sub>E] \ ('o','\)MON\<^sub>S\<^sub>E" + ("(2 _ \ _; _)" [5,8,8]8) +translations + "x \ f; g" == "CONST bind_SE f (% x . g)" + +definition unit_SE :: "'o \ ('o, '\)MON\<^sub>S\<^sub>E" ("(return _)" 8) +where "unit_SE e = (\\. Some(e,\))" +notation unit_SE ("unit\<^sub>S\<^sub>E") + +text{* In the following, we prove the required Monad-laws *} + +lemma bind_right_unit[simp]: "(x \ m; return x) = m" + apply (simp add: unit_SE_def bind_SE_def) + apply (rule ext) + apply (case_tac "m \", simp_all) + done + +lemma bind_left_unit [simp]: "(x \ return c; P x) = P c" + by (simp add: unit_SE_def bind_SE_def) + +lemma bind_assoc[simp]: "(y \ (x \ m; k x); h y) = (x \ m; (y \ k x; h y))" + apply (simp add: unit_SE_def bind_SE_def, rule ext) + apply (case_tac "m \", simp_all) + apply (case_tac "a", simp_all) + done + +interpretation SE : Monad unit_SE bind_SE + by unfold_locales (simp_all) + + +subsubsection{* Definition : More Operators and their Properties *} + +definition fail_SE :: "('o, '\)MON\<^sub>S\<^sub>E" +where "fail_SE = (\\. None)" +notation fail_SE ("fail\<^sub>S\<^sub>E") + +definition assert_SE :: "('\ \ bool) \ (bool, '\)MON\<^sub>S\<^sub>E" +where "assert_SE P = (\\. if P \ then Some(True,\) else None)" +notation assert_SE ("assert\<^sub>S\<^sub>E") + +definition assume_SE :: "('\ \ bool) \ (unit, '\)MON\<^sub>S\<^sub>E" +where "assume_SE P = (\\. if \\ . P \ then Some((), SOME \ . P \) else None)" +notation assume_SE ("assume\<^sub>S\<^sub>E") + + +lemma bind_left_fail_SE[simp] : "(x \ fail\<^sub>S\<^sub>E; P x) = fail\<^sub>S\<^sub>E" + by (simp add: fail_SE_def bind_SE_def) + + +text{* We also provide a "Pipe-free" - variant of the bind operator. +Just a "standard" programming sequential operator without output frills. *} +(* TODO: Eliminate/Modify this. Is a consequence of the Monad-Instantiation. 
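 *)

(* For intuition, a small Haskell analogue of the state-exception monad above,
   using Haskell's Maybe for the option type.  The names MonSE, failSE and
   assertSE are illustrative only and not part of the HOL-TestGen sources.

   import Control.Monad (ap, liftM)

   -- A computation maps a state to Nothing (failure) or to a result and a
   -- successor state, corresponding to the type MON_SE above.
   newtype MonSE s a = MonSE { runSE :: s -> Maybe (a, s) }

   instance Functor (MonSE s) where fmap = liftM

   instance Applicative (MonSE s) where
     pure x = MonSE (\st -> Just (x, st))
     (<*>)  = ap

   instance Monad (MonSE s) where
     -- bind_SE: run the first step; on failure propagate Nothing, otherwise
     -- feed the result and the successor state into the continuation.
     MonSE f >>= g = MonSE (\st -> case f st of
                                     Nothing       -> Nothing
                                     Just (a, st') -> runSE (g a) st')

   failSE :: MonSE s a                              -- cf. fail_SE
   failSE = MonSE (const Nothing)

   assertSE :: (s -> Bool) -> MonSE s Bool          -- cf. assert_SE
   assertSE p = MonSE (\st -> if p st then Just (True, st) else Nothing)
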
*) + + +definition bind_SE' :: "[('\, '\)MON\<^sub>S\<^sub>E, ('\, '\)MON\<^sub>S\<^sub>E] \ ('\, '\)MON\<^sub>S\<^sub>E" (infixr ";-" 60) +where "f ;- g = (_ \ f ; g)" + +lemma bind_assoc'[simp]: "((m;- k);- h ) = (m;- (k;- h))" +by(simp add:bind_SE'_def) + + +lemma bind_left_unit' [simp]: "((return c);- P) = P" + by (simp add: bind_SE'_def) + + +lemma bind_left_fail_SE'[simp]: "(fail\<^sub>S\<^sub>E;- P) = fail\<^sub>S\<^sub>E" + by (simp add: bind_SE'_def) + +lemma bind_right_unit'[simp]: "(m;- (return ())) = m" + by (simp add: bind_SE'_def) + +text{* The bind-operator in the state-exception monad yields already + a semantics for the concept of an input sequence on the meta-level: *} +lemma syntax_test: "(o1 \ f1 ; o2 \ f2; return (post o1 o2)) = X" +oops + +definition if_SE :: "['\ \ bool, ('\, '\)MON\<^sub>S\<^sub>E, ('\, '\)MON\<^sub>S\<^sub>E] \ ('\, '\)MON\<^sub>S\<^sub>E" +where "if_SE c E F = (\\. if c \ then E \ else F \)" + +syntax (xsymbols) + "_if_SE" :: "['\ \ bool,('o,'\)MON\<^sub>S\<^sub>E,('o','\)MON\<^sub>S\<^sub>E] \ ('o','\)MON\<^sub>S\<^sub>E" + ("(if\<^sub>S\<^sub>E _ then _ else _fi)" [5,8,8]8) +translations + "(if\<^sub>S\<^sub>E cond then T1 else T2 fi)" == "CONST if_SE cond T1 T2" + + +subsubsection{* Theory of a Monadic While *} + +text{* Prerequisites *} +fun replicator :: "[('a, '\)MON\<^sub>S\<^sub>E, nat] \ (unit, '\)MON\<^sub>S\<^sub>E" (infixr "^^^" 60) +where "f ^^^ 0 = (return ())" + | "f ^^^ (Suc n) = (f ;- f ^^^ n)" + + +fun replicator2 :: "[('a, '\)MON\<^sub>S\<^sub>E, nat, ('b, '\)MON\<^sub>S\<^sub>E] \ ('b, '\)MON\<^sub>S\<^sub>E" (infixr "^:^" 60) +where "(f ^:^ 0) M = (M )" + | "(f ^:^ (Suc n)) M = (f ;- ((f ^:^ n) M))" + + +text{* First Step : Establishing an embedding between partial functions and relations *} +(* plongement *) +definition Mon2Rel :: "(unit, '\)MON\<^sub>S\<^sub>E \ ('\ \ '\) set" +where "Mon2Rel f = {(x, y). (f x = Some((), y))}" +(* ressortir *) +definition Rel2Mon :: " ('\ \ '\) set \ (unit, '\)MON\<^sub>S\<^sub>E " +where "Rel2Mon S = (\ \. if \\'. (\, \') \ S then Some((), SOME \'. (\, \') \ S) else None)" + +lemma Mon2Rel_Rel2Mon_id: assumes det:"single_valued R" shows "(Mon2Rel \ Rel2Mon) R = R" +apply (simp add: comp_def Mon2Rel_def Rel2Mon_def,auto) +apply (case_tac "\\'. (a, \') \ R", auto) +apply (subst (2) some_eq_ex) +using det[simplified single_valued_def] by auto + + +lemma Rel2Mon_Id: "(Rel2Mon \ Mon2Rel) x = x" +apply (rule ext) +apply (auto simp: comp_def Mon2Rel_def Rel2Mon_def) +apply (erule contrapos_pp, drule HOL.not_sym, simp) +done + +lemma single_valued_Mon2Rel: "single_valued (Mon2Rel B)" +by (auto simp: single_valued_def Mon2Rel_def) + +text{* Second Step : Proving an induction principle allowing to establish that lfp remains + deterministic *} + + +(* A little complete partial order theory due to Tobias Nipkow *) +definition chain :: "(nat \ 'a set) \ bool" +where "chain S = (\i. S i \ S(Suc i))" + +lemma chain_total: "chain S ==> S i \ S j \ S j \ S i" +by (metis chain_def le_cases lift_Suc_mono_le) + +definition cont :: "('a set => 'b set) => bool" +where "cont f = (\S. chain S \ f(UN n. S n) = (UN n. f(S n)))" + +lemma mono_if_cont: fixes f :: "'a set \ 'b set" + assumes "cont f" shows "mono f" +proof + fix a b :: "'a set" assume "a \ b" + let ?S = "\n::nat. if n=0 then a else b" + have "chain ?S" using `a \ b` by(auto simp: chain_def) + hence "f(UN n. ?S n) = (UN n. f(?S n))" + using assms by(simp add: cont_def) + moreover have "(UN n. 
?S n) = b" using `a \ b` by (auto split: if_splits) + moreover have "(UN n. f(?S n)) = f a \ f b" by (auto split: if_splits) + ultimately show "f a \ f b" by (metis Un_upper1) +qed + +lemma chain_iterates: fixes f :: "'a set \ 'a set" + assumes "mono f" shows "chain(\n. (f^^n) {})" +proof- + { fix n have "(f ^^ n) {} \ (f ^^ Suc n) {}" using assms + by(induction n) (auto simp: mono_def) } + thus ?thesis by(auto simp: chain_def) +qed + +theorem lfp_if_cont: + assumes "cont f" shows "lfp f = (\n. (f ^^ n) {})" (is "_ = ?U") +proof + show "lfp f \ ?U" + proof (rule lfp_lowerbound) + have "f ?U = (UN n. (f^^Suc n){})" + using chain_iterates[OF mono_if_cont[OF assms]] assms + by(simp add: cont_def) + also have "\ = (f^^0){} \ \" by simp + also have "\ = ?U" + apply(auto simp del: funpow.simps) + by (metis empty_iff funpow_0 old.nat.exhaust) + finally show "f ?U \ ?U" by simp + qed +next + { fix n p assume "f p \ p" + have "(f^^n){} \ p" + proof(induction n) + case 0 show ?case by simp + next + case Suc + from monoD[OF mono_if_cont[OF assms] Suc] `f p \ p` + show ?case by simp + qed + } + thus "?U \ lfp f" by(auto simp: lfp_def) +qed + + +lemma single_valued_UN_chain: + assumes "chain S" "(!!n. single_valued (S n))" + shows "single_valued(UN n. S n)" +proof(auto simp: single_valued_def) + fix m n x y z assume "(x, y) \ S m" "(x, z) \ S n" + with chain_total[OF assms(1), of m n] assms(2) + show "y = z" by (auto simp: single_valued_def) +qed + +lemma single_valued_lfp: +fixes f :: "('a \ 'a) set \ ('a \ 'a) set" +assumes "cont f" "\r. single_valued r \ single_valued (f r)" +shows "single_valued(lfp f)" +unfolding lfp_if_cont[OF assms(1)] +proof(rule single_valued_UN_chain[OF chain_iterates[OF mono_if_cont[OF assms(1)]]]) + fix n show "single_valued ((f ^^ n) {})" + by(induction n)(auto simp: assms(2)) +qed + + +text{* Third Step: Definition of the Monadic While *} +definition \ :: "['\ \ bool,('\ \ '\) set] \ (('\ \ '\) set \ ('\ \ '\) set)" +where "\ b cd = (\cw. {(s,t). if b s then (s, t) \ cd O cw else s = t})" + + +definition while_SE :: "['\ \ bool, (unit, '\)MON\<^sub>S\<^sub>E] \ (unit, '\)MON\<^sub>S\<^sub>E" +where "while_SE c B \ (Rel2Mon(lfp(\ c (Mon2Rel B))))" + +syntax (xsymbols) + "_while_SE" :: "['\ \ bool, (unit, '\)MON\<^sub>S\<^sub>E] \ (unit, '\)MON\<^sub>S\<^sub>E" + ("(while\<^sub>S\<^sub>E _ do _ od)" [8,8]8) +translations + "while\<^sub>S\<^sub>E c do b od" == "CONST while_SE c b" + +lemma cont_\: "cont (\ c b)" +by (auto simp: cont_def \_def) + +text{* The fixpoint theory now allows us to establish that the lfp constructed over + @{term Mon2Rel} remains deterministic *} + +theorem single_valued_lfp_Mon2Rel: "single_valued (lfp(\ c (Mon2Rel B)))" +apply(rule single_valued_lfp, simp_all add: cont_\) +apply(auto simp: \_def single_valued_def) +apply(metis single_valued_Mon2Rel[of "B"] single_valued_def) +done + + +lemma Rel2Mon_if: + "Rel2Mon {(s, t). if b s then (s, t) \ Mon2Rel c O lfp (\ b (Mon2Rel c)) else s = t} \ = + (if b \ then Rel2Mon (Mon2Rel c O lfp (\ b (Mon2Rel c))) \ else Some ((), \))" +by (simp add: Rel2Mon_def) + +lemma Rel2Mon_homomorphism: + assumes determ_X: "single_valued X" and determ_Y: "single_valued Y" + shows "Rel2Mon (X O Y) = (Rel2Mon X) ;- (Rel2Mon Y)" +proof - + have relational_partial_next_in_O: "\x E F. (\y. (x, y) \ (E O F)) \ (\y. (x, y) \ E)" + by (auto) + have some_eq_intro: "\ X x y . single_valued X \ (x, y) \ X \ (SOME y. 
(x, y) \ X) = y" + by (auto simp: single_valued_def) + + show ?thesis +apply (simp add: Rel2Mon_def bind_SE'_def bind_SE_def) +apply (rule ext, rename_tac "\") +apply (case_tac " \ \'. (\, \') \ X O Y") +apply (simp only: HOL.if_True) +apply (frule relational_partial_next_in_O) +apply (auto simp: single_valued_relcomp some_eq_intro determ_X determ_Y relcomp.relcompI) +by blast +qed + + + +text{* Putting everything together, the theory of embedding and the invariance of + determinism of the while-body, gives us the usual unfold-theorem: *} +theorem while_SE_unfold: +"(while\<^sub>S\<^sub>E b do c od) = (if\<^sub>S\<^sub>E b then (c ;- (while\<^sub>S\<^sub>E b do c od)) else return () fi)" +apply (simp add: if_SE_def bind_SE'_def while_SE_def unit_SE_def) +apply (subst lfp_unfold [OF mono_if_cont, OF cont_\]) +apply (rule ext) +apply (subst \_def) +apply (auto simp: Rel2Mon_if Rel2Mon_homomorphism bind_SE'_def Rel2Mon_Id [simplified comp_def] + single_valued_Mon2Rel single_valued_lfp_Mon2Rel ) +done + + +subsection{* Chaining Executions : Definitions of Multi-bind Operators *} + +text{* In order to express execution sequences inside \HOL --- rather +than arguing over a certain pattern of terms on the meta-level --- and +in order to make our theory amenable to formal reasoning over execution sequences, +we represent them as lists of input and generalize the bind-operator +of the state-exception monad accordingly. The approach is straightforward, +but comes with a price: we have to encapsulate all input and output data +into one type, and restrict ourselves to a uniform step function. +Assume that we have a typed interface to a module with +the operations $op_1$, $op_2$, \ldots, $op_n$ with the inputs +$\iota_1$, $\iota_2$, \ldots, $\iota_n$ (outputs are treated analogously). +Then we can encode for this interface the general input - type: +\begin{displaymath} +\texttt{datatype}\ \texttt{in}\ =\ op_1\ ::\ \iota_1\ |\ ...\ |\ \iota_n +\end{displaymath} +Obviously, we loose some type-safety in this approach; we have to express +that in traces only \emph{corresponding} input and output belonging to the +same operation will occur; this form of side-conditions have to be expressed +inside \HOL. From the user perspective, this will not make much difference, +since junk-data resulting from too weak typing can be ruled out by adopted +front-ends. +*} + +text{* Note that the subsequent notion of a test-sequence allows the io stepping +function (and the special case of a program under test) to stop execution +\emph{within} the sequence; such premature terminations are characterized by an +output list which is shorter than the input list. + +Intuitively, @{text mbind} corresponds to a sequence of operation calls, separated by +";", in Java. The operation calls may fail (raising an exception), which means that +the state is maintained and the exception can still be caught at the end of the +execution sequence. 
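To make this concrete before the formal definition, the following Haskell sketch
(an illustration only, with Maybe playing the role of the option type) shows the
intended behaviour of the fail-safe sequencing operator and of the fail-strict
counterpart introduced afterwards:

\begin{verbatim}
-- Fail-safe sequencing: a failing step ends the run early, but the run as a
-- whole still succeeds with the outputs collected so far.
mbindFailSave :: [i] -> (i -> s -> Maybe (o, s)) -> s -> Maybe ([o], s)
mbindFailSave []     _      s = Just ([], s)
mbindFailSave (a:as) iostep s =
  case iostep a s of
    Nothing        -> Just ([], s)
    Just (out, s') -> case mbindFailSave as iostep s' of
                        Nothing          -> Just ([out], s')
                        Just (outs, s'') -> Just (out : outs, s'')

-- Fail-strict sequencing: the first failing step aborts the whole run.
mbindFailStop :: [i] -> (i -> s -> Maybe (o, s)) -> s -> Maybe ([o], s)
mbindFailStop []     _      s = Just ([], s)
mbindFailStop (a:as) iostep s = do
  (out,  s')  <- iostep a s
  (outs, s'') <- mbindFailStop as iostep s'
  return (out : outs, s'')
\end{verbatim}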
+
+*}
+
+fun mbind :: "'\ list \ ('\ \ ('o,'\) MON\<^sub>S\<^sub>E) \ ('o list,'\) MON\<^sub>S\<^sub>E"
+where "mbind [] iostep \ = Some([], \)"
+    | "mbind (a#S) iostep \ =
+                (case iostep a \ of
+                     None \ Some([], \)
+                   | Some (out, \') \ (case mbind S iostep \' of
+                                          None \ Some([out],\')
+                                        | Some(outs,\'') \ Some(out#outs,\'')))"
+
+notation mbind ("mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e") (* future name: mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e *)
+
+text{* This definition is fail-safe; in case of an exception, the current state is maintained and
+       the computation as a whole is marked as a success.
+       Compare to the fail-strict variant @{text "mbind'"}: *}
+
+lemma mbind_unit [simp]:
+     "mbind [] f = (return [])"
+      by(rule ext, simp add: unit_SE_def)
+
+text{* The characteristic property of @{term mbind} --- which distinguishes it from
+       @{text mbind'} defined in the sequel --- is that it never fails; it ``swallows'' internal
+       errors occurring during the computation. *}
+lemma mbind_nofailure [simp]:
+     "mbind S f \ \ None"
+      apply(rule_tac x=\ in spec)
+      apply(induct S, auto simp:unit_SE_def)
+      apply(case_tac "f a x", auto)
+      apply(erule_tac x="b" in allE)
+      apply(erule exE, erule exE, simp)
+      done
+
+text{* In contrast, we define a fail-strict sequential execution operator.
+It fails globally whenever one of its operation
+steps fails.
+
+Intuitively speaking, @{text mbind'} corresponds to an execution of operations
+where a failure results in a system halt. Another interpretation of @{text mbind'} is to
+view it as a kind of @{term foldl} over lists via @{term bind\<^sub>S\<^sub>E}.*}
+
+fun mbind' :: "'\ list \ ('\ \ ('o,'\) MON\<^sub>S\<^sub>E) \ ('o list,'\) MON\<^sub>S\<^sub>E"
+where "mbind' [] iostep \ = Some([], \)" |
+      "mbind' (a#S) iostep \ =
+                (case iostep a \ of
+                     None \ None
+                   | Some (out, \') \ (case mbind' S iostep \' of
+                                          None \ None   (* fail-strict *)
+                                        | Some(outs,\'') \ Some(out#outs,\'')))"
+notation mbind' ("mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p") (* future name: mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p *)
+
+lemma mbind'_unit [simp]:
+     "mbind' [] f = (return [])"
+      by(rule ext, simp add: unit_SE_def)
+
+lemma mbind'_bind [simp]:
+     "(x \ mbind' (a#S) F; M x) = (a \ (F a); (x \ mbind' S F; M (a # x)))"
+      by(rule ext, rename_tac "z",simp add: bind_SE_def split: option.split)
+
+declare mbind'.simps[simp del] (* use only more abstract definitions *)
+
+text{* The next sequential execution operator in the @{text mbind} family is called
+Fail-Purge. It never fails; it simply ``stutters'' over
+operation steps that fail, which is yet another modeling alternative. 
*} + +fun mbind'' :: "'\ list \ ('\ \ ('o,'\) MON\<^sub>S\<^sub>E) \ ('o list,'\) MON\<^sub>S\<^sub>E" +where "mbind'' [] iostep \ = Some([], \)" | + "mbind'' (a#S) iostep \ = + (case iostep a \ of + None \ mbind'' S iostep \ + | Some (out, \') \ (case mbind'' S iostep \' of + None \ None (* does not occur *) + | Some(outs,\'') \ Some(out#outs,\'')))" + +notation mbind'' ("mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e") (* future name: mbind\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e\<^sub>F\<^sub>a\<^sub>i\<^sub>l *) +declare mbind''.simps[simp del] (* use only more abstract definitions *) + + +text{* mbind' as failure strict operator can be seen as a foldr on bind - + if the types would match \ldots *} + +subsubsection{* Definition : Miscellaneous Operators and their Properties *} + +definition try_SE :: "('o,'\) MON\<^sub>S\<^sub>E \ ('o option,'\) MON\<^sub>S\<^sub>E" ("try\<^sub>S\<^sub>E") +where "try\<^sub>S\<^sub>E ioprog = (\\. case ioprog \ of + None \ Some(None, \) + | Some(outs, \') \ Some(Some outs, \'))" +text{* In contrast, mbind as a failure safe operator can roughly be seen + as a foldr on bind - try: + m1 ; try m2 ; try m3; ... Note, that the rough equivalence only holds for + certain predicates in the sequence - length equivalence modulo None, + for example. However, if a conditional is added, the equivalence + can be made precise: *} + +lemma mbind_try: + "(x \ mbind (a#S) F; M x) = + (a' \ try\<^sub>S\<^sub>E(F a); + if a' = None + then (M []) + else (x \ mbind S F; M (the a' # x)))" +apply(rule ext) +apply(simp add: bind_SE_def try_SE_def) +apply(case_tac "F a x", auto) +apply(simp add: bind_SE_def try_SE_def) +apply(case_tac "mbind S F b", auto) +done + +text{* On this basis, a symbolic evaluation scheme can be established + that reduces mbind-code to try\_SE\_code and ite-cascades. *} + +definition alt_SE :: "[('o, '\)MON\<^sub>S\<^sub>E, ('o, '\)MON\<^sub>S\<^sub>E] \ ('o, '\)MON\<^sub>S\<^sub>E" (infixl "\\<^sub>S\<^sub>E" 10) +where "(f \\<^sub>S\<^sub>E g) = (\ \. case f \ of None \ g \ + | Some H \ Some H)" + +definition malt_SE :: "('o, '\)MON\<^sub>S\<^sub>E list \ ('o, '\)MON\<^sub>S\<^sub>E" +where "malt_SE S = foldr alt_SE S fail\<^sub>S\<^sub>E" +notation malt_SE ("\\<^sub>S\<^sub>E") + +lemma malt_SE_mt [simp]: "\\<^sub>S\<^sub>E [] = fail\<^sub>S\<^sub>E" +by(simp add: malt_SE_def) + +lemma malt_SE_cons [simp]: "\\<^sub>S\<^sub>E (a # S) = (a \\<^sub>S\<^sub>E (\\<^sub>S\<^sub>E S))" +by(simp add: malt_SE_def) + + + + +subsection{* Definition and Properties of Valid Execution Sequences*} + +text{* A key-notion in our framework is the \emph{valid} execution +sequence, \ie{} a sequence that: +\begin{enumerate} +\item terminates (not obvious since while), +\item results in a final @{term True}, +\item does not fail globally (but recall the FailSave and FailPurge + variants of @{term mbind}-operators, that handle local exceptions in + one or another way). +\end{enumerate} +Seen from an automata perspective (where the monad - operations correspond to +the step function), valid execution sequences can be used to model ``feasible paths'' +across an automaton. *} + +definition valid_SE :: "'\ \ (bool,'\) MON\<^sub>S\<^sub>E \ bool" (infix "\" 15) +where "(\ \ m) = (m \ \ None \ fst(the (m \)))" +text{* This notation consideres failures as valid -- a definition +inspired by I/O conformance. 
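As a sketch (illustration only, in Haskell with Maybe for the option type), the
validity predicate requires the monadic computation itself not to fail and to
return True; step failures inside the fail-safe mbind are still tolerated,
since that operator never fails:

\begin{verbatim}
-- valid_SE: a run is accepted iff it does not fail and its Boolean result is True.
valid :: s -> (s -> Maybe (Bool, s)) -> Bool
valid sigma m = case m sigma of
                  Nothing     -> False
                  Just (b, _) -> b

-- Example with a toy step function on an integer state (names are made up):
step :: Int -> Int -> Maybe (Int, Int)
step x s = if x >= 0 then Just (s + x, s + x) else Nothing

-- valid 0 (\s -> do { (o1,s1) <- step 2 s; (o2,s2) <- step 3 s1; return (o2 == 5, s2) })
-- evaluates to True, while replacing 3 by (-3) makes the run fail and hence invalid.
\end{verbatim}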
*} + +subsubsection{* Valid Execution Sequences and their Symbolic Execution *} +lemma exec_unit_SE [simp]: "(\ \ (return P)) = (P)" +by(auto simp: valid_SE_def unit_SE_def) + +lemma exec_unit_SE' [simp]: "(\\<^sub>0 \ (\\. Some (f \, \))) = (f \\<^sub>0)" +by(simp add: valid_SE_def ) + +lemma exec_fail_SE [simp]: "(\ \ fail\<^sub>S\<^sub>E) = False" +by(auto simp: valid_SE_def fail_SE_def) + +lemma exec_fail_SE'[simp]: "\(\\<^sub>0 \ (\\. None))" +by(simp add: valid_SE_def ) + +text{* The following the rules are in a sense the heart of the entire symbolic execution approach *} +lemma exec_bind_SE_failure: +"A \ = None \ \(\ \ ((s \ A ; M s)))" +by(simp add: valid_SE_def unit_SE_def bind_SE_def) + +lemma exec_bind_SE_success: +"A \ = Some(b,\') \ (\ \ ((s \ A ; M s))) = (\' \ (M b))" +by(simp add: valid_SE_def unit_SE_def bind_SE_def ) + +lemma exec_bind_SE_success': (* atomic boolean Monad "Query Functions" *) +"M \ = Some(f \,\) \ (\ \ M) = f \" +by(simp add: valid_SE_def unit_SE_def bind_SE_def ) + + + + +lemma exec_bind_SE_success'': +"\ \ ((s \ A ; M s)) \ \ v \'. the(A \) = (v,\') \ \' \ (M v)" +apply(auto simp: valid_SE_def unit_SE_def bind_SE_def) +apply(cases "A \", simp_all) +apply(drule_tac x="A \" and f=the in arg_cong, simp) +apply(rule_tac x="fst aa" in exI) +apply(rule_tac x="snd aa" in exI, auto) +done + + +lemma exec_bind_SE_success''': +"\ \ ((s \ A ; M s)) \ \ a. (A \) = Some a \ (snd a) \ (M (fst a))" +apply(auto simp: valid_SE_def unit_SE_def bind_SE_def) +apply(cases "A \", simp_all) +apply(drule_tac x="A \" and f=the in arg_cong, simp) +apply(rule_tac x="fst aa" in exI) +apply(rule_tac x="snd aa" in exI, auto) +done + + +lemma exec_bind_SE_success'''' : +"\ \ ((s \ A ; M s)) \ \ v \'. A \ = Some(v,\') \ \' \ (M v)" +apply(auto simp: valid_SE_def unit_SE_def bind_SE_def) +apply(cases "A \", simp_all) +apply(drule_tac x="A \" and f=the in arg_cong, simp) +apply(rule_tac x="fst aa" in exI) +apply(rule_tac x="snd aa" in exI, auto) +done + + + + +text{* Recall \verb+mbind_unit+ for the base case. 
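More generally, the rules above linearize a finite composition of computations
into one pure constraint.  For instance, assuming two computations $A$ and $B$
with $A\;\sigma = \mathrm{Some}(b,\sigma')$ and $B\;\sigma' = \mathrm{Some}(c,\sigma'')$:
\begin{displaymath}
\begin{array}{lll}
                    & \sigma   \models (x \leftarrow A;\; y \leftarrow B;\; \mathrm{return}\,(P\;x\;y)) & \\
\Longleftrightarrow & \sigma'  \models (y \leftarrow B;\; \mathrm{return}\,(P\;b\;y)) & \mbox{by exec\_bind\_SE\_success} \\
\Longleftrightarrow & \sigma'' \models \mathrm{return}\,(P\;b\;c)                     & \mbox{by exec\_bind\_SE\_success} \\
\Longleftrightarrow & P\;b\;c                                                         & \mbox{by exec\_unit\_SE}
\end{array}
\end{displaymath}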
*} + +lemma valid_mbind_mt : "(\ \ ( s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e [] f; unit\<^sub>S\<^sub>E (P s))) = P [] " by simp +lemma valid_mbind_mtE: "\ \ ( s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e [] f; unit\<^sub>S\<^sub>E (P s)) \ (P [] \ Q) \ Q" +by(auto simp: valid_mbind_mt) + +lemma valid_mbind'_mt : "(\ \ ( s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p [] f; unit\<^sub>S\<^sub>E (P s))) = P [] " by simp +lemma valid_mbind'_mtE: "\ \ ( s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p [] f; unit\<^sub>S\<^sub>E (P s)) \ (P [] \ Q) \ Q" +by(auto simp: valid_mbind'_mt) + +lemma valid_mbind''_mt : "(\ \ ( s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e [] f; unit\<^sub>S\<^sub>E (P s))) = P [] " +by(simp add: mbind''.simps valid_SE_def bind_SE_def unit_SE_def) +lemma valid_mbind''_mtE: "\ \ ( s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e [] f; unit\<^sub>S\<^sub>E (P s)) \ (P [] \ Q) \ Q" +by(auto simp: valid_mbind''_mt) + + +lemma exec_mbindFSave_failure: +"ioprog a \ = None \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (a#S) ioprog ; M s)) = (\ \ (M []))" +by(simp add: valid_SE_def unit_SE_def bind_SE_def) + +lemma exec_mbindFStop_failure: +"ioprog a \ = None \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (a#S) ioprog ; M s)) = (False)" +by(simp add: exec_bind_SE_failure) + +lemma exec_mbindFPurge_failure: +"ioprog a \ = None \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e (a#S) ioprog ; M s)) = (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e (S) ioprog ; M s))" +by(simp add: valid_SE_def unit_SE_def bind_SE_def mbind''.simps) + + +lemma exec_mbindFSave_success : +"ioprog a \ = Some(b,\') \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (a#S) ioprog ; M s)) = + (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S ioprog ; M (b#s)))" +unfolding valid_SE_def unit_SE_def bind_SE_def +by(cases "mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S ioprog \'", auto) + +lemma exec_mbindFStop_success : +"ioprog a \ = Some(b,\') \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (a#S) ioprog ; M s)) = + (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S ioprog ; M (b#s)))" +unfolding valid_SE_def unit_SE_def bind_SE_def +by(cases "mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S ioprog \'", auto simp: mbind'.simps) + +lemma exec_mbindFPurge_success : +"ioprog a \ = Some(b,\') \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e (a#S) ioprog ; M s)) = + (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e S ioprog ; M (b#s)))" +unfolding valid_SE_def unit_SE_def bind_SE_def +by(cases "mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e S ioprog \'", auto simp: mbind''.simps) + +lemma exec_mbindFSave: +"(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (a#S) ioprog ; return (P s))) = + (case ioprog a \ of + None \ (\ \ (return (P []))) + | Some(b,\') \ (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S ioprog ; return (P 
(b#s)))))" +apply(case_tac "ioprog a \") +apply(auto simp: exec_mbindFSave_failure exec_mbindFSave_success split: prod.splits) +done + +lemma mbind_eq_sexec: +assumes * : "\b \'. f a \ = Some(b,\') \ + (os \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S f; P (b#os)) = (os \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S f; P' (b#os))" +shows "( a \ f a; x \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S f; P (a # x)) \ = + ( a \ f a; x \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S f; P'(a # x)) \" + apply(cases "f a \ = None") + apply(subst bind_SE_def, simp) + apply(subst bind_SE_def, simp) + apply auto + apply(subst bind_SE_def, simp) + apply(subst bind_SE_def, simp) +apply(simp add: *) +done + + +lemma mbind_eq_sexec': +assumes * : "\b \'. f a \ = Some(b,\') \ + (P (b))\' = (P' (b))\'" +shows "( a \ f a; P (a)) \ = + ( a \ f a; P'(a)) \" + apply(cases "f a \ = None") + apply(subst bind_SE_def, simp) + apply(subst bind_SE_def, simp) + apply auto + apply(subst bind_SE_def, simp) + apply(subst bind_SE_def, simp) + apply(simp add: *) +done + +lemma mbind'_concat: +"(os \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (S@T) f; P os) = (os \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S f; os' \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p T f; P (os @ os'))" +proof (rule ext, rename_tac "\", induct S arbitrary: \ P) + case Nil show ?case by simp +next + case (Cons a S) show ?case + apply(insert Cons.hyps, simp) + by(rule mbind_eq_sexec',simp) +qed + +lemma assert_suffix_inv : + "\ \ ( _ \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p xs istep; assert\<^sub>S\<^sub>E (P)) + \ \\. P \ \ (\ \ (_ \ istep x; assert\<^sub>S\<^sub>E (P))) + \ \ \ ( _ \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (xs @ [x]) istep; assert\<^sub>S\<^sub>E (P))" +apply(subst mbind'_concat, simp) +unfolding bind_SE_def assert_SE_def valid_SE_def +apply(auto split: option.split option.split_asm) +apply(case_tac "aa",simp_all) +apply(case_tac "P bb",simp_all) +apply (metis option.distinct(1)) +apply(case_tac "aa",simp_all) +apply(case_tac "P bb",simp_all) +by (metis option.distinct(1)) + + + +text{* Universal splitting and symbolic execution rule *} +lemma exec_mbindFSave_E: +assumes seq : "(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (a#S) ioprog ; (P s)))" + and none: "ioprog a \ = None \ (\ \ (P [])) \ Q" + and some: "\ b \'. ioprog a \ = Some(b,\') \ (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S ioprog;(P (b#s)))) \ Q " +shows "Q" +using seq +proof(cases "ioprog a \") + case None assume ass:"ioprog a \ = None" show "Q" + apply(rule none[OF ass]) + apply(insert ass, erule_tac ioprog1=ioprog in exec_mbindFSave_failure[THEN iffD1],rule seq) + done +next + case (Some aa) assume ass:"ioprog a \ = Some aa" show "Q" + apply(insert ass,cases "aa",simp, rename_tac "out" "\'") + apply(erule some) + apply(insert ass,simp) + apply(erule_tac ioprog1=ioprog in exec_mbindFSave_success[THEN iffD1],rule seq) + done +qed + +text{* The next rule reveals the particular interest in deduction; + as an elimination rule, it allows for a linear conversion of a validity judgement + @{term "mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p"} over an input list @{term "S"} into a constraint system; without any + branching ... 
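For an input sequence of length two, for instance, the rule unfolds a validity
judgement into exactly two such constraints (writing $\mathit{mbind}$ for the
fail-stop variant and $\mathit{ios}$ for the step function; the last step also
uses the base case for the empty input list):
\begin{displaymath}
\begin{array}{lll}
                    & \sigma   \models (s \leftarrow \mathit{mbind}\;[a_1,a_2]\;\mathit{ios};\; P\;s) & \\
\Longleftrightarrow & \sigma_1 \models (s \leftarrow \mathit{mbind}\;[a_2]\;\mathit{ios};\; P\;(b_1\;\#\;s))
                    & \mbox{with } \mathit{ios}\;a_1\;\sigma = \mathrm{Some}(b_1,\sigma_1) \\
\Longleftrightarrow & \sigma_2 \models P\;[b_1,b_2]
                    & \mbox{with } \mathit{ios}\;a_2\;\sigma_1 = \mathrm{Some}(b_2,\sigma_2)
\end{array}
\end{displaymath}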
Symbolic execution can even be stopped tactically whenever + @{term "ioprog a \ = Some(b,\')"} comes to a contradiction. *} +lemma exec_mbindFStop_E: +assumes seq : "(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (a#S) ioprog ; (P s)))" + and some: "\b \'. ioprog a \ = Some(b,\') \ (\'\ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S ioprog;(P(b#s)))) \ Q" +shows "Q" +using seq +proof(cases "ioprog a \") + case None assume ass:"ioprog a \ = None" show "Q" + apply(insert ass seq) + apply(drule_tac \=\ and S=S and M=P in exec_mbindFStop_failure, simp) + done +next + case (Some aa) assume ass:"ioprog a \ = Some aa" show "Q" + apply(insert ass,cases "aa",simp, rename_tac "out" "\'") + apply(erule some) + apply(insert ass,simp) + apply(erule_tac ioprog1=ioprog in exec_mbindFStop_success[THEN iffD1],rule seq) + done +qed + + +lemma exec_mbindFPurge_E: +assumes seq : "(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e (a#S) ioprog ; (P s)))" + and none: "ioprog a \ = None \ (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e S ioprog;(P (s)))) \ Q" + and some: "\ b \'. ioprog a \ = Some(b,\') \ (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e S ioprog;(P (b#s)))) \ Q " +shows "Q" +using seq +proof(cases "ioprog a \") + case None assume ass:"ioprog a \ = None" show "Q" + apply(rule none[OF ass]) + apply(insert ass, erule_tac ioprog1=ioprog in exec_mbindFPurge_failure[THEN iffD1],rule seq) + done +next + case (Some aa) assume ass:"ioprog a \ = Some aa" show "Q" + apply(insert ass,cases "aa",simp, rename_tac "out" "\'") + apply(erule some) + apply(insert ass,simp) + apply(erule_tac ioprog1=ioprog in exec_mbindFPurge_success[THEN iffD1],rule seq) + done +qed + + +lemma assert_disch1 :" P \ \ (\ \ (x \ assert\<^sub>S\<^sub>E P; M x)) = (\ \ (M True))" +by(auto simp: bind_SE_def assert_SE_def valid_SE_def) + +lemma assert_disch2 :" \ P \ \ \ (\ \ (x \ assert\<^sub>S\<^sub>E P ; M s))" +by(auto simp: bind_SE_def assert_SE_def valid_SE_def) + +lemma assert_disch3 :" \ P \ \ \ (\ \ (assert\<^sub>S\<^sub>E P))" +by(auto simp: bind_SE_def assert_SE_def valid_SE_def) + +lemma assert_disch4 :" P \ \ (\ \ (assert\<^sub>S\<^sub>E P))" +by(auto simp: bind_SE_def assert_SE_def valid_SE_def) + +lemma assert_simp : "(\ \ assert\<^sub>S\<^sub>E P) = P \" +by (meson assert_disch3 assert_disch4) + +lemmas assert_D = assert_simp[THEN iffD1] (* legacy *) + +lemma assert_bind_simp : "(\ \ (x \ assert\<^sub>S\<^sub>E P; M x)) = (P \ \ (\ \ (M True)))" +by(auto simp: bind_SE_def assert_SE_def valid_SE_def split: HOL.if_split_asm) + +lemmas assert_bindD = assert_bind_simp[THEN iffD1] (* legacy *) + + +lemma assume_D : "(\ \ (_ \ assume\<^sub>S\<^sub>E P; M)) \ \ \. (P \ \ (\ \ M) )" +apply(auto simp: bind_SE_def assume_SE_def valid_SE_def split: HOL.if_split_asm) +apply(rule_tac x="Eps P" in exI, auto) +apply(subst Hilbert_Choice.someI,assumption,simp) +done + + +lemma assume_E : +assumes * : "\ \ ( _ \ assume\<^sub>S\<^sub>E P; M) " +and ** : "\ \. P \ \ \ \ M \ Q" +shows "Q" +apply(insert *) +by(insert *[THEN assume_D], auto intro: **) + +lemma assume_E' : +assumes * : "\ \ assume\<^sub>S\<^sub>E P ;- M" +and ** : "\ \. 
P \ \ \ \ M \ Q" +shows "Q" +by(insert *[simplified "bind_SE'_def", THEN assume_D], auto intro: **) + + +text{* These two rule prove that the SE Monad in connection with the notion of valid sequence +is actually sufficient for a representation of a Boogie-like language. The SBE monad with explicit +sets of states --- to be shown below --- is strictly speaking not necessary (and will therefore +be discontinued in the development). *} + +term "if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi" + +lemma if_SE_D1 : "P \ \ (\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)) = (\ \ B\<^sub>1)" +by(auto simp: if_SE_def valid_SE_def) + +lemma if_SE_D1' : "P \ \ (\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi);-M) = (\ \ (B\<^sub>1;-M))" +by(auto simp: if_SE_def valid_SE_def bind_SE'_def bind_SE_def) + + +lemma if_SE_D2 : "\ P \ \ (\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)) = (\ \ B\<^sub>2)" +by(auto simp: if_SE_def valid_SE_def) + +lemma if_SE_D2' : "\ P \ \ (\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi);-M) = (\ \ B\<^sub>2;-M)" +by(auto simp: if_SE_def valid_SE_def bind_SE'_def bind_SE_def) + + +lemma if_SE_split_asm : +"(\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)) = ((P \ \ (\ \ B\<^sub>1)) \ (\ P \ \ (\ \ B\<^sub>2)))" +by(cases "P \",auto simp: if_SE_D1 if_SE_D2) + +lemma if_SE_split_asm': +"(\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi);-M) = ((P \ \ (\ \ B\<^sub>1;-M)) \ (\ P \ \ (\ \ B\<^sub>2;-M)))" +by(cases "P \",auto simp: if_SE_D1' if_SE_D2') + + +lemma if_SE_split: +"(\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)) = ((P \ \ (\ \ B\<^sub>1)) \ (\ P \ \ (\ \ B\<^sub>2)))" +by(cases "P \", auto simp: if_SE_D1 if_SE_D2) + + +lemma if_SE_split': +"(\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi);-M) = ((P \ \ (\ \ B\<^sub>1;-M)) \ (\ P \ \ (\ \ B\<^sub>2;-M)))" +by(cases "P \", auto simp: if_SE_D1' if_SE_D2') + +lemma if_SE_execE: + assumes A: "\ \ ((if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi))" + and B: "P \ \ \ \ (B\<^sub>1) \ Q" + and C: "\ P \\ \ \ (B\<^sub>2) \ Q" + shows "Q" +by(insert A [simplified if_SE_split],cases "P \", simp_all, auto elim: B C) + + +lemma if_SE_execE': + assumes A: "\ \ ((if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi);-M)" + and B: "P \ \ \ \ (B\<^sub>1;-M) \ Q" + and C: "\ P \\ \ \ (B\<^sub>2;-M) \ Q" + shows "Q" +by(insert A [simplified if_SE_split'],cases "P \", simp_all, auto elim: B C) + + +lemma exec_while : +"(\ \ ((while\<^sub>S\<^sub>E b do c od) ;- M)) = + (\ \ ((if\<^sub>S\<^sub>E b then c ;- (while\<^sub>S\<^sub>E b do c od) else unit\<^sub>S\<^sub>E ()fi) ;- M))" +apply(subst while_SE_unfold) +by(simp add: bind_SE'_def ) + +lemmas exec_whileD = exec_while[THEN iffD1] + +lemma if_SE_execE'': +"\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi) ;- M +\ (P \ \ \ \ B\<^sub>1 ;- M \ Q) +\ (\ P \ \ \ \ B\<^sub>2 ;- M \ Q) +\ Q" +by(auto elim: if_SE_execE') + + +lemma [code]: + "(\ \ m) = (case (m \) of None \ False | (Some (x,y)) \ x)" + apply(simp add: valid_SE_def) + apply(cases "m \ = None", simp_all) + apply(insert not_None_eq, auto) +done + + +text{* Test-Refinements will be stated in terms of the failsave @{term mbind}, opting + more generality. 
The following lemma allows for an optimization both in + test execution as well as in symbolic execution for an important special case of + the post-codition: Whenever the latter has the constraint that the length of input and + output sequence equal each other (that is to say: no failure occured), failsave mbind + can be reduced to failstop mbind ... *} +lemma mbindFSave_vs_mbindFStop : + "(\ \ (os \ (mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e \s ioprog); return(length \s = length os \ P \s os))) = + (\ \ (os \ (mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p \s ioprog); return(P \s os)))" + apply(rule_tac x=P in spec) + apply(rule_tac x=\ in spec) + proof(induct "\s") + case Nil show ?case by(simp_all add: mbind_try try_SE_def del: Monads.mbind.simps) + case (Cons a \s) show ?case + apply(rule allI, rename_tac "\",rule allI, rename_tac "P") + apply(insert Cons.hyps) + apply(case_tac "ioprog a \") + apply(simp only: exec_mbindFSave_failure exec_mbindFStop_failure, simp) + apply(simp add: split_paired_all del: Monads.mbind.simps ) + apply(rename_tac "\'") + apply(subst exec_mbindFSave_success, assumption) + apply(subst (2) exec_bind_SE_success, assumption) + apply(erule_tac x="\'" in allE) + apply(erule_tac x="\\s s. P (a # \s) (aa # s)" in allE) (* heureka ! *) + apply(simp) + done + qed + + +lemma mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e_vs_mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p: +assumes A: "\ \ \. ioprog \ \ \ None" +shows "(\ \ (os \ (mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e \s ioprog); P os)) = + (\ \ (os \ (mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p \s ioprog); P os))" +proof(induct "\s") + case Nil show ?case by simp +next + case (Cons a \s) + from Cons.hyps + have B:"\ S f \. mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S f \ \ None " by simp + have C:"\\. mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p \s ioprog \ = mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e \s ioprog \" + apply(induct \s, simp) + apply(rule allI,rename_tac "\") + apply(simp add: Monads.mbind'.simps(2)) + apply(insert A, erule_tac x="a" in allE) + apply(erule_tac x="\" and P="\\ . ioprog a \ \ None" in allE) + apply(auto split:option.split) + done + show ?case + apply(insert A,erule_tac x="a" in allE,erule_tac x="\" in allE) + apply(simp, elim exE) + apply(rename_tac "out" "\'") + apply(insert B, erule_tac x=\s in allE, erule_tac x=ioprog in allE, erule_tac x=\' in allE) + apply(subst(asm) not_None_eq, elim exE) + apply(subst Monads.exec_bind_SE_success) + apply(simp split: option.split, auto) + apply(rule_tac s="(\ a b c. a # (fst c)) out \' (aa, b)" in trans, simp,rule refl) + apply(rule_tac s="(\ a b c. (snd c)) out \' (aa, b)" in trans, simp,rule refl) + apply(simp_all) + apply(subst Monads.exec_bind_SE_success, assumption) + apply(subst Monads.exec_bind_SE_success) + apply(rule_tac s="Some (aa, b)" in trans,simp_all add:C) + apply(subst(asm) Monads.exec_bind_SE_success, assumption) + apply(subst(asm) Monads.exec_bind_SE_success) + apply(rule_tac s="Some (aa, b)" in trans,simp_all add:C) + done +qed + + +section{* Valid Test Sequences in the State Exception Backtrack Monad *} +text{* This is still an unstructured merge of executable monad concepts +and specification oriented high-level properties initiating test procedures. 
*} + + + +section{* Hoare *} +(* STILL VERY EXPERIMENTAL STUFF *) + +definition hoare\<^sub>1 :: "('\ \ bool) \ ('\, '\)MON\<^sub>S\<^sub>E \ ('\ \ '\ \ bool) \ bool" ("\\<^sub>1 ({(1_)}/ (_)/ {(1_)})" 50) +where "(\\<^sub>1{P} M {Q} ) = (\\. \ \ (_ \ assume\<^sub>S\<^sub>E P ; x \ M; assert\<^sub>S\<^sub>E (Q x)))" + + +definition hoare\<^sub>2 :: "('\ \ bool) \ ('\, '\)MON\<^sub>S\<^sub>E \ ('\ \ '\ \ bool) \ bool" ("\\<^sub>2 ({(1_)}/ (_)/ {(1_)})" 50) +where "(\\<^sub>2{P} M {Q} ) = (\\. P \ \ (\ \ (x \ M; assert\<^sub>S\<^sub>E (Q x))))" + + + +find_theorems "assert\<^sub>S\<^sub>E" + +find_theorems "assume\<^sub>S\<^sub>E" + +lemma "P \ \ (_ \ assume\<^sub>S\<^sub>E P ; x \ M; assert\<^sub>S\<^sub>E (\\. (x=X) \ Q x \))" +oops + +lemma "\\. \ X. \ \ (_ \ assume\<^sub>S\<^sub>E P ; x \ M; assert\<^sub>S\<^sub>E (\\. x=X \ Q x \))" +oops + + +lemma "\ X. \ \ (_ \ assume\<^sub>S\<^sub>E P ; x \ M; assert\<^sub>S\<^sub>E (\\. x=X \ Q x \)) + \ + \ \ (_ \ assume\<^sub>S\<^sub>E P ; x \ M; assert\<^sub>S\<^sub>E (\\. Q x \))" +unfolding valid_SE_def assume_SE_def assert_SE_def bind_SE_def +by(auto split: if_split HOL.if_split_asm Option.option.split Option.option.split_asm) + + +lemma monadic_sequence_rule: + "\ X \\<^sub>1. (\ \ (_ \ assume\<^sub>S\<^sub>E (\\'. (\=\') \ P \) ; x \ M; assert\<^sub>S\<^sub>E (\\. (x=X) \ (\=\\<^sub>1) \ Q x \))) + \ + (\\<^sub>1 \ (_ \ assume\<^sub>S\<^sub>E (\\. (\=\\<^sub>1) \ Q x \) ; y \ M'; assert\<^sub>S\<^sub>E (\\. R x y \))) + \ + \ \ (_ \ assume\<^sub>S\<^sub>E (\\'. (\=\') \ P \) ; x \ M; y \ M'; assert\<^sub>S\<^sub>E (R x y))" +apply(elim exE impE conjE) +apply(drule Monads.assume_D) +apply(elim exE impE conjE) +unfolding valid_SE_def assume_SE_def assert_SE_def bind_SE_def +apply(auto split: if_split HOL.if_split_asm Option.option.split Option.option.split_asm) +apply (metis (mono_tags, lifting) option.simps(3) someI_ex) +oops + + + +subsection{* Legacy Bindings *} + + +lemma valid_true[simp]: (* legacy: special case *) + "(\ \ (s \ return x ; return (P s))) = P x" by simp + + +(* +lemmas valid_both = exec_mbindFSave (* legacy *) +lemmas valid_success = exec_mbindFSave_success (* legacy *) +lemmas valid_success'' = exec_mbindFSave_success(* legacy *) +lemmas valid_success' = exec_bind_SE_success (* legacy *) +lemmas valid_failure = exec_mbindFSave_failure (* legacy : *) +lemmas valid_failure' = exec_bind_SE_failure (* legacy *) +lemmas valid_failure''=valid_failure (* legacy : *) +lemmas valid_failure''' = exec_mbindFStop_failure (* legacy : *) +lemmas valid_propagate_fail = exec_fail_SE (* legacy *) +lemmas valid_propagate_fail' = exec_fail_SE' (* legacy *) +lemmas valid_propoagate_3' = valid_propagate_fail' (* legacy *) +lemmas valid_propagate_2 = exec_bind_SE_success''(* legacy *) +lemmas valid_propagate_1 = exec_unit_SE (* legacy *) +lemmas valid_successElem = exec_bind_SE_success' (* legacy *) +lemmas valid_propagate_2' = exec_bind_SE_success'''(* legacy *) +lemmas valid_propagate_2'' = exec_bind_SE_success'''' (* legacy *) +lemmas valid_propoagate_3 = exec_unit_SE' (* legacy *) + *) +(* legacy ?: +lemma valid_success'': +"ioprog a \ = Some(b,\') \ + (\ \ (s \ mbind (a#S) ioprog ; return (P s))) = + (\' \ (s \ mbind S ioprog ; return (P (b#s))))" +unfolding valid_SE_def unit_SE_def bind_SE_def +by(cases "mbind S ioprog \'", auto) +*) + +subsection{* State Backtrack Monads *} +text{*This subsection is still rudimentary and as such an interesting +formal analogue to the previous monad definitions. 
It is doubtful that it is +interesting for testing and as a cmputational stucture at all. +Clearly more relevant is ``sequence'' instead of ``set,'' which would +rephrase Isabelle's internal tactic concept. *} + +type_synonym ('o, '\) MON\<^sub>S\<^sub>B = "'\ \ ('o \ '\) set" + +definition bind_SB :: "('o, '\)MON\<^sub>S\<^sub>B \ ('o \ ('o', '\)MON\<^sub>S\<^sub>B) \ ('o', '\)MON\<^sub>S\<^sub>B" +where "bind_SB f g \ = \ ((\(out, \). (g out \)) ` (f \))" +notation bind_SB ("bind\<^sub>S\<^sub>B") + +definition unit_SB :: "'o \ ('o, '\)MON\<^sub>S\<^sub>B" ("(returns _)" 8) +where "unit_SB e = (\\. {(e,\)})" +notation unit_SB ("unit\<^sub>S\<^sub>B") + +syntax (xsymbols) + "_bind_SB" :: "[pttrn,('o,'\)MON\<^sub>S\<^sub>B,('o','\)MON\<^sub>S\<^sub>B] \ ('o','\)MON\<^sub>S\<^sub>B" + ("(2 _ := _; _)" [5,8,8]8) +translations + "x := f; g" == "CONST bind_SB f (% x . g)" + + + +lemma bind_left_unit_SB : "(x := returns a; m x) = m a" + by (rule ext,simp add: unit_SB_def bind_SB_def) + +lemma bind_right_unit_SB: "(x := m; returns x) = m" + by (rule ext, simp add: unit_SB_def bind_SB_def) + + +lemma bind_assoc_SB: "(y := (x := m; k x); h y) = (x := m; (y := k x; h y))" + by (rule ext, simp add: unit_SB_def bind_SB_def split_def) + + + +subsection{* State Backtrack Exception Monad (vulgo: Boogie-PL) *} +text{* The following combination of the previous two Monad-Constructions +allows for the semantic foundation of a simple generic assertion language +in the style of Schirmers Simpl-Language or Rustan Leino's Boogie-PL language. +The key is to use the exceptional element None for violations of +the assert-statement. *} +type_synonym ('o, '\) MON\<^sub>S\<^sub>B\<^sub>E = "'\ \ (('o \ '\) set) option" + + +definition bind_SBE :: "('o,'\)MON\<^sub>S\<^sub>B\<^sub>E \ ('o \ ('o','\)MON\<^sub>S\<^sub>B\<^sub>E) \ ('o','\)MON\<^sub>S\<^sub>B\<^sub>E" +where "bind_SBE f g = (\\. case f \ of None \ None + | Some S \ (let S' = (\(out, \'). g out \') ` S + in if None \ S' then None + else Some(\ (the ` S'))))" + +syntax (xsymbols) + "_bind_SBE" :: "[pttrn,('o,'\)MON\<^sub>S\<^sub>B\<^sub>E,('o','\)MON\<^sub>S\<^sub>B\<^sub>E] \ ('o','\)MON\<^sub>S\<^sub>B\<^sub>E" + ("(2 _ :\ _; _)" [5,8,8]8) +translations + "x :\ f; g" == "CONST bind_SBE f (% x . g)" + +definition unit_SBE :: "'o \ ('o, '\)MON\<^sub>S\<^sub>B\<^sub>E" ("(returning _)" 8) +where "unit_SBE e = (\\. Some({(e,\)}))" + +definition assert_SBE :: "('\ \ bool) \ (unit, '\)MON\<^sub>S\<^sub>B\<^sub>E" +where "assert_SBE e = (\\. if e \ then Some({((),\)}) + else None)" +notation assert_SBE ("assert\<^sub>S\<^sub>B\<^sub>E") + +definition assume_SBE :: "('\ \ bool) \ (unit, '\)MON\<^sub>S\<^sub>B\<^sub>E" +where "assume_SBE e = (\\. if e \ then Some({((),\)}) + else Some {})" +notation assume_SBE ("assume\<^sub>S\<^sub>B\<^sub>E") + + +definition havoc_SBE :: " (unit, '\)MON\<^sub>S\<^sub>B\<^sub>E" +where "havoc_SBE = (\\. Some({x. 
True}))" +notation havoc_SBE ("havoc\<^sub>S\<^sub>B\<^sub>E") + + +lemma bind_left_unit_SBE : "(x :\ returning a; m x) = m a" + by (rule ext,simp add: unit_SBE_def bind_SBE_def) + +lemma bind_right_unit_SBE: "(x :\ m; returning x) = m" + apply (rule ext, simp add: unit_SBE_def bind_SBE_def) + apply (case_tac "m x", simp_all add:Let_def) + apply (rule HOL.ccontr, simp add: Set.image_iff) + done + + +lemmas aux = trans[OF HOL.neq_commute,OF Option.not_None_eq] + + +lemma bind_assoc_SBE: "(y :\ (x :\ m; k x); h y) = (x :\ m; (y :\ k x; h y))" +proof (rule ext, rename_tac z, simp add: unit_SBE_def bind_SBE_def, + case_tac "m z", simp_all add: Let_def Set.image_iff, safe, goal_cases) print_cases + case (1 z a aa b ab ba aaa baa) then show ?case + using case_prod_conv option.case_eq_if by force +next + case (2 z a aa b ab ba) then show ?case + apply(rule_tac x="(aa,b)" in bexI, simp_all add:split_def) + apply(erule_tac x="(aa,b)" in ballE) + apply(auto simp: aux image_def split_def intro!: rev_bexI) + done +next + case (3 z a aa b) then show ?case + by (metis (no_types, lifting) case_prod_conv option.case_eq_if) +next + case (4 z a aa b) then show ?case + apply(auto simp: aux image_def split_def intro!: rev_bexI) + apply (auto simp add: Let_def split: if_split_asm option.split_asm) + apply fastforce + done +next + case (5 z a aa b ab ba aaa baa) then show ?case + apply simp apply((erule_tac x="(ab,ba)" in ballE)+) + apply(simp_all add: aux Option.not_None_eq, (erule exE)+, simp add:split_def) + apply(erule rev_bexI,case_tac "None\(\p. h (fst p) (snd p))`y",auto simp:split_def) + done + +next + case (6 z a aa b aaa ba) then show ?case + apply simp apply((erule_tac x="(aaa,ba)" in ballE)+) + apply(simp_all add: aux, (erule exE)+, simp add:split_def) + apply(erule rev_bexI, case_tac "None\(\p. h(fst p)(snd p))`y",auto simp:split_def) + done +qed + + +interpretation SBE : Monad unit_SBE bind_SBE + by unfold_locales (simp_all add: bind_left_unit_SBE bind_right_unit_SBE bind_assoc_SBE) + + +definition valid_SBE :: "'\ \ ('a,'\) MON\<^sub>S\<^sub>B\<^sub>E \ bool" (infix "\\<^sub>S\<^sub>B\<^sub>E" 15) +where "\ \\<^sub>S\<^sub>B\<^sub>E m \ (m \ \ None)" +text{* This notation consideres all non-failures as valid. *} + + +lemma assume_assert: "(\ \\<^sub>S\<^sub>B\<^sub>E ( _ :\ assume\<^sub>S\<^sub>B\<^sub>E P ; assert\<^sub>S\<^sub>B\<^sub>E Q)) = (P \ \ Q \)" + by(simp add: valid_SBE_def assume_SBE_def assert_SBE_def bind_SBE_def) + +lemma assert_intro: "Q \ \ \ \\<^sub>S\<^sub>B\<^sub>E (assert\<^sub>S\<^sub>B\<^sub>E Q)" + by(simp add: valid_SBE_def assume_SBE_def assert_SBE_def bind_SBE_def) + +lemma assume_dest: + "\ \ \\<^sub>S\<^sub>B\<^sub>E (x :\ assume\<^sub>S\<^sub>B\<^sub>E Q; M x); Q \' \ \ \ \\<^sub>S\<^sub>B\<^sub>E M ()" + apply(auto simp: valid_SBE_def assume_SBE_def assert_SBE_def bind_SBE_def) + apply(cases "Q \",simp_all) + oops + +text{* This still needs work. What would be needed is a kind + of wp - calculus that comes out of that. So far: nope. *} + +(* TODO: IF THEN ELSE and WHILE + Monadic Laws + Operational Rules. 
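 *)

(* A Haskell sketch of the state-backtrack-exception monad above (illustration
   only): lists stand in for the result sets, Nothing for an assert violation.
   The names below are made up and not part of the HOL-TestGen sources.

   import Data.Maybe (isJust)

   type MonSBE a s = s -> Maybe [(a, s)]            -- cf. MON_SBE above

   returnSBE :: a -> MonSBE a s
   returnSBE x = \s -> Just [(x, s)]

   -- bind_SBE: if any branch of the continuation fails, the whole computation
   -- fails; otherwise the alternatives are collected (set union becomes list
   -- concatenation here).
   bindSBE :: MonSBE a s -> (a -> MonSBE b s) -> MonSBE b s
   bindSBE f g = \s -> case f s of
                         Nothing -> Nothing
                         Just rs -> fmap concat (mapM (\(a, s') -> g a s') rs)

   assertSBE :: (s -> Bool) -> MonSBE () s          -- violation = Nothing
   assertSBE p = \s -> if p s then Just [((), s)] else Nothing

   assumeSBE :: (s -> Bool) -> MonSBE () s          -- infeasible = empty result set
   assumeSBE p = \s -> if p s then Just [((), s)] else Just []

   validSBE :: s -> MonSBE a s -> Bool              -- cf. valid_SBE: no assert violation
   validSBE s m = isJust (m s)

   -- validSBE s (bindSBE (assumeSBE p) (\_ -> assertSBE q)) is True exactly
   -- when p s implies q s, mirroring the lemma assume_assert above.
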
*) + + + + + +end diff --git a/src/test-gen/src/main/Observers.thy b/src/test-gen/src/main/Observers.thy new file mode 100644 index 0000000..aa38341 --- /dev/null +++ b/src/test-gen/src/main/Observers.thy @@ -0,0 +1,200 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * Observers.thy --- the base testing theory for reactive sequence testing. + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2007, ETH Zurich, Switzerland + * 2009 B. Wolff, Univ. Paris-Sud, France + * 2009 Achim D. Brucker, Germany + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id:$ *) + +chapter {* Observers managing Abstract State*} + +theory Observers imports Monads +begin + + +section{* IO-stepping Function Transfomers *} + +text{* The following adaption combinator converts an input-output + program under test of type: $\iota \Rightarrow \sigma + \rightharpoonup o \times \sigma$ with program state $\sigma$ into a + state transition program that can be processed by mbind. The key + idea to turn mbind into a test-driver for a reactive system is by + providing an internal state $\sigma'$, managed by the test driver, + and external, problem-specific functions ``rebind'' and + ``substitute'' that operate on this internal state. For example, + this internal state can be instantiated with an environment $var + \rightharpoonup value$. The output (or parts of it) can then be + bound to vars in the environment. In contrast, substitute can then + explicit substitute variables occuring in value representations + into pure values, e.g. is can substitue $c~("X")$ into $c~3$ + provided the environment contained the map with $X \leadsto 3$. 
*} + +text{* The state of the test-driver consists of two parts: the state + of the observer (or: adaptor) $\sigma$ and the internal state $\sigma'$ of the + the step-function of the system under test $ioprog$ is allowed to use. *} + +definition observer :: "['\ \ 'o \ '\, '\ \ '\ \ '\, '\\'\' \ '\ \ 'o \ bool] + \ ('\ \ '\' \ 'o \'\') + \ ('\ \ ('\\'\' \ '\\'\'))" + +where "observer rebind substitute postcond ioprog = + (\ input. (\ (\, \'). let input'= substitute \ input in + case ioprog input' \' of + None \ None (* ioprog failure - eg. timeout ... *) + | Some (output, \''') \ let \'' = rebind \ output in + (if postcond (\'',\''') input' output + then Some(\'', \''') + else None (* postcond failure *) )))" + +text{* The subsequent $observer$ version is more powerful: it admits also preconditions +of $ioprog$, which make reference to the observer state $\sigma_{obs}$. The observer-state +may contain an environment binding values to explicit variables. In such a scenario, the +$precond\_solve$ may consist of a \emph{solver} that constructs a solution from +\begin{enumerate} +\item this environment, +\item the observable state of the $ioprog$, +\item the abstract input + (which may be related to a precondition which contains references to explicit + variables) +\end{enumerate} +such that all the explicit variables contained in the preconditions and the +explicit variables in the abstract input are substituted against values +that make the preconditions true. The values must be stored in the +environment and are reported in the observer-state $\sigma_{obs}$. +*} + +definition observer1 :: "['\_obs \ 'o_c \ '\_obs, + '\_obs \ '\ \ '\_a \ ('\_c \ '\_obs), + '\_obs \ '\ \ '\_c \ 'o_c \ bool] + \ ('\_c \ ('o_c, '\)MON\<^sub>S\<^sub>E) + \ ('\_a \ ('o_c, '\_obs \'\)MON\<^sub>S\<^sub>E) " + +where "observer1 rebind precond_solve postcond ioprog = + (\ in_a. (\ (\_obs, \). let (in_c,\_obs') = precond_solve \_obs \ in_a + in case ioprog in_c \ of + None \ None (* ioprog failure - eg. timeout ... *) + | Some (out_c, \') \(let \_obs'' = rebind \_obs' out_c + in if postcond \_obs'' \' in_c out_c + then Some(out_c, (\_obs', \')) + else None (* postcond failure *) )))" + + +definition observer2 :: "['\_obs \ 'o_c \ '\_obs, '\_obs \ '\_a \ '\_c, '\_obs \ '\ \ '\_c \ 'o_c \ bool] + \ ('\_c \ ('o_c, '\)MON\<^sub>S\<^sub>E) + \ ('\_a \ ('o_c, '\_obs \'\)MON\<^sub>S\<^sub>E) " + +where "observer2 rebind substitute postcond ioprog = + (\ in_a. (\ (\_obs, \). let in_c = substitute \_obs in_a + in case ioprog in_c \ of + None \ None (* ioprog failure - eg. timeout ... *) + | Some (out_c, \') \(let \_obs' = rebind \_obs out_c + in if postcond \_obs' \' in_c out_c + then Some(out_c, (\_obs', \')) + else None (* postcond failure *) )))" + +text{* Note that this version of the observer is just a +monad-transformer; it transforms the i/o stepping function $ioprog$ +into another stepping function, which is the combined sub-system +consisting of the observer and, for example, a program under test +$\PUT$. The observer takes the \emph{abstract} input $in_a$, +substitutes explicit variables in it by concrete values stored by its +own state $\sigma_{obs}$ and constructs \emph{concrete} input $in_c$, +runs $ioprog$ in this context, and evaluates the return: the concrete +output $out_c$ and the successor state $\sigma'$ are used to extract +from concrete output concrete values and stores them inside its own +successor state $\sigma_{obs}'$. 
Provided that a post-condition is +passed succesfully, the output and the combined successor-state is +reported as success. + +Note that we made the following testability assumptions: +\begin{enumerate} +\item $ioprog$ behaves wrt. to the reported state and input as a function, i.e. it behaves + deterministically, and +\item it is not necessary to destinguish internal failure and post-condition-failure. + (Modelling Bug? This is superfluous and blind featurism ... One could do this by + introducing an own "weakening"-monad endo-transformer.) +\end{enumerate} + +*} + +text{* observer2 can actually be decomposed into two combinators - one +dealing with the management of explicit variables and one that tackles +post-conditions. *} + +definition observer3 :: "['\_obs \ 'o \ '\_obs, '\_obs \ '\_a \ '\_c] + \ ('\_c \ ('o, '\)MON\<^sub>S\<^sub>E) + \ ('\_a \ ('o, '\_obs \'\)MON\<^sub>S\<^sub>E) " + +where "observer3 rebind substitute ioprog = + (\ in_a. (\ (\_obs, \). + let in_c = substitute \_obs in_a + in case ioprog in_c \ of + None \ None (* ioprog failure - eg. timeout ... *) + | Some (out_c, \') \(let \_obs' = rebind \_obs out_c + in Some(out_c, (\_obs', \')) )))" + + + +definition observer4 :: "['\ \ '\ \ 'o \ bool] + \ ('\ \ ('o, '\)MON\<^sub>S\<^sub>E) + \ ('\ \ ('o, '\)MON\<^sub>S\<^sub>E)" + +where "observer4 postcond ioprog = + (\ input. (\ \. case ioprog input \ of + None \ None (* ioprog failure - eg. timeout ... *) + | Some (output, \') \ (if postcond \' input output + then Some(output, \') + else None (* postcond failure *) )))" + +text{* The following lemma explains the relationsship between $observer2$ and the +decoposed versions $observer3$ and $observer4$. The full equality does not hold - +the reason is that the two kinds of preconditions are different in a subtle way: +the postcondition may make reference to the abstract state. (See our example +\verb+Sequence_test+ based on a symbolic environment in the observer state.) +If the postcondition does not do this, they are equivalent. *} + +find_theorems name:"prod" name:"case" name:"beta" +lemma observer_decompose: + " observer2 r s (\ x. pc) io = (observer3 r s (observer4 pc io))" + apply(rule ext, rule ext) + apply(auto simp: observer2_def observer3_def + observer4_def Let_def case_prod_beta) + apply(case_tac "io (s a x) b", auto) +done + +end diff --git a/src/test-gen/src/main/Old_SMT/old_smt_builtin.ML b/src/test-gen/src/main/Old_SMT/old_smt_builtin.ML new file mode 100644 index 0000000..e492ed4 --- /dev/null +++ b/src/test-gen/src/main/Old_SMT/old_smt_builtin.ML @@ -0,0 +1,231 @@ +(* Title: HOL/Library/Old_SMT/old_smt_builtin.ML + Author: Sascha Boehme, TU Muenchen + +Tables of types and terms directly supported by SMT solvers. 
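+
+For example, old_smt_real.ML later in this patch registers the HOL reals as a
+built-in type and maps their operations to SMT-LIB names (abridged from that
+file):
+
+  Old_SMT_Builtin.add_builtin_typ Old_SMTLIB_Interface.smtlibC
+    (@{typ real}, K (SOME "Real"), real_num) #>
+  fold (Old_SMT_Builtin.add_builtin_fun' Old_SMTLIB_Interface.smtlibC)
+    [(@{const less (real)}, "<"), (@{const plus (real)}, "+")]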
+*) + +signature OLD_SMT_BUILTIN = +sig + (*for experiments*) + val filter_builtins: (typ -> bool) -> Proof.context -> Proof.context + + (*built-in types*) + val add_builtin_typ: Old_SMT_Utils.class -> + typ * (typ -> string option) * (typ -> int -> string option) -> + Context.generic -> Context.generic + val add_builtin_typ_ext: typ * (typ -> bool) -> Context.generic -> + Context.generic + val dest_builtin_typ: Proof.context -> typ -> string option + val is_builtin_typ_ext: Proof.context -> typ -> bool + + (*built-in numbers*) + val dest_builtin_num: Proof.context -> term -> (string * typ) option + val is_builtin_num: Proof.context -> term -> bool + val is_builtin_num_ext: Proof.context -> term -> bool + + (*built-in functions*) + type 'a bfun = Proof.context -> typ -> term list -> 'a + type bfunr = string * int * term list * (term list -> term) + val add_builtin_fun: Old_SMT_Utils.class -> + (string * typ) * bfunr option bfun -> Context.generic -> Context.generic + val add_builtin_fun': Old_SMT_Utils.class -> term * string -> Context.generic -> + Context.generic + val add_builtin_fun_ext: (string * typ) * term list bfun -> + Context.generic -> Context.generic + val add_builtin_fun_ext': string * typ -> Context.generic -> Context.generic + val add_builtin_fun_ext'': string -> Context.generic -> Context.generic + val dest_builtin_fun: Proof.context -> string * typ -> term list -> + bfunr option + val dest_builtin_eq: Proof.context -> term -> term -> bfunr option + val dest_builtin_pred: Proof.context -> string * typ -> term list -> + bfunr option + val dest_builtin_conn: Proof.context -> string * typ -> term list -> + bfunr option + val dest_builtin: Proof.context -> string * typ -> term list -> bfunr option + val dest_builtin_ext: Proof.context -> string * typ -> term list -> + term list option + val is_builtin_fun: Proof.context -> string * typ -> term list -> bool + val is_builtin_fun_ext: Proof.context -> string * typ -> term list -> bool +end + +structure Old_SMT_Builtin: OLD_SMT_BUILTIN = +struct + + +(* built-in tables *) + +datatype ('a, 'b) kind = Ext of 'a | Int of 'b + +type ('a, 'b) ttab = ((typ * ('a, 'b) kind) Ord_List.T) Old_SMT_Utils.dict + +fun typ_ord ((T, _), (U, _)) = + let + fun tord (TVar _, Type _) = GREATER + | tord (Type _, TVar _) = LESS + | tord (Type (n, Ts), Type (m, Us)) = + if n = m then list_ord tord (Ts, Us) + else Term_Ord.typ_ord (T, U) + | tord TU = Term_Ord.typ_ord TU + in tord (T, U) end + +fun insert_ttab cs T f = + Old_SMT_Utils.dict_map_default (cs, []) + (Ord_List.insert typ_ord (perhaps (try Logic.varifyT_global) T, f)) + +fun merge_ttab ttabp = + Old_SMT_Utils.dict_merge (Ord_List.merge typ_ord) ttabp + +fun lookup_ttab ctxt ttab T = + let fun match (U, _) = Sign.typ_instance (Proof_Context.theory_of ctxt) (T, U) + in + get_first (find_first match) + (Old_SMT_Utils.dict_lookup ttab (Old_SMT_Config.solver_class_of ctxt)) + end + +type ('a, 'b) btab = ('a, 'b) ttab Symtab.table + +fun insert_btab cs n T f = + Symtab.map_default (n, []) (insert_ttab cs T f) + +fun merge_btab btabp = Symtab.join (K merge_ttab) btabp + +fun lookup_btab ctxt btab (n, T) = + (case Symtab.lookup btab n of + NONE => NONE + | SOME ttab => lookup_ttab ctxt ttab T) + +type 'a bfun = Proof.context -> typ -> term list -> 'a + +type bfunr = string * int * term list * (term list -> term) + +structure Builtins = Generic_Data +( + type T = + (typ -> bool, (typ -> string option) * (typ -> int -> string option)) ttab * + (term list bfun, bfunr option bfun) btab + val empty = ([], 
Symtab.empty) + val extend = I + fun merge ((t1, b1), (t2, b2)) = (merge_ttab (t1, t2), merge_btab (b1, b2)) +) + +fun filter_ttab keep_T = map (apsnd (filter (keep_T o fst))) + +fun filter_builtins keep_T = + Context.proof_map (Builtins.map (fn (ttab, btab) => + (filter_ttab keep_T ttab, Symtab.map (K (filter_ttab keep_T)) btab))) + + +(* built-in types *) + +fun add_builtin_typ cs (T, f, g) = + Builtins.map (apfst (insert_ttab cs T (Int (f, g)))) + +fun add_builtin_typ_ext (T, f) = + Builtins.map (apfst (insert_ttab Old_SMT_Utils.basicC T (Ext f))) + +fun lookup_builtin_typ ctxt = + lookup_ttab ctxt (fst (Builtins.get (Context.Proof ctxt))) + +fun dest_builtin_typ ctxt T = + (case lookup_builtin_typ ctxt T of + SOME (_, Int (f, _)) => f T + | _ => NONE) + +fun is_builtin_typ_ext ctxt T = + (case lookup_builtin_typ ctxt T of + SOME (_, Int (f, _)) => is_some (f T) + | SOME (_, Ext f) => f T + | NONE => false) + + +(* built-in numbers *) + +fun dest_builtin_num ctxt t = + (case try HOLogic.dest_number t of + NONE => NONE + | SOME (T, i) => + if i < 0 then NONE else + (case lookup_builtin_typ ctxt T of + SOME (_, Int (_, g)) => g T i |> Option.map (rpair T) + | _ => NONE)) + +val is_builtin_num = is_some oo dest_builtin_num + +fun is_builtin_num_ext ctxt t = + (case try HOLogic.dest_number t of + NONE => false + | SOME (T, _) => is_builtin_typ_ext ctxt T) + + +(* built-in functions *) + +fun add_builtin_fun cs ((n, T), f) = + Builtins.map (apsnd (insert_btab cs n T (Int f))) + +fun add_builtin_fun' cs (t, n) = + let + val c as (m, T) = Term.dest_Const t + fun app U ts = Term.list_comb (Const (m, U), ts) + fun bfun _ U ts = SOME (n, length (Term.binder_types T), ts, app U) + in add_builtin_fun cs (c, bfun) end + +fun add_builtin_fun_ext ((n, T), f) = + Builtins.map (apsnd (insert_btab Old_SMT_Utils.basicC n T (Ext f))) + +fun add_builtin_fun_ext' c = add_builtin_fun_ext (c, fn _ => fn _ => I) + +fun add_builtin_fun_ext'' n context = + let val thy = Context.theory_of context + in add_builtin_fun_ext' (n, Sign.the_const_type thy n) context end + +fun lookup_builtin_fun ctxt = + lookup_btab ctxt (snd (Builtins.get (Context.Proof ctxt))) + +fun dest_builtin_fun ctxt (c as (_, T)) ts = + (case lookup_builtin_fun ctxt c of + SOME (_, Int f) => f ctxt T ts + | _ => NONE) + +fun dest_builtin_eq ctxt t u = + let + val aT = TFree (Name.aT, @{sort type}) + val c = (@{const_name HOL.eq}, aT --> aT --> @{typ bool}) + fun mk ts = Term.list_comb (HOLogic.eq_const (Term.fastype_of (hd ts)), ts) + in + dest_builtin_fun ctxt c [] + |> Option.map (fn (n, i, _, _) => (n, i, [t, u], mk)) + end + +fun special_builtin_fun pred ctxt (c as (_, T)) ts = + if pred (Term.body_type T, Term.binder_types T) then + dest_builtin_fun ctxt c ts + else NONE + +fun dest_builtin_pred ctxt = special_builtin_fun (equal @{typ bool} o fst) ctxt + +fun dest_builtin_conn ctxt = + special_builtin_fun (forall (equal @{typ bool}) o (op ::)) ctxt + +fun dest_builtin ctxt c ts = + let val t = Term.list_comb (Const c, ts) + in + (case dest_builtin_num ctxt t of + SOME (n, _) => SOME (n, 0, [], K t) + | NONE => dest_builtin_fun ctxt c ts) + end + +fun dest_builtin_fun_ext ctxt (c as (_, T)) ts = + (case lookup_builtin_fun ctxt c of + SOME (_, Int f) => f ctxt T ts |> Option.map (fn (_, _, us, _) => us) + | SOME (_, Ext f) => SOME (f ctxt T ts) + | NONE => NONE) + +fun dest_builtin_ext ctxt c ts = + if is_builtin_num_ext ctxt (Term.list_comb (Const c, ts)) then SOME [] + else dest_builtin_fun_ext ctxt c ts + +fun is_builtin_fun ctxt c ts = 
is_some (dest_builtin_fun ctxt c ts) + +fun is_builtin_fun_ext ctxt c ts = is_some (dest_builtin_fun_ext ctxt c ts) + +end diff --git a/src/test-gen/src/main/Old_SMT/old_smt_config.ML b/src/test-gen/src/main/Old_SMT/old_smt_config.ML new file mode 100644 index 0000000..318b2ce --- /dev/null +++ b/src/test-gen/src/main/Old_SMT/old_smt_config.ML @@ -0,0 +1,254 @@ +(* Title: HOL/Library/Old_SMT/old_smt_config.ML + Author: Sascha Boehme, TU Muenchen + +Configuration options and diagnostic tools for SMT. +*) + +signature OLD_SMT_CONFIG = +sig + (*solver*) + type solver_info = { + name: string, + class: Proof.context -> Old_SMT_Utils.class, + avail: unit -> bool, + options: Proof.context -> string list } + val add_solver: solver_info -> Context.generic -> Context.generic + val set_solver_options: string * string -> Context.generic -> Context.generic + val is_available: Proof.context -> string -> bool + val available_solvers_of: Proof.context -> string list + val select_solver: string -> Context.generic -> Context.generic + val solver_of: Proof.context -> string + val solver_class_of: Proof.context -> Old_SMT_Utils.class + val solver_options_of: Proof.context -> string list + + (*options*) + val oracle: bool Config.T + val datatypes: bool Config.T + val timeout: real Config.T + val random_seed: int Config.T + val read_only_certificates: bool Config.T + val verbose: bool Config.T + val trace: bool Config.T + val trace_used_facts: bool Config.T + val monomorph_limit: int Config.T + val monomorph_instances: int Config.T + val infer_triggers: bool Config.T + val filter_only_facts: bool Config.T + val debug_files: string Config.T + + (*tools*) + val with_timeout: Proof.context -> ('a -> 'b) -> 'a -> 'b + + (*diagnostics*) + val trace_msg: Proof.context -> ('a -> string) -> 'a -> unit + val verbose_msg: Proof.context -> ('a -> string) -> 'a -> unit + + (*certificates*) + val select_certificates: string -> Context.generic -> Context.generic + val certificates_of: Proof.context -> Cache_IO.cache option + + (*setup*) + val setup: theory -> theory + val print_setup: Proof.context -> unit +end + +structure Old_SMT_Config: OLD_SMT_CONFIG = +struct + +(* solver *) + +type solver_info = { + name: string, + class: Proof.context -> Old_SMT_Utils.class, + avail: unit -> bool, + options: Proof.context -> string list } + +(* FIXME just one data slot (record) per program unit *) +structure Solvers = Generic_Data +( + type T = (solver_info * string list) Symtab.table * string option + val empty = (Symtab.empty, NONE) + val extend = I + fun merge ((ss1, s1), (ss2, s2)) = + (Symtab.merge (K true) (ss1, ss2), merge_options (s1, s2)) +) + +fun set_solver_options (name, options) = + let val opts = String.tokens (Symbol.is_ascii_blank o str) options + in Solvers.map (apfst (Symtab.map_entry name (apsnd (K opts)))) end + +fun add_solver (info as {name, ...} : solver_info) context = + if Symtab.defined (fst (Solvers.get context)) name then + error ("Solver already registered: " ^ quote name) + else + context + |> Solvers.map (apfst (Symtab.update (name, (info, [])))) + |> Context.map_theory (Attrib.setup (Binding.name ("old_" ^ name ^ "_options")) + (Scan.lift (@{keyword "="} |-- Args.name) >> + (Thm.declaration_attribute o K o set_solver_options o pair name)) + ("additional command line options for SMT solver " ^ quote name)) + +fun all_solvers_of ctxt = Symtab.keys (fst (Solvers.get (Context.Proof ctxt))) + +fun solver_name_of ctxt = snd (Solvers.get (Context.Proof ctxt)) + +fun is_available ctxt name = + (case 
Symtab.lookup (fst (Solvers.get (Context.Proof ctxt))) name of + SOME ({avail, ...}, _) => avail () + | NONE => false) + +fun available_solvers_of ctxt = + filter (is_available ctxt) (all_solvers_of ctxt) + +fun warn_solver (Context.Proof ctxt) name = + if Context_Position.is_visible ctxt then + warning ("The SMT solver " ^ quote name ^ " is not installed.") + else () + | warn_solver _ _ = (); + +fun select_solver name context = + let + val ctxt = Context.proof_of context + val upd = Solvers.map (apsnd (K (SOME name))) + in + if not (member (op =) (all_solvers_of ctxt) name) then + error ("Trying to select unknown solver: " ^ quote name) + else if not (is_available ctxt name) then + (warn_solver context name; upd context) + else upd context + end + +fun no_solver_err () = error "No SMT solver selected" + +fun solver_of ctxt = + (case solver_name_of ctxt of + SOME name => name + | NONE => no_solver_err ()) + +fun solver_info_of default select ctxt = + (case Solvers.get (Context.Proof ctxt) of + (solvers, SOME name) => select (Symtab.lookup solvers name) + | (_, NONE) => default ()) + +fun solver_class_of ctxt = + let fun class_of ({class, ...}: solver_info, _) = class ctxt + in solver_info_of no_solver_err (class_of o the) ctxt end + +fun solver_options_of ctxt = + let + fun all_options NONE = [] + | all_options (SOME ({options, ...} : solver_info, opts)) = + opts @ options ctxt + in solver_info_of (K []) all_options ctxt end + +val setup_solver = + Attrib.setup @{binding old_smt_solver} + (Scan.lift (@{keyword "="} |-- Args.name) >> + (Thm.declaration_attribute o K o select_solver)) + "SMT solver configuration" + + +(* options *) + +val oracle = Attrib.setup_config_bool @{binding old_smt_oracle} (K true) +val datatypes = Attrib.setup_config_bool @{binding old_smt_datatypes} (K false) +val timeout = Attrib.setup_config_real @{binding old_smt_timeout} (K 30.0) +val random_seed = Attrib.setup_config_int @{binding old_smt_random_seed} (K 1) +val read_only_certificates = Attrib.setup_config_bool @{binding old_smt_read_only_certificates} (K false) +val verbose = Attrib.setup_config_bool @{binding old_smt_verbose} (K true) +val trace = Attrib.setup_config_bool @{binding old_smt_trace} (K false) +val trace_used_facts = Attrib.setup_config_bool @{binding old_smt_trace_used_facts} (K false) +val monomorph_limit = Attrib.setup_config_int @{binding old_smt_monomorph_limit} (K 10) +val monomorph_instances = Attrib.setup_config_int @{binding old_smt_monomorph_instances} (K 500) +val infer_triggers = Attrib.setup_config_bool @{binding old_smt_infer_triggers} (K false) +val filter_only_facts = Attrib.setup_config_bool @{binding old_smt_filter_only_facts} (K false) +val debug_files = Attrib.setup_config_string @{binding old_smt_debug_files} (K "") + + +(* diagnostics *) + +fun cond_trace flag f x = if flag then tracing ("SMT: " ^ f x) else () + +fun verbose_msg ctxt = cond_trace (Config.get ctxt verbose) + +fun trace_msg ctxt = cond_trace (Config.get ctxt trace) + + +(* tools *) + +fun with_timeout ctxt f x = + Timeout.apply (seconds (Config.get ctxt timeout)) f x + handle Timeout.TIMEOUT _ => raise Old_SMT_Failure.SMT Old_SMT_Failure.Time_Out + + +(* certificates *) + +(* FIXME just one data slot (record) per program unit *) +structure Certificates = Generic_Data +( + type T = Cache_IO.cache option + val empty = NONE + val extend = I + fun merge (s, _) = s (* FIXME merge options!? 
*) +) + +val get_certificates_path = + Option.map (Cache_IO.cache_path_of) o Certificates.get o Context.Proof + +fun select_certificates name context = context |> Certificates.put ( + if name = "" then NONE + else + Path.explode name + |> Path.append (Resources.master_directory (Context.theory_of context)) + |> SOME o Cache_IO.unsynchronized_init) + +val certificates_of = Certificates.get o Context.Proof + +val setup_certificates = + Attrib.setup @{binding old_smt_certificates} + (Scan.lift (@{keyword "="} |-- Args.name) >> + (Thm.declaration_attribute o K o select_certificates)) + "SMT certificates configuration" + + +(* setup *) + +val setup = + setup_solver #> + setup_certificates + +fun print_setup ctxt = + let + fun string_of_bool b = if b then "true" else "false" + + val names = available_solvers_of ctxt + val ns = if null names then ["(none)"] else sort_strings names + val n = the_default "(none)" (solver_name_of ctxt) + val opts = solver_options_of ctxt + + val t = string_of_real (Config.get ctxt timeout) + + val certs_filename = + (case get_certificates_path ctxt of + SOME path => Path.print path + | NONE => "(disabled)") + in + Pretty.writeln (Pretty.big_list "SMT setup:" [ + Pretty.str ("Current SMT solver: " ^ n), + Pretty.str ("Current SMT solver options: " ^ space_implode " " opts), + Pretty.str_list "Available SMT solvers: " "" ns, + Pretty.str ("Current timeout: " ^ t ^ " seconds"), + Pretty.str ("With proofs: " ^ + string_of_bool (not (Config.get ctxt oracle))), + Pretty.str ("Certificates cache: " ^ certs_filename), + Pretty.str ("Fixed certificates: " ^ + string_of_bool (Config.get ctxt read_only_certificates))]) + end + +val _ = + Outer_Syntax.command @{command_keyword old_smt_status} + "show the available SMT solvers, the currently selected SMT solver, \ + \and the values of SMT configuration options" + (Scan.succeed (Toplevel.keep (print_setup o Toplevel.context_of))) + +end diff --git a/src/test-gen/src/main/Old_SMT/old_smt_datatypes.ML b/src/test-gen/src/main/Old_SMT/old_smt_datatypes.ML new file mode 100644 index 0000000..971dc74 --- /dev/null +++ b/src/test-gen/src/main/Old_SMT/old_smt_datatypes.ML @@ -0,0 +1,94 @@ +(* Title: HOL/Library/Old_SMT/old_smt_datatypes.ML + Author: Sascha Boehme, TU Muenchen + +Collector functions for common type declarations and their representation +as algebraic datatypes. 
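+
+In short, add_decls collects for a given type its constructors together with
+fresh selector functions, either from a free-constructor (ctr_sugar)
+declaration or from a typedef whose Abs_inverse holds unconditionally, and
+returns the declaration groups ordered so that dependencies come first.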
+*) + +signature OLD_SMT_DATATYPES = +sig + val add_decls: typ -> + (typ * (term * term list) list) list list * Proof.context -> + (typ * (term * term list) list) list list * Proof.context +end + +structure Old_SMT_Datatypes: OLD_SMT_DATATYPES = +struct + +fun mk_selectors T Ts = + Variable.variant_fixes (replicate (length Ts) "select") + #>> map2 (fn U => fn n => Free (n, T --> U)) Ts + + +(* free constructor type declarations *) + +fun get_ctr_sugar_decl ({ctrs, ...} : Ctr_Sugar.ctr_sugar) T Ts ctxt = + let + fun mk_constr ctr0 = + let val ctr = Ctr_Sugar.mk_ctr Ts ctr0 in + mk_selectors T (binder_types (fastype_of ctr)) #>> pair ctr + end + in + fold_map mk_constr ctrs ctxt + |>> (pair T #> single) + end + + +(* typedef declarations *) + +fun get_typedef_decl (({Abs_name, Rep_name, abs_type, rep_type, ...}, {Abs_inverse, ...}) + : Typedef.info) T Ts = + if can (curry (op RS) @{thm UNIV_I}) Abs_inverse then + let + val env = snd (Term.dest_Type abs_type) ~~ Ts + val instT = Term.map_atyps (perhaps (AList.lookup (op =) env)) + + val constr = Const (Abs_name, instT (rep_type --> abs_type)) + val select = Const (Rep_name, instT (abs_type --> rep_type)) + in [(T, [(constr, [select])])] end + else + [] + + +(* collection of declarations *) + +fun declared declss T = exists (exists (equal T o fst)) declss +fun declared' dss T = exists (exists (equal T o fst) o snd) dss + +fun get_decls T n Ts ctxt = + (case Ctr_Sugar.ctr_sugar_of ctxt n of + SOME ctr_sugar => get_ctr_sugar_decl ctr_sugar T Ts ctxt + | NONE => + (case Typedef.get_info ctxt n of + [] => ([], ctxt) + | info :: _ => (get_typedef_decl info T Ts, ctxt))) + +fun add_decls T (declss, ctxt) = + let + fun depends Ts ds = exists (member (op =) (map fst ds)) Ts + + fun add (TFree _) = I + | add (TVar _) = I + | add (T as Type (@{type_name fun}, _)) = + fold add (Term.body_type T :: Term.binder_types T) + | add @{typ bool} = I + | add (T as Type (n, Ts)) = (fn (dss, ctxt1) => + if declared declss T orelse declared' dss T then (dss, ctxt1) + else if Old_SMT_Builtin.is_builtin_typ_ext ctxt1 T then (dss, ctxt1) + else + (case get_decls T n Ts ctxt1 of + ([], _) => (dss, ctxt1) + | (ds, ctxt2) => + let + val constrTs = + maps (map (snd o Term.dest_Const o fst) o snd) ds + val Us = fold (union (op =) o Term.binder_types) constrTs [] + + fun ins [] = [(Us, ds)] + | ins ((Uds as (Us', _)) :: Udss) = + if depends Us' ds then (Us, ds) :: Uds :: Udss + else Uds :: ins Udss + in fold add Us (ins dss, ctxt2) end)) + in add T ([], ctxt) |>> append declss o map snd end + +end diff --git a/src/test-gen/src/main/Old_SMT/old_smt_failure.ML b/src/test-gen/src/main/Old_SMT/old_smt_failure.ML new file mode 100644 index 0000000..394287c --- /dev/null +++ b/src/test-gen/src/main/Old_SMT/old_smt_failure.ML @@ -0,0 +1,61 @@ +(* Title: HOL/Library/Old_SMT/old_smt_failure.ML + Author: Sascha Boehme, TU Muenchen + +Failures and exception of SMT. 
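+
+For example, string_of_failure renders Time_Out as "Timed out" and
+Abnormal_Termination err as "Solver terminated abnormally with error code" plus
+the code, while Counterexample values are pretty-printed together with their
+free-variable constraints and constant definitions.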
+*) + +signature OLD_SMT_FAILURE = +sig + type counterexample = { + is_real_cex: bool, + free_constraints: term list, + const_defs: term list} + datatype failure = + Counterexample of counterexample | + Time_Out | + Out_Of_Memory | + Abnormal_Termination of int | + Other_Failure of string + val pretty_counterexample: Proof.context -> counterexample -> Pretty.T + val string_of_failure: Proof.context -> failure -> string + exception SMT of failure +end + +structure Old_SMT_Failure: OLD_SMT_FAILURE = +struct + +type counterexample = { + is_real_cex: bool, + free_constraints: term list, + const_defs: term list} + +datatype failure = + Counterexample of counterexample | + Time_Out | + Out_Of_Memory | + Abnormal_Termination of int | + Other_Failure of string + +fun pretty_counterexample ctxt {is_real_cex, free_constraints, const_defs} = + let + val msg = + if is_real_cex then "Counterexample found (possibly spurious)" + else "Potential counterexample found" + in + if null free_constraints andalso null const_defs then Pretty.str msg + else + Pretty.big_list (msg ^ ":") + (map (Syntax.pretty_term ctxt) (free_constraints @ const_defs)) + end + +fun string_of_failure ctxt (Counterexample cex) = + Pretty.string_of (pretty_counterexample ctxt cex) + | string_of_failure _ Time_Out = "Timed out" + | string_of_failure _ Out_Of_Memory = "Ran out of memory" + | string_of_failure _ (Abnormal_Termination err) = + "Solver terminated abnormally with error code " ^ string_of_int err + | string_of_failure _ (Other_Failure msg) = msg + +exception SMT of failure + +end diff --git a/src/test-gen/src/main/Old_SMT/old_smt_normalize.ML b/src/test-gen/src/main/Old_SMT/old_smt_normalize.ML new file mode 100644 index 0000000..18cf0b7 --- /dev/null +++ b/src/test-gen/src/main/Old_SMT/old_smt_normalize.ML @@ -0,0 +1,652 @@ +(* Title: HOL/Library/Old_SMT/old_smt_normalize.ML + Author: Sascha Boehme, TU Muenchen + +Normalization steps on theorems required by SMT solvers. 
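+
+Roughly, the normalize entry point at the end of this file chains the following
+steps:
+  - general normalization: instantiate elimination rules, unfold definitions,
+    atomize meta-connectives, expand Ex1/Ball/Bex, check or infer triggers and
+    quantifier weights;
+  - unfolding: rewrite case_bool as if-then-else, unfold abs/min/max, embed
+    nat operations into int;
+  - monomorphization of schematic constants;
+  - numeral normalization and addition of the nat/int embedding lemmas;
+  - solver-class specific extra normalizations registered via add_extra_norm.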
+*) + +signature OLD_SMT_NORMALIZE = +sig + val drop_fact_warning: Proof.context -> thm -> unit + val atomize_conv: Proof.context -> conv + type extra_norm = Proof.context -> thm list * thm list -> thm list * thm list + val add_extra_norm: Old_SMT_Utils.class * extra_norm -> Context.generic -> + Context.generic + val normalize: (int * (int option * thm)) list -> Proof.context -> + (int * thm) list * Proof.context + val setup: theory -> theory +end + +structure Old_SMT_Normalize: OLD_SMT_NORMALIZE = +struct + +fun drop_fact_warning ctxt = + Old_SMT_Config.verbose_msg ctxt (prefix "Warning: dropping assumption: " o + Thm.string_of_thm ctxt) + + +(* general theorem normalizations *) + +(** instantiate elimination rules **) + +local + val (cpfalse, cfalse) = + `Old_SMT_Utils.mk_cprop (Thm.cterm_of @{context} @{const False}) + + fun inst f ct thm = + let val cv = f (Drule.strip_imp_concl (Thm.cprop_of thm)) + in Thm.instantiate ([], [(dest_Var (Thm.term_of cv), ct)]) thm end +in + +fun instantiate_elim thm = + (case Thm.concl_of thm of + @{const Trueprop} $ Var (_, @{typ bool}) => inst Thm.dest_arg cfalse thm + | Var _ => inst I cpfalse thm + | _ => thm) + +end + + +(** normalize definitions **) + +fun norm_def thm = + (case Thm.prop_of thm of + @{const Trueprop} $ (Const (@{const_name HOL.eq}, _) $ _ $ Abs _) => + norm_def (thm RS @{thm fun_cong}) + | Const (@{const_name Pure.eq}, _) $ _ $ Abs _ => + norm_def (thm RS @{thm meta_eq_to_obj_eq}) + | _ => thm) + + +(** atomization **) + +fun atomize_conv ctxt ct = + (case Thm.term_of ct of + @{const Pure.imp} $ _ $ _ => + Conv.binop_conv (atomize_conv ctxt) then_conv + Conv.rewr_conv @{thm atomize_imp} + | Const (@{const_name Pure.eq}, _) $ _ $ _ => + Conv.binop_conv (atomize_conv ctxt) then_conv + Conv.rewr_conv @{thm atomize_eq} + | Const (@{const_name Pure.all}, _) $ Abs _ => + Conv.binder_conv (atomize_conv o snd) ctxt then_conv + Conv.rewr_conv @{thm atomize_all} + | _ => Conv.all_conv) ct + +val setup_atomize = + fold Old_SMT_Builtin.add_builtin_fun_ext'' [@{const_name Pure.imp}, + @{const_name Pure.eq}, @{const_name Pure.all}, @{const_name Trueprop}] + + +(** unfold special quantifiers **) + +local + val ex1_def = mk_meta_eq @{lemma + "Ex1 = (%P. EX x. P x & (ALL y. P y --> y = x))" + by (rule ext) (simp only: Ex1_def)} + + val ball_def = mk_meta_eq @{lemma "Ball = (%A P. ALL x. x : A --> P x)" + by (rule ext)+ (rule Ball_def)} + + val bex_def = mk_meta_eq @{lemma "Bex = (%A P. EX x. 
x : A & P x)" + by (rule ext)+ (rule Bex_def)} + + val special_quants = [(@{const_name Ex1}, ex1_def), + (@{const_name Ball}, ball_def), (@{const_name Bex}, bex_def)] + + fun special_quant (Const (n, _)) = AList.lookup (op =) special_quants n + | special_quant _ = NONE + + fun special_quant_conv _ ct = + (case special_quant (Thm.term_of ct) of + SOME thm => Conv.rewr_conv thm + | NONE => Conv.all_conv) ct +in + +fun unfold_special_quants_conv ctxt = + Old_SMT_Utils.if_exists_conv (is_some o special_quant) + (Conv.top_conv special_quant_conv ctxt) + +val setup_unfolded_quants = + fold (Old_SMT_Builtin.add_builtin_fun_ext'' o fst) special_quants + +end + + +(** trigger inference **) + +local + (*** check trigger syntax ***) + + fun dest_trigger (Const (@{const_name pat}, _) $ _) = SOME true + | dest_trigger (Const (@{const_name nopat}, _) $ _) = SOME false + | dest_trigger _ = NONE + + fun eq_list [] = false + | eq_list (b :: bs) = forall (equal b) bs + + fun proper_trigger t = + t + |> these o try HOLogic.dest_list + |> map (map_filter dest_trigger o these o try HOLogic.dest_list) + |> (fn [] => false | bss => forall eq_list bss) + + fun proper_quant inside f t = + (case t of + Const (@{const_name All}, _) $ Abs (_, _, u) => proper_quant true f u + | Const (@{const_name Ex}, _) $ Abs (_, _, u) => proper_quant true f u + | @{const trigger} $ p $ u => + (if inside then f p else false) andalso proper_quant false f u + | Abs (_, _, u) => proper_quant false f u + | u1 $ u2 => proper_quant false f u1 andalso proper_quant false f u2 + | _ => true) + + fun check_trigger_error ctxt t = + error ("SMT triggers must only occur under quantifier and multipatterns " ^ + "must have the same kind: " ^ Syntax.string_of_term ctxt t) + + fun check_trigger_conv ctxt ct = + if proper_quant false proper_trigger (Old_SMT_Utils.term_of ct) then + Conv.all_conv ct + else check_trigger_error ctxt (Thm.term_of ct) + + + (*** infer simple triggers ***) + + fun dest_cond_eq ct = + (case Thm.term_of ct of + Const (@{const_name HOL.eq}, _) $ _ $ _ => Thm.dest_binop ct + | @{const HOL.implies} $ _ $ _ => dest_cond_eq (Thm.dest_arg ct) + | _ => raise CTERM ("no equation", [ct])) + + fun get_constrs thy (Type (n, _)) = these (Old_Datatype_Data.get_constrs thy n) + | get_constrs _ _ = [] + + fun is_constr thy (n, T) = + let fun match (m, U) = m = n andalso Sign.typ_instance thy (T, U) + in can (the o find_first match o get_constrs thy o Term.body_type) T end + + fun is_constr_pat thy t = + (case Term.strip_comb t of + (Free _, []) => true + | (Const c, ts) => is_constr thy c andalso forall (is_constr_pat thy) ts + | _ => false) + + fun is_simp_lhs ctxt t = + (case Term.strip_comb t of + (Const c, ts as _ :: _) => + not (Old_SMT_Builtin.is_builtin_fun_ext ctxt c ts) andalso + forall (is_constr_pat (Proof_Context.theory_of ctxt)) ts + | _ => false) + + fun has_all_vars vs t = + subset (op aconv) (vs, map Free (Term.add_frees t [])) + + fun minimal_pats vs ct = + if has_all_vars vs (Thm.term_of ct) then + (case Thm.term_of ct of + _ $ _ => + (case apply2 (minimal_pats vs) (Thm.dest_comb ct) of + ([], []) => [[ct]] + | (ctss, ctss') => union (eq_set (op aconvc)) ctss ctss') + | _ => []) + else [] + + fun proper_mpat _ _ _ [] = false + | proper_mpat thy gen u cts = + let + val tps = (op ~~) (`gen (map Thm.term_of cts)) + fun some_match u = tps |> exists (fn (t', t) => + Pattern.matches thy (t', u) andalso not (t aconv u)) + in not (Term.exists_subterm some_match u) end + + val pat = + Old_SMT_Utils.mk_const_pat @{theory} 
@{const_name pat} Old_SMT_Utils.destT1 + fun mk_pat ct = Thm.apply (Old_SMT_Utils.instT' ct pat) ct + + fun mk_clist T = apply2 (Thm.cterm_of @{context}) (HOLogic.cons_const T, HOLogic.nil_const T) + fun mk_list (ccons, cnil) f cts = fold_rev (Thm.mk_binop ccons o f) cts cnil + val mk_pat_list = mk_list (mk_clist @{typ pattern}) + val mk_mpat_list = mk_list (mk_clist @{typ "pattern list"}) + fun mk_trigger ctss = mk_mpat_list (mk_pat_list mk_pat) ctss + + val trigger_eq = + mk_meta_eq @{lemma "p = trigger t p" by (simp add: trigger_def)} + + fun insert_trigger_conv [] ct = Conv.all_conv ct + | insert_trigger_conv ctss ct = + let val (ctr, cp) = Thm.dest_binop (Thm.rhs_of trigger_eq) ||> rpair ct + in Thm.instantiate ([], map (apfst (dest_Var o Thm.term_of)) [cp, (ctr, mk_trigger ctss)]) trigger_eq end + + fun infer_trigger_eq_conv outer_ctxt (ctxt, cvs) ct = + let + val (lhs, rhs) = dest_cond_eq ct + + val vs = map Thm.term_of cvs + val thy = Proof_Context.theory_of ctxt + + fun get_mpats ct = + if is_simp_lhs ctxt (Thm.term_of ct) then minimal_pats vs ct + else [] + val gen = Variable.export_terms ctxt outer_ctxt + val filter_mpats = filter (proper_mpat thy gen (Thm.term_of rhs)) + + in insert_trigger_conv (filter_mpats (get_mpats lhs)) ct end + + fun has_trigger (@{const trigger} $ _ $ _) = true + | has_trigger _ = false + + fun try_trigger_conv cv ct = + if Old_SMT_Utils.under_quant has_trigger (Old_SMT_Utils.term_of ct) then + Conv.all_conv ct + else Conv.try_conv cv ct + + fun infer_trigger_conv ctxt = + if Config.get ctxt Old_SMT_Config.infer_triggers then + try_trigger_conv + (Old_SMT_Utils.under_quant_conv (infer_trigger_eq_conv ctxt) ctxt) + else Conv.all_conv +in + +fun trigger_conv ctxt = + Old_SMT_Utils.prop_conv + (check_trigger_conv ctxt then_conv infer_trigger_conv ctxt) + +val setup_trigger = + fold Old_SMT_Builtin.add_builtin_fun_ext'' + [@{const_name pat}, @{const_name nopat}, @{const_name trigger}] + +end + + +(** adding quantifier weights **) + +local + (*** check weight syntax ***) + + val has_no_weight = + not o Term.exists_subterm (fn @{const weight} => true | _ => false) + + fun is_weight (@{const weight} $ w $ t) = + (case try HOLogic.dest_number w of + SOME (_, i) => i >= 0 andalso has_no_weight t + | _ => false) + | is_weight t = has_no_weight t + + fun proper_trigger (@{const trigger} $ _ $ t) = is_weight t + | proper_trigger t = is_weight t + + fun check_weight_error ctxt t = + error ("SMT weight must be a non-negative number and must only occur " ^ + "under the top-most quantifier and an optional trigger: " ^ + Syntax.string_of_term ctxt t) + + fun check_weight_conv ctxt ct = + if Old_SMT_Utils.under_quant proper_trigger (Old_SMT_Utils.term_of ct) then + Conv.all_conv ct + else check_weight_error ctxt (Thm.term_of ct) + + + (*** insertion of weights ***) + + fun under_trigger_conv cv ct = + (case Thm.term_of ct of + @{const trigger} $ _ $ _ => Conv.arg_conv cv + | _ => cv) ct + + val weight_eq = + mk_meta_eq @{lemma "p = weight i p" by (simp add: weight_def)} + fun mk_weight_eq w = + let val cv = Thm.dest_arg1 (Thm.rhs_of weight_eq) + in + Thm.instantiate ([], [(dest_Var (Thm.term_of cv), Numeral.mk_cnumber @{ctyp int} w)]) + weight_eq + end + + fun add_weight_conv NONE _ = Conv.all_conv + | add_weight_conv (SOME weight) ctxt = + let val cv = Conv.rewr_conv (mk_weight_eq weight) + in Old_SMT_Utils.under_quant_conv (K (under_trigger_conv cv)) ctxt end +in + +fun weight_conv weight ctxt = + Old_SMT_Utils.prop_conv + (check_weight_conv ctxt then_conv 
add_weight_conv weight ctxt) + +val setup_weight = Old_SMT_Builtin.add_builtin_fun_ext'' @{const_name weight} + +end + + +(** combined general normalizations **) + +fun gen_normalize1_conv ctxt weight = + atomize_conv ctxt then_conv + unfold_special_quants_conv ctxt then_conv + Thm.beta_conversion true then_conv + trigger_conv ctxt then_conv + weight_conv weight ctxt + +fun gen_normalize1 ctxt weight thm = + thm + |> instantiate_elim + |> norm_def + |> Conv.fconv_rule (Thm.beta_conversion true then_conv Thm.eta_conversion) + |> Drule.forall_intr_vars + |> Conv.fconv_rule (gen_normalize1_conv ctxt weight) + +fun gen_norm1_safe ctxt (i, (weight, thm)) = + (case try (gen_normalize1 ctxt weight) thm of + SOME thm' => SOME (i, thm') + | NONE => (drop_fact_warning ctxt thm; NONE)) + +fun gen_normalize ctxt iwthms = map_filter (gen_norm1_safe ctxt) iwthms + + + +(* unfolding of definitions and theory-specific rewritings *) + +fun expand_head_conv cv ct = + (case Thm.term_of ct of + _ $ _ => + Conv.fun_conv (expand_head_conv cv) then_conv + Conv.try_conv (Thm.beta_conversion false) + | _ => cv) ct + + +(** rewrite bool case expressions as if expressions **) + +local + fun is_case_bool (Const (@{const_name "bool.case_bool"}, _)) = true + | is_case_bool _ = false + + val thm = mk_meta_eq @{lemma + "case_bool = (%x y P. if P then x else y)" by (rule ext)+ simp} + + fun unfold_conv _ = + Old_SMT_Utils.if_true_conv (is_case_bool o Term.head_of) + (expand_head_conv (Conv.rewr_conv thm)) +in + +fun rewrite_case_bool_conv ctxt = + Old_SMT_Utils.if_exists_conv is_case_bool (Conv.top_conv unfold_conv ctxt) + +val setup_case_bool = + Old_SMT_Builtin.add_builtin_fun_ext'' @{const_name "bool.case_bool"} + +end + + +(** unfold abs, min and max **) + +local + val abs_def = mk_meta_eq @{lemma + "abs = (%a::'a::abs_if. if a < 0 then - a else a)" + by (rule ext) (rule abs_if)} + + val min_def = mk_meta_eq @{lemma "min = (%a b. if a <= b then a else b)" + by (rule ext)+ (rule min_def)} + + val max_def = mk_meta_eq @{lemma "max = (%a b. if a <= b then b else a)" + by (rule ext)+ (rule max_def)} + + val defs = [(@{const_name min}, min_def), (@{const_name max}, max_def), + (@{const_name abs}, abs_def)] + + fun is_builtinT ctxt T = + Old_SMT_Builtin.is_builtin_typ_ext ctxt (Term.domain_type T) + + fun abs_min_max ctxt (Const (n, T)) = + (case AList.lookup (op =) defs n of + NONE => NONE + | SOME thm => if is_builtinT ctxt T then SOME thm else NONE) + | abs_min_max _ _ = NONE + + fun unfold_amm_conv ctxt ct = + (case abs_min_max ctxt (Term.head_of (Thm.term_of ct)) of + SOME thm => expand_head_conv (Conv.rewr_conv thm) + | NONE => Conv.all_conv) ct +in + +fun unfold_abs_min_max_conv ctxt = + Old_SMT_Utils.if_exists_conv (is_some o abs_min_max ctxt) + (Conv.top_conv unfold_amm_conv ctxt) + +val setup_abs_min_max = fold (Old_SMT_Builtin.add_builtin_fun_ext'' o fst) defs + +end + + +(** embedding of standard natural number operations into integer operations **) + +local + val nat_embedding = @{lemma + "ALL n. nat (int n) = n" + "ALL i. i >= 0 --> int (nat i) = i" + "ALL i. 
i < 0 --> int (nat i) = 0" + by simp_all} + + val simple_nat_ops = [ + @{const less (nat)}, @{const less_eq (nat)}, + @{const Suc}, @{const plus (nat)}, @{const minus (nat)}] + + val mult_nat_ops = + [@{const times (nat)}, @{const divide (nat)}, @{const modulo (nat)}] + + val nat_ops = simple_nat_ops @ mult_nat_ops + + val nat_consts = nat_ops @ [@{const numeral (nat)}, + @{const zero_class.zero (nat)}, @{const one_class.one (nat)}] + + val nat_int_coercions = [@{const of_nat (int)}, @{const nat}] + + val builtin_nat_ops = nat_int_coercions @ simple_nat_ops + + val is_nat_const = member (op aconv) nat_consts + + fun is_nat_const' @{const of_nat (int)} = true + | is_nat_const' t = is_nat_const t + + val expands = map mk_meta_eq @{lemma + "0 = nat 0" + "1 = nat 1" + "(numeral :: num => nat) = (%i. nat (numeral i))" + "op < = (%a b. int a < int b)" + "op <= = (%a b. int a <= int b)" + "Suc = (%a. nat (int a + 1))" + "op + = (%a b. nat (int a + int b))" + "op - = (%a b. nat (int a - int b))" + "op * = (%a b. nat (int a * int b))" + "op div = (%a b. nat (int a div int b))" + "op mod = (%a b. nat (int a mod int b))" + by (fastforce simp add: nat_mult_distrib nat_div_distrib nat_mod_distrib)+} + + val ints = map mk_meta_eq @{lemma + "int 0 = 0" + "int 1 = 1" + "int (Suc n) = int n + 1" + "int (n + m) = int n + int m" + "int (n - m) = int (nat (int n - int m))" + "int (n * m) = int n * int m" + "int (n div m) = int n div int m" + "int (n mod m) = int n mod int m" + by (auto simp add: of_nat_mult zdiv_int zmod_int)} + + val int_if = mk_meta_eq @{lemma + "int (if P then n else m) = (if P then int n else int m)" + by simp} + + fun mk_number_eq ctxt i lhs = + let + val eq = Old_SMT_Utils.mk_cequals lhs (Numeral.mk_cnumber @{ctyp int} i) + val tac = + Simplifier.simp_tac (put_simpset HOL_ss ctxt addsimps [@{thm of_nat_numeral}]) 1 + in Goal.norm_result ctxt (Goal.prove_internal ctxt [] eq (K tac)) end + + fun ite_conv cv1 cv2 = + Conv.combination_conv (Conv.combination_conv (Conv.arg_conv cv1) cv2) cv2 + + fun int_conv ctxt ct = + (case Thm.term_of ct of + @{const of_nat (int)} $ (n as (@{const numeral (nat)} $ _)) => + Conv.rewr_conv (mk_number_eq ctxt (snd (HOLogic.dest_number n)) ct) + | @{const of_nat (int)} $ _ => + (Conv.rewrs_conv ints then_conv Conv.sub_conv ints_conv ctxt) else_conv + (Conv.rewr_conv int_if then_conv + ite_conv (nat_conv ctxt) (int_conv ctxt)) else_conv + Conv.sub_conv (Conv.top_sweep_conv nat_conv) ctxt + | _ => Conv.no_conv) ct + + and ints_conv ctxt = Conv.top_sweep_conv int_conv ctxt + + and expand_conv ctxt = + Old_SMT_Utils.if_conv (is_nat_const o Term.head_of) + (expand_head_conv (Conv.rewrs_conv expands) then_conv ints_conv ctxt) + (int_conv ctxt) + + and nat_conv ctxt = Old_SMT_Utils.if_exists_conv is_nat_const' + (Conv.top_sweep_conv expand_conv ctxt) + + val uses_nat_int = Term.exists_subterm (member (op aconv) nat_int_coercions) +in + +val nat_as_int_conv = nat_conv + +fun add_nat_embedding thms = + if exists (uses_nat_int o Thm.prop_of) thms then (thms, nat_embedding) + else (thms, []) + +val setup_nat_as_int = + Old_SMT_Builtin.add_builtin_typ_ext (@{typ nat}, K true) #> + fold (Old_SMT_Builtin.add_builtin_fun_ext' o Term.dest_Const) builtin_nat_ops + +end + + +(** normalize numerals **) + +local + (* + rewrite Numeral1 into 1 + rewrite - 0 into 0 + *) + + fun is_irregular_number (Const (@{const_name numeral}, _) $ Const (@{const_name num.One}, _)) = + true + | is_irregular_number (Const (@{const_name uminus}, _) $ Const (@{const_name Groups.zero}, _)) = + true + 
| is_irregular_number _ = + false; + + fun is_strange_number ctxt t = is_irregular_number t andalso Old_SMT_Builtin.is_builtin_num ctxt t; + + val proper_num_ss = + simpset_of (put_simpset HOL_ss @{context} + addsimps @{thms Num.numeral_One minus_zero}) + + fun norm_num_conv ctxt = + Old_SMT_Utils.if_conv (is_strange_number ctxt) + (Simplifier.rewrite (put_simpset proper_num_ss ctxt)) Conv.no_conv +in + +fun normalize_numerals_conv ctxt = + Old_SMT_Utils.if_exists_conv (is_strange_number ctxt) + (Conv.top_sweep_conv norm_num_conv ctxt) + +end + + +(** combined unfoldings and rewritings **) + +fun unfold_conv ctxt = + rewrite_case_bool_conv ctxt then_conv + unfold_abs_min_max_conv ctxt then_conv + nat_as_int_conv ctxt then_conv + Thm.beta_conversion true + +fun unfold1 ctxt = map (apsnd (Conv.fconv_rule (unfold_conv ctxt))) + +fun burrow_ids f ithms = + let + val (is, thms) = split_list ithms + val (thms', extra_thms) = f thms + in (is ~~ thms') @ map (pair ~1) extra_thms end + +fun unfold2 ctxt ithms = + ithms + |> map (apsnd (Conv.fconv_rule (normalize_numerals_conv ctxt))) + |> burrow_ids add_nat_embedding + + + +(* overall normalization *) + +type extra_norm = Proof.context -> thm list * thm list -> thm list * thm list + +structure Extra_Norms = Generic_Data +( + type T = extra_norm Old_SMT_Utils.dict + val empty = [] + val extend = I + fun merge data = Old_SMT_Utils.dict_merge fst data +) + +fun add_extra_norm (cs, norm) = + Extra_Norms.map (Old_SMT_Utils.dict_update (cs, norm)) + +fun apply_extra_norms ctxt ithms = + let + val cs = Old_SMT_Config.solver_class_of ctxt + val es = Old_SMT_Utils.dict_lookup (Extra_Norms.get (Context.Proof ctxt)) cs + in burrow_ids (fold (fn e => e ctxt) es o rpair []) ithms end + +local + val ignored = member (op =) [@{const_name All}, @{const_name Ex}, + @{const_name Let}, @{const_name If}, @{const_name HOL.eq}] + + val schematic_consts_of = + let + fun collect (@{const trigger} $ p $ t) = + collect_trigger p #> collect t + | collect (t $ u) = collect t #> collect u + | collect (Abs (_, _, t)) = collect t + | collect (t as Const (n, _)) = + if not (ignored n) then Monomorph.add_schematic_consts_of t else I + | collect _ = I + and collect_trigger t = + let val dest = these o try HOLogic.dest_list + in fold (fold collect_pat o dest) (dest t) end + and collect_pat (Const (@{const_name pat}, _) $ t) = collect t + | collect_pat (Const (@{const_name nopat}, _) $ t) = collect t + | collect_pat _ = I + in (fn t => collect t Symtab.empty) end +in + +fun monomorph ctxt xthms = + let val (xs, thms) = split_list xthms + in + map (pair 1) thms + |> Monomorph.monomorph schematic_consts_of ctxt + |> maps (uncurry (map o pair)) o map2 pair xs o map (map snd) + end + +end + +fun normalize iwthms ctxt = + iwthms + |> gen_normalize ctxt + |> unfold1 ctxt + |> monomorph ctxt + |> unfold2 ctxt + |> apply_extra_norms ctxt + |> rpair ctxt + +val setup = Context.theory_map ( + setup_atomize #> + setup_unfolded_quants #> + setup_trigger #> + setup_weight #> + setup_case_bool #> + setup_abs_min_max #> + setup_nat_as_int) + +end diff --git a/src/test-gen/src/main/Old_SMT/old_smt_real.ML b/src/test-gen/src/main/Old_SMT/old_smt_real.ML new file mode 100644 index 0000000..6a2a793 --- /dev/null +++ b/src/test-gen/src/main/Old_SMT/old_smt_real.ML @@ -0,0 +1,134 @@ +(* Title: HOL/Library/Old_SMT/old_smt_real.ML + Author: Sascha Boehme, TU Muenchen + +SMT setup for reals. 
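+
+Concretely, this setup selects the AUFLIRA logic whenever a goal mentions the
+type real, declares real and its arithmetic and ordering operations as SMT-LIB
+built-ins, provides Z3-specific term constructors, and registers a handful of
+rewrite rules together with a linear-arithmetic simproc for Z3 proof
+reconstruction.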
+*) + +structure Old_SMT_Real: sig end = +struct + + +(* SMT-LIB logic *) + +fun smtlib_logic ts = + if exists (Term.exists_type (Term.exists_subtype (equal @{typ real}))) ts + then SOME "AUFLIRA" + else NONE + + +(* SMT-LIB and Z3 built-ins *) + +local + fun real_num _ i = SOME (string_of_int i ^ ".0") + + fun is_linear [t] = Old_SMT_Utils.is_number t + | is_linear [t, u] = Old_SMT_Utils.is_number t orelse Old_SMT_Utils.is_number u + | is_linear _ = false + + fun mk_times ts = Term.list_comb (@{const times (real)}, ts) + + fun times _ _ ts = if is_linear ts then SOME ("*", 2, ts, mk_times) else NONE +in + +val setup_builtins = + Old_SMT_Builtin.add_builtin_typ Old_SMTLIB_Interface.smtlibC + (@{typ real}, K (SOME "Real"), real_num) #> + fold (Old_SMT_Builtin.add_builtin_fun' Old_SMTLIB_Interface.smtlibC) [ + (@{const less (real)}, "<"), + (@{const less_eq (real)}, "<="), + (@{const uminus (real)}, "~"), + (@{const plus (real)}, "+"), + (@{const minus (real)}, "-") ] #> + Old_SMT_Builtin.add_builtin_fun Old_SMTLIB_Interface.smtlibC + (Term.dest_Const @{const times (real)}, times) #> + Old_SMT_Builtin.add_builtin_fun' Old_Z3_Interface.smtlib_z3C + (@{const times (real)}, "*") #> + Old_SMT_Builtin.add_builtin_fun' Old_Z3_Interface.smtlib_z3C + (@{const divide (real)}, "/") + +end + + +(* Z3 constructors *) + +local + fun z3_mk_builtin_typ (Old_Z3_Interface.Sym ("Real", _)) = SOME @{typ real} + | z3_mk_builtin_typ (Old_Z3_Interface.Sym ("real", _)) = SOME @{typ real} + (*FIXME: delete*) + | z3_mk_builtin_typ _ = NONE + + fun z3_mk_builtin_num _ i T = + if T = @{typ real} then SOME (Numeral.mk_cnumber @{ctyp real} i) + else NONE + + fun mk_nary _ cu [] = cu + | mk_nary ct _ cts = uncurry (fold_rev (Thm.mk_binop ct)) (split_last cts) + + val mk_uminus = Thm.apply (Thm.cterm_of @{context} @{const uminus (real)}) + val add = Thm.cterm_of @{context} @{const plus (real)} + val real0 = Numeral.mk_cnumber @{ctyp real} 0 + val mk_sub = Thm.mk_binop (Thm.cterm_of @{context} @{const minus (real)}) + val mk_mul = Thm.mk_binop (Thm.cterm_of @{context} @{const times (real)}) + val mk_div = Thm.mk_binop (Thm.cterm_of @{context} @{const divide (real)}) + val mk_lt = Thm.mk_binop (Thm.cterm_of @{context} @{const less (real)}) + val mk_le = Thm.mk_binop (Thm.cterm_of @{context} @{const less_eq (real)}) + + fun z3_mk_builtin_fun (Old_Z3_Interface.Sym ("-", _)) [ct] = SOME (mk_uminus ct) + | z3_mk_builtin_fun (Old_Z3_Interface.Sym ("+", _)) cts = + SOME (mk_nary add real0 cts) + | z3_mk_builtin_fun (Old_Z3_Interface.Sym ("-", _)) [ct, cu] = + SOME (mk_sub ct cu) + | z3_mk_builtin_fun (Old_Z3_Interface.Sym ("*", _)) [ct, cu] = + SOME (mk_mul ct cu) + | z3_mk_builtin_fun (Old_Z3_Interface.Sym ("/", _)) [ct, cu] = + SOME (mk_div ct cu) + | z3_mk_builtin_fun (Old_Z3_Interface.Sym ("<", _)) [ct, cu] = + SOME (mk_lt ct cu) + | z3_mk_builtin_fun (Old_Z3_Interface.Sym ("<=", _)) [ct, cu] = + SOME (mk_le ct cu) + | z3_mk_builtin_fun (Old_Z3_Interface.Sym (">", _)) [ct, cu] = + SOME (mk_lt cu ct) + | z3_mk_builtin_fun (Old_Z3_Interface.Sym (">=", _)) [ct, cu] = + SOME (mk_le cu ct) + | z3_mk_builtin_fun _ _ = NONE +in + +val z3_mk_builtins = { + mk_builtin_typ = z3_mk_builtin_typ, + mk_builtin_num = z3_mk_builtin_num, + mk_builtin_fun = (fn _ => fn sym => fn cts => + (case try (Thm.typ_of_cterm o hd) cts of + SOME @{typ real} => z3_mk_builtin_fun sym cts + | _ => NONE)) } + +end + + +(* Z3 proof reconstruction *) + +val real_rules = @{lemma + "0 + (x::real) = x" + "x + 0 = x" + "0 * x = 0" + "1 * x = x" + "x + y = y + x" + 
by auto} + +val real_linarith_proc = + Simplifier.make_simproc @{context} "fast_real_arith" + {lhss = [@{term "(m::real) < n"}, @{term "(m::real) \ n"}, @{term "(m::real) = n"}], + proc = K Lin_Arith.simproc} + + +(* setup *) + +val _ = + Theory.setup + (Context.theory_map ( + Old_SMTLIB_Interface.add_logic (10, smtlib_logic) #> + setup_builtins #> + Old_Z3_Interface.add_mk_builtins z3_mk_builtins #> + fold Old_Z3_Proof_Reconstruction.add_z3_rule real_rules #> + Old_Z3_Proof_Tools.add_simproc real_linarith_proc)) + +end diff --git a/src/test-gen/src/main/Old_SMT/old_smt_setup_solvers.ML b/src/test-gen/src/main/Old_SMT/old_smt_setup_solvers.ML new file mode 100644 index 0000000..15e01db --- /dev/null +++ b/src/test-gen/src/main/Old_SMT/old_smt_setup_solvers.ML @@ -0,0 +1,189 @@ +(* Title: HOL/Library/Old_SMT/old_smt_setup_solvers.ML + Author: Sascha Boehme, TU Muenchen + +Setup SMT solvers. +*) + +signature OLD_SMT_SETUP_SOLVERS = +sig + datatype z3_non_commercial = + Z3_Non_Commercial_Unknown | + Z3_Non_Commercial_Accepted | + Z3_Non_Commercial_Declined + val z3_non_commercial: unit -> z3_non_commercial + val z3_with_extensions: bool Config.T + val setup: theory -> theory +end + +structure Old_SMT_Setup_Solvers: OLD_SMT_SETUP_SOLVERS = +struct + +(* helper functions *) + +fun make_avail name () = getenv ("OLD_" ^ name ^ "_SOLVER") <> "" +fun make_command name () = [getenv ("OLD_" ^ name ^ "_SOLVER")] + +fun outcome_of unsat sat unknown solver_name line = + if String.isPrefix unsat line then Old_SMT_Solver.Unsat + else if String.isPrefix sat line then Old_SMT_Solver.Sat + else if String.isPrefix unknown line then Old_SMT_Solver.Unknown + else raise Old_SMT_Failure.SMT (Old_SMT_Failure.Other_Failure ("Solver " ^ + quote solver_name ^ " failed. Enable SMT tracing by setting the " ^ + "configuration option " ^ quote (Config.name_of Old_SMT_Config.trace) ^ " and " ^ + "see the trace for details.")) + +fun on_first_line test_outcome solver_name lines = + let + val empty_line = (fn "" => true | _ => false) + val split_first = (fn [] => ("", []) | l :: ls => (l, ls)) + val (l, ls) = split_first (snd (take_prefix empty_line lines)) + in (test_outcome solver_name l, ls) end + + +(* CVC3 *) + +local + fun cvc3_options ctxt = [ + "-seed", string_of_int (Config.get ctxt Old_SMT_Config.random_seed), + "-lang", "smtlib", "-output-lang", "presentation", + "-timeout", string_of_int (Real.ceil (Config.get ctxt Old_SMT_Config.timeout))] +in + +val cvc3: Old_SMT_Solver.solver_config = { + name = "cvc3", + class = K Old_SMTLIB_Interface.smtlibC, + avail = make_avail "CVC3", + command = make_command "CVC3", + options = cvc3_options, + default_max_relevant = 400 (* FUDGE *), + supports_filter = false, + outcome = + on_first_line (outcome_of "Unsatisfiable." "Satisfiable." 
"Unknown."), + cex_parser = NONE, + reconstruct = NONE } + +end + + +(* Yices *) + +val yices: Old_SMT_Solver.solver_config = { + name = "yices", + class = K Old_SMTLIB_Interface.smtlibC, + avail = make_avail "YICES", + command = make_command "YICES", + options = (fn ctxt => [ + "--rand-seed=" ^ string_of_int (Config.get ctxt Old_SMT_Config.random_seed), + "--timeout=" ^ + string_of_int (Real.ceil (Config.get ctxt Old_SMT_Config.timeout)), + "--smtlib"]), + default_max_relevant = 350 (* FUDGE *), + supports_filter = false, + outcome = on_first_line (outcome_of "unsat" "sat" "unknown"), + cex_parser = NONE, + reconstruct = NONE } + + +(* Z3 *) + +datatype z3_non_commercial = + Z3_Non_Commercial_Unknown | + Z3_Non_Commercial_Accepted | + Z3_Non_Commercial_Declined + + +local + val accepted = member (op =) ["yes", "Yes", "YES"] + val declined = member (op =) ["no", "No", "NO"] +in + +fun z3_non_commercial () = + let + val flag2 = getenv "OLD_Z3_NON_COMMERCIAL" + in + if accepted flag2 then Z3_Non_Commercial_Accepted + else if declined flag2 then Z3_Non_Commercial_Declined + else Z3_Non_Commercial_Unknown + end + +fun if_z3_non_commercial f = + (case z3_non_commercial () of + Z3_Non_Commercial_Accepted => f () + | Z3_Non_Commercial_Declined => + error (Pretty.string_of (Pretty.para + "The SMT solver Z3 may only be used for non-commercial applications.")) + | Z3_Non_Commercial_Unknown => + error + (Pretty.string_of + (Pretty.para + ("The SMT solver Z3 is not activated. To activate it, set the Isabelle \ + \system option \"z3_non_commercial\" to \"yes\" (e.g. via \ + \the Isabelle/jEdit menu Plugin Options / Isabelle / General).")) ^ + "\n\nSee also " ^ Url.print (Url.explode "http://z3.codeplex.com/license"))) + +end + + +val z3_with_extensions = + Attrib.setup_config_bool @{binding old_z3_with_extensions} (K false) + +local + fun z3_make_command name () = if_z3_non_commercial (make_command name) + + fun z3_options ctxt = + ["-rs:" ^ string_of_int (Config.get ctxt Old_SMT_Config.random_seed), + "MODEL=true", + "SOFT_TIMEOUT=" ^ + string_of_int (Real.ceil (1000.0 * Config.get ctxt Old_SMT_Config.timeout)), + "-smt"] + |> not (Config.get ctxt Old_SMT_Config.oracle) ? + append ["DISPLAY_PROOF=true", "PROOF_MODE=2"] + + fun z3_on_first_or_last_line solver_name lines = + let + fun junk l = + if String.isPrefix "WARNING: Out of allocated virtual memory" l + then raise Old_SMT_Failure.SMT Old_SMT_Failure.Out_Of_Memory + else + String.isPrefix "WARNING" l orelse + String.isPrefix "ERROR" l orelse + forall Symbol.is_ascii_blank (Symbol.explode l) + val lines = filter_out junk lines + fun outcome split = + the_default ("", []) (try split lines) + |>> outcome_of "unsat" "sat" "unknown" solver_name + in + (* Starting with version 4.0, Z3 puts the outcome on the first line of the + output rather than on the last line. 
*) + outcome (fn lines => (hd lines, tl lines)) + handle Old_SMT_Failure.SMT _ => outcome (swap o split_last) + end + + fun select_class ctxt = + if Config.get ctxt z3_with_extensions then Old_Z3_Interface.smtlib_z3C + else Old_SMTLIB_Interface.smtlibC +in + +val z3: Old_SMT_Solver.solver_config = { + name = "z3", + class = select_class, + avail = make_avail "Z3", + command = z3_make_command "Z3", + options = z3_options, + default_max_relevant = 350 (* FUDGE *), + supports_filter = true, + outcome = z3_on_first_or_last_line, + cex_parser = SOME Old_Z3_Model.parse_counterex, + reconstruct = SOME Old_Z3_Proof_Reconstruction.reconstruct } + +end + + +(* overall setup *) + +val setup = + Old_SMT_Solver.add_solver cvc3 #> + Old_SMT_Solver.add_solver yices #> + Old_SMT_Solver.add_solver z3 + +end diff --git a/src/test-gen/src/main/Old_SMT/old_smt_solver.ML b/src/test-gen/src/main/Old_SMT/old_smt_solver.ML new file mode 100644 index 0000000..da7b8e4 --- /dev/null +++ b/src/test-gen/src/main/Old_SMT/old_smt_solver.ML @@ -0,0 +1,374 @@ +(* Title: HOL/Library/Old_SMT/old_smt_solver.ML + Author: Sascha Boehme, TU Muenchen + +SMT solvers registry and SMT tactic. +*) + +signature OLD_SMT_SOLVER = +sig + (*configuration*) + datatype outcome = Unsat | Sat | Unknown + type solver_config = { + name: string, + class: Proof.context -> Old_SMT_Utils.class, + avail: unit -> bool, + command: unit -> string list, + options: Proof.context -> string list, + default_max_relevant: int, + supports_filter: bool, + outcome: string -> string list -> outcome * string list, + cex_parser: (Proof.context -> Old_SMT_Translate.recon -> string list -> + term list * term list) option, + reconstruct: (Proof.context -> Old_SMT_Translate.recon -> string list -> + int list * thm) option } + + (*registry*) + val add_solver: solver_config -> theory -> theory + val solver_name_of: Proof.context -> string + val available_solvers_of: Proof.context -> string list + val apply_solver: Proof.context -> (int * (int option * thm)) list -> + int list * thm + val default_max_relevant: Proof.context -> string -> int + + (*filter*) + type 'a smt_filter_data = + ('a * thm) list * ((int * thm) list * Proof.context) + val smt_filter_preprocess: Proof.context -> thm list -> thm -> + ('a * (int option * thm)) list -> int -> 'a smt_filter_data + val smt_filter_apply: Time.time -> 'a smt_filter_data -> + {outcome: Old_SMT_Failure.failure option, used_facts: ('a * thm) list} + + (*tactic*) + val smt_tac: Proof.context -> thm list -> int -> tactic + val smt_tac': Proof.context -> thm list -> int -> tactic +end + +structure Old_SMT_Solver: OLD_SMT_SOLVER = +struct + + +(* interface to external solvers *) + +local + +fun make_cmd command options problem_path proof_path = + space_implode " " + ("(exec 2>&1;" :: map Bash.string (command () @ options) @ + [File.bash_path problem_path, ")", ">", File.bash_path proof_path]) + +fun trace_and ctxt msg f x = + let val _ = Old_SMT_Config.trace_msg ctxt (fn () => msg) () + in f x end + +fun run ctxt name mk_cmd input = + (case Old_SMT_Config.certificates_of ctxt of + NONE => + if not (Old_SMT_Config.is_available ctxt name) then + error ("The SMT solver " ^ quote name ^ " is not installed.") + else if Config.get ctxt Old_SMT_Config.debug_files = "" then + trace_and ctxt ("Invoking SMT solver " ^ quote name ^ " ...") + (Cache_IO.run mk_cmd) input + else + let + val base_path = Path.explode (Config.get ctxt Old_SMT_Config.debug_files) + val in_path = Path.ext "smt_in" base_path + val out_path = Path.ext "smt_out" 
base_path + in Cache_IO.raw_run mk_cmd input in_path out_path end + | SOME certs => + (case Cache_IO.lookup certs input of + (NONE, key) => + if Config.get ctxt Old_SMT_Config.read_only_certificates then + error ("Bad certificate cache: missing certificate") + else + Cache_IO.run_and_cache certs key mk_cmd input + | (SOME output, _) => + trace_and ctxt ("Using cached certificate from " ^ + Path.print (Cache_IO.cache_path_of certs) ^ " ...") + I output)) + +fun run_solver ctxt name mk_cmd input = + let + fun pretty tag ls = Pretty.string_of (Pretty.big_list tag + (map Pretty.str ls)) + + val _ = Old_SMT_Config.trace_msg ctxt (pretty "Problem:" o split_lines) input + + val {redirected_output=res, output=err, return_code} = + Old_SMT_Config.with_timeout ctxt (run ctxt name mk_cmd) input + val _ = Old_SMT_Config.trace_msg ctxt (pretty "Solver:") err + + val ls = fst (take_suffix (equal "") res) + val _ = Old_SMT_Config.trace_msg ctxt (pretty "Result:") ls + + val _ = return_code <> 0 andalso + raise Old_SMT_Failure.SMT (Old_SMT_Failure.Abnormal_Termination return_code) + in ls end + +fun trace_assms ctxt = + Old_SMT_Config.trace_msg ctxt (Pretty.string_of o + Pretty.big_list "Assertions:" o map (Thm.pretty_thm ctxt o snd)) + +fun trace_recon_data ({context=ctxt, typs, terms, ...} : Old_SMT_Translate.recon) = + let + fun pretty_eq n p = Pretty.block [Pretty.str n, Pretty.str " = ", p] + fun p_typ (n, T) = pretty_eq n (Syntax.pretty_typ ctxt T) + fun p_term (n, t) = pretty_eq n (Syntax.pretty_term ctxt t) + in + Old_SMT_Config.trace_msg ctxt (fn () => + Pretty.string_of (Pretty.big_list "Names:" [ + Pretty.big_list "sorts:" (map p_typ (Symtab.dest typs)), + Pretty.big_list "functions:" (map p_term (Symtab.dest terms))])) () + end + +in + +fun invoke name command ithms ctxt = + let + val options = Old_SMT_Config.solver_options_of ctxt + val comments = ("solver: " ^ name) :: + ("timeout: " ^ string_of_real (Config.get ctxt Old_SMT_Config.timeout)) :: + ("random seed: " ^ + string_of_int (Config.get ctxt Old_SMT_Config.random_seed)) :: + "arguments:" :: options + + val (str, recon as {context=ctxt', ...}) = + ithms + |> tap (trace_assms ctxt) + |> Old_SMT_Translate.translate ctxt comments + ||> tap trace_recon_data + in (run_solver ctxt' name (make_cmd command options) str, recon) end + +end + + +(* configuration *) + +datatype outcome = Unsat | Sat | Unknown + +type solver_config = { + name: string, + class: Proof.context -> Old_SMT_Utils.class, + avail: unit -> bool, + command: unit -> string list, + options: Proof.context -> string list, + default_max_relevant: int, + supports_filter: bool, + outcome: string -> string list -> outcome * string list, + cex_parser: (Proof.context -> Old_SMT_Translate.recon -> string list -> + term list * term list) option, + reconstruct: (Proof.context -> Old_SMT_Translate.recon -> string list -> + int list * thm) option } + + +(* registry *) + +type solver_info = { + command: unit -> string list, + default_max_relevant: int, + supports_filter: bool, + reconstruct: Proof.context -> string list * Old_SMT_Translate.recon -> + int list * thm } + +structure Solvers = Generic_Data +( + type T = solver_info Symtab.table + val empty = Symtab.empty + val extend = I + fun merge data = Symtab.merge (K true) data +) + +local + fun finish outcome cex_parser reconstruct ocl outer_ctxt + (output, (recon as {context=ctxt, ...} : Old_SMT_Translate.recon)) = + (case outcome output of + (Unsat, ls) => + if not (Config.get ctxt Old_SMT_Config.oracle) andalso is_some reconstruct + 
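(* An Unsat answer is either replayed by the solver-specific reconstruction
   function or, when oracle mode is active or no reconstruction is available,
   accepted via the oracle ocl; Sat and Unknown answers are raised as a
   Counterexample, parsed by cex_parser when one is provided. *)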
then the reconstruct outer_ctxt recon ls + else ([], ocl ()) + | (result, ls) => + let + val (ts, us) = + (case cex_parser of SOME f => f ctxt recon ls | _ => ([], [])) + in + raise Old_SMT_Failure.SMT (Old_SMT_Failure.Counterexample { + is_real_cex = (result = Sat), + free_constraints = ts, + const_defs = us}) + end) + + val cfalse = Thm.cterm_of @{context} (@{const Trueprop} $ @{const False}) +in + +fun add_solver cfg = + let + val {name, class, avail, command, options, default_max_relevant, + supports_filter, outcome, cex_parser, reconstruct} = cfg + + fun core_oracle () = cfalse + + fun solver ocl = { + command = command, + default_max_relevant = default_max_relevant, + supports_filter = supports_filter, + reconstruct = finish (outcome name) cex_parser reconstruct ocl } + + val info = {name=name, class=class, avail=avail, options=options} + in + Thm.add_oracle (Binding.name name, core_oracle) #-> (fn (_, ocl) => + Context.theory_map (Solvers.map (Symtab.update_new (name, solver ocl)))) #> + Context.theory_map (Old_SMT_Config.add_solver info) + end + +end + +fun get_info ctxt name = + the (Symtab.lookup (Solvers.get (Context.Proof ctxt)) name) + +val solver_name_of = Old_SMT_Config.solver_of + +val available_solvers_of = Old_SMT_Config.available_solvers_of + +fun name_and_info_of ctxt = + let val name = solver_name_of ctxt + in (name, get_info ctxt name) end + +fun gen_preprocess ctxt iwthms = Old_SMT_Normalize.normalize iwthms ctxt + +fun gen_apply (ithms, ctxt) = + let val (name, {command, reconstruct, ...}) = name_and_info_of ctxt + in + (ithms, ctxt) + |-> invoke name command + |> reconstruct ctxt + |>> distinct (op =) + end + +fun apply_solver ctxt = gen_apply o gen_preprocess ctxt + +val default_max_relevant = #default_max_relevant oo get_info + +val supports_filter = #supports_filter o snd o name_and_info_of + + +(* check well-sortedness *) + +val has_topsort = Term.exists_type (Term.exists_subtype (fn + TFree (_, []) => true + | TVar (_, []) => true + | _ => false)) + +(* without this test, we would run into problems when atomizing the rules: *) +fun check_topsort ctxt thm = + if has_topsort (Thm.prop_of thm) then + (Old_SMT_Normalize.drop_fact_warning ctxt thm; TrueI) + else + thm + +fun check_topsorts ctxt iwthms = map (apsnd (apsnd (check_topsort ctxt))) iwthms + + +(* filter *) + +val cnot = Thm.cterm_of @{context} @{const Not} + +fun mk_result outcome xrules = { outcome = outcome, used_facts = xrules } + +type 'a smt_filter_data = ('a * thm) list * ((int * thm) list * Proof.context) + +fun smt_filter_preprocess ctxt facts goal xwthms i = + let + val ctxt = + ctxt + |> Config.put Old_SMT_Config.oracle false + |> Config.put Old_SMT_Config.filter_only_facts true + + val ({context=ctxt', prems, concl, ...}, _) = Subgoal.focus ctxt i NONE goal + fun negate ct = Thm.dest_comb ct ||> Thm.apply cnot |-> Thm.apply + val cprop = + (case try negate (Thm.rhs_of (Old_SMT_Normalize.atomize_conv ctxt' concl)) of + SOME ct => ct + | NONE => raise Old_SMT_Failure.SMT (Old_SMT_Failure.Other_Failure ( + "goal is not a HOL term"))) + in + map snd xwthms + |> map_index I + |> append (map (pair ~1 o pair NONE) (Thm.assume cprop :: prems @ facts)) + |> check_topsorts ctxt' + |> gen_preprocess ctxt' + |> pair (map (apsnd snd) xwthms) + end + +fun smt_filter_apply time_limit (xthms, (ithms, ctxt)) = + let + val ctxt' = + ctxt + |> Config.put Old_SMT_Config.timeout (Time.toReal time_limit) + + fun filter_thms false = K xthms + | filter_thms true = map_filter (try (nth xthms)) o fst + in + (ithms, 
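(* The preprocessed problem is solved with the given time limit; if the
   solver reports which of the indexed assertions it used, only those facts
   are returned, otherwise all of them, and solver failures are reported via
   the outcome field instead of being raised. *)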
ctxt') + |> gen_apply + |> filter_thms (supports_filter ctxt') + |> mk_result NONE + end + handle Old_SMT_Failure.SMT fail => mk_result (SOME fail) [] + + +(* SMT tactic *) + +local + fun trace_assumptions ctxt iwthms idxs = + let + val wthms = + idxs + |> filter (fn i => i >= 0) + |> map_filter (AList.lookup (op =) iwthms) + in + if Config.get ctxt Old_SMT_Config.trace_used_facts andalso length wthms > 0 + then + tracing (Pretty.string_of (Pretty.big_list "SMT used facts:" + (map (Thm.pretty_thm ctxt o snd) wthms))) + else () + end + + fun solve ctxt iwthms = + iwthms + |> check_topsorts ctxt + |> apply_solver ctxt + |>> trace_assumptions ctxt iwthms + |> snd + + fun str_of ctxt fail = + Old_SMT_Failure.string_of_failure ctxt fail + |> prefix ("Solver " ^ Old_SMT_Config.solver_of ctxt ^ ": ") + + fun safe_solve ctxt iwthms = SOME (solve ctxt iwthms) + handle + Old_SMT_Failure.SMT (fail as Old_SMT_Failure.Counterexample _) => + (Old_SMT_Config.verbose_msg ctxt (str_of ctxt) fail; NONE) + | Old_SMT_Failure.SMT (fail as Old_SMT_Failure.Time_Out) => + error ("SMT: Solver " ^ quote (Old_SMT_Config.solver_of ctxt) ^ ": " ^ + Old_SMT_Failure.string_of_failure ctxt fail ^ " (setting the " ^ + "configuration option " ^ quote (Config.name_of Old_SMT_Config.timeout) ^ " might help)") + | Old_SMT_Failure.SMT fail => error (str_of ctxt fail) + + fun tag_rules thms = map_index (apsnd (pair NONE)) thms + fun tag_prems thms = map (pair ~1 o pair NONE) thms + + fun resolve ctxt (SOME thm) = resolve_tac ctxt [thm] 1 + | resolve _ NONE = no_tac + + fun tac prove ctxt rules = + CONVERSION (Old_SMT_Normalize.atomize_conv ctxt) + THEN' resolve_tac ctxt @{thms ccontr} + THEN' SUBPROOF (fn {context = ctxt', prems, ...} => + resolve ctxt' (prove ctxt' (tag_rules rules @ tag_prems prems))) ctxt +in + +val smt_tac = tac safe_solve +val smt_tac' = tac (SOME oo solve) + +end + +end diff --git a/src/test-gen/src/main/Old_SMT/old_smt_translate.ML b/src/test-gen/src/main/Old_SMT/old_smt_translate.ML new file mode 100644 index 0000000..ab4a2a2 --- /dev/null +++ b/src/test-gen/src/main/Old_SMT/old_smt_translate.ML @@ -0,0 +1,589 @@ +(* Title: HOL/Library/Old_SMT/old_smt_translate.ML + Author: Sascha Boehme, TU Muenchen + +Translate theorems into an SMT intermediate format and serialize them. 
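The translation proceeds in several stages: datatype and record declarations
are collected, quantifiers, let-expressions and built-ins are eta-expanded,
lambdas are lifted, applications are made explicit through fun_app and, for
first-order target formats, formulas are separated from terms (folify).
The result is converted into the intermediate sterm format and serialized by
the solver-specific configuration, while the accompanying recon value records
everything needed to interpret the solver's answer in Isabelle.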
+*) + +signature OLD_SMT_TRANSLATE = +sig + (*intermediate term structure*) + datatype squant = SForall | SExists + datatype 'a spattern = SPat of 'a list | SNoPat of 'a list + datatype sterm = + SVar of int | + SApp of string * sterm list | + SLet of string * sterm * sterm | + SQua of squant * string list * sterm spattern list * int option * sterm + + (*translation configuration*) + type prefixes = {sort_prefix: string, func_prefix: string} + type sign = { + header: string list, + sorts: string list, + dtyps: (string * (string * (string * string) list) list) list list, + funcs: (string * (string list * string)) list } + type config = { + prefixes: prefixes, + header: term list -> string list, + is_fol: bool, + has_datatypes: bool, + serialize: string list -> sign -> sterm list -> string } + type recon = { + context: Proof.context, + typs: typ Symtab.table, + terms: term Symtab.table, + rewrite_rules: thm list, + assms: (int * thm) list } + + (*translation*) + val add_config: Old_SMT_Utils.class * (Proof.context -> config) -> + Context.generic -> Context.generic + val translate: Proof.context -> string list -> (int * thm) list -> + string * recon +end + +structure Old_SMT_Translate: OLD_SMT_TRANSLATE = +struct + + +(* intermediate term structure *) + +datatype squant = SForall | SExists + +datatype 'a spattern = SPat of 'a list | SNoPat of 'a list + +datatype sterm = + SVar of int | + SApp of string * sterm list | + SLet of string * sterm * sterm | + SQua of squant * string list * sterm spattern list * int option * sterm + + + +(* translation configuration *) + +type prefixes = {sort_prefix: string, func_prefix: string} + +type sign = { + header: string list, + sorts: string list, + dtyps: (string * (string * (string * string) list) list) list list, + funcs: (string * (string list * string)) list } + +type config = { + prefixes: prefixes, + header: term list -> string list, + is_fol: bool, + has_datatypes: bool, + serialize: string list -> sign -> sterm list -> string } + +type recon = { + context: Proof.context, + typs: typ Symtab.table, + terms: term Symtab.table, + rewrite_rules: thm list, + assms: (int * thm) list } + + + +(* translation context *) + +fun make_tr_context {sort_prefix, func_prefix} = + (sort_prefix, 1, Typtab.empty, func_prefix, 1, Termtab.empty) + +fun string_of_index pre i = pre ^ string_of_int i + +fun add_typ T proper (cx as (sp, Tidx, typs, fp, idx, terms)) = + (case Typtab.lookup typs T of + SOME (n, _) => (n, cx) + | NONE => + let + val n = string_of_index sp Tidx + val typs' = Typtab.update (T, (n, proper)) typs + in (n, (sp, Tidx+1, typs', fp, idx, terms)) end) + +fun add_fun t sort (cx as (sp, Tidx, typs, fp, idx, terms)) = + (case Termtab.lookup terms t of + SOME (n, _) => (n, cx) + | NONE => + let + val n = string_of_index fp idx + val terms' = Termtab.update (t, (n, sort)) terms + in (n, (sp, Tidx, typs, fp, idx+1, terms')) end) + +fun sign_of header dtyps (_, _, typs, _, _, terms) = { + header = header, + sorts = Typtab.fold (fn (_, (n, true)) => cons n | _ => I) typs [], + dtyps = dtyps, + funcs = Termtab.fold (fn (_, (n, SOME ss)) => cons (n,ss) | _ => I) terms []} + +fun recon_of ctxt rules thms ithms (_, _, typs, _, _, terms) = + let + fun add_typ (T, (n, _)) = Symtab.update (n, T) + val typs' = Typtab.fold add_typ typs Symtab.empty + + fun add_fun (t, (n, _)) = Symtab.update (n, t) + val terms' = Termtab.fold add_fun terms Symtab.empty + + val assms = map (pair ~1) thms @ ithms + in + {context=ctxt, typs=typs', terms=terms', rewrite_rules=rules, 
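(* The recon value maps the generated sort and function names back to their
   original types and terms and keeps the rewrite rules and indexed
   assumptions, so that proof reconstruction and counterexample parsing can
   relate the solver output to the input problem. *)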
assms=assms} + end + + + +(* preprocessing *) + +(** datatype declarations **) + +fun collect_datatypes_and_records (tr_context, ctxt) ts = + let + val (declss, ctxt') = + fold (Term.fold_types Old_SMT_Datatypes.add_decls) ts ([], ctxt) + + fun is_decl_typ T = exists (exists (equal T o fst)) declss + + fun add_typ' T proper = + (case Old_SMT_Builtin.dest_builtin_typ ctxt' T of + SOME n => pair n + | NONE => add_typ T proper) + + fun tr_select sel = + let val T = Term.range_type (Term.fastype_of sel) + in add_fun sel NONE ##>> add_typ' T (not (is_decl_typ T)) end + fun tr_constr (constr, selects) = + add_fun constr NONE ##>> fold_map tr_select selects + fun tr_typ (T, cases) = add_typ' T false ##>> fold_map tr_constr cases + val (declss', tr_context') = fold_map (fold_map tr_typ) declss tr_context + + fun add (constr, selects) = + Termtab.update (constr, length selects) #> + fold (Termtab.update o rpair 1) selects + val funcs = fold (fold (fold add o snd)) declss Termtab.empty + in ((funcs, declss', tr_context', ctxt'), ts) end + (* FIXME: also return necessary datatype and record theorems *) + + +(** eta-expand quantifiers, let expressions and built-ins *) + +local + fun eta f T t = Abs (Name.uu, T, f (Term.incr_boundvars 1 t $ Bound 0)) + + fun exp f T = eta f (Term.domain_type (Term.domain_type T)) + + fun exp2 T q = + let val U = Term.domain_type T + in Abs (Name.uu, U, q $ eta I (Term.domain_type U) (Bound 0)) end + + fun exp2' T l = + let val (U1, U2) = Term.dest_funT T ||> Term.domain_type + in Abs (Name.uu, U1, eta I U2 (l $ Bound 0)) end + + fun expf k i T t = + let val Ts = drop i (fst (Old_SMT_Utils.dest_funT k T)) + in + Term.incr_boundvars (length Ts) t + |> fold_rev (fn i => fn u => u $ Bound i) (0 upto length Ts - 1) + |> fold_rev (fn T => fn u => Abs (Name.uu, T, u)) Ts + end +in + +fun eta_expand ctxt is_fol funcs = + let + fun exp_func t T ts = + (case Termtab.lookup funcs t of + SOME k => + Term.list_comb (t, ts) + |> k <> length ts ? 
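(* If the built-in is applied to fewer than its expected k arguments, expf
   eta-expands the application with fresh bound variables until exactly k
   arguments are supplied. *)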
expf k (length ts) T + | NONE => Term.list_comb (t, ts)) + + fun expand ((q as Const (@{const_name All}, _)) $ Abs a) = q $ abs_expand a + | expand ((q as Const (@{const_name All}, T)) $ t) = q $ exp expand T t + | expand (q as Const (@{const_name All}, T)) = exp2 T q + | expand ((q as Const (@{const_name Ex}, _)) $ Abs a) = q $ abs_expand a + | expand ((q as Const (@{const_name Ex}, T)) $ t) = q $ exp expand T t + | expand (q as Const (@{const_name Ex}, T)) = exp2 T q + | expand ((l as Const (@{const_name Let}, _)) $ t $ Abs a) = + if is_fol then expand (Term.betapply (Abs a, t)) + else l $ expand t $ abs_expand a + | expand ((l as Const (@{const_name Let}, T)) $ t $ u) = + if is_fol then expand (u $ t) + else l $ expand t $ exp expand (Term.range_type T) u + | expand ((l as Const (@{const_name Let}, T)) $ t) = + if is_fol then + let val U = Term.domain_type (Term.range_type T) + in Abs (Name.uu, U, Bound 0 $ Term.incr_boundvars 1 t) end + else exp2 T (l $ expand t) + | expand (l as Const (@{const_name Let}, T)) = + if is_fol then + let val U = Term.domain_type (Term.range_type T) + in + Abs (Name.uu, Term.domain_type T, Abs (Name.uu, U, + Bound 0 $ Bound 1)) + end + else exp2' T l + | expand t = + (case Term.strip_comb t of + (u as Const (c as (_, T)), ts) => + (case Old_SMT_Builtin.dest_builtin ctxt c ts of + SOME (_, k, us, mk) => + if k = length us then mk (map expand us) + else if k < length us then + chop k (map expand us) |>> mk |> Term.list_comb + else expf k (length ts) T (mk (map expand us)) + | NONE => exp_func u T (map expand ts)) + | (u as Free (_, T), ts) => exp_func u T (map expand ts) + | (Abs a, ts) => Term.list_comb (abs_expand a, map expand ts) + | (u, ts) => Term.list_comb (u, map expand ts)) + + and abs_expand (n, T, t) = Abs (n, T, expand t) + + in map expand end + +end + + +(** introduce explicit applications **) + +local + (* + Make application explicit for functions with varying number of arguments. 
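    Every constant and free variable is applied directly to the minimal
    number of arguments it receives anywhere in the problem; any further
    arguments are passed through the explicit application operator fun_app,
    whose defining equation fun_app_eq is added to the rewrite rules.
    Datatype constructors and selectors collected in funcs are always
    applied in full.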
+ *) + + fun add t i = apfst (Termtab.map_default (t, i) (Integer.min i)) + fun add_type T = apsnd (Typtab.update (T, ())) + + fun min_arities t = + (case Term.strip_comb t of + (u as Const _, ts) => add u (length ts) #> fold min_arities ts + | (u as Free _, ts) => add u (length ts) #> fold min_arities ts + | (Abs (_, T, u), ts) => add_type T #> min_arities u #> fold min_arities ts + | (_, ts) => fold min_arities ts) + + fun minimize types t i = + let + fun find_min j [] _ = j + | find_min j (U :: Us) T = + if Typtab.defined types T then j + else find_min (j + 1) Us (U --> T) + + val (Ts, T) = Term.strip_type (Term.type_of t) + in find_min 0 (take i (rev Ts)) T end + + fun app u (t, T) = + (Const (@{const_name fun_app}, T --> T) $ t $ u, Term.range_type T) + + fun apply i t T ts = + let + val (ts1, ts2) = chop i ts + val (_, U) = Old_SMT_Utils.dest_funT i T + in fst (fold app ts2 (Term.list_comb (t, ts1), U)) end +in + +fun intro_explicit_application ctxt funcs ts = + let + val (arities, types) = fold min_arities ts (Termtab.empty, Typtab.empty) + val arities' = Termtab.map (minimize types) arities + + fun app_func t T ts = + if is_some (Termtab.lookup funcs t) then Term.list_comb (t, ts) + else apply (the (Termtab.lookup arities' t)) t T ts + + fun in_list T f t = HOLogic.mk_list T (map f (HOLogic.dest_list t)) + + fun traverse Ts t = + (case Term.strip_comb t of + (q as Const (@{const_name All}, _), [Abs (x, T, u)]) => + q $ Abs (x, T, in_trigger (T :: Ts) u) + | (q as Const (@{const_name Ex}, _), [Abs (x, T, u)]) => + q $ Abs (x, T, in_trigger (T :: Ts) u) + | (q as Const (@{const_name Let}, _), [u1, u2 as Abs _]) => + q $ traverse Ts u1 $ traverse Ts u2 + | (u as Const (c as (_, T)), ts) => + (case Old_SMT_Builtin.dest_builtin ctxt c ts of + SOME (_, k, us, mk) => + let + val (ts1, ts2) = chop k (map (traverse Ts) us) + val U = Term.strip_type T |>> snd o chop k |> (op --->) + in apply 0 (mk ts1) U ts2 end + | NONE => app_func u T (map (traverse Ts) ts)) + | (u as Free (_, T), ts) => app_func u T (map (traverse Ts) ts) + | (u as Bound i, ts) => apply 0 u (nth Ts i) (map (traverse Ts) ts) + | (Abs (n, T, u), ts) => traverses Ts (Abs (n, T, traverse (T::Ts) u)) ts + | (u, ts) => traverses Ts u ts) + and in_trigger Ts ((c as @{const trigger}) $ p $ t) = + c $ in_pats Ts p $ in_weight Ts t + | in_trigger Ts t = in_weight Ts t + and in_pats Ts ps = + in_list @{typ "pattern list"} + (in_list @{typ pattern} (in_pat Ts)) ps + and in_pat Ts ((p as Const (@{const_name pat}, _)) $ t) = + p $ traverse Ts t + | in_pat Ts ((p as Const (@{const_name nopat}, _)) $ t) = + p $ traverse Ts t + | in_pat _ t = raise TERM ("bad pattern", [t]) + and in_weight Ts ((c as @{const weight}) $ w $ t) = + c $ w $ traverse Ts t + | in_weight Ts t = traverse Ts t + and traverses Ts t ts = Term.list_comb (t, map (traverse Ts) ts) + in map (traverse []) ts end + +val fun_app_eq = mk_meta_eq @{thm fun_app_def} + +end + + +(** map HOL formulas to FOL formulas (i.e., separate formulas froms terms) **) + +local + val term_bool = @{lemma "term_true ~= term_false" + by (simp add: term_true_def term_false_def)} + + val is_quant = member (op =) [@{const_name All}, @{const_name Ex}] + + val fol_rules = [ + Let_def, + mk_meta_eq @{thm term_true_def}, + mk_meta_eq @{thm term_false_def}, + @{lemma "P = True == P" by (rule eq_reflection) simp}, + @{lemma "if P then True else False == P" by (rule eq_reflection) simp}] + + fun as_term t = @{const HOL.eq (bool)} $ t $ @{const term_true} + + exception BAD_PATTERN of unit + + fun 
wrap_in_if pat t = + if pat then + raise BAD_PATTERN () + else + @{const If (bool)} $ t $ @{const term_true} $ @{const term_false} + + fun is_builtin_conn_or_pred ctxt c ts = + is_some (Old_SMT_Builtin.dest_builtin_conn ctxt c ts) orelse + is_some (Old_SMT_Builtin.dest_builtin_pred ctxt c ts) + + fun builtin b ctxt c ts = + (case (Const c, ts) of + (@{const HOL.eq (bool)}, [t, u]) => + if t = @{const term_true} orelse u = @{const term_true} then + Old_SMT_Builtin.dest_builtin_eq ctxt t u + else b ctxt c ts + | _ => b ctxt c ts) +in + +fun folify ctxt = + let + fun in_list T f t = HOLogic.mk_list T (map_filter f (HOLogic.dest_list t)) + + fun in_term pat t = + (case Term.strip_comb t of + (@{const True}, []) => @{const term_true} + | (@{const False}, []) => @{const term_false} + | (u as Const (@{const_name If}, _), [t1, t2, t3]) => + if pat then raise BAD_PATTERN () + else u $ in_form t1 $ in_term pat t2 $ in_term pat t3 + | (Const (c as (n, _)), ts) => + if is_builtin_conn_or_pred ctxt c ts then wrap_in_if pat (in_form t) + else if is_quant n then wrap_in_if pat (in_form t) + else Term.list_comb (Const c, map (in_term pat) ts) + | (Free c, ts) => Term.list_comb (Free c, map (in_term pat) ts) + | _ => t) + + and in_weight ((c as @{const weight}) $ w $ t) = c $ w $ in_form t + | in_weight t = in_form t + + and in_pat ((p as Const (@{const_name pat}, _)) $ t) = + p $ in_term true t + | in_pat ((p as Const (@{const_name nopat}, _)) $ t) = + p $ in_term true t + | in_pat t = raise TERM ("bad pattern", [t]) + + and in_pats ps = + in_list @{typ "pattern list"} + (SOME o in_list @{typ pattern} (try in_pat)) ps + + and in_trigger ((c as @{const trigger}) $ p $ t) = + c $ in_pats p $ in_weight t + | in_trigger t = in_weight t + + and in_form t = + (case Term.strip_comb t of + (q as Const (qn, _), [Abs (n, T, u)]) => + if is_quant qn then q $ Abs (n, T, in_trigger u) + else as_term (in_term false t) + | (Const c, ts) => + (case Old_SMT_Builtin.dest_builtin_conn ctxt c ts of + SOME (_, _, us, mk) => mk (map in_form us) + | NONE => + (case Old_SMT_Builtin.dest_builtin_pred ctxt c ts of + SOME (_, _, us, mk) => mk (map (in_term false) us) + | NONE => as_term (in_term false t))) + | _ => as_term (in_term false t)) + in + map in_form #> + cons (Old_SMT_Utils.prop_of term_bool) #> + pair (fol_rules, [term_bool], builtin) + end + +end + + +(* translation into intermediate format *) + +(** utility functions **) + +val quantifier = (fn + @{const_name All} => SOME SForall + | @{const_name Ex} => SOME SExists + | _ => NONE) + +fun group_quant qname Ts (t as Const (q, _) $ Abs (_, T, u)) = + if q = qname then group_quant qname (T :: Ts) u else (Ts, t) + | group_quant _ Ts t = (Ts, t) + +fun dest_weight (@{const weight} $ w $ t) = + (SOME (snd (HOLogic.dest_number w)), t) + | dest_weight t = (NONE, t) + +fun dest_pat (Const (@{const_name pat}, _) $ t) = (t, true) + | dest_pat (Const (@{const_name nopat}, _) $ t) = (t, false) + | dest_pat t = raise TERM ("bad pattern", [t]) + +fun dest_pats [] = I + | dest_pats ts = + (case map dest_pat ts |> split_list ||> distinct (op =) of + (ps, [true]) => cons (SPat ps) + | (ps, [false]) => cons (SNoPat ps) + | _ => raise TERM ("bad multi-pattern", ts)) + +fun dest_trigger (@{const trigger} $ tl $ t) = + (rev (fold (dest_pats o HOLogic.dest_list) (HOLogic.dest_list tl) []), t) + | dest_trigger t = ([], t) + +fun dest_quant qn T t = quantifier qn |> Option.map (fn q => + let + val (Ts, u) = group_quant qn [T] t + val (ps, p) = dest_trigger u + val (w, b) = dest_weight p + in 
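(* dest_quant collapses a block of equal quantifiers and returns the
   quantifier, the types of all bound variables, the trigger patterns, an
   optional weight annotation and the remaining body. *)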
(q, rev Ts, ps, w, b) end) + +fun fold_map_pat f (SPat ts) = fold_map f ts #>> SPat + | fold_map_pat f (SNoPat ts) = fold_map f ts #>> SNoPat + + +(** translation from Isabelle terms into SMT intermediate terms **) + +fun intermediate header dtyps builtin ctxt ts trx = + let + fun transT (T as TFree _) = add_typ T true + | transT (T as TVar _) = (fn _ => raise TYPE ("bad SMT type", [T], [])) + | transT (T as Type _) = + (case Old_SMT_Builtin.dest_builtin_typ ctxt T of + SOME n => pair n + | NONE => add_typ T true) + + fun app n ts = SApp (n, ts) + + fun trans t = + (case Term.strip_comb t of + (Const (qn, _), [Abs (_, T, t1)]) => + (case dest_quant qn T t1 of + SOME (q, Ts, ps, w, b) => + fold_map transT Ts ##>> fold_map (fold_map_pat trans) ps ##>> + trans b #>> (fn ((Ts', ps'), b') => SQua (q, Ts', ps', w, b')) + | NONE => raise TERM ("unsupported quantifier", [t])) + | (Const (@{const_name Let}, _), [t1, Abs (_, T, t2)]) => + transT T ##>> trans t1 ##>> trans t2 #>> + (fn ((U, u1), u2) => SLet (U, u1, u2)) + | (u as Const (c as (_, T)), ts) => + (case builtin ctxt c ts of + SOME (n, _, us, _) => fold_map trans us #>> app n + | NONE => transs u T ts) + | (u as Free (_, T), ts) => transs u T ts + | (Bound i, []) => pair (SVar i) + | _ => raise TERM ("bad SMT term", [t])) + + and transs t T ts = + let val (Us, U) = Old_SMT_Utils.dest_funT (length ts) T + in + fold_map transT Us ##>> transT U #-> (fn Up => + add_fun t (SOME Up) ##>> fold_map trans ts #>> SApp) + end + + val (us, trx') = fold_map trans ts trx + in ((sign_of (header ts) dtyps trx', us), trx') end + + + +(* translation *) + +structure Configs = Generic_Data +( + type T = (Proof.context -> config) Old_SMT_Utils.dict + val empty = [] + val extend = I + fun merge data = Old_SMT_Utils.dict_merge fst data +) + +fun add_config (cs, cfg) = Configs.map (Old_SMT_Utils.dict_update (cs, cfg)) + +fun get_config ctxt = + let val cs = Old_SMT_Config.solver_class_of ctxt + in + (case Old_SMT_Utils.dict_get (Configs.get (Context.Proof ctxt)) cs of + SOME cfg => cfg ctxt + | NONE => error ("SMT: no translation configuration found " ^ + "for solver class " ^ quote (Old_SMT_Utils.string_of_class cs))) + end + +fun translate ctxt comments ithms = + let + val {prefixes, is_fol, header, has_datatypes, serialize} = get_config ctxt + + val with_datatypes = + has_datatypes andalso Config.get ctxt Old_SMT_Config.datatypes + + fun no_dtyps (tr_context, ctxt) ts = + ((Termtab.empty, [], tr_context, ctxt), ts) + + val ts1 = map (Envir.beta_eta_contract o Old_SMT_Utils.prop_of o snd) ithms + + val ((funcs, dtyps, tr_context, ctxt1), ts2) = + ((make_tr_context prefixes, ctxt), ts1) + |-> (if with_datatypes then collect_datatypes_and_records else no_dtyps) + + fun is_binder (Const (@{const_name Let}, _) $ _) = true + | is_binder t = Lambda_Lifting.is_quantifier t + + fun mk_trigger ((q as Const (@{const_name All}, _)) $ Abs (n, T, t)) = + q $ Abs (n, T, mk_trigger t) + | mk_trigger (eq as (Const (@{const_name HOL.eq}, T) $ lhs $ _)) = + Term.domain_type T --> @{typ pattern} + |> (fn T => Const (@{const_name pat}, T) $ lhs) + |> HOLogic.mk_list @{typ pattern} o single + |> HOLogic.mk_list @{typ "pattern list"} o single + |> (fn t => @{const trigger} $ t $ eq) + | mk_trigger t = t + + val (ctxt2, ts3) = + ts2 + |> eta_expand ctxt1 is_fol funcs + |> rpair ctxt1 + |-> Lambda_Lifting.lift_lambdas NONE is_binder + |-> (fn (ts', defs) => fn ctxt' => + map mk_trigger defs @ ts' + |> intro_explicit_application ctxt' funcs + |> pair ctxt') + + val ((rewrite_rules, 
extra_thms, builtin), ts4) = + (if is_fol then folify ctxt2 else pair ([], [], I)) ts3 + + val rewrite_rules' = fun_app_eq :: rewrite_rules + in + (ts4, tr_context) + |-> intermediate header dtyps (builtin Old_SMT_Builtin.dest_builtin) ctxt2 + |>> uncurry (serialize comments) + ||> recon_of ctxt2 rewrite_rules' extra_thms ithms + end + +end diff --git a/src/test-gen/src/main/Old_SMT/old_smt_utils.ML b/src/test-gen/src/main/Old_SMT/old_smt_utils.ML new file mode 100644 index 0000000..8603f1a --- /dev/null +++ b/src/test-gen/src/main/Old_SMT/old_smt_utils.ML @@ -0,0 +1,221 @@ +(* Title: HOL/Library/Old_SMT/old_smt_utils.ML + Author: Sascha Boehme, TU Muenchen + +General utility functions. +*) + +signature OLD_SMT_UTILS = +sig + (*basic combinators*) + val repeat: ('a -> 'a option) -> 'a -> 'a + val repeat_yield: ('a -> 'b -> ('a * 'b) option) -> 'a -> 'b -> 'a * 'b + + (*class dictionaries*) + type class = string list + val basicC: class + val string_of_class: class -> string + type 'a dict = (class * 'a) Ord_List.T + val dict_map_default: class * 'a -> ('a -> 'a) -> 'a dict -> 'a dict + val dict_update: class * 'a -> 'a dict -> 'a dict + val dict_merge: ('a * 'a -> 'a) -> 'a dict * 'a dict -> 'a dict + val dict_lookup: 'a dict -> class -> 'a list + val dict_get: 'a dict -> class -> 'a option + + (*types*) + val dest_funT: int -> typ -> typ list * typ + + (*terms*) + val dest_conj: term -> term * term + val dest_disj: term -> term * term + val under_quant: (term -> 'a) -> term -> 'a + val is_number: term -> bool + + (*patterns and instantiations*) + val mk_const_pat: theory -> string -> (ctyp -> 'a) -> 'a * cterm + val destT1: ctyp -> ctyp + val destT2: ctyp -> ctyp + val instTs: ctyp list -> ctyp list * cterm -> cterm + val instT: ctyp -> ctyp * cterm -> cterm + val instT': cterm -> ctyp * cterm -> cterm + + (*certified terms*) + val dest_cabs: cterm -> Proof.context -> cterm * Proof.context + val dest_all_cabs: cterm -> Proof.context -> cterm * Proof.context + val dest_cbinder: cterm -> Proof.context -> cterm * Proof.context + val dest_all_cbinders: cterm -> Proof.context -> cterm * Proof.context + val mk_cprop: cterm -> cterm + val dest_cprop: cterm -> cterm + val mk_cequals: cterm -> cterm -> cterm + val term_of: cterm -> term + val prop_of: thm -> term + + (*conversions*) + val if_conv: (term -> bool) -> conv -> conv -> conv + val if_true_conv: (term -> bool) -> conv -> conv + val if_exists_conv: (term -> bool) -> conv -> conv + val binders_conv: (Proof.context -> conv) -> Proof.context -> conv + val under_quant_conv: (Proof.context * cterm list -> conv) -> + Proof.context -> conv + val prop_conv: conv -> conv +end + +structure Old_SMT_Utils: OLD_SMT_UTILS = +struct + +(* basic combinators *) + +fun repeat f = + let fun rep x = (case f x of SOME y => rep y | NONE => x) + in rep end + +fun repeat_yield f = + let fun rep x y = (case f x y of SOME (x', y') => rep x' y' | NONE => (x, y)) + in rep end + + +(* class dictionaries *) + +type class = string list + +val basicC = [] + +fun string_of_class [] = "basic" + | string_of_class cs = "basic." ^ space_implode "." 
cs + +type 'a dict = (class * 'a) Ord_List.T + +fun class_ord ((cs1, _), (cs2, _)) = + rev_order (list_ord fast_string_ord (cs1, cs2)) + +fun dict_insert (cs, x) d = + if AList.defined (op =) d cs then d + else Ord_List.insert class_ord (cs, x) d + +fun dict_map_default (cs, x) f = + dict_insert (cs, x) #> AList.map_entry (op =) cs f + +fun dict_update (e as (_, x)) = dict_map_default e (K x) + +fun dict_merge val_merge = sort class_ord o AList.join (op =) (K val_merge) + +fun dict_lookup d cs = + let fun match (cs', x) = if is_prefix (op =) cs' cs then SOME x else NONE + in map_filter match d end + +fun dict_get d cs = + (case AList.lookup (op =) d cs of + NONE => (case cs of [] => NONE | _ => dict_get d (take (length cs - 1) cs)) + | SOME x => SOME x) + + +(* types *) + +val dest_funT = + let + fun dest Ts 0 T = (rev Ts, T) + | dest Ts i (Type ("fun", [T, U])) = dest (T::Ts) (i-1) U + | dest _ _ T = raise TYPE ("not a function type", [T], []) + in dest [] end + + +(* terms *) + +fun dest_conj (@{const HOL.conj} $ t $ u) = (t, u) + | dest_conj t = raise TERM ("not a conjunction", [t]) + +fun dest_disj (@{const HOL.disj} $ t $ u) = (t, u) + | dest_disj t = raise TERM ("not a disjunction", [t]) + +fun under_quant f t = + (case t of + Const (@{const_name All}, _) $ Abs (_, _, u) => under_quant f u + | Const (@{const_name Ex}, _) $ Abs (_, _, u) => under_quant f u + | _ => f t) + +val is_number = + let + fun is_num env (Const (@{const_name If}, _) $ _ $ t $ u) = + is_num env t andalso is_num env u + | is_num env (Const (@{const_name Let}, _) $ t $ Abs (_, _, u)) = + is_num (t :: env) u + | is_num env (Bound i) = i < length env andalso is_num env (nth env i) + | is_num _ t = can HOLogic.dest_number t + in is_num [] end + + +(* patterns and instantiations *) + +fun mk_const_pat thy name destT = + let val cpat = Thm.global_cterm_of thy (Const (name, Sign.the_const_type thy name)) + in (destT (Thm.ctyp_of_cterm cpat), cpat) end + +val destT1 = hd o Thm.dest_ctyp +val destT2 = hd o tl o Thm.dest_ctyp + +fun instTs cUs (cTs, ct) = Thm.instantiate_cterm (map (dest_TVar o Thm.typ_of) cTs ~~ cUs, []) ct +fun instT cU (cT, ct) = instTs [cU] ([cT], ct) +fun instT' ct = instT (Thm.ctyp_of_cterm ct) + + +(* certified terms *) + +fun dest_cabs ct ctxt = + (case Thm.term_of ct of + Abs _ => + let val (n, ctxt') = yield_singleton Variable.variant_fixes Name.uu ctxt + in (snd (Thm.dest_abs (SOME n) ct), ctxt') end + | _ => raise CTERM ("no abstraction", [ct])) + +val dest_all_cabs = repeat_yield (try o dest_cabs) + +fun dest_cbinder ct ctxt = + (case Thm.term_of ct of + Const _ $ Abs _ => dest_cabs (Thm.dest_arg ct) ctxt + | _ => raise CTERM ("not a binder", [ct])) + +val dest_all_cbinders = repeat_yield (try o dest_cbinder) + +val mk_cprop = Thm.apply (Thm.cterm_of @{context} @{const Trueprop}) + +fun dest_cprop ct = + (case Thm.term_of ct of + @{const Trueprop} $ _ => Thm.dest_arg ct + | _ => raise CTERM ("not a property", [ct])) + +val equals = mk_const_pat @{theory} @{const_name Pure.eq} destT1 +fun mk_cequals ct cu = Thm.mk_binop (instT' ct equals) ct cu + +val dest_prop = (fn @{const Trueprop} $ t => t | t => t) +fun term_of ct = dest_prop (Thm.term_of ct) +fun prop_of thm = dest_prop (Thm.prop_of thm) + + +(* conversions *) + +fun if_conv pred cv1 cv2 ct = if pred (Thm.term_of ct) then cv1 ct else cv2 ct + +fun if_true_conv pred cv = if_conv pred cv Conv.all_conv + +fun if_exists_conv pred = if_true_conv (Term.exists_subterm pred) + +fun binders_conv cv ctxt = + Conv.binder_conv (binders_conv cv o snd) 
ctxt else_conv cv ctxt + +fun under_quant_conv cv ctxt = + let + fun quant_conv inside ctxt cvs ct = + (case Thm.term_of ct of + Const (@{const_name All}, _) $ Abs _ => + Conv.binder_conv (under_conv cvs) ctxt + | Const (@{const_name Ex}, _) $ Abs _ => + Conv.binder_conv (under_conv cvs) ctxt + | _ => if inside then cv (ctxt, cvs) else Conv.all_conv) ct + and under_conv cvs (cv, ctxt) = quant_conv true ctxt (cv :: cvs) + in quant_conv false ctxt [] end + +fun prop_conv cv ct = + (case Thm.term_of ct of + @{const Trueprop} $ _ => Conv.arg_conv cv ct + | _ => raise CTERM ("not a property", [ct])) + +end diff --git a/src/test-gen/src/main/Old_SMT/old_smt_word.ML b/src/test-gen/src/main/Old_SMT/old_smt_word.ML new file mode 100644 index 0000000..4303aba --- /dev/null +++ b/src/test-gen/src/main/Old_SMT/old_smt_word.ML @@ -0,0 +1,146 @@ +(* Title: HOL/Library/Old_SMT/old_smt_word.ML + Author: Sascha Boehme, TU Muenchen + +SMT setup for words. +*) + +structure Old_SMT_Word: sig end = +struct + +open Word_Lib + +(* SMT-LIB logic *) + +fun smtlib_logic ts = + if exists (Term.exists_type (Term.exists_subtype is_wordT)) ts + then SOME "QF_AUFBV" + else NONE + + +(* SMT-LIB builtins *) + +local + val smtlibC = Old_SMTLIB_Interface.smtlibC + + val wordT = @{typ "'a::len word"} + + fun index1 n i = n ^ "[" ^ string_of_int i ^ "]" + fun index2 n i j = n ^ "[" ^ string_of_int i ^ ":" ^ string_of_int j ^ "]" + + fun word_typ (Type (@{type_name word}, [T])) = + Option.map (index1 "BitVec") (try dest_binT T) + | word_typ _ = NONE + + fun word_num (Type (@{type_name word}, [T])) i = + Option.map (index1 ("bv" ^ string_of_int i)) (try dest_binT T) + | word_num _ _ = NONE + + fun if_fixed pred m n T ts = + let val (Us, U) = Term.strip_type T + in + if pred (U, Us) then + SOME (n, length Us, ts, Term.list_comb o pair (Const (m, T))) + else NONE + end + + fun if_fixed_all m = if_fixed (forall (can dest_wordT) o (op ::)) m + fun if_fixed_args m = if_fixed (forall (can dest_wordT) o snd) m + + fun add_word_fun f (t, n) = + let val (m, _) = Term.dest_Const t + in Old_SMT_Builtin.add_builtin_fun smtlibC (Term.dest_Const t, K (f m n)) end + + fun hd2 xs = hd (tl xs) + + fun mk_nat i = @{const nat} $ HOLogic.mk_number @{typ nat} i + + fun dest_nat (@{const nat} $ n) = snd (HOLogic.dest_number n) + | dest_nat t = raise TERM ("not a natural number", [t]) + + fun mk_shift c [t, u] = Const c $ t $ mk_nat (snd (HOLogic.dest_number u)) + | mk_shift c ts = raise TERM ("bad arguments", Const c :: ts) + + fun shift m n T ts = + let val U = Term.domain_type T + in + (case (can dest_wordT U, try (dest_nat o hd2) ts) of + (true, SOME i) => + SOME (n, 2, [hd ts, HOLogic.mk_number U i], mk_shift (m, T)) + | _ => NONE) (* FIXME: also support non-numerical shifts *) + end + + fun mk_extract c i ts = Term.list_comb (Const c, mk_nat i :: ts) + + fun extract m n T ts = + let val U = Term.range_type (Term.range_type T) + in + (case (try (dest_nat o hd) ts, try dest_wordT U) of + (SOME lb, SOME i) => + SOME (index2 n (i + lb - 1) lb, 1, tl ts, mk_extract (m, T) lb) + | _ => NONE) + end + + fun mk_extend c ts = Term.list_comb (Const c, ts) + + fun extend m n T ts = + let val (U1, U2) = Term.dest_funT T + in + (case (try dest_wordT U1, try dest_wordT U2) of + (SOME i, SOME j) => + if j-i >= 0 then SOME (index1 n (j-i), 1, ts, mk_extend (m, T)) + else NONE + | _ => NONE) + end + + fun mk_rotate c i ts = Term.list_comb (Const c, mk_nat i :: ts) + + fun rotate m n T ts = + let val U = Term.domain_type (Term.range_type T) + in + (case (can 
dest_wordT U, try (dest_nat o hd) ts) of + (true, SOME i) => SOME (index1 n i, 1, tl ts, mk_rotate (m, T) i) + | _ => NONE) + end +in + +val setup_builtins = + Old_SMT_Builtin.add_builtin_typ smtlibC (wordT, word_typ, word_num) #> + fold (add_word_fun if_fixed_all) [ + (@{term "uminus :: 'a::len word => _"}, "bvneg"), + (@{term "plus :: 'a::len word => _"}, "bvadd"), + (@{term "minus :: 'a::len word => _"}, "bvsub"), + (@{term "times :: 'a::len word => _"}, "bvmul"), + (@{term "bitNOT :: 'a::len word => _"}, "bvnot"), + (@{term "bitAND :: 'a::len word => _"}, "bvand"), + (@{term "bitOR :: 'a::len word => _"}, "bvor"), + (@{term "bitXOR :: 'a::len word => _"}, "bvxor"), + (@{term "word_cat :: 'a::len word => _"}, "concat") ] #> + fold (add_word_fun shift) [ + (@{term "shiftl :: 'a::len word => _ "}, "bvshl"), + (@{term "shiftr :: 'a::len word => _"}, "bvlshr"), + (@{term "sshiftr :: 'a::len word => _"}, "bvashr") ] #> + add_word_fun extract + (@{term "slice :: _ => 'a::len word => _"}, "extract") #> + fold (add_word_fun extend) [ + (@{term "ucast :: 'a::len word => _"}, "zero_extend"), + (@{term "scast :: 'a::len word => _"}, "sign_extend") ] #> + fold (add_word_fun rotate) [ + (@{term word_rotl}, "rotate_left"), + (@{term word_rotr}, "rotate_right") ] #> + fold (add_word_fun if_fixed_args) [ + (@{term "less :: 'a::len word => _"}, "bvult"), + (@{term "less_eq :: 'a::len word => _"}, "bvule"), + (@{term word_sless}, "bvslt"), + (@{term word_sle}, "bvsle") ] + +end + + +(* setup *) + +val _ = + Theory.setup + (Context.theory_map + (Old_SMTLIB_Interface.add_logic (20, smtlib_logic) #> setup_builtins)) + +end diff --git a/src/test-gen/src/main/Old_SMT/old_smtlib_interface.ML b/src/test-gen/src/main/Old_SMT/old_smtlib_interface.ML new file mode 100644 index 0000000..dc00faa --- /dev/null +++ b/src/test-gen/src/main/Old_SMT/old_smtlib_interface.ML @@ -0,0 +1,161 @@ +(* Title: HOL/Library/Old_SMT/old_smtlib_interface.ML + Author: Sascha Boehme, TU Muenchen + +Interface to SMT solvers based on the SMT-LIB format. 
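The serializer below emits the old SMT-LIB 1 benchmark format: the logic is
chosen from the registered logic functions, defaulting to AUFLIA, and the
problem is printed as a single benchmark expression with extra sort and
function declarations, one :assumption per translated theorem and a trivial
:formula, followed by the caller's comments.  Roughly:

  (benchmark Isabelle
  :status unknown
  :logic AUFLIA
  :extrafuns ( ( f1 Int Int ) )
  :assumption ...
  :formula true)
  ; comments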
+*) + +signature OLD_SMTLIB_INTERFACE = +sig + val smtlibC: Old_SMT_Utils.class + val add_logic: int * (term list -> string option) -> Context.generic -> + Context.generic + val translate_config: Proof.context -> Old_SMT_Translate.config + val setup: theory -> theory +end + +structure Old_SMTLIB_Interface: OLD_SMTLIB_INTERFACE = +struct + + +val smtlibC = ["smtlib"] + + +(* builtins *) + +local + fun int_num _ i = SOME (string_of_int i) + + fun is_linear [t] = Old_SMT_Utils.is_number t + | is_linear [t, u] = Old_SMT_Utils.is_number t orelse Old_SMT_Utils.is_number u + | is_linear _ = false + + fun times _ _ ts = + let val mk = Term.list_comb o pair @{const times (int)} + in if is_linear ts then SOME ("*", 2, ts, mk) else NONE end +in + +val setup_builtins = + Old_SMT_Builtin.add_builtin_typ smtlibC (@{typ int}, K (SOME "Int"), int_num) #> + fold (Old_SMT_Builtin.add_builtin_fun' smtlibC) [ + (@{const True}, "true"), + (@{const False}, "false"), + (@{const Not}, "not"), + (@{const HOL.conj}, "and"), + (@{const HOL.disj}, "or"), + (@{const HOL.implies}, "implies"), + (@{const HOL.eq (bool)}, "iff"), + (@{const HOL.eq ('a)}, "="), + (@{const If (bool)}, "if_then_else"), + (@{const If ('a)}, "ite"), + (@{const less (int)}, "<"), + (@{const less_eq (int)}, "<="), + (@{const uminus (int)}, "~"), + (@{const plus (int)}, "+"), + (@{const minus (int)}, "-") ] #> + Old_SMT_Builtin.add_builtin_fun smtlibC + (Term.dest_Const @{const times (int)}, times) + +end + + +(* serialization *) + +(** header **) + +fun fst_int_ord ((i1, _), (i2, _)) = int_ord (i1, i2) + +structure Logics = Generic_Data +( + type T = (int * (term list -> string option)) list + val empty = [] + val extend = I + fun merge data = Ord_List.merge fst_int_ord data +) + +fun add_logic pf = Logics.map (Ord_List.insert fst_int_ord pf) + +fun choose_logic ctxt ts = + let + fun choose [] = "AUFLIA" + | choose ((_, f) :: fs) = (case f ts of SOME s => s | NONE => choose fs) + in [":logic " ^ choose (Logics.get (Context.Proof ctxt))] end + + +(** serialization **) + +val add = Buffer.add +fun sep f = add " " #> f +fun enclose l r f = sep (add l #> f #> add r) +val par = enclose "(" ")" +fun app n f = (fn [] => sep (add n) | xs => par (add n #> fold f xs)) +fun line f = f #> add "\n" + +fun var i = add "?v" #> add (string_of_int i) + +fun sterm l (Old_SMT_Translate.SVar i) = sep (var (l - i - 1)) + | sterm l (Old_SMT_Translate.SApp (n, ts)) = app n (sterm l) ts + | sterm _ (Old_SMT_Translate.SLet _) = + raise Fail "SMT-LIB: unsupported let expression" + | sterm l (Old_SMT_Translate.SQua (q, ss, ps, w, t)) = + let + fun quant Old_SMT_Translate.SForall = add "forall" + | quant Old_SMT_Translate.SExists = add "exists" + val vs = map_index (apfst (Integer.add l)) ss + fun var_decl (i, s) = par (var i #> sep (add s)) + val sub = sterm (l + length ss) + fun pat kind ts = sep (add kind #> enclose "{" " }" (fold sub ts)) + fun pats (Old_SMT_Translate.SPat ts) = pat ":pat" ts + | pats (Old_SMT_Translate.SNoPat ts) = pat ":nopat" ts + fun weight NONE = I + | weight (SOME i) = + sep (add ":weight { " #> add (string_of_int i) #> add " }") + in + par (quant q #> fold var_decl vs #> sub t #> fold pats ps #> weight w) + end + +fun ssort sorts = sort fast_string_ord sorts +fun fsort funcs = sort (prod_ord fast_string_ord (K EQUAL)) funcs + +fun sdatatypes decls = + let + fun con (n, []) = sep (add n) + | con (n, sels) = par (add n #> + fold (fn (n, s) => par (add n #> sep (add s))) sels) + fun dtyp (n, decl) = add n #> fold con decl + in line (add ":datatypes 
" #> par (fold (par o dtyp) decls)) end + +fun serialize comments {header, sorts, dtyps, funcs} ts = + Buffer.empty + |> line (add "(benchmark Isabelle") + |> line (add ":status unknown") + |> fold (line o add) header + |> length sorts > 0 ? + line (add ":extrasorts" #> par (fold (sep o add) (ssort sorts))) + |> fold sdatatypes dtyps + |> length funcs > 0 ? ( + line (add ":extrafuns" #> add " (") #> + fold (fn (f, (ss, s)) => + line (sep (app f (sep o add) (ss @ [s])))) (fsort funcs) #> + line (add ")")) + |> fold (fn t => line (add ":assumption" #> sterm 0 t)) ts + |> line (add ":formula true)") + |> fold (fn str => line (add "; " #> add str)) comments + |> Buffer.content + + +(* interface *) + +fun translate_config ctxt = { + prefixes = { + sort_prefix = "S", + func_prefix = "f"}, + header = choose_logic ctxt, + is_fol = true, + has_datatypes = false, + serialize = serialize} + +val setup = Context.theory_map ( + setup_builtins #> + Old_SMT_Translate.add_config (smtlibC, translate_config)) + +end diff --git a/src/test-gen/src/main/Old_SMT/old_z3_interface.ML b/src/test-gen/src/main/Old_SMT/old_z3_interface.ML new file mode 100644 index 0000000..ec9f3d6 --- /dev/null +++ b/src/test-gen/src/main/Old_SMT/old_z3_interface.ML @@ -0,0 +1,239 @@ +(* Title: HOL/Library/Old_SMT/old_z3_interface.ML + Author: Sascha Boehme, TU Muenchen + +Interface to Z3 based on a relaxed version of SMT-LIB. +*) + +signature OLD_Z3_INTERFACE = +sig + val smtlib_z3C: Old_SMT_Utils.class + val setup: theory -> theory + + datatype sym = Sym of string * sym list + type mk_builtins = { + mk_builtin_typ: sym -> typ option, + mk_builtin_num: theory -> int -> typ -> cterm option, + mk_builtin_fun: theory -> sym -> cterm list -> cterm option } + val add_mk_builtins: mk_builtins -> Context.generic -> Context.generic + val mk_builtin_typ: Proof.context -> sym -> typ option + val mk_builtin_num: Proof.context -> int -> typ -> cterm option + val mk_builtin_fun: Proof.context -> sym -> cterm list -> cterm option + + val is_builtin_theory_term: Proof.context -> term -> bool +end + +structure Old_Z3_Interface: OLD_Z3_INTERFACE = +struct + +val smtlib_z3C = Old_SMTLIB_Interface.smtlibC @ ["z3"] + + + +(* interface *) + +local + fun translate_config ctxt = + let + val {prefixes, header, is_fol, serialize, ...} = + Old_SMTLIB_Interface.translate_config ctxt + in + {prefixes=prefixes, header=header, is_fol=is_fol, serialize=serialize, + has_datatypes=true} + end + + fun is_div_mod @{const divide (int)} = true + | is_div_mod @{const modulo (int)} = true + | is_div_mod _ = false + + val div_by_z3div = @{lemma + "ALL k l. k div l = ( + if k = 0 | l = 0 then 0 + else if (0 < k & 0 < l) | (k < 0 & 0 < l) then z3div k l + else z3div (-k) (-l))" + by (simp add: z3div_def)} + + val mod_by_z3mod = @{lemma + "ALL k l. 
k mod l = ( + if l = 0 then k + else if k = 0 then 0 + else if (0 < k & 0 < l) | (k < 0 & 0 < l) then z3mod k l + else - z3mod (-k) (-l))" + by (simp add: z3mod_def)} + + val have_int_div_mod = + exists (Term.exists_subterm is_div_mod o Thm.prop_of) + + fun add_div_mod _ (thms, extra_thms) = + if have_int_div_mod thms orelse have_int_div_mod extra_thms then + (thms, div_by_z3div :: mod_by_z3mod :: extra_thms) + else (thms, extra_thms) + + val setup_builtins = + Old_SMT_Builtin.add_builtin_fun' smtlib_z3C (@{const times (int)}, "*") #> + Old_SMT_Builtin.add_builtin_fun' smtlib_z3C (@{const z3div}, "div") #> + Old_SMT_Builtin.add_builtin_fun' smtlib_z3C (@{const z3mod}, "mod") +in + +val setup = Context.theory_map ( + setup_builtins #> + Old_SMT_Normalize.add_extra_norm (smtlib_z3C, add_div_mod) #> + Old_SMT_Translate.add_config (smtlib_z3C, translate_config)) + +end + + + +(* constructors *) + +datatype sym = Sym of string * sym list + + +(** additional constructors **) + +type mk_builtins = { + mk_builtin_typ: sym -> typ option, + mk_builtin_num: theory -> int -> typ -> cterm option, + mk_builtin_fun: theory -> sym -> cterm list -> cterm option } + +fun chained _ [] = NONE + | chained f (b :: bs) = (case f b of SOME y => SOME y | NONE => chained f bs) + +fun chained_mk_builtin_typ bs sym = + chained (fn {mk_builtin_typ=mk, ...} : mk_builtins => mk sym) bs + +fun chained_mk_builtin_num ctxt bs i T = + let val thy = Proof_Context.theory_of ctxt + in chained (fn {mk_builtin_num=mk, ...} : mk_builtins => mk thy i T) bs end + +fun chained_mk_builtin_fun ctxt bs s cts = + let val thy = Proof_Context.theory_of ctxt + in chained (fn {mk_builtin_fun=mk, ...} : mk_builtins => mk thy s cts) bs end + +fun fst_int_ord ((i1, _), (i2, _)) = int_ord (i1, i2) + +structure Mk_Builtins = Generic_Data +( + type T = (int * mk_builtins) list + val empty = [] + val extend = I + fun merge data = Ord_List.merge fst_int_ord data +) + +fun add_mk_builtins mk = + Mk_Builtins.map (Ord_List.insert fst_int_ord (serial (), mk)) + +fun get_mk_builtins ctxt = map snd (Mk_Builtins.get (Context.Proof ctxt)) + + +(** basic and additional constructors **) + +fun mk_builtin_typ _ (Sym ("Bool", _)) = SOME @{typ bool} + | mk_builtin_typ _ (Sym ("Int", _)) = SOME @{typ int} + | mk_builtin_typ _ (Sym ("bool", _)) = SOME @{typ bool} (*FIXME: legacy*) + | mk_builtin_typ _ (Sym ("int", _)) = SOME @{typ int} (*FIXME: legacy*) + | mk_builtin_typ ctxt sym = chained_mk_builtin_typ (get_mk_builtins ctxt) sym + +fun mk_builtin_num _ i @{typ int} = SOME (Numeral.mk_cnumber @{ctyp int} i) + | mk_builtin_num ctxt i T = + chained_mk_builtin_num ctxt (get_mk_builtins ctxt) i T + +val mk_true = Thm.cterm_of @{context} (@{const Not} $ @{const False}) +val mk_false = Thm.cterm_of @{context} @{const False} +val mk_not = Thm.apply (Thm.cterm_of @{context} @{const Not}) +val mk_implies = Thm.mk_binop (Thm.cterm_of @{context} @{const HOL.implies}) +val mk_iff = Thm.mk_binop (Thm.cterm_of @{context} @{const HOL.eq (bool)}) +val conj = Thm.cterm_of @{context} @{const HOL.conj} +val disj = Thm.cterm_of @{context} @{const HOL.disj} + +fun mk_nary _ cu [] = cu + | mk_nary ct _ cts = uncurry (fold_rev (Thm.mk_binop ct)) (split_last cts) + +val eq = Old_SMT_Utils.mk_const_pat @{theory} @{const_name HOL.eq} Old_SMT_Utils.destT1 +fun mk_eq ct cu = Thm.mk_binop (Old_SMT_Utils.instT' ct eq) ct cu + +val if_term = + Old_SMT_Utils.mk_const_pat @{theory} @{const_name If} + (Old_SMT_Utils.destT1 o Old_SMT_Utils.destT2) +fun mk_if cc ct cu = + Thm.mk_binop (Thm.apply 
(Old_SMT_Utils.instT' ct if_term) cc) ct cu + +val nil_term = + Old_SMT_Utils.mk_const_pat @{theory} @{const_name Nil} Old_SMT_Utils.destT1 +val cons_term = + Old_SMT_Utils.mk_const_pat @{theory} @{const_name Cons} Old_SMT_Utils.destT1 +fun mk_list cT cts = + fold_rev (Thm.mk_binop (Old_SMT_Utils.instT cT cons_term)) cts + (Old_SMT_Utils.instT cT nil_term) + +val distinct = Old_SMT_Utils.mk_const_pat @{theory} @{const_name distinct} + (Old_SMT_Utils.destT1 o Old_SMT_Utils.destT1) +fun mk_distinct [] = mk_true + | mk_distinct (cts as (ct :: _)) = + Thm.apply (Old_SMT_Utils.instT' ct distinct) + (mk_list (Thm.ctyp_of_cterm ct) cts) + +val access = + Old_SMT_Utils.mk_const_pat @{theory} @{const_name fun_app} Old_SMT_Utils.destT1 +fun mk_access array = Thm.apply (Old_SMT_Utils.instT' array access) array + +val update = Old_SMT_Utils.mk_const_pat @{theory} @{const_name fun_upd} + (Thm.dest_ctyp o Old_SMT_Utils.destT1) +fun mk_update array index value = + let val cTs = Thm.dest_ctyp (Thm.ctyp_of_cterm array) + in + Thm.apply (Thm.mk_binop (Old_SMT_Utils.instTs cTs update) array index) value + end + +val mk_uminus = Thm.apply (Thm.cterm_of @{context} @{const uminus (int)}) +val add = Thm.cterm_of @{context} @{const plus (int)} +val int0 = Numeral.mk_cnumber @{ctyp int} 0 +val mk_sub = Thm.mk_binop (Thm.cterm_of @{context} @{const minus (int)}) +val mk_mul = Thm.mk_binop (Thm.cterm_of @{context} @{const times (int)}) +val mk_div = Thm.mk_binop (Thm.cterm_of @{context} @{const z3div}) +val mk_mod = Thm.mk_binop (Thm.cterm_of @{context} @{const z3mod}) +val mk_lt = Thm.mk_binop (Thm.cterm_of @{context} @{const less (int)}) +val mk_le = Thm.mk_binop (Thm.cterm_of @{context} @{const less_eq (int)}) + +fun mk_builtin_fun ctxt sym cts = + (case (sym, cts) of + (Sym ("true", _), []) => SOME mk_true + | (Sym ("false", _), []) => SOME mk_false + | (Sym ("not", _), [ct]) => SOME (mk_not ct) + | (Sym ("and", _), _) => SOME (mk_nary conj mk_true cts) + | (Sym ("or", _), _) => SOME (mk_nary disj mk_false cts) + | (Sym ("implies", _), [ct, cu]) => SOME (mk_implies ct cu) + | (Sym ("iff", _), [ct, cu]) => SOME (mk_iff ct cu) + | (Sym ("~", _), [ct, cu]) => SOME (mk_iff ct cu) + | (Sym ("xor", _), [ct, cu]) => SOME (mk_not (mk_iff ct cu)) + | (Sym ("if", _), [ct1, ct2, ct3]) => SOME (mk_if ct1 ct2 ct3) + | (Sym ("ite", _), [ct1, ct2, ct3]) => SOME (mk_if ct1 ct2 ct3) (* FIXME: remove *) + | (Sym ("=", _), [ct, cu]) => SOME (mk_eq ct cu) + | (Sym ("distinct", _), _) => SOME (mk_distinct cts) + | (Sym ("select", _), [ca, ck]) => SOME (Thm.apply (mk_access ca) ck) + | (Sym ("store", _), [ca, ck, cv]) => SOME (mk_update ca ck cv) + | _ => + (case (sym, try (Thm.typ_of_cterm o hd) cts, cts) of + (Sym ("+", _), SOME @{typ int}, _) => SOME (mk_nary add int0 cts) + | (Sym ("-", _), SOME @{typ int}, [ct]) => SOME (mk_uminus ct) + | (Sym ("-", _), SOME @{typ int}, [ct, cu]) => SOME (mk_sub ct cu) + | (Sym ("*", _), SOME @{typ int}, [ct, cu]) => SOME (mk_mul ct cu) + | (Sym ("div", _), SOME @{typ int}, [ct, cu]) => SOME (mk_div ct cu) + | (Sym ("mod", _), SOME @{typ int}, [ct, cu]) => SOME (mk_mod ct cu) + | (Sym ("<", _), SOME @{typ int}, [ct, cu]) => SOME (mk_lt ct cu) + | (Sym ("<=", _), SOME @{typ int}, [ct, cu]) => SOME (mk_le ct cu) + | (Sym (">", _), SOME @{typ int}, [ct, cu]) => SOME (mk_lt cu ct) + | (Sym (">=", _), SOME @{typ int}, [ct, cu]) => SOME (mk_le cu ct) + | _ => chained_mk_builtin_fun ctxt (get_mk_builtins ctxt) sym cts)) + + + +(* abstraction *) + +fun is_builtin_theory_term ctxt t = + if 
Old_SMT_Builtin.is_builtin_num ctxt t then true + else + (case Term.strip_comb t of + (Const c, ts) => Old_SMT_Builtin.is_builtin_fun ctxt c ts + | _ => false) + +end diff --git a/src/test-gen/src/main/Old_SMT/old_z3_model.ML b/src/test-gen/src/main/Old_SMT/old_z3_model.ML new file mode 100644 index 0000000..b61f104 --- /dev/null +++ b/src/test-gen/src/main/Old_SMT/old_z3_model.ML @@ -0,0 +1,337 @@ +(* Title: HOL/Library/Old_SMT/old_z3_model.ML + Author: Sascha Boehme and Philipp Meyer, TU Muenchen + +Parser for counterexamples generated by Z3. +*) + +signature OLD_Z3_MODEL = +sig + val parse_counterex: Proof.context -> Old_SMT_Translate.recon -> string list -> + term list * term list +end + +structure Old_Z3_Model: OLD_Z3_MODEL = +struct + + +(* counterexample expressions *) + +datatype expr = True | False | Number of int * int option | Value of int | + Array of array | App of string * expr list +and array = Fresh of expr | Store of (array * expr) * expr + + +(* parsing *) + +val space = Scan.many Symbol.is_ascii_blank +fun spaced p = p --| space +fun in_parens p = spaced (Scan.$$ "(") |-- p --| spaced (Scan.$$ ")") +fun in_braces p = spaced (Scan.$$ "{") |-- p --| spaced (Scan.$$ "}") + +val digit = (fn + "0" => SOME 0 | "1" => SOME 1 | "2" => SOME 2 | "3" => SOME 3 | + "4" => SOME 4 | "5" => SOME 5 | "6" => SOME 6 | "7" => SOME 7 | + "8" => SOME 8 | "9" => SOME 9 | _ => NONE) + +val nat_num = spaced (Scan.repeat1 (Scan.some digit) >> + (fn ds => fold (fn d => fn i => i * 10 + d) ds 0)) +val int_num = spaced (Scan.optional ($$ "-" >> K (fn i => ~i)) I :|-- + (fn sign => nat_num >> sign)) + +val is_char = Symbol.is_ascii_letter orf Symbol.is_ascii_digit orf + member (op =) (raw_explode "_+*-/%~=<>$&|?!.@^#") +val name = spaced (Scan.many1 is_char >> implode) + +fun $$$ s = spaced (Scan.this_string s) + +fun array_expr st = st |> in_parens ( + $$$ "const" |-- expr >> Fresh || + $$$ "store" |-- array_expr -- expr -- expr >> Store) + +and expr st = st |> ( + $$$ "true" >> K True || + $$$ "false" >> K False || + int_num -- Scan.option ($$$ "/" |-- int_num) >> Number || + $$$ "val!" 
|-- nat_num >> Value || + name >> (App o rpair []) || + array_expr >> Array || + in_parens (name -- Scan.repeat1 expr) >> App) + +fun args st = ($$$ "->" >> K [] || expr ::: args) st +val args_case = args -- expr +val else_case = $$$ "else" -- $$$ "->" |-- expr >> pair ([] : expr list) + +val func = + let fun cases st = (else_case >> single || args_case ::: cases) st + in in_braces cases end + +val cex = space |-- + Scan.repeat (name --| $$$ "->" -- (func || expr >> (single o pair []))) + +fun resolve terms ((n, k), cases) = + (case Symtab.lookup terms n of + NONE => NONE + | SOME t => SOME ((t, k), cases)) + +fun annotate _ (_, []) = NONE + | annotate terms (n, [([], c)]) = resolve terms ((n, 0), (c, [])) + | annotate _ (_, [_]) = NONE + | annotate terms (n, cases as (args, _) :: _) = + let val (cases', (_, else_case)) = split_last cases + in resolve terms ((n, length args), (else_case, cases')) end + +fun read_cex terms ls = + maps (cons "\n" o raw_explode) ls + |> try (fst o Scan.finite Symbol.stopper cex) + |> the_default [] + |> map_filter (annotate terms) + + +(* translation into terms *) + +fun max_value vs = + let + fun max_val_expr (Value i) = Integer.max i + | max_val_expr (App (_, es)) = fold max_val_expr es + | max_val_expr (Array a) = max_val_array a + | max_val_expr _ = I + + and max_val_array (Fresh e) = max_val_expr e + | max_val_array (Store ((a, e1), e2)) = + max_val_array a #> max_val_expr e1 #> max_val_expr e2 + + fun max_val (_, (ec, cs)) = + max_val_expr ec #> fold (fn (es, e) => fold max_val_expr (e :: es)) cs + + in fold max_val vs ~1 end + +fun with_context terms f vs = fst (fold_map f vs (terms, max_value vs + 1)) + +fun get_term n T es (cx as (terms, next_val)) = + (case Symtab.lookup terms n of + SOME t => ((t, es), cx) + | NONE => + let val t = Var (("skolem", next_val), T) + in ((t, []), (Symtab.update (n, t) terms, next_val + 1)) end) + +fun trans_expr _ True = pair @{const True} + | trans_expr _ False = pair @{const False} + | trans_expr T (Number (i, NONE)) = pair (HOLogic.mk_number T i) + | trans_expr T (Number (i, SOME j)) = + pair (Const (@{const_name divide}, [T, T] ---> T) $ + HOLogic.mk_number T i $ HOLogic.mk_number T j) + | trans_expr T (Value i) = pair (Var (("value", i), T)) + | trans_expr T (Array a) = trans_array T a + | trans_expr T (App (n, es)) = get_term n T es #-> (fn (t, es') => + let val Ts = fst (Old_SMT_Utils.dest_funT (length es') (Term.fastype_of t)) + in + fold_map (uncurry trans_expr) (Ts ~~ es') #>> Term.list_comb o pair t + end) + +and trans_array T a = + let val (dT, rT) = Term.dest_funT T + in + (case a of + Fresh e => trans_expr rT e #>> (fn t => Abs ("x", dT, t)) + | Store ((a', e1), e2) => + trans_array T a' ##>> trans_expr dT e1 ##>> trans_expr rT e2 #>> + (fn ((m, k), v) => + Const (@{const_name fun_upd}, [T, dT, rT] ---> T) $ m $ k $ v)) + end + +fun trans_pattern T ([], e) = trans_expr T e #>> pair [] + | trans_pattern T (arg :: args, e) = + trans_expr (Term.domain_type T) arg ##>> + trans_pattern (Term.range_type T) (args, e) #>> + (fn (arg', (args', e')) => (arg' :: args', e')) + +fun mk_fun_upd T U = Const (@{const_name fun_upd}, [T --> U, T, U, T] ---> U) + +fun mk_update ([], u) _ = u + | mk_update ([t], u) f = + uncurry mk_fun_upd (Term.dest_funT (Term.fastype_of f)) $ f $ t $ u + | mk_update (t :: ts, u) f = + let + val (dT, rT) = Term.dest_funT (Term.fastype_of f) + val (dT', rT') = Term.dest_funT rT + in + mk_fun_upd dT rT $ f $ t $ + mk_update (ts, u) (absdummy dT' (Const ("_", rT'))) + end + +fun mk_lambda Ts 
(t, pats) = + fold_rev absdummy Ts t |> fold mk_update pats + +fun translate ((t, k), (e, cs)) = + let + val T = Term.fastype_of t + val (Us, U) = Old_SMT_Utils.dest_funT k (Term.fastype_of t) + + fun mk_full_def u' pats = + pats + |> filter_out (fn (_, u) => u aconv u') + |> HOLogic.mk_eq o pair t o mk_lambda Us o pair u' + + fun mk_eq (us, u) = HOLogic.mk_eq (Term.list_comb (t, us), u) + fun mk_eqs u' [] = [HOLogic.mk_eq (t, u')] + | mk_eqs _ pats = map mk_eq pats + in + trans_expr U e ##>> + (if k = 0 then pair [] else fold_map (trans_pattern T) cs) #>> + (fn (u', pats) => (mk_eqs u' pats, mk_full_def u' pats)) + end + + +(* normalization *) + +fun partition_eqs f = + let + fun part t (xs, ts) = + (case try HOLogic.dest_eq t of + SOME (l, r) => (case f l r of SOME x => (x::xs, ts) | _ => (xs, t::ts)) + | NONE => (xs, t :: ts)) + in (fn ts => fold part ts ([], [])) end + +fun first_eq pred = + let + fun part _ [] = NONE + | part us (t :: ts) = + (case try (pred o HOLogic.dest_eq) t of + SOME (SOME lr) => SOME (lr, fold cons us ts) + | _ => part (t :: us) ts) + in (fn ts => part [] ts) end + +fun replace_vars tab = + let + fun repl v = the_default v (AList.lookup (op aconv) tab v) + fun replace (v as Var _) = repl v + | replace (v as Free _) = repl v + | replace t = t + in map (Term.map_aterms replace) end + +fun remove_int_nat_coercions (eqs, defs) = + let + fun mk_nat_num t i = + (case try HOLogic.dest_number i of + SOME (_, n) => SOME (t, HOLogic.mk_number @{typ nat} n) + | NONE => NONE) + fun nat_of (@{const of_nat (int)} $ (t as Var _)) i = mk_nat_num t i + | nat_of (@{const nat} $ i) (t as Var _) = mk_nat_num t i + | nat_of _ _ = NONE + val (nats, eqs') = partition_eqs nat_of eqs + + fun is_coercion t = + (case try HOLogic.dest_eq t of + SOME (@{const of_nat (int)}, _) => true + | SOME (@{const nat}, _) => true + | _ => false) + in apply2 (replace_vars nats) (eqs', filter_out is_coercion defs) end + +fun unfold_funapp (eqs, defs) = + let + fun unfold_app (Const (@{const_name fun_app}, _) $ f $ t) = f $ t + | unfold_app t = t + fun unfold_eq ((eq as Const (@{const_name HOL.eq}, _)) $ t $ u) = + eq $ unfold_app t $ u + | unfold_eq t = t + + fun is_fun_app t = + (case try HOLogic.dest_eq t of + SOME (Const (@{const_name fun_app}, _), _) => true + | _ => false) + + in (map unfold_eq eqs, filter_out is_fun_app defs) end + +val unfold_eqs = + let + val is_ground = not o Term.exists_subterm Term.is_Var + fun is_non_rec (v, t) = not (Term.exists_subterm (equal v) t) + + fun rewr_var (l as Var _, r) = if is_ground r then SOME (l, r) else NONE + | rewr_var (r, l as Var _) = if is_ground r then SOME (l, r) else NONE + | rewr_var _ = NONE + + fun rewr_free' e = if is_non_rec e then SOME e else NONE + fun rewr_free (e as (Free _, _)) = rewr_free' e + | rewr_free (e as (_, Free _)) = rewr_free' (swap e) + | rewr_free _ = NONE + + fun is_trivial (Const (@{const_name HOL.eq}, _) $ t $ u) = t aconv u + | is_trivial _ = false + + fun replace r = replace_vars [r] #> filter_out is_trivial + + fun unfold_vars (es, ds) = + (case first_eq rewr_var es of + SOME (lr, es') => unfold_vars (apply2 (replace lr) (es', ds)) + | NONE => (es, ds)) + + fun unfold_frees ues (es, ds) = + (case first_eq rewr_free es of + SOME (lr, es') => + apply2 (replace lr) (es', ds) + |> unfold_frees (HOLogic.mk_eq lr :: replace lr ues) + | NONE => (ues @ es, ds)) + + in unfold_vars #> unfold_frees [] end + +fun swap_free ((eq as Const (@{const_name HOL.eq}, _)) $ t $ (u as Free _)) = + eq $ u $ t + | swap_free t = t + +fun 
frees_for_vars ctxt (eqs, defs) = + let + fun fresh_free i T (cx as (frees, ctxt)) = + (case Inttab.lookup frees i of + SOME t => (t, cx) + | NONE => + let + val (n, ctxt') = yield_singleton Variable.variant_fixes "" ctxt + val t = Free (n, T) + in (t, (Inttab.update (i, t) frees, ctxt')) end) + + fun repl_var (Var ((_, i), T)) = fresh_free i T + | repl_var (t $ u) = repl_var t ##>> repl_var u #>> op $ + | repl_var (Abs (n, T, t)) = repl_var t #>> (fn t' => Abs (n, T, t')) + | repl_var t = pair t + in + (Inttab.empty, ctxt) + |> fold_map repl_var eqs + ||>> fold_map repl_var defs + |> fst + end + + +(* overall procedure *) + +val is_free_constraint = Term.exists_subterm (fn Free _ => true | _ => false) + +fun is_free_def (Const (@{const_name HOL.eq}, _) $ Free _ $ _) = true + | is_free_def _ = false + +fun defined tp = + try (apply2 (fst o HOLogic.dest_eq)) tp + |> the_default false o Option.map (op aconv) + +fun add_free_defs free_cs defs = + let val (free_defs, defs') = List.partition is_free_def defs + in (free_cs @ filter_out (member defined free_cs) free_defs, defs') end + +fun is_const_def (Const (@{const_name HOL.eq}, _) $ Const _ $ _) = true + | is_const_def _ = false + +fun parse_counterex ctxt ({terms, ...} : Old_SMT_Translate.recon) ls = + read_cex terms ls + |> with_context terms translate + |> apfst flat o split_list + |> remove_int_nat_coercions + |> unfold_funapp + |> unfold_eqs + |>> map swap_free + |>> filter is_free_constraint + |-> add_free_defs + |> frees_for_vars ctxt + ||> filter is_const_def + +end + diff --git a/src/test-gen/src/main/Old_SMT/old_z3_proof_literals.ML b/src/test-gen/src/main/Old_SMT/old_z3_proof_literals.ML new file mode 100644 index 0000000..89ce7d1 --- /dev/null +++ b/src/test-gen/src/main/Old_SMT/old_z3_proof_literals.ML @@ -0,0 +1,363 @@ +(* Title: HOL/Library/Old_SMT/old_z3_proof_literals.ML + Author: Sascha Boehme, TU Muenchen + +Proof tools related to conjunctions and disjunctions. 
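+
+Roughly: a literal table ("littab") maps each literal to a theorem proving
+it, "explode" decomposes a theorem whose proposition is a conjunction such
+as "P & (Q & R)" or a negated disjunction such as "~(P | Q)" into theorems
+for its literals, and "join" rebuilds a proof of such a term from a literal
+table, independently of nesting and association order.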
+*) + +signature OLD_Z3_PROOF_LITERALS = +sig + (*literal table*) + type littab = thm Termtab.table + val make_littab: thm list -> littab + val insert_lit: thm -> littab -> littab + val delete_lit: thm -> littab -> littab + val lookup_lit: littab -> term -> thm option + val get_first_lit: (term -> bool) -> littab -> thm option + + (*rules*) + val true_thm: thm + val rewrite_true: thm + + (*properties*) + val is_conj: term -> bool + val is_disj: term -> bool + val exists_lit: bool -> (term -> bool) -> term -> bool + val negate: cterm -> cterm + + (*proof tools*) + val explode: bool -> bool -> bool -> term list -> thm -> thm list + val join: bool -> littab -> term -> thm + val prove_conj_disj_eq: cterm -> thm +end + +structure Old_Z3_Proof_Literals: OLD_Z3_PROOF_LITERALS = +struct + + + +(* literal table *) + +type littab = thm Termtab.table + +fun make_littab thms = + fold (Termtab.update o `Old_SMT_Utils.prop_of) thms Termtab.empty + +fun insert_lit thm = Termtab.update (`Old_SMT_Utils.prop_of thm) +fun delete_lit thm = Termtab.delete (Old_SMT_Utils.prop_of thm) +fun lookup_lit lits = Termtab.lookup lits +fun get_first_lit f = + Termtab.get_first (fn (t, thm) => if f t then SOME thm else NONE) + + + +(* rules *) + +val true_thm = @{lemma "~False" by simp} +val rewrite_true = @{lemma "True == ~ False" by simp} + + + +(* properties and term operations *) + +val is_neg = (fn @{const Not} $ _ => true | _ => false) +fun is_neg' f = (fn @{const Not} $ t => f t | _ => false) +val is_dneg = is_neg' is_neg +val is_conj = (fn @{const HOL.conj} $ _ $ _ => true | _ => false) +val is_disj = (fn @{const HOL.disj} $ _ $ _ => true | _ => false) + +fun dest_disj_term' f = (fn + @{const Not} $ (@{const HOL.disj} $ t $ u) => SOME (f t, f u) + | _ => NONE) + +val dest_conj_term = (fn @{const HOL.conj} $ t $ u => SOME (t, u) | _ => NONE) +val dest_disj_term = + dest_disj_term' (fn @{const Not} $ t => t | t => @{const Not} $ t) + +fun exists_lit is_conj P = + let + val dest = if is_conj then dest_conj_term else dest_disj_term + fun exists t = P t orelse + (case dest t of + SOME (t1, t2) => exists t1 orelse exists t2 + | NONE => false) + in exists end + +val negate = Thm.apply (Thm.cterm_of @{context} @{const Not}) + + + +(* proof tools *) + +(** explosion of conjunctions and disjunctions **) + +local + val precomp = Old_Z3_Proof_Tools.precompose2 + + fun destc ct = Thm.dest_binop (Thm.dest_arg ct) + val dest_conj1 = precomp destc @{thm conjunct1} + val dest_conj2 = precomp destc @{thm conjunct2} + fun dest_conj_rules t = + dest_conj_term t |> Option.map (K (dest_conj1, dest_conj2)) + + fun destd f ct = f (Thm.dest_binop (Thm.dest_arg (Thm.dest_arg ct))) + val dn1 = apfst Thm.dest_arg and dn2 = apsnd Thm.dest_arg + val dest_disj1 = precomp (destd I) @{lemma "~(P | Q) ==> ~P" by fast} + val dest_disj2 = precomp (destd dn1) @{lemma "~(~P | Q) ==> P" by fast} + val dest_disj3 = precomp (destd I) @{lemma "~(P | Q) ==> ~Q" by fast} + val dest_disj4 = precomp (destd dn2) @{lemma "~(P | ~Q) ==> Q" by fast} + + fun dest_disj_rules t = + (case dest_disj_term' is_neg t of + SOME (true, true) => SOME (dest_disj2, dest_disj4) + | SOME (true, false) => SOME (dest_disj2, dest_disj3) + | SOME (false, true) => SOME (dest_disj1, dest_disj4) + | SOME (false, false) => SOME (dest_disj1, dest_disj3) + | NONE => NONE) + + fun destn ct = [Thm.dest_arg (Thm.dest_arg (Thm.dest_arg ct))] + val dneg_rule = Old_Z3_Proof_Tools.precompose destn @{thm notnotD} +in + +(* + explode a term into literals and collect all rules to be able to 
deduce + particular literals afterwards +*) +fun explode_term is_conj = + let + val dest = if is_conj then dest_conj_term else dest_disj_term + val dest_rules = if is_conj then dest_conj_rules else dest_disj_rules + + fun add (t, rs) = Termtab.map_default (t, rs) + (fn rs' => if length rs' < length rs then rs' else rs) + + fun explode1 rules t = + (case dest t of + SOME (t1, t2) => + let val (rule1, rule2) = the (dest_rules t) + in + explode1 (rule1 :: rules) t1 #> + explode1 (rule2 :: rules) t2 #> + add (t, rev rules) + end + | NONE => add (t, rev rules)) + + fun explode0 (@{const Not} $ (@{const Not} $ t)) = + Termtab.make [(t, [dneg_rule])] + | explode0 t = explode1 [] t Termtab.empty + + in explode0 end + +(* + extract a literal by applying previously collected rules +*) +fun extract_lit thm rules = fold Old_Z3_Proof_Tools.compose rules thm + + +(* + explode a theorem into its literals +*) +fun explode is_conj full keep_intermediate stop_lits = + let + val dest_rules = if is_conj then dest_conj_rules else dest_disj_rules + val tab = fold (Termtab.update o rpair ()) stop_lits Termtab.empty + + fun explode1 thm = + if Termtab.defined tab (Old_SMT_Utils.prop_of thm) then cons thm + else + (case dest_rules (Old_SMT_Utils.prop_of thm) of + SOME (rule1, rule2) => + explode2 rule1 thm #> + explode2 rule2 thm #> + keep_intermediate ? cons thm + | NONE => cons thm) + + and explode2 dest_rule thm = + if full orelse + exists_lit is_conj (Termtab.defined tab) (Old_SMT_Utils.prop_of thm) + then explode1 (Old_Z3_Proof_Tools.compose dest_rule thm) + else cons (Old_Z3_Proof_Tools.compose dest_rule thm) + + fun explode0 thm = + if not is_conj andalso is_dneg (Old_SMT_Utils.prop_of thm) + then [Old_Z3_Proof_Tools.compose dneg_rule thm] + else explode1 thm [] + + in explode0 end + +end + + + +(** joining of literals to conjunctions or disjunctions **) + +local + fun on_cprem i f thm = f (Thm.cprem_of thm i) + fun on_cprop f thm = f (Thm.cprop_of thm) + fun precomp2 f g thm = (on_cprem 1 f thm, on_cprem 2 g thm, f, g, thm) + fun comp2 (cv1, cv2, f, g, rule) thm1 thm2 = + Thm.instantiate ([], + [(dest_Var (Thm.term_of cv1), on_cprop f thm1), + (dest_Var (Thm.term_of cv2), on_cprop g thm2)]) rule + |> Old_Z3_Proof_Tools.discharge thm1 |> Old_Z3_Proof_Tools.discharge thm2 + + fun d1 ct = Thm.dest_arg ct and d2 ct = Thm.dest_arg (Thm.dest_arg ct) + + val conj_rule = precomp2 d1 d1 @{thm conjI} + fun comp_conj ((_, thm1), (_, thm2)) = comp2 conj_rule thm1 thm2 + + val disj1 = precomp2 d2 d2 @{lemma "~P ==> ~Q ==> ~(P | Q)" by fast} + val disj2 = precomp2 d2 d1 @{lemma "~P ==> Q ==> ~(P | ~Q)" by fast} + val disj3 = precomp2 d1 d2 @{lemma "P ==> ~Q ==> ~(~P | Q)" by fast} + val disj4 = precomp2 d1 d1 @{lemma "P ==> Q ==> ~(~P | ~Q)" by fast} + + fun comp_disj ((false, thm1), (false, thm2)) = comp2 disj1 thm1 thm2 + | comp_disj ((false, thm1), (true, thm2)) = comp2 disj2 thm1 thm2 + | comp_disj ((true, thm1), (false, thm2)) = comp2 disj3 thm1 thm2 + | comp_disj ((true, thm1), (true, thm2)) = comp2 disj4 thm1 thm2 + + fun dest_conj (@{const HOL.conj} $ t $ u) = ((false, t), (false, u)) + | dest_conj t = raise TERM ("dest_conj", [t]) + + val neg = (fn @{const Not} $ t => (true, t) | t => (false, @{const Not} $ t)) + fun dest_disj (@{const Not} $ (@{const HOL.disj} $ t $ u)) = (neg t, neg u) + | dest_disj t = raise TERM ("dest_disj", [t]) + + val precomp = Old_Z3_Proof_Tools.precompose + val dnegE = precomp (single o d2 o d1) @{thm notnotD} + val dnegI = precomp (single o d1) @{lemma "P ==> ~~P" by fast} + fun 
as_dneg f t = f (@{const Not} $ (@{const Not} $ t)) + + val precomp2 = Old_Z3_Proof_Tools.precompose2 + fun dni f = apsnd f o Thm.dest_binop o f o d1 + val negIffE = precomp2 (dni d1) @{lemma "~(P = (~Q)) ==> Q = P" by fast} + val negIffI = precomp2 (dni I) @{lemma "P = Q ==> ~(Q = (~P))" by fast} + val iff_const = @{const HOL.eq (bool)} + fun as_negIff f (@{const HOL.eq (bool)} $ t $ u) = + f (@{const Not} $ (iff_const $ u $ (@{const Not} $ t))) + | as_negIff _ _ = NONE +in + +fun join is_conj littab t = + let + val comp = if is_conj then comp_conj else comp_disj + val dest = if is_conj then dest_conj else dest_disj + + val lookup = lookup_lit littab + + fun lookup_rule t = + (case t of + @{const Not} $ (@{const Not} $ t) => + (Old_Z3_Proof_Tools.compose dnegI, lookup t) + | @{const Not} $ (@{const HOL.eq (bool)} $ t $ (@{const Not} $ u)) => + (Old_Z3_Proof_Tools.compose negIffI, lookup (iff_const $ u $ t)) + | @{const Not} $ ((eq as Const (@{const_name HOL.eq}, _)) $ t $ u) => + let fun rewr lit = lit COMP @{thm not_sym} + in (rewr, lookup (@{const Not} $ (eq $ u $ t))) end + | _ => + (case as_dneg lookup t of + NONE => (Old_Z3_Proof_Tools.compose negIffE, as_negIff lookup t) + | x => (Old_Z3_Proof_Tools.compose dnegE, x))) + + fun join1 (s, t) = + (case lookup t of + SOME lit => (s, lit) + | NONE => + (case lookup_rule t of + (rewrite, SOME lit) => (s, rewrite lit) + | (_, NONE) => (s, comp (apply2 join1 (dest t))))) + + in snd (join1 (if is_conj then (false, t) else (true, t))) end + +end + + + +(** proving equality of conjunctions or disjunctions **) + +fun iff_intro thm1 thm2 = thm2 COMP (thm1 COMP @{thm iffI}) + +local + val cp1 = @{lemma "(~P) = (~Q) ==> P = Q" by simp} + val cp2 = @{lemma "(~P) = Q ==> P = (~Q)" by fastforce} + val cp3 = @{lemma "P = (~Q) ==> (~P) = Q" by simp} +in +fun contrapos1 prove (ct, cu) = prove (negate ct, negate cu) COMP cp1 +fun contrapos2 prove (ct, cu) = prove (negate ct, Thm.dest_arg cu) COMP cp2 +fun contrapos3 prove (ct, cu) = prove (Thm.dest_arg ct, negate cu) COMP cp3 +end + + +local + val contra_rule = @{lemma "P ==> ~P ==> False" by (rule notE)} + fun contra_left conj thm = + let + val rules = explode_term conj (Old_SMT_Utils.prop_of thm) + fun contra_lits (t, rs) = + (case t of + @{const Not} $ u => Termtab.lookup rules u |> Option.map (pair rs) + | _ => NONE) + in + (case Termtab.lookup rules @{const False} of + SOME rs => extract_lit thm rs + | NONE => + the (Termtab.get_first contra_lits rules) + |> apply2 (extract_lit thm) + |> (fn (nlit, plit) => nlit COMP (plit COMP contra_rule))) + end + + val falseE_v = dest_Var (Thm.term_of (Thm.dest_arg (Thm.dest_arg (Thm.cprop_of @{thm FalseE})))) + fun contra_right ct = Thm.instantiate ([], [(falseE_v, ct)]) @{thm FalseE} +in +fun contradict conj ct = + iff_intro (Old_Z3_Proof_Tools.under_assumption (contra_left conj) ct) + (contra_right ct) +end + + +local + fun prove_eq l r (cl, cr) = + let + fun explode' is_conj = explode is_conj true (l <> r) [] + fun make_tab is_conj thm = make_littab (true_thm :: explode' is_conj thm) + fun prove is_conj ct tab = join is_conj tab (Thm.term_of ct) + + val thm1 = Old_Z3_Proof_Tools.under_assumption (prove r cr o make_tab l) cl + val thm2 = Old_Z3_Proof_Tools.under_assumption (prove l cl o make_tab r) cr + in iff_intro thm1 thm2 end + + datatype conj_disj = CONJ | DISJ | NCON | NDIS + fun kind_of t = + if is_conj t then SOME CONJ + else if is_disj t then SOME DISJ + else if is_neg' is_conj t then SOME NCON + else if is_neg' is_disj t then SOME NDIS + else NONE 
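+
+  (* e.g. an equation "(P & Q) = False" is handled below by deriving a
+     contradiction from the assumed left-hand side, while an equation between
+     two conjunctions/disjunctions such as "(P & Q) = (Q & P)" is proved by
+     assuming each side in turn, exploding it into its literals, and joining
+     those literals back into the shape of the other side *)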
+in + +fun prove_conj_disj_eq ct = + let val cp as (cl, cr) = Thm.dest_binop (Thm.dest_arg ct) + in + (case (kind_of (Thm.term_of cl), Thm.term_of cr) of + (SOME CONJ, @{const False}) => contradict true cl + | (SOME DISJ, @{const Not} $ @{const False}) => + contrapos2 (contradict false o fst) cp + | (kl, _) => + (case (kl, kind_of (Thm.term_of cr)) of + (SOME CONJ, SOME CONJ) => prove_eq true true cp + | (SOME CONJ, SOME NDIS) => prove_eq true false cp + | (SOME CONJ, _) => prove_eq true true cp + | (SOME DISJ, SOME DISJ) => contrapos1 (prove_eq false false) cp + | (SOME DISJ, SOME NCON) => contrapos2 (prove_eq false true) cp + | (SOME DISJ, _) => contrapos1 (prove_eq false false) cp + | (SOME NCON, SOME NCON) => contrapos1 (prove_eq true true) cp + | (SOME NCON, SOME DISJ) => contrapos3 (prove_eq true false) cp + | (SOME NCON, NONE) => contrapos3 (prove_eq true false) cp + | (SOME NDIS, SOME NDIS) => prove_eq false false cp + | (SOME NDIS, SOME CONJ) => prove_eq false true cp + | (SOME NDIS, NONE) => prove_eq false true cp + | _ => raise CTERM ("prove_conj_disj_eq", [ct]))) + end + +end + +end diff --git a/src/test-gen/src/main/Old_SMT/old_z3_proof_methods.ML b/src/test-gen/src/main/Old_SMT/old_z3_proof_methods.ML new file mode 100644 index 0000000..c27174d --- /dev/null +++ b/src/test-gen/src/main/Old_SMT/old_z3_proof_methods.ML @@ -0,0 +1,149 @@ +(* Title: HOL/Library/Old_SMT/old_z3_proof_methods.ML + Author: Sascha Boehme, TU Muenchen + +Proof methods for Z3 proof reconstruction. +*) + +signature OLD_Z3_PROOF_METHODS = +sig + val prove_injectivity: Proof.context -> cterm -> thm + val prove_ite: Proof.context -> cterm -> thm +end + +structure Old_Z3_Proof_Methods: OLD_Z3_PROOF_METHODS = +struct + + +fun apply tac st = + (case Seq.pull (tac 1 st) of + NONE => raise THM ("tactic failed", 1, [st]) + | SOME (st', _) => st') + + + +(* if-then-else *) + +val pull_ite = mk_meta_eq + @{lemma "f (if P then x else y) = (if P then f x else f y)" by simp} + +fun pull_ites_conv ct = + (Conv.rewr_conv pull_ite then_conv + Conv.binop_conv (Conv.try_conv pull_ites_conv)) ct + +fun prove_ite ctxt = + Old_Z3_Proof_Tools.by_tac ctxt ( + CONVERSION (Conv.arg_conv (Conv.arg1_conv pull_ites_conv)) + THEN' resolve_tac ctxt @{thms refl}) + + + +(* injectivity *) + +local + +val B = @{typ bool} +fun mk_univ T = Const (@{const_name top}, HOLogic.mk_setT T) +fun mk_inj_on T U = + Const (@{const_name inj_on}, (T --> U) --> HOLogic.mk_setT T --> B) +fun mk_inv_into T U = + Const (@{const_name inv_into}, [HOLogic.mk_setT T, T --> U, U] ---> T) + +fun mk_inv_of ctxt ct = + let + val (dT, rT) = Term.dest_funT (Thm.typ_of_cterm ct) + val inv = Thm.cterm_of ctxt (mk_inv_into dT rT) + val univ = Thm.cterm_of ctxt (mk_univ dT) + in Thm.mk_binop inv univ ct end + +fun mk_inj_prop ctxt ct = + let + val (dT, rT) = Term.dest_funT (Thm.typ_of_cterm ct) + val inj = Thm.cterm_of ctxt (mk_inj_on dT rT) + val univ = Thm.cterm_of ctxt (mk_univ dT) + in Old_SMT_Utils.mk_cprop (Thm.mk_binop inj ct univ) end + + +val disjE = @{lemma "~P | Q ==> P ==> Q" by fast} + +fun prove_inj_prop ctxt def lhs = + let + val (ct, ctxt') = Old_SMT_Utils.dest_all_cabs (Thm.rhs_of def) ctxt + val rule = disjE OF [Object_Logic.rulify ctxt' (Thm.assume lhs)] + in + Goal.init (mk_inj_prop ctxt' (Thm.dest_arg ct)) + |> apply (resolve_tac ctxt' @{thms injI}) + |> apply (Tactic.solve_tac ctxt' [rule, rule RS @{thm sym}]) + |> Goal.norm_result ctxt' o Goal.finish ctxt' + |> singleton (Variable.export ctxt' ctxt) + end + +fun prove_rhs ctxt def lhs = + 
Old_Z3_Proof_Tools.by_tac ctxt ( + CONVERSION (Conv.top_sweep_conv (K (Conv.rewr_conv def)) ctxt) + THEN' REPEAT_ALL_NEW (match_tac ctxt @{thms allI}) + THEN' resolve_tac ctxt [@{thm inv_f_f} OF [prove_inj_prop ctxt def lhs]]) + + +fun expand thm ct = + let + val cpat = Thm.dest_arg (Thm.rhs_of thm) + val (cl, cr) = Thm.dest_binop (Thm.dest_arg (Thm.dest_arg1 ct)) + val thm1 = Thm.instantiate (Thm.match (cpat, cl)) thm + val thm2 = Thm.instantiate (Thm.match (cpat, cr)) thm + in Conv.arg_conv (Conv.binop_conv (Conv.rewrs_conv [thm1, thm2])) ct end + +fun prove_lhs ctxt rhs = + let + val eq = Thm.symmetric (mk_meta_eq (Object_Logic.rulify ctxt (Thm.assume rhs))) + val conv = Old_SMT_Utils.binders_conv (K (expand eq)) ctxt + in + Old_Z3_Proof_Tools.by_tac ctxt ( + CONVERSION (Old_SMT_Utils.prop_conv conv) + THEN' Simplifier.simp_tac (put_simpset HOL_ss ctxt)) + end + + +fun mk_inv_def ctxt rhs = + let + val (ct, ctxt') = + Old_SMT_Utils.dest_all_cbinders (Old_SMT_Utils.dest_cprop rhs) ctxt + val (cl, cv) = Thm.dest_binop ct + val (cg, (cargs, cf)) = Drule.strip_comb cl ||> split_last + val cu = fold_rev Thm.lambda cargs (mk_inv_of ctxt' (Thm.lambda cv cf)) + in Thm.assume (Old_SMT_Utils.mk_cequals cg cu) end + +fun prove_inj_eq ctxt ct = + let + val (lhs, rhs) = + apply2 Old_SMT_Utils.mk_cprop (Thm.dest_binop (Old_SMT_Utils.dest_cprop ct)) + val lhs_thm = Thm.implies_intr rhs (prove_lhs ctxt rhs lhs) + val rhs_thm = + Thm.implies_intr lhs (prove_rhs ctxt (mk_inv_def ctxt rhs) lhs rhs) + in lhs_thm COMP (rhs_thm COMP @{thm iffI}) end + + +val swap_eq_thm = mk_meta_eq @{thm eq_commute} +val swap_disj_thm = mk_meta_eq @{thm disj_commute} + +fun swap_conv dest eq = + Old_SMT_Utils.if_true_conv ((op <) o apply2 Term.size_of_term o dest) + (Conv.rewr_conv eq) + +val swap_eq_conv = swap_conv HOLogic.dest_eq swap_eq_thm +val swap_disj_conv = swap_conv Old_SMT_Utils.dest_disj swap_disj_thm + +fun norm_conv ctxt = + swap_eq_conv then_conv + Conv.arg1_conv (Old_SMT_Utils.binders_conv (K swap_disj_conv) ctxt) then_conv + Conv.arg_conv (Old_SMT_Utils.binders_conv (K swap_eq_conv) ctxt) + +in + +fun prove_injectivity ctxt = + Old_Z3_Proof_Tools.by_tac ctxt ( + CONVERSION (Old_SMT_Utils.prop_conv (norm_conv ctxt)) + THEN' CSUBGOAL (uncurry (resolve_tac ctxt o single o prove_inj_eq ctxt))) + +end + +end diff --git a/src/test-gen/src/main/Old_SMT/old_z3_proof_parser.ML b/src/test-gen/src/main/Old_SMT/old_z3_proof_parser.ML new file mode 100644 index 0000000..aa44b11 --- /dev/null +++ b/src/test-gen/src/main/Old_SMT/old_z3_proof_parser.ML @@ -0,0 +1,446 @@ +(* Title: HOL/Library/Old_SMT/old_z3_proof_parser.ML + Author: Sascha Boehme, TU Muenchen + +Parser for Z3 proofs. 
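+
+For orientation, each line of the proof text is either a declaration
+("decl <name> :: <sort>"), the definition of a term or of a derived proof
+step ("#<i> := <expr>" or "#<i> := [<rule> <premise ids>]: <prop>"), or the
+final, unnumbered step ("[<rule> <premise ids>]: <prop>"); these shapes are
+only a rough sketch of the grammar implemented by "node" below.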
+*) + +signature OLD_Z3_PROOF_PARSER = +sig + (*proof rules*) + datatype rule = True_Axiom | Asserted | Goal | Modus_Ponens | Reflexivity | + Symmetry | Transitivity | Transitivity_Star | Monotonicity | Quant_Intro | + Distributivity | And_Elim | Not_Or_Elim | Rewrite | Rewrite_Star | + Pull_Quant | Pull_Quant_Star | Push_Quant | Elim_Unused_Vars | + Dest_Eq_Res | Quant_Inst | Hypothesis | Lemma | Unit_Resolution | + Iff_True | Iff_False | Commutativity | Def_Axiom | Intro_Def | Apply_Def | + Iff_Oeq | Nnf_Pos | Nnf_Neg | Nnf_Star | Cnf_Star | Skolemize | + Modus_Ponens_Oeq | Th_Lemma of string list + val string_of_rule: rule -> string + + (*proof parser*) + datatype proof_step = Proof_Step of { + rule: rule, + args: cterm list, + prems: int list, + prop: cterm } + val parse: Proof.context -> typ Symtab.table -> term Symtab.table -> + string list -> + (int * cterm) list * (int * proof_step) list * string list * Proof.context +end + +structure Old_Z3_Proof_Parser: OLD_Z3_PROOF_PARSER = +struct + + +(* proof rules *) + +datatype rule = True_Axiom | Asserted | Goal | Modus_Ponens | Reflexivity | + Symmetry | Transitivity | Transitivity_Star | Monotonicity | Quant_Intro | + Distributivity | And_Elim | Not_Or_Elim | Rewrite | Rewrite_Star | + Pull_Quant | Pull_Quant_Star | Push_Quant | Elim_Unused_Vars | Dest_Eq_Res | + Quant_Inst | Hypothesis | Lemma | Unit_Resolution | Iff_True | Iff_False | + Commutativity | Def_Axiom | Intro_Def | Apply_Def | Iff_Oeq | Nnf_Pos | + Nnf_Neg | Nnf_Star | Cnf_Star | Skolemize | Modus_Ponens_Oeq | + Th_Lemma of string list + +val rule_names = Symtab.make [ + ("true-axiom", True_Axiom), + ("asserted", Asserted), + ("goal", Goal), + ("mp", Modus_Ponens), + ("refl", Reflexivity), + ("symm", Symmetry), + ("trans", Transitivity), + ("trans*", Transitivity_Star), + ("monotonicity", Monotonicity), + ("quant-intro", Quant_Intro), + ("distributivity", Distributivity), + ("and-elim", And_Elim), + ("not-or-elim", Not_Or_Elim), + ("rewrite", Rewrite), + ("rewrite*", Rewrite_Star), + ("pull-quant", Pull_Quant), + ("pull-quant*", Pull_Quant_Star), + ("push-quant", Push_Quant), + ("elim-unused", Elim_Unused_Vars), + ("der", Dest_Eq_Res), + ("quant-inst", Quant_Inst), + ("hypothesis", Hypothesis), + ("lemma", Lemma), + ("unit-resolution", Unit_Resolution), + ("iff-true", Iff_True), + ("iff-false", Iff_False), + ("commutativity", Commutativity), + ("def-axiom", Def_Axiom), + ("intro-def", Intro_Def), + ("apply-def", Apply_Def), + ("iff~", Iff_Oeq), + ("nnf-pos", Nnf_Pos), + ("nnf-neg", Nnf_Neg), + ("nnf*", Nnf_Star), + ("cnf*", Cnf_Star), + ("sk", Skolemize), + ("mp~", Modus_Ponens_Oeq), + ("th-lemma", Th_Lemma [])] + +fun string_of_rule (Th_Lemma args) = space_implode " " ("th-lemma" :: args) + | string_of_rule r = + let fun eq_rule (s, r') = if r = r' then SOME s else NONE + in the (Symtab.get_first eq_rule rule_names) end + + + +(* certified terms and variables *) + +val (var_prefix, decl_prefix) = ("v", "sk") +(* + "decl_prefix" is for skolem constants (represented by free variables), + "var_prefix" is for pseudo-schematic variables (schematic with respect + to the Z3 proof, but represented by free variables). + + Both prefixes must be distinct to avoid name interferences. + More precisely, the naming of pseudo-schematic variables must be + context-independent modulo the current proof context to be able to + use fast inference kernel rules during proof reconstruction. 
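+
+  For example, a constant declared as "c" in the proof text becomes a fresh
+  free variable whose name starts with "sk" (via "add_decl" below), while
+  "close"/"mk_inst" replace the pseudo-schematic variables of a proof term
+  by fresh free variables named after "v".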
+*) + +fun mk_inst ctxt vars = + let + val max = fold (Integer.max o fst) vars 0 + val ns = fst (Variable.variant_fixes (replicate (max + 1) var_prefix) ctxt) + fun mk (i, v) = + (dest_Var (Thm.term_of v), Thm.cterm_of ctxt (Free (nth ns i, Thm.typ_of_cterm v))) + in map mk vars end + +fun close ctxt (ct, vars) = + let + val inst = mk_inst ctxt vars + val names = fold (Term.add_free_names o Thm.term_of o snd) inst [] + in (Thm.instantiate_cterm ([], inst) ct, names) end + + +fun mk_bound ctxt (i, T) = + let val ct = Thm.cterm_of ctxt (Var ((Name.uu, 0), T)) + in (ct, [(i, ct)]) end + +local + fun mk_quant1 ctxt q T (ct, vars) = + let + val cv = + (case AList.lookup (op =) vars 0 of + SOME cv => cv + | _ => Thm.cterm_of ctxt (Var ((Name.uu, Thm.maxidx_of_cterm ct + 1), T))) + fun dec (i, v) = if i = 0 then NONE else SOME (i-1, v) + val vars' = map_filter dec vars + in (Thm.apply (Old_SMT_Utils.instT' cv q) (Thm.lambda cv ct), vars') end + + fun quant name = + Old_SMT_Utils.mk_const_pat @{theory} name (Old_SMT_Utils.destT1 o Old_SMT_Utils.destT1) + val forall = quant @{const_name All} + val exists = quant @{const_name Ex} +in + +fun mk_quant is_forall ctxt = + fold_rev (mk_quant1 ctxt (if is_forall then forall else exists)) + +end + +local + fun prep (ct, vars) (maxidx, all_vars) = + let + val maxidx' = maxidx + Thm.maxidx_of_cterm ct + 1 + + fun part (i, cv) = + (case AList.lookup (op =) all_vars i of + SOME cu => apfst (if cu aconvc cv then I else cons (cv, cu)) + | NONE => + let val cv' = Thm.incr_indexes_cterm maxidx cv + in apfst (cons (cv, cv')) #> apsnd (cons (i, cv')) end) + + val (inst, vars') = + if null vars then ([], vars) + else fold part vars ([], []) + + in + (Thm.instantiate_cterm ([], map (apfst (dest_Var o Thm.term_of)) inst) ct, + (maxidx', vars' @ all_vars)) + end +in +fun mk_fun f ts = + let val (cts, (_, vars)) = fold_map prep ts (0, []) + in f cts |> Option.map (rpair vars) end +end + + + +(* proof parser *) + +datatype proof_step = Proof_Step of { + rule: rule, + args: cterm list, + prems: int list, + prop: cterm } + + +(** parser context **) + +val not_false = Thm.cterm_of @{context} (@{const Not} $ @{const False}) + +fun make_context ctxt typs terms = + let + val ctxt' = + ctxt + |> Symtab.fold (Variable.declare_typ o snd) typs + |> Symtab.fold (Variable.declare_term o snd) terms + + fun cert @{const True} = not_false + | cert t = Thm.cterm_of ctxt' t + + in (typs, Symtab.map (K cert) terms, Inttab.empty, [], [], ctxt') end + +fun fresh_name n (typs, terms, exprs, steps, vars, ctxt) = + let val (n', ctxt') = yield_singleton Variable.variant_fixes n ctxt + in (n', (typs, terms, exprs, steps, vars, ctxt')) end + +fun context_of (_, _, _, _, _, ctxt) = ctxt + +fun add_decl (n, T) (cx as (_, terms, _, _, _, _)) = + (case Symtab.lookup terms n of + SOME _ => cx + | NONE => cx |> fresh_name (decl_prefix ^ n) + |> (fn (m, (typs, terms, exprs, steps, vars, ctxt)) => + let + val upd = Symtab.update (n, Thm.cterm_of ctxt (Free (m, T))) + in (typs, upd terms, exprs, steps, vars, ctxt) end)) + +fun mk_typ (typs, _, _, _, _, ctxt) (s as Old_Z3_Interface.Sym (n, _)) = + (case Old_Z3_Interface.mk_builtin_typ ctxt s of + SOME T => SOME T + | NONE => Symtab.lookup typs n) + +fun mk_num (_, _, _, _, _, ctxt) (i, T) = + mk_fun (K (Old_Z3_Interface.mk_builtin_num ctxt i T)) [] + +fun mk_app (_, terms, _, _, _, ctxt) (s as Old_Z3_Interface.Sym (n, _), es) = + mk_fun (fn cts => + (case Old_Z3_Interface.mk_builtin_fun ctxt s cts of + SOME ct => SOME ct + | NONE => + Symtab.lookup terms n 
|> Option.map (Drule.list_comb o rpair cts))) es + +fun add_expr k t (typs, terms, exprs, steps, vars, ctxt) = + (typs, terms, Inttab.update (k, t) exprs, steps, vars, ctxt) + +fun lookup_expr (_, _, exprs, _, _, _) = Inttab.lookup exprs + +fun add_proof_step k ((r, args), prop) cx = + let + val (typs, terms, exprs, steps, vars, ctxt) = cx + val (ct, vs) = close ctxt prop + fun part (SOME e, _) (cts, ps) = (close ctxt e :: cts, ps) + | part (NONE, i) (cts, ps) = (cts, i :: ps) + val (args', prems) = fold (part o `(lookup_expr cx)) args ([], []) + val (cts, vss) = split_list args' + val step = Proof_Step {rule=r, args=rev cts, prems=rev prems, + prop = Old_SMT_Utils.mk_cprop ct} + val vars' = fold (union (op =)) (vs :: vss) vars + in (typs, terms, exprs, (k, step) :: steps, vars', ctxt) end + +fun finish (_, _, _, steps, vars, ctxt) = + let + fun coll (p as (k, Proof_Step {prems, rule, prop, ...})) (ars, ps, ids) = + (case rule of + Asserted => ((k, prop) :: ars, ps, ids) + | Goal => ((k, prop) :: ars, ps, ids) + | _ => + if Inttab.defined ids k then + (ars, p :: ps, fold (Inttab.update o rpair ()) prems ids) + else (ars, ps, ids)) + + val (ars, steps', _) = fold coll steps ([], [], Inttab.make [(~1, ())]) + in (ars, steps', vars, ctxt) end + + +(** core parser **) + +fun parse_exn line_no msg = raise Old_SMT_Failure.SMT (Old_SMT_Failure.Other_Failure + ("Z3 proof parser (line " ^ string_of_int line_no ^ "): " ^ msg)) + +fun scan_exn msg ((line_no, _), _) = parse_exn line_no msg + +fun with_info f cx = + (case f ((NONE, 1), cx) of + ((SOME _, _), cx') => cx' + | ((_, line_no), _) => parse_exn line_no "bad proof") + +fun parse_line _ _ (st as ((SOME _, _), _)) = st + | parse_line scan line ((_, line_no), cx) = + let val st = ((line_no, cx), raw_explode line) + in + (case Scan.catch (Scan.finite' Symbol.stopper (Scan.option scan)) st of + (SOME r, ((_, cx'), _)) => ((r, line_no+1), cx') + | (NONE, _) => parse_exn line_no ("bad proof line: " ^ quote line)) + end + +fun with_context f x ((line_no, cx), st) = + let val (y, cx') = f x cx + in (y, ((line_no, cx'), st)) end + + +fun lookup_context f x (st as ((_, cx), _)) = (f cx x, st) + + +(** parser combinators and parsers for basic entities **) + +fun $$ s = Scan.lift (Scan.$$ s) +fun this s = Scan.lift (Scan.this_string s) +val is_blank = Symbol.is_ascii_blank +fun blank st = Scan.lift (Scan.many1 is_blank) st +fun sep scan = blank |-- scan +fun seps scan = Scan.repeat (sep scan) +fun seps1 scan = Scan.repeat1 (sep scan) +fun seps_by scan_sep scan = scan ::: Scan.repeat (scan_sep |-- scan) + +val lpar = "(" and rpar = ")" +val lbra = "[" and rbra = "]" +fun par scan = $$ lpar |-- scan --| $$ rpar +fun bra scan = $$ lbra |-- scan --| $$ rbra + +val digit = (fn + "0" => SOME 0 | "1" => SOME 1 | "2" => SOME 2 | "3" => SOME 3 | + "4" => SOME 4 | "5" => SOME 5 | "6" => SOME 6 | "7" => SOME 7 | + "8" => SOME 8 | "9" => SOME 9 | _ => NONE) + +fun digits st = (Scan.lift (Scan.many1 Symbol.is_ascii_digit) >> implode) st + +fun nat_num st = (Scan.lift (Scan.repeat1 (Scan.some digit)) >> (fn ds => + fold (fn d => fn i => i * 10 + d) ds 0)) st + +fun int_num st = (Scan.optional ($$ "-" >> K (fn i => ~i)) I :|-- + (fn sign => nat_num >> sign)) st + +val is_char = Symbol.is_ascii_letter orf Symbol.is_ascii_digit orf + member (op =) (raw_explode "_+*-/%~=<>$&|?!.@^#") + +fun name st = (Scan.lift (Scan.many1 is_char) >> implode) st + +fun sym st = (name -- + Scan.optional (bra (seps_by ($$ ":") sym)) [] >> Old_Z3_Interface.Sym) st + +fun id st = ($$ "#" |-- 
nat_num) st + + +(** parsers for various parts of Z3 proofs **) + +fun sort st = Scan.first [ + this "array" |-- bra (sort --| $$ ":" -- sort) >> (op -->), + par (this "->" |-- seps1 sort) >> ((op --->) o split_last), + sym :|-- (fn s as Old_Z3_Interface.Sym (n, _) => lookup_context mk_typ s :|-- (fn + SOME T => Scan.succeed T + | NONE => scan_exn ("unknown sort: " ^ quote n)))] st + +fun bound st = (par (this ":var" |-- sep nat_num -- sep sort) :|-- + lookup_context (mk_bound o context_of)) st + +fun numb (n as (i, _)) = lookup_context mk_num n :|-- (fn + SOME n' => Scan.succeed n' + | NONE => scan_exn ("unknown number: " ^ quote (string_of_int i))) + +fun appl (app as (Old_Z3_Interface.Sym (n, _), _)) = + lookup_context mk_app app :|-- (fn + SOME app' => Scan.succeed app' + | NONE => scan_exn ("unknown function symbol: " ^ quote n)) + +fun bv_size st = (digits >> (fn sz => + Old_Z3_Interface.Sym ("bv", [Old_Z3_Interface.Sym (sz, [])]))) st + +fun bv_number_sort st = (bv_size :|-- lookup_context mk_typ :|-- (fn + SOME cT => Scan.succeed cT + | NONE => scan_exn ("unknown sort: " ^ quote "bv"))) st + +fun bv_number st = + (this "bv" |-- bra (nat_num --| $$ ":" -- bv_number_sort) :|-- numb) st + +fun frac_number st = ( + int_num --| $$ "/" -- int_num --| this "::" -- sort :|-- (fn ((i, j), T) => + numb (i, T) -- numb (j, T) :|-- (fn (n, m) => + appl (Old_Z3_Interface.Sym ("/", []), [n, m])))) st + +fun plain_number st = (int_num --| this "::" -- sort :|-- numb) st + +fun number st = Scan.first [bv_number, frac_number, plain_number] st + +fun constant st = ((sym >> rpair []) :|-- appl) st + +fun expr_id st = (id :|-- (fn i => lookup_context lookup_expr i :|-- (fn + SOME e => Scan.succeed e + | NONE => scan_exn ("unknown term id: " ^ quote (string_of_int i))))) st + +fun arg st = Scan.first [expr_id, number, constant] st + +fun application st = par ((sym -- Scan.repeat1 (sep arg)) :|-- appl) st + +fun variables st = par (this "vars" |-- seps1 (par (name |-- sep sort))) st + +fun pats st = seps (par ((this ":pat" || this ":nopat") |-- seps1 id)) st + +val ctrue = Thm.cterm_of @{context} @{const True} + +fun pattern st = par (this "pattern" |-- Scan.repeat1 (sep arg) >> + (the o mk_fun (K (SOME ctrue)))) st + +fun quant_kind st = st |> ( + this "forall" >> K (mk_quant true o context_of) || + this "exists" >> K (mk_quant false o context_of)) + +fun quantifier st = + (par (quant_kind -- sep variables --| pats -- sep arg) :|-- + lookup_context (fn cx => fn ((mk_q, Ts), body) => mk_q cx Ts body)) st + +fun expr k = + Scan.first [bound, quantifier, pattern, application, number, constant] :|-- + with_context (pair NONE oo add_expr k) + +val rule_arg = id + (* if this is modified, then 'th_lemma_arg' needs reviewing *) + +fun th_lemma_arg st = Scan.unless (sep rule_arg >> K "" || $$ rbra) (sep name) st + +fun rule_name st = ((name >> `(Symtab.lookup rule_names)) :|-- (fn + (SOME (Th_Lemma _), _) => Scan.repeat th_lemma_arg >> Th_Lemma + | (SOME r, _) => Scan.succeed r + | (NONE, n) => scan_exn ("unknown proof rule: " ^ quote n))) st + +fun rule f k = + bra (rule_name -- seps id) --| $$ ":" -- sep arg #-> + with_context (pair (f k) oo add_proof_step k) + +fun decl st = (this "decl" |-- sep name --| sep (this "::") -- sep sort :|-- + with_context (pair NONE oo add_decl)) st + +fun def st = (id --| sep (this ":=")) st + +fun node st = st |> ( + decl || + def :|-- (fn k => sep (expr k) || sep (rule (K NONE) k)) || + rule SOME ~1) + + +(** overall parser **) + +(* + Currently, terms are parsed bottom-up 
(i.e., along with parsing the proof + text line by line), but proofs are reconstructed top-down (i.e. by an + in-order top-down traversal of the proof tree/graph). The latter approach + was taken because some proof texts comprise irrelevant proof steps which + will thus not be reconstructed. This approach might also be beneficial + for constructing terms, but it would also increase the complexity of the + (otherwise rather modular) code. +*) + +fun parse ctxt typs terms proof_text = + make_context ctxt typs terms + |> with_info (fold (parse_line node) proof_text) + |> finish + +end diff --git a/src/test-gen/src/main/Old_SMT/old_z3_proof_reconstruction.ML b/src/test-gen/src/main/Old_SMT/old_z3_proof_reconstruction.ML new file mode 100644 index 0000000..e2302cd --- /dev/null +++ b/src/test-gen/src/main/Old_SMT/old_z3_proof_reconstruction.ML @@ -0,0 +1,891 @@ +(* Title: HOL/Library/Old_SMT/old_z3_proof_reconstruction.ML + Author: Sascha Boehme, TU Muenchen + +Proof reconstruction for proofs found by Z3. +*) + +signature OLD_Z3_PROOF_RECONSTRUCTION = +sig + val add_z3_rule: thm -> Context.generic -> Context.generic + val reconstruct: Proof.context -> Old_SMT_Translate.recon -> string list -> int list * thm +end + +structure Old_Z3_Proof_Reconstruction: OLD_Z3_PROOF_RECONSTRUCTION = +struct + + +fun z3_exn msg = raise Old_SMT_Failure.SMT (Old_SMT_Failure.Other_Failure + ("Z3 proof reconstruction: " ^ msg)) + + + +(* net of schematic rules *) + +local + val description = "declaration of Z3 proof rules" + + val eq = Thm.eq_thm + + structure Old_Z3_Rules = Generic_Data + ( + type T = thm Net.net + val empty = Net.empty + val extend = I + val merge = Net.merge eq + ) + + fun prep context = + `Thm.prop_of o rewrite_rule (Context.proof_of context) [Old_Z3_Proof_Literals.rewrite_true] + + fun ins thm context = + context |> Old_Z3_Rules.map (fn net => Net.insert_term eq (prep context thm) net handle Net.INSERT => net) + fun rem thm context = + context |> Old_Z3_Rules.map (fn net => Net.delete_term eq (prep context thm) net handle Net.DELETE => net) + + val add = Thm.declaration_attribute ins + val del = Thm.declaration_attribute rem +in + +val add_z3_rule = ins + +fun by_schematic_rule ctxt ct = + the (Old_Z3_Proof_Tools.net_instance (Old_Z3_Rules.get (Context.Proof ctxt)) ct) + +val _ = Theory.setup + (Attrib.setup @{binding old_z3_rule} (Attrib.add_del add del) description #> + Global_Theory.add_thms_dynamic (@{binding old_z3_rule}, Net.content o Old_Z3_Rules.get)) + +end + + + +(* proof tools *) + +fun named ctxt name prover ct = + let val _ = Old_SMT_Config.trace_msg ctxt I ("Z3: trying " ^ name ^ " ...") + in prover ct end + +fun NAMED ctxt name tac i st = + let val _ = Old_SMT_Config.trace_msg ctxt I ("Z3: trying " ^ name ^ " ...") + in tac i st end + +fun pretty_goal ctxt thms t = + [Pretty.block [Pretty.str "proposition: ", Syntax.pretty_term ctxt t]] + |> not (null thms) ? 
cons (Pretty.big_list "assumptions:" + (map (Thm.pretty_thm ctxt) thms)) + +fun try_apply ctxt thms = + let + fun try_apply_err ct = Pretty.string_of (Pretty.chunks [ + Pretty.big_list ("Z3 found a proof," ^ + " but proof reconstruction failed at the following subgoal:") + (pretty_goal ctxt thms (Thm.term_of ct)), + Pretty.str ("Declaring a rule as [old_z3_rule] might solve this problem.")]) + + fun apply [] ct = error (try_apply_err ct) + | apply (prover :: provers) ct = + (case try prover ct of + SOME thm => (Old_SMT_Config.trace_msg ctxt I "Z3: succeeded"; thm) + | NONE => apply provers ct) + + fun schematic_label full = "schematic rules" |> full ? suffix " (full)" + fun schematic ctxt full ct = + ct + |> full ? fold_rev (curry Drule.mk_implies o Thm.cprop_of) thms + |> named ctxt (schematic_label full) (by_schematic_rule ctxt) + |> fold Thm.elim_implies thms + + in apply o cons (schematic ctxt false) o cons (schematic ctxt true) end + +local + val rewr_if = + @{lemma "(if P then Q1 else Q2) = ((P --> Q1) & (~P --> Q2))" by simp} +in + +fun HOL_fast_tac ctxt = + Classical.fast_tac (put_claset HOL_cs ctxt) + +fun simp_fast_tac ctxt = + Simplifier.simp_tac (put_simpset HOL_ss ctxt addsimps [rewr_if]) + THEN_ALL_NEW HOL_fast_tac ctxt + +end + + + +(* theorems and proofs *) + +(** theorem incarnations **) + +datatype theorem = + Thm of thm | (* theorem without special features *) + MetaEq of thm | (* meta equality "t == s" *) + Literals of thm * Old_Z3_Proof_Literals.littab + (* "P1 & ... & Pn" and table of all literals P1, ..., Pn *) + +fun thm_of (Thm thm) = thm + | thm_of (MetaEq thm) = thm COMP @{thm meta_eq_to_obj_eq} + | thm_of (Literals (thm, _)) = thm + +fun meta_eq_of (MetaEq thm) = thm + | meta_eq_of p = mk_meta_eq (thm_of p) + +fun literals_of (Literals (_, lits)) = lits + | literals_of p = Old_Z3_Proof_Literals.make_littab [thm_of p] + + + +(** core proof rules **) + +(* assumption *) + +local + val remove_trigger = mk_meta_eq @{thm trigger_def} + val remove_weight = mk_meta_eq @{thm weight_def} + val remove_fun_app = mk_meta_eq @{thm fun_app_def} + + fun rewrite_conv _ [] = Conv.all_conv + | rewrite_conv ctxt eqs = Simplifier.full_rewrite (empty_simpset ctxt addsimps eqs) + + val prep_rules = [@{thm Let_def}, remove_trigger, remove_weight, + remove_fun_app, Old_Z3_Proof_Literals.rewrite_true] + + fun rewrite _ [] = I + | rewrite ctxt eqs = Conv.fconv_rule (rewrite_conv ctxt eqs) + + fun lookup_assm assms_net ct = + Old_Z3_Proof_Tools.net_instances assms_net ct + |> map (fn ithm as (_, thm) => (ithm, Thm.cprop_of thm aconvc ct)) +in + +fun add_asserted outer_ctxt rewrite_rules assms asserted ctxt = + let + val eqs = map (rewrite ctxt [Old_Z3_Proof_Literals.rewrite_true]) rewrite_rules + val eqs' = union Thm.eq_thm eqs prep_rules + + val assms_net = + assms + |> map (apsnd (rewrite ctxt eqs')) + |> map (apsnd (Conv.fconv_rule Thm.eta_conversion)) + |> Old_Z3_Proof_Tools.thm_net_of snd + + fun revert_conv ctxt = rewrite_conv ctxt eqs' then_conv Thm.eta_conversion + + fun assume thm ctxt = + let + val ct = Thm.cprem_of thm 1 + val (thm', ctxt') = yield_singleton Assumption.add_assumes ct ctxt + in (Thm.implies_elim thm thm', ctxt') end + + fun add1 idx thm1 ((i, th), exact) ((is, thms), (ctxt, ptab)) = + let + val (thm, ctxt') = + if exact then (Thm.implies_elim thm1 th, ctxt) + else assume thm1 ctxt + val thms' = if exact then thms else th :: thms + in + ((insert (op =) i is, thms'), + (ctxt', Inttab.update (idx, Thm thm) ptab)) + end + + fun add (idx, ct) (cx as ((is, thms), 
(ctxt, ptab))) = + let + val thm1 = + Thm.trivial ct + |> Conv.fconv_rule (Conv.arg1_conv (revert_conv outer_ctxt)) + val thm2 = singleton (Variable.export ctxt outer_ctxt) thm1 + in + (case lookup_assm assms_net (Thm.cprem_of thm2 1) of + [] => + let val (thm, ctxt') = assume thm1 ctxt + in ((is, thms), (ctxt', Inttab.update (idx, Thm thm) ptab)) end + | ithms => fold (add1 idx thm1) ithms cx) + end + in fold add asserted (([], []), (ctxt, Inttab.empty)) end + +end + + +(* P = Q ==> P ==> Q or P --> Q ==> P ==> Q *) +local + val precomp = Old_Z3_Proof_Tools.precompose2 + val comp = Old_Z3_Proof_Tools.compose + + val meta_iffD1 = @{lemma "P == Q ==> P ==> (Q::bool)" by simp} + val meta_iffD1_c = precomp Thm.dest_binop meta_iffD1 + + val iffD1_c = precomp (Thm.dest_binop o Thm.dest_arg) @{thm iffD1} + val mp_c = precomp (Thm.dest_binop o Thm.dest_arg) @{thm mp} +in +fun mp (MetaEq thm) p = Thm (Thm.implies_elim (comp meta_iffD1_c thm) p) + | mp p_q p = + let + val pq = thm_of p_q + val thm = comp iffD1_c pq handle THM _ => comp mp_c pq + in Thm (Thm.implies_elim thm p) end +end + + +(* and_elim: P1 & ... & Pn ==> Pi *) +(* not_or_elim: ~(P1 | ... | Pn) ==> ~Pi *) +local + fun is_sublit conj t = Old_Z3_Proof_Literals.exists_lit conj (fn u => u aconv t) + + fun derive conj t lits idx ptab = + let + val lit = the (Old_Z3_Proof_Literals.get_first_lit (is_sublit conj t) lits) + val ls = Old_Z3_Proof_Literals.explode conj false false [t] lit + val lits' = fold Old_Z3_Proof_Literals.insert_lit ls + (Old_Z3_Proof_Literals.delete_lit lit lits) + + fun upd thm = Literals (thm_of thm, lits') + val ptab' = Inttab.map_entry idx upd ptab + in (the (Old_Z3_Proof_Literals.lookup_lit lits' t), ptab') end + + fun lit_elim conj (p, idx) ct ptab = + let val lits = literals_of p + in + (case Old_Z3_Proof_Literals.lookup_lit lits (Old_SMT_Utils.term_of ct) of + SOME lit => (Thm lit, ptab) + | NONE => apfst Thm (derive conj (Old_SMT_Utils.term_of ct) lits idx ptab)) + end +in +val and_elim = lit_elim true +val not_or_elim = lit_elim false +end + + +(* P1, ..., Pn |- False ==> |- ~P1 | ... 
| ~Pn *) +local + fun step lit thm = + Thm.implies_elim (Thm.implies_intr (Thm.cprop_of lit) thm) lit + val explode_disj = Old_Z3_Proof_Literals.explode false false false + fun intro hyps thm th = fold step (explode_disj hyps th) thm + + fun dest_ccontr ct = [Thm.dest_arg (Thm.dest_arg (Thm.dest_arg1 ct))] + val ccontr = Old_Z3_Proof_Tools.precompose dest_ccontr @{thm ccontr} +in +fun lemma thm ct = + let + val cu = Old_Z3_Proof_Literals.negate (Thm.dest_arg ct) + val hyps = map_filter (try HOLogic.dest_Trueprop) (Thm.hyps_of thm) + val th = Old_Z3_Proof_Tools.under_assumption (intro hyps thm) cu + in Thm (Old_Z3_Proof_Tools.compose ccontr th) end +end + + +(* \/{P1, ..., Pn, Q1, ..., Qn}, ~P1, ..., ~Pn ==> \/{Q1, ..., Qn} *) +local + val explode_disj = Old_Z3_Proof_Literals.explode false true false + val join_disj = Old_Z3_Proof_Literals.join false + fun unit thm thms th = + let + val t = @{const Not} $ Old_SMT_Utils.prop_of thm + val ts = map Old_SMT_Utils.prop_of thms + in + join_disj (Old_Z3_Proof_Literals.make_littab (thms @ explode_disj ts th)) t + end + + fun dest_arg2 ct = Thm.dest_arg (Thm.dest_arg ct) + fun dest ct = apply2 dest_arg2 (Thm.dest_binop ct) + val contrapos = + Old_Z3_Proof_Tools.precompose2 dest @{lemma "(~P ==> ~Q) ==> Q ==> P" by fast} +in +fun unit_resolution thm thms ct = + Old_Z3_Proof_Literals.negate (Thm.dest_arg ct) + |> Old_Z3_Proof_Tools.under_assumption (unit thm thms) + |> Thm o Old_Z3_Proof_Tools.discharge thm o Old_Z3_Proof_Tools.compose contrapos +end + + +(* P ==> P == True or P ==> P == False *) +local + val iff1 = @{lemma "P ==> P == (~ False)" by simp} + val iff2 = @{lemma "~P ==> P == False" by simp} +in +fun iff_true thm = MetaEq (thm COMP iff1) +fun iff_false thm = MetaEq (thm COMP iff2) +end + + +(* distributivity of | over & *) +fun distributivity ctxt = Thm o try_apply ctxt [] [ + named ctxt "fast" (Old_Z3_Proof_Tools.by_tac ctxt (HOL_fast_tac ctxt))] + (* FIXME: not very well tested *) + + +(* Tseitin-like axioms *) +local + val disjI1 = @{lemma "(P ==> Q) ==> ~P | Q" by fast} + val disjI2 = @{lemma "(~P ==> Q) ==> P | Q" by fast} + val disjI3 = @{lemma "(~Q ==> P) ==> P | Q" by fast} + val disjI4 = @{lemma "(Q ==> P) ==> P | ~Q" by fast} + + fun prove' conj1 conj2 ct2 thm = + let + val littab = + Old_Z3_Proof_Literals.explode conj1 true (conj1 <> conj2) [] thm + |> cons Old_Z3_Proof_Literals.true_thm + |> Old_Z3_Proof_Literals.make_littab + in Old_Z3_Proof_Literals.join conj2 littab (Thm.term_of ct2) end + + fun prove rule (ct1, conj1) (ct2, conj2) = + Old_Z3_Proof_Tools.under_assumption (prove' conj1 conj2 ct2) ct1 COMP rule + + fun prove_def_axiom ct = + let val (ct1, ct2) = Thm.dest_binop (Thm.dest_arg ct) + in + (case Thm.term_of ct1 of + @{const Not} $ (@{const HOL.conj} $ _ $ _) => + prove disjI1 (Thm.dest_arg ct1, true) (ct2, true) + | @{const HOL.conj} $ _ $ _ => + prove disjI3 (Old_Z3_Proof_Literals.negate ct2, false) (ct1, true) + | @{const Not} $ (@{const HOL.disj} $ _ $ _) => + prove disjI3 (Old_Z3_Proof_Literals.negate ct2, false) (ct1, false) + | @{const HOL.disj} $ _ $ _ => + prove disjI2 (Old_Z3_Proof_Literals.negate ct1, false) (ct2, true) + | Const (@{const_name distinct}, _) $ _ => + let + fun dis_conv cv = Conv.arg_conv (Conv.arg1_conv cv) + val unfold_dis_conv = dis_conv Old_Z3_Proof_Tools.unfold_distinct_conv + fun prv cu = + let val (cu1, cu2) = Thm.dest_binop (Thm.dest_arg cu) + in prove disjI4 (Thm.dest_arg cu2, true) (cu1, true) end + in Old_Z3_Proof_Tools.with_conv unfold_dis_conv prv ct end + | @{const Not} $ 
(Const (@{const_name distinct}, _) $ _) => + let + fun dis_conv cv = Conv.arg_conv (Conv.arg1_conv (Conv.arg_conv cv)) + val unfold_dis_conv = dis_conv Old_Z3_Proof_Tools.unfold_distinct_conv + fun prv cu = + let val (cu1, cu2) = Thm.dest_binop (Thm.dest_arg cu) + in prove disjI1 (Thm.dest_arg cu1, true) (cu2, true) end + in Old_Z3_Proof_Tools.with_conv unfold_dis_conv prv ct end + | _ => raise CTERM ("prove_def_axiom", [ct])) + end +in +fun def_axiom ctxt = Thm o try_apply ctxt [] [ + named ctxt "conj/disj/distinct" prove_def_axiom, + Old_Z3_Proof_Tools.by_abstraction 0 (true, false) ctxt [] (fn ctxt' => + named ctxt' "simp+fast" (Old_Z3_Proof_Tools.by_tac ctxt (simp_fast_tac ctxt')))] +end + + +(* local definitions *) +local + val intro_rules = [ + @{lemma "n == P ==> (~n | P) & (n | ~P)" by simp}, + @{lemma "n == (if P then s else t) ==> (~P | n = s) & (P | n = t)" + by simp}, + @{lemma "n == P ==> n = P" by (rule meta_eq_to_obj_eq)} ] + + val apply_rules = [ + @{lemma "(~n | P) & (n | ~P) ==> P == n" by (atomize(full)) fast}, + @{lemma "(~P | n = s) & (P | n = t) ==> (if P then s else t) == n" + by (atomize(full)) fastforce} ] + + val inst_rule = Old_Z3_Proof_Tools.match_instantiate Thm.dest_arg + + fun apply_rule ct = + (case get_first (try (inst_rule ct)) intro_rules of + SOME thm => thm + | NONE => raise CTERM ("intro_def", [ct])) +in +fun intro_def ct = Old_Z3_Proof_Tools.make_hyp_def (apply_rule ct) #>> Thm + +fun apply_def thm = + get_first (try (fn rule => MetaEq (thm COMP rule))) apply_rules + |> the_default (Thm thm) +end + + +(* negation normal form *) +local + val quant_rules1 = ([ + @{lemma "(!!x. P x == Q) ==> ALL x. P x == Q" by simp}, + @{lemma "(!!x. P x == Q) ==> EX x. P x == Q" by simp}], [ + @{lemma "(!!x. P x == Q x) ==> ALL x. P x == ALL x. Q x" by simp}, + @{lemma "(!!x. P x == Q x) ==> EX x. P x == EX x. Q x" by simp}]) + + val quant_rules2 = ([ + @{lemma "(!!x. ~P x == Q) ==> ~(ALL x. P x) == Q" by simp}, + @{lemma "(!!x. ~P x == Q) ==> ~(EX x. P x) == Q" by simp}], [ + @{lemma "(!!x. ~P x == Q x) ==> ~(ALL x. P x) == EX x. Q x" by simp}, + @{lemma "(!!x. ~P x == Q x) ==> ~(EX x. P x) == ALL x. 
Q x" by simp}]) + + fun nnf_quant_tac ctxt thm (qs as (qs1, qs2)) i st = ( + resolve_tac ctxt [thm] ORELSE' + (match_tac ctxt qs1 THEN' nnf_quant_tac ctxt thm qs) ORELSE' + (match_tac ctxt qs2 THEN' nnf_quant_tac ctxt thm qs)) i st + + fun nnf_quant_tac_varified ctxt vars eq = + nnf_quant_tac ctxt (Old_Z3_Proof_Tools.varify vars eq) + + fun nnf_quant ctxt vars qs p ct = + Old_Z3_Proof_Tools.as_meta_eq ct + |> Old_Z3_Proof_Tools.by_tac ctxt (nnf_quant_tac_varified ctxt vars (meta_eq_of p) qs) + + fun prove_nnf ctxt = try_apply ctxt [] [ + named ctxt "conj/disj" Old_Z3_Proof_Literals.prove_conj_disj_eq, + Old_Z3_Proof_Tools.by_abstraction 0 (true, false) ctxt [] (fn ctxt' => + named ctxt' "simp+fast" (Old_Z3_Proof_Tools.by_tac ctxt' (simp_fast_tac ctxt')))] +in +fun nnf ctxt vars ps ct = + (case Old_SMT_Utils.term_of ct of + _ $ (l as Const _ $ Abs _) $ (r as Const _ $ Abs _) => + if l aconv r + then MetaEq (Thm.reflexive (Thm.dest_arg (Thm.dest_arg ct))) + else MetaEq (nnf_quant ctxt vars quant_rules1 (hd ps) ct) + | _ $ (@{const Not} $ (Const _ $ Abs _)) $ (Const _ $ Abs _) => + MetaEq (nnf_quant ctxt vars quant_rules2 (hd ps) ct) + | _ => + let + val nnf_rewr_conv = Conv.arg_conv (Conv.arg_conv + (Old_Z3_Proof_Tools.unfold_eqs ctxt + (map (Thm.symmetric o meta_eq_of) ps))) + in Thm (Old_Z3_Proof_Tools.with_conv nnf_rewr_conv (prove_nnf ctxt) ct) end) +end + + + +(** equality proof rules **) + +(* |- t = t *) +fun refl ct = MetaEq (Thm.reflexive (Thm.dest_arg (Thm.dest_arg ct))) + + +(* s = t ==> t = s *) +local + val symm_rule = @{lemma "s = t ==> t == s" by simp} +in +fun symm (MetaEq thm) = MetaEq (Thm.symmetric thm) + | symm p = MetaEq (thm_of p COMP symm_rule) +end + + +(* s = t ==> t = u ==> s = u *) +local + val trans1 = @{lemma "s == t ==> t = u ==> s == u" by simp} + val trans2 = @{lemma "s = t ==> t == u ==> s == u" by simp} + val trans3 = @{lemma "s = t ==> t = u ==> s == u" by simp} +in +fun trans (MetaEq thm1) (MetaEq thm2) = MetaEq (Thm.transitive thm1 thm2) + | trans (MetaEq thm) q = MetaEq (thm_of q COMP (thm COMP trans1)) + | trans p (MetaEq thm) = MetaEq (thm COMP (thm_of p COMP trans2)) + | trans p q = MetaEq (thm_of q COMP (thm_of p COMP trans3)) +end + + +(* t1 = s1 ==> ... ==> tn = sn ==> f t1 ... tn = f s1 .. 
sn + (reflexive antecendents are droppped) *) +local + exception MONO + + fun prove_refl (ct, _) = Thm.reflexive ct + fun prove_comb f g cp = + let val ((ct1, ct2), (cu1, cu2)) = apply2 Thm.dest_comb cp + in Thm.combination (f (ct1, cu1)) (g (ct2, cu2)) end + fun prove_arg f = prove_comb prove_refl f + + fun prove f cp = prove_comb (prove f) f cp handle CTERM _ => prove_refl cp + + fun prove_nary is_comb f = + let + fun prove (cp as (ct, _)) = f cp handle MONO => + if is_comb (Thm.term_of ct) + then prove_comb (prove_arg prove) prove cp + else prove_refl cp + in prove end + + fun prove_list f n cp = + if n = 0 then prove_refl cp + else prove_comb (prove_arg f) (prove_list f (n-1)) cp + + fun with_length f (cp as (cl, _)) = + f (length (HOLogic.dest_list (Thm.term_of cl))) cp + + fun prove_distinct f = prove_arg (with_length (prove_list f)) + + fun prove_eq exn lookup cp = + (case lookup (Logic.mk_equals (apply2 Thm.term_of cp)) of + SOME eq => eq + | NONE => if exn then raise MONO else prove_refl cp) + + val prove_exn = prove_eq true + and prove_safe = prove_eq false + + fun mono f (cp as (cl, _)) = + (case Term.head_of (Thm.term_of cl) of + @{const HOL.conj} => prove_nary Old_Z3_Proof_Literals.is_conj (prove_exn f) + | @{const HOL.disj} => prove_nary Old_Z3_Proof_Literals.is_disj (prove_exn f) + | Const (@{const_name distinct}, _) => prove_distinct (prove_safe f) + | _ => prove (prove_safe f)) cp +in +fun monotonicity eqs ct = + let + fun and_symmetric (t, thm) = [(t, thm), (t, Thm.symmetric thm)] + val teqs = maps (and_symmetric o `Thm.prop_of o meta_eq_of) eqs + val lookup = AList.lookup (op aconv) teqs + val cp = Thm.dest_binop (Thm.dest_arg ct) + in MetaEq (prove_exn lookup cp handle MONO => mono lookup cp) end +end + + +(* |- f a b = f b a (where f is equality) *) +local + val rule = @{lemma "a = b == b = a" by (atomize(full)) (rule eq_commute)} +in +fun commutativity ct = + MetaEq (Old_Z3_Proof_Tools.match_instantiate I + (Old_Z3_Proof_Tools.as_meta_eq ct) rule) +end + + + +(** quantifier proof rules **) + +(* P ?x = Q ?x ==> (ALL x. P x) = (ALL x. Q x) + P ?x = Q ?x ==> (EX x. P x) = (EX x. Q x) *) +local + val rules = [ + @{lemma "(!!x. P x == Q x) ==> (ALL x. P x) == (ALL x. Q x)" by simp}, + @{lemma "(!!x. P x == Q x) ==> (EX x. P x) == (EX x. Q x)" by simp}] +in +fun quant_intro ctxt vars p ct = + let + val thm = meta_eq_of p + val rules' = Old_Z3_Proof_Tools.varify vars thm :: rules + val cu = Old_Z3_Proof_Tools.as_meta_eq ct + val tac = REPEAT_ALL_NEW (match_tac ctxt rules') + in MetaEq (Old_Z3_Proof_Tools.by_tac ctxt tac cu) end +end + + +(* |- ((ALL x. P x) | Q) = (ALL x. P x | Q) *) +fun pull_quant ctxt = Thm o try_apply ctxt [] [ + named ctxt "fast" (Old_Z3_Proof_Tools.by_tac ctxt (HOL_fast_tac ctxt))] + (* FIXME: not very well tested *) + + +(* |- (ALL x. P x & Q x) = ((ALL x. P x) & (ALL x. Q x)) *) +fun push_quant ctxt = Thm o try_apply ctxt [] [ + named ctxt "fast" (Old_Z3_Proof_Tools.by_tac ctxt (HOL_fast_tac ctxt))] + (* FIXME: not very well tested *) + + +(* |- (ALL x1 ... xn y1 ... yn. P x1 ... xn) = (ALL x1 ... xn. P x1 ... xn) *) +local + val elim_all = @{lemma "P = Q ==> (ALL x. P) = Q" by fast} + val elim_ex = @{lemma "P = Q ==> (EX x. 
P) = Q" by fast} + + fun elim_unused_tac ctxt i st = ( + match_tac ctxt [@{thm refl}] + ORELSE' (match_tac ctxt [elim_all, elim_ex] THEN' elim_unused_tac ctxt) + ORELSE' ( + match_tac ctxt [@{thm iff_allI}, @{thm iff_exI}] + THEN' elim_unused_tac ctxt)) i st +in + +fun elim_unused_vars ctxt = Thm o Old_Z3_Proof_Tools.by_tac ctxt (elim_unused_tac ctxt) + +end + + +(* |- (ALL x1 ... xn. ~(x1 = t1 & ... xn = tn) | P x1 ... xn) = P t1 ... tn *) +fun dest_eq_res ctxt = Thm o try_apply ctxt [] [ + named ctxt "fast" (Old_Z3_Proof_Tools.by_tac ctxt (HOL_fast_tac ctxt))] + (* FIXME: not very well tested *) + + +(* |- ~(ALL x1...xn. P x1...xn) | P a1...an *) +local + val rule = @{lemma "~ P x | Q ==> ~(ALL x. P x) | Q" by fast} +in +fun quant_inst ctxt = Thm o Old_Z3_Proof_Tools.by_tac ctxt ( + REPEAT_ALL_NEW (match_tac ctxt [rule]) + THEN' resolve_tac ctxt @{thms excluded_middle}) +end + + +(* |- (EX x. P x) = P c |- ~(ALL x. P x) = ~ P c *) +local + val forall = + Old_SMT_Utils.mk_const_pat @{theory} @{const_name Pure.all} + (Old_SMT_Utils.destT1 o Old_SMT_Utils.destT1) + fun mk_forall cv ct = + Thm.apply (Old_SMT_Utils.instT' cv forall) (Thm.lambda cv ct) + + fun get_vars f mk pred ctxt t = + Term.fold_aterms f t [] + |> map_filter (fn v => + if pred v then SOME (Thm.cterm_of ctxt (mk v)) else NONE) + + fun close vars f ct ctxt = + let + val frees_of = get_vars Term.add_frees Free (member (op =) vars o fst) + val vs = frees_of ctxt (Thm.term_of ct) + val (thm, ctxt') = f (fold_rev mk_forall vs ct) ctxt + val vars_of = get_vars Term.add_vars Var (K true) ctxt' + in + (Thm.instantiate ([], map (dest_Var o Thm.term_of) (vars_of (Thm.prop_of thm)) ~~ vs) thm, + ctxt') + end + + val sk_rules = @{lemma + "c = (SOME x. P x) ==> (EX x. P x) = P c" + "c = (SOME x. ~P x) ==> (~(ALL x. P x)) = (~P c)" + by (metis someI_ex)+} +in + +fun skolemize vars = + apfst Thm oo close vars (yield_singleton Assumption.add_assumes) + +fun discharge_sk_tac ctxt i st = ( + resolve_tac ctxt @{thms trans} i + THEN resolve_tac ctxt sk_rules i + THEN (resolve_tac ctxt @{thms refl} ORELSE' discharge_sk_tac ctxt) (i+1) + THEN resolve_tac ctxt @{thms refl} i) st + +end + + + +(** theory proof rules **) + +(* theory lemmas: linear arithmetic, arrays *) +fun th_lemma ctxt simpset thms = Thm o try_apply ctxt thms [ + Old_Z3_Proof_Tools.by_abstraction 0 (false, true) ctxt thms (fn ctxt' => + Old_Z3_Proof_Tools.by_tac ctxt' ( + NAMED ctxt' "arith" (Arith_Data.arith_tac ctxt') + ORELSE' NAMED ctxt' "simp+arith" ( + Simplifier.asm_full_simp_tac (put_simpset simpset ctxt') + THEN_ALL_NEW Arith_Data.arith_tac ctxt')))] + + +(* rewriting: prove equalities: + * ACI of conjunction/disjunction + * contradiction, excluded middle + * logical rewriting rules (for negation, implication, equivalence, + distinct) + * normal forms for polynoms (integer/real arithmetic) + * quantifier elimination over linear arithmetic + * ... ? 
**) +local + fun spec_meta_eq_of thm = + (case try (fn th => th RS @{thm spec}) thm of + SOME thm' => spec_meta_eq_of thm' + | NONE => mk_meta_eq thm) + + fun prep (Thm thm) = spec_meta_eq_of thm + | prep (MetaEq thm) = thm + | prep (Literals (thm, _)) = spec_meta_eq_of thm + + fun unfold_conv ctxt ths = + Conv.arg_conv (Conv.binop_conv (Old_Z3_Proof_Tools.unfold_eqs ctxt + (map prep ths))) + + fun with_conv _ [] prv = prv + | with_conv ctxt ths prv = + Old_Z3_Proof_Tools.with_conv (unfold_conv ctxt ths) prv + + val unfold_conv = + Conv.arg_conv (Conv.binop_conv + (Conv.try_conv Old_Z3_Proof_Tools.unfold_distinct_conv)) + val prove_conj_disj_eq = + Old_Z3_Proof_Tools.with_conv unfold_conv Old_Z3_Proof_Literals.prove_conj_disj_eq + + fun declare_hyps ctxt thm = + (thm, snd (Assumption.add_assumes (Thm.chyps_of thm) ctxt)) +in + +val abstraction_depth = 3 + (* + This value was chosen large enough to potentially catch exceptions, + yet small enough to not cause too much harm. The value might be + increased in the future, if reconstructing 'rewrite' fails on problems + that get too much abstracted to be reconstructable. + *) + +fun rewrite simpset ths ct ctxt = + apfst Thm (declare_hyps ctxt (with_conv ctxt ths (try_apply ctxt [] [ + named ctxt "conj/disj/distinct" prove_conj_disj_eq, + named ctxt "pull-ite" Old_Z3_Proof_Methods.prove_ite ctxt, + Old_Z3_Proof_Tools.by_abstraction 0 (true, false) ctxt [] (fn ctxt' => + Old_Z3_Proof_Tools.by_tac ctxt' ( + NAMED ctxt' "simp (logic)" (Simplifier.simp_tac (put_simpset simpset ctxt')) + THEN_ALL_NEW NAMED ctxt' "fast (logic)" (fast_tac ctxt'))), + Old_Z3_Proof_Tools.by_abstraction 0 (false, true) ctxt [] (fn ctxt' => + Old_Z3_Proof_Tools.by_tac ctxt' ( + (resolve_tac ctxt' @{thms iff_allI} ORELSE' K all_tac) + THEN' NAMED ctxt' "simp (theory)" (Simplifier.simp_tac (put_simpset simpset ctxt')) + THEN_ALL_NEW ( + NAMED ctxt' "fast (theory)" (HOL_fast_tac ctxt') + ORELSE' NAMED ctxt' "arith (theory)" (Arith_Data.arith_tac ctxt')))), + Old_Z3_Proof_Tools.by_abstraction 0 (true, true) ctxt [] (fn ctxt' => + Old_Z3_Proof_Tools.by_tac ctxt' ( + (resolve_tac ctxt' @{thms iff_allI} ORELSE' K all_tac) + THEN' NAMED ctxt' "simp (full)" (Simplifier.simp_tac (put_simpset simpset ctxt')) + THEN_ALL_NEW ( + NAMED ctxt' "fast (full)" (HOL_fast_tac ctxt') + ORELSE' NAMED ctxt' "arith (full)" (Arith_Data.arith_tac ctxt')))), + named ctxt "injectivity" (Old_Z3_Proof_Methods.prove_injectivity ctxt), + Old_Z3_Proof_Tools.by_abstraction abstraction_depth (true, true) ctxt [] + (fn ctxt' => + Old_Z3_Proof_Tools.by_tac ctxt' ( + (resolve_tac ctxt' @{thms iff_allI} ORELSE' K all_tac) + THEN' NAMED ctxt' "simp (deepen)" (Simplifier.simp_tac (put_simpset simpset ctxt')) + THEN_ALL_NEW ( + NAMED ctxt' "fast (deepen)" (HOL_fast_tac ctxt') + ORELSE' NAMED ctxt' "arith (deepen)" (Arith_Data.arith_tac + ctxt'))))]) ct)) + +end + + + +(* proof reconstruction *) + +(** tracing and checking **) + +fun trace_before ctxt idx = Old_SMT_Config.trace_msg ctxt (fn r => + "Z3: #" ^ string_of_int idx ^ ": " ^ Old_Z3_Proof_Parser.string_of_rule r) + +fun check_after idx r ps ct (p, (ctxt, _)) = + if not (Config.get ctxt Old_SMT_Config.trace) then () + else + let val thm = thm_of p |> tap (Thm.join_proofs o single) + in + if (Thm.cprop_of thm) aconvc ct then () + else + z3_exn (Pretty.string_of (Pretty.big_list + ("proof step failed: " ^ quote (Old_Z3_Proof_Parser.string_of_rule r) ^ + " (#" ^ string_of_int idx ^ ")") + (pretty_goal ctxt (map (thm_of o fst) ps) (Thm.prop_of thm) @ + 
[Pretty.block [Pretty.str "expected: ", + Syntax.pretty_term ctxt (Thm.term_of ct)]]))) + end + + +(** overall reconstruction procedure **) + +local + fun not_supported r = raise Fail ("Z3: proof rule not implemented: " ^ + quote (Old_Z3_Proof_Parser.string_of_rule r)) + + fun prove_step simpset vars r ps ct (cxp as (cx, ptab)) = + (case (r, ps) of + (* core rules *) + (Old_Z3_Proof_Parser.True_Axiom, _) => (Thm Old_Z3_Proof_Literals.true_thm, cxp) + | (Old_Z3_Proof_Parser.Asserted, _) => raise Fail "bad assertion" + | (Old_Z3_Proof_Parser.Goal, _) => raise Fail "bad assertion" + | (Old_Z3_Proof_Parser.Modus_Ponens, [(p, _), (q, _)]) => + (mp q (thm_of p), cxp) + | (Old_Z3_Proof_Parser.Modus_Ponens_Oeq, [(p, _), (q, _)]) => + (mp q (thm_of p), cxp) + | (Old_Z3_Proof_Parser.And_Elim, [(p, i)]) => + and_elim (p, i) ct ptab ||> pair cx + | (Old_Z3_Proof_Parser.Not_Or_Elim, [(p, i)]) => + not_or_elim (p, i) ct ptab ||> pair cx + | (Old_Z3_Proof_Parser.Hypothesis, _) => (Thm (Thm.assume ct), cxp) + | (Old_Z3_Proof_Parser.Lemma, [(p, _)]) => (lemma (thm_of p) ct, cxp) + | (Old_Z3_Proof_Parser.Unit_Resolution, (p, _) :: ps) => + (unit_resolution (thm_of p) (map (thm_of o fst) ps) ct, cxp) + | (Old_Z3_Proof_Parser.Iff_True, [(p, _)]) => (iff_true (thm_of p), cxp) + | (Old_Z3_Proof_Parser.Iff_False, [(p, _)]) => (iff_false (thm_of p), cxp) + | (Old_Z3_Proof_Parser.Distributivity, _) => (distributivity cx ct, cxp) + | (Old_Z3_Proof_Parser.Def_Axiom, _) => (def_axiom cx ct, cxp) + | (Old_Z3_Proof_Parser.Intro_Def, _) => intro_def ct cx ||> rpair ptab + | (Old_Z3_Proof_Parser.Apply_Def, [(p, _)]) => (apply_def (thm_of p), cxp) + | (Old_Z3_Proof_Parser.Iff_Oeq, [(p, _)]) => (p, cxp) + | (Old_Z3_Proof_Parser.Nnf_Pos, _) => (nnf cx vars (map fst ps) ct, cxp) + | (Old_Z3_Proof_Parser.Nnf_Neg, _) => (nnf cx vars (map fst ps) ct, cxp) + + (* equality rules *) + | (Old_Z3_Proof_Parser.Reflexivity, _) => (refl ct, cxp) + | (Old_Z3_Proof_Parser.Symmetry, [(p, _)]) => (symm p, cxp) + | (Old_Z3_Proof_Parser.Transitivity, [(p, _), (q, _)]) => (trans p q, cxp) + | (Old_Z3_Proof_Parser.Monotonicity, _) => (monotonicity (map fst ps) ct, cxp) + | (Old_Z3_Proof_Parser.Commutativity, _) => (commutativity ct, cxp) + + (* quantifier rules *) + | (Old_Z3_Proof_Parser.Quant_Intro, [(p, _)]) => (quant_intro cx vars p ct, cxp) + | (Old_Z3_Proof_Parser.Pull_Quant, _) => (pull_quant cx ct, cxp) + | (Old_Z3_Proof_Parser.Push_Quant, _) => (push_quant cx ct, cxp) + | (Old_Z3_Proof_Parser.Elim_Unused_Vars, _) => (elim_unused_vars cx ct, cxp) + | (Old_Z3_Proof_Parser.Dest_Eq_Res, _) => (dest_eq_res cx ct, cxp) + | (Old_Z3_Proof_Parser.Quant_Inst, _) => (quant_inst cx ct, cxp) + | (Old_Z3_Proof_Parser.Skolemize, _) => skolemize vars ct cx ||> rpair ptab + + (* theory rules *) + | (Old_Z3_Proof_Parser.Th_Lemma _, _) => (* FIXME: use arguments *) + (th_lemma cx simpset (map (thm_of o fst) ps) ct, cxp) + | (Old_Z3_Proof_Parser.Rewrite, _) => rewrite simpset [] ct cx ||> rpair ptab + | (Old_Z3_Proof_Parser.Rewrite_Star, ps) => + rewrite simpset (map fst ps) ct cx ||> rpair ptab + + | (Old_Z3_Proof_Parser.Nnf_Star, _) => not_supported r + | (Old_Z3_Proof_Parser.Cnf_Star, _) => not_supported r + | (Old_Z3_Proof_Parser.Transitivity_Star, _) => not_supported r + | (Old_Z3_Proof_Parser.Pull_Quant_Star, _) => not_supported r + + | _ => raise Fail ("Z3: proof rule " ^ + quote (Old_Z3_Proof_Parser.string_of_rule r) ^ + " has an unexpected number of arguments.")) + + fun lookup_proof ptab idx = + (case Inttab.lookup ptab idx of + SOME p => (p, 
idx) + | NONE => z3_exn ("unknown proof id: " ^ quote (string_of_int idx))) + + fun prove simpset vars (idx, step) (_, cxp as (ctxt, ptab)) = + let + val Old_Z3_Proof_Parser.Proof_Step {rule=r, prems, prop, ...} = step + val ps = map (lookup_proof ptab) prems + val _ = trace_before ctxt idx r + val (thm, (ctxt', ptab')) = + cxp + |> prove_step simpset vars r ps prop + |> tap (check_after idx r ps prop) + in (thm, (ctxt', Inttab.update (idx, thm) ptab')) end + + fun make_discharge_rules rules = rules @ [@{thm allI}, @{thm refl}, + @{thm reflexive}, Old_Z3_Proof_Literals.true_thm] + + fun discharge_assms_tac ctxt rules = + REPEAT (HEADGOAL (resolve_tac ctxt rules ORELSE' SOLVED' (discharge_sk_tac ctxt))) + + fun discharge_assms ctxt rules thm = + if Thm.nprems_of thm = 0 then Goal.norm_result ctxt thm + else + (case Seq.pull (discharge_assms_tac ctxt rules thm) of + SOME (thm', _) => Goal.norm_result ctxt thm' + | NONE => raise THM ("failed to discharge premise", 1, [thm])) + + fun discharge rules outer_ctxt (p, (inner_ctxt, _)) = + thm_of p + |> singleton (Proof_Context.export inner_ctxt outer_ctxt) + |> discharge_assms outer_ctxt (make_discharge_rules rules) +in + +fun reconstruct outer_ctxt recon output = + let + val {context=ctxt, typs, terms, rewrite_rules, assms} = recon + val (asserted, steps, vars, ctxt1) = + Old_Z3_Proof_Parser.parse ctxt typs terms output + + val simpset = + Old_Z3_Proof_Tools.make_simpset ctxt1 (Named_Theorems.get ctxt1 @{named_theorems old_z3_simp}) + + val ((is, rules), cxp as (ctxt2, _)) = + add_asserted outer_ctxt rewrite_rules assms asserted ctxt1 + in + if Config.get ctxt2 Old_SMT_Config.filter_only_facts then (is, @{thm TrueI}) + else + (Thm @{thm TrueI}, cxp) + |> fold (prove simpset vars) steps + |> discharge rules outer_ctxt + |> pair [] + end + +end + +end diff --git a/src/test-gen/src/main/Old_SMT/old_z3_proof_tools.ML b/src/test-gen/src/main/Old_SMT/old_z3_proof_tools.ML new file mode 100644 index 0000000..8fc65ba --- /dev/null +++ b/src/test-gen/src/main/Old_SMT/old_z3_proof_tools.ML @@ -0,0 +1,374 @@ +(* Title: HOL/Library/Old_SMT/old_z3_proof_tools.ML + Author: Sascha Boehme, TU Muenchen + +Helper functions required for Z3 proof reconstruction. 
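+They are used by the proof reconstruction code above and cover certified-term
+manipulation, theorem nets, proof combinators, term abstraction, a faster COMP,
+unfolding of 'distinct', and the simpset used for 'rewrite' and theory-lemma steps.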
+*) + +signature OLD_Z3_PROOF_TOOLS = +sig + (*modifying terms*) + val as_meta_eq: cterm -> cterm + + (*theorem nets*) + val thm_net_of: ('a -> thm) -> 'a list -> 'a Net.net + val net_instances: (int * thm) Net.net -> cterm -> (int * thm) list + val net_instance: thm Net.net -> cterm -> thm option + + (*proof combinators*) + val under_assumption: (thm -> thm) -> cterm -> thm + val with_conv: conv -> (cterm -> thm) -> cterm -> thm + val discharge: thm -> thm -> thm + val varify: string list -> thm -> thm + val unfold_eqs: Proof.context -> thm list -> conv + val match_instantiate: (cterm -> cterm) -> cterm -> thm -> thm + val by_tac: Proof.context -> (int -> tactic) -> cterm -> thm + val make_hyp_def: thm -> Proof.context -> thm * Proof.context + val by_abstraction: int -> bool * bool -> Proof.context -> thm list -> + (Proof.context -> cterm -> thm) -> cterm -> thm + + (*a faster COMP*) + type compose_data = cterm list * (cterm -> cterm list) * thm + val precompose: (cterm -> cterm list) -> thm -> compose_data + val precompose2: (cterm -> cterm * cterm) -> thm -> compose_data + val compose: compose_data -> thm -> thm + + (*unfolding of 'distinct'*) + val unfold_distinct_conv: conv + + (*simpset*) + val add_simproc: Simplifier.simproc -> Context.generic -> Context.generic + val make_simpset: Proof.context -> thm list -> simpset +end + +structure Old_Z3_Proof_Tools: OLD_Z3_PROOF_TOOLS = +struct + + + +(* modifying terms *) + +fun as_meta_eq ct = + uncurry Old_SMT_Utils.mk_cequals (Thm.dest_binop (Old_SMT_Utils.dest_cprop ct)) + + + +(* theorem nets *) + +fun thm_net_of f xthms = + let fun insert xthm = Net.insert_term (K false) (Thm.prop_of (f xthm), xthm) + in fold insert xthms Net.empty end + +fun maybe_instantiate ct thm = + try Thm.first_order_match (Thm.cprop_of thm, ct) + |> Option.map (fn inst => Thm.instantiate inst thm) + +local + fun instances_from_net match f net ct = + let + val lookup = if match then Net.match_term else Net.unify_term + val xthms = lookup net (Thm.term_of ct) + fun select ct = map_filter (f (maybe_instantiate ct)) xthms + fun select' ct = + let val thm = Thm.trivial ct + in map_filter (f (try (fn rule => rule COMP thm))) xthms end + in (case select ct of [] => select' ct | xthms' => xthms') end +in + +fun net_instances net = + instances_from_net false (fn f => fn (i, thm) => Option.map (pair i) (f thm)) + net + +fun net_instance net = try hd o instances_from_net true I net + +end + + + +(* proof combinators *) + +fun under_assumption f ct = + let val ct' = Old_SMT_Utils.mk_cprop ct + in Thm.implies_intr ct' (f (Thm.assume ct')) end + +fun with_conv conv prove ct = + let val eq = Thm.symmetric (conv ct) + in Thm.equal_elim eq (prove (Thm.lhs_of eq)) end + +fun discharge p pq = Thm.implies_elim pq p + +fun varify vars = Drule.generalize ([], vars) + +fun unfold_eqs _ [] = Conv.all_conv + | unfold_eqs ctxt eqs = + Conv.top_sweep_conv (K (Conv.rewrs_conv eqs)) ctxt + +fun match_instantiate f ct thm = + Thm.instantiate (Thm.match (f (Thm.cprop_of thm), ct)) thm + +fun by_tac ctxt tac ct = Goal.norm_result ctxt (Goal.prove_internal ctxt [] ct (K (tac 1))) + +(* + |- c x == t x ==> P (c x) + --------------------------- + c == t |- P (c x) +*) +fun make_hyp_def thm ctxt = + let + val (lhs, rhs) = Thm.dest_binop (Thm.cprem_of thm 1) + val (cf, cvs) = Drule.strip_comb lhs + val eq = Old_SMT_Utils.mk_cequals cf (fold_rev Thm.lambda cvs rhs) + fun apply cv th = + Thm.combination th (Thm.reflexive cv) + |> Conv.fconv_rule (Conv.arg_conv (Thm.beta_conversion false)) + in + 
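+    (* assume the abstracted definition cf == (%xs. rhs), re-apply it to the
+       original arguments cvs, and use the resulting equation to discharge
+       the premise of thm *)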
yield_singleton Assumption.add_assumes eq ctxt + |>> Thm.implies_elim thm o fold apply cvs + end + + + +(* abstraction *) + +local + +fun abs_context ctxt = (ctxt, Termtab.empty, 1, false) + +fun context_of (ctxt, _, _, _) = ctxt + +fun replace (_, (cv, ct)) = Thm.forall_elim ct o Thm.forall_intr cv + +fun abs_instantiate (_, tab, _, beta_norm) = + fold replace (Termtab.dest tab) #> + beta_norm ? Conv.fconv_rule (Thm.beta_conversion true) + +fun lambda_abstract cvs t = + let + val frees = map Free (Term.add_frees t []) + val cvs' = filter (fn cv => member (op aconv) frees (Thm.term_of cv)) cvs + val vs = map (Term.dest_Free o Thm.term_of) cvs' + in (fold_rev absfree vs t, cvs') end + +fun fresh_abstraction (_, cvs) ct (cx as (ctxt, tab, idx, beta_norm)) = + let val (t, cvs') = lambda_abstract cvs (Thm.term_of ct) + in + (case Termtab.lookup tab t of + SOME (cv, _) => (Drule.list_comb (cv, cvs'), cx) + | NONE => + let + val (n, ctxt') = yield_singleton Variable.variant_fixes "x" ctxt + val cv = Thm.cterm_of ctxt' + (Free (n, map Thm.typ_of_cterm cvs' ---> Thm.typ_of_cterm ct)) + val cu = Drule.list_comb (cv, cvs') + val e = (t, (cv, fold_rev Thm.lambda cvs' ct)) + val beta_norm' = beta_norm orelse not (null cvs') + in (cu, (ctxt', Termtab.update e tab, idx + 1, beta_norm')) end) + end + +fun abs_comb f g dcvs ct = + let val (cf, cu) = Thm.dest_comb ct + in f dcvs cf ##>> g dcvs cu #>> uncurry Thm.apply end + +fun abs_arg f = abs_comb (K pair) f + +fun abs_args f dcvs ct = + (case Thm.term_of ct of + _ $ _ => abs_comb (abs_args f) f dcvs ct + | _ => pair ct) + +fun abs_list f g dcvs ct = + (case Thm.term_of ct of + Const (@{const_name Nil}, _) => pair ct + | Const (@{const_name Cons}, _) $ _ $ _ => + abs_comb (abs_arg f) (abs_list f g) dcvs ct + | _ => g dcvs ct) + +fun abs_abs f (depth, cvs) ct = + let val (cv, cu) = Thm.dest_abs NONE ct + in f (depth, cv :: cvs) cu #>> Thm.lambda cv end + +val is_atomic = + (fn Free _ => true | Var _ => true | Bound _ => true | _ => false) + +fun abstract depth (ext_logic, with_theories) = + let + fun abstr1 cvs ct = abs_arg abstr cvs ct + and abstr2 cvs ct = abs_comb abstr1 abstr cvs ct + and abstr3 cvs ct = abs_comb abstr2 abstr cvs ct + and abstr_abs cvs ct = abs_arg (abs_abs abstr) cvs ct + + and abstr (dcvs as (d, cvs)) ct = + (case Thm.term_of ct of + @{const Trueprop} $ _ => abstr1 dcvs ct + | @{const Pure.imp} $ _ $ _ => abstr2 dcvs ct + | @{const True} => pair ct + | @{const False} => pair ct + | @{const Not} $ _ => abstr1 dcvs ct + | @{const HOL.conj} $ _ $ _ => abstr2 dcvs ct + | @{const HOL.disj} $ _ $ _ => abstr2 dcvs ct + | @{const HOL.implies} $ _ $ _ => abstr2 dcvs ct + | Const (@{const_name HOL.eq}, _) $ _ $ _ => abstr2 dcvs ct + | Const (@{const_name distinct}, _) $ _ => + if ext_logic then abs_arg (abs_list abstr fresh_abstraction) dcvs ct + else fresh_abstraction dcvs ct + | Const (@{const_name If}, _) $ _ $ _ $ _ => + if ext_logic then abstr3 dcvs ct else fresh_abstraction dcvs ct + | Const (@{const_name All}, _) $ _ => + if ext_logic then abstr_abs dcvs ct else fresh_abstraction dcvs ct + | Const (@{const_name Ex}, _) $ _ => + if ext_logic then abstr_abs dcvs ct else fresh_abstraction dcvs ct + | t => (fn cx => + if is_atomic t orelse can HOLogic.dest_number t then (ct, cx) + else if with_theories andalso + Old_Z3_Interface.is_builtin_theory_term (context_of cx) t + then abs_args abstr dcvs ct cx + else if d = 0 then fresh_abstraction dcvs ct cx + else + (case Term.strip_comb t of + (Const _, _) => abs_args abstr (d-1, cvs) ct cx + | 
(Free _, _) => abs_args abstr (d-1, cvs) ct cx + | _ => fresh_abstraction dcvs ct cx))) + in abstr (depth, []) end + +val cimp = Thm.cterm_of @{context} @{const Pure.imp} + +fun deepen depth f x = + if depth = 0 then f depth x + else (case try (f depth) x of SOME y => y | NONE => deepen (depth - 1) f x) + +fun with_prems depth thms f ct = + fold_rev (Thm.mk_binop cimp o Thm.cprop_of) thms ct + |> deepen depth f + |> fold (fn prem => fn th => Thm.implies_elim th prem) thms + +in + +fun by_abstraction depth mode ctxt thms prove = + with_prems depth thms (fn d => fn ct => + let val (cu, cx) = abstract d mode ct (abs_context ctxt) + in abs_instantiate cx (prove (context_of cx) cu) end) + +end + + + +(* a faster COMP *) + +type compose_data = cterm list * (cterm -> cterm list) * thm + +fun list2 (x, y) = [x, y] + +fun precompose f rule : compose_data = (f (Thm.cprem_of rule 1), f, rule) +fun precompose2 f rule : compose_data = precompose (list2 o f) rule + +fun compose (cvs, f, rule) thm = + discharge thm (Thm.instantiate ([], map (dest_Var o Thm.term_of) cvs ~~ f (Thm.cprop_of thm)) rule) + + + +(* unfolding of 'distinct' *) + +local + val set1 = @{lemma "x ~: set [] == ~False" by simp} + val set2 = @{lemma "x ~: set [x] == False" by simp} + val set3 = @{lemma "x ~: set [y] == x ~= y" by simp} + val set4 = @{lemma "x ~: set (x # ys) == False" by simp} + val set5 = @{lemma "x ~: set (y # ys) == x ~= y & x ~: set ys" by simp} + + fun set_conv ct = + (Conv.rewrs_conv [set1, set2, set3, set4] else_conv + (Conv.rewr_conv set5 then_conv Conv.arg_conv set_conv)) ct + + val dist1 = @{lemma "distinct [] == ~False" by (simp add: distinct_def)} + val dist2 = @{lemma "distinct [x] == ~False" by (simp add: distinct_def)} + val dist3 = @{lemma "distinct (x # xs) == x ~: set xs & distinct xs" + by (simp add: distinct_def)} + + fun binop_conv cv1 cv2 = Conv.combination_conv (Conv.arg_conv cv1) cv2 +in +fun unfold_distinct_conv ct = + (Conv.rewrs_conv [dist1, dist2] else_conv + (Conv.rewr_conv dist3 then_conv binop_conv set_conv unfold_distinct_conv)) ct +end + + + +(* simpset *) + +local + val antisym_le1 = mk_meta_eq @{thm order_class.antisym_conv} + val antisym_le2 = mk_meta_eq @{thm linorder_class.antisym_conv2} + val antisym_less1 = mk_meta_eq @{thm linorder_class.antisym_conv1} + val antisym_less2 = mk_meta_eq @{thm linorder_class.antisym_conv3} + + fun eq_prop t thm = HOLogic.mk_Trueprop t aconv Thm.prop_of thm + fun dest_binop ((c as Const _) $ t $ u) = (c, t, u) + | dest_binop t = raise TERM ("dest_binop", [t]) + + fun prove_antisym_le ctxt ct = + let + val (le, r, s) = dest_binop (Thm.term_of ct) + val less = Const (@{const_name less}, Term.fastype_of le) + val prems = Simplifier.prems_of ctxt + in + (case find_first (eq_prop (le $ s $ r)) prems of + NONE => + find_first (eq_prop (HOLogic.mk_not (less $ r $ s))) prems + |> Option.map (fn thm => thm RS antisym_less1) + | SOME thm => SOME (thm RS antisym_le1)) + end + handle THM _ => NONE + + fun prove_antisym_less ctxt ct = + let + val (less, r, s) = dest_binop (HOLogic.dest_not (Thm.term_of ct)) + val le = Const (@{const_name less_eq}, Term.fastype_of less) + val prems = Simplifier.prems_of ctxt + in + (case find_first (eq_prop (le $ r $ s)) prems of + NONE => + find_first (eq_prop (HOLogic.mk_not (less $ s $ r))) prems + |> Option.map (fn thm => thm RS antisym_less2) + | SOME thm => SOME (thm RS antisym_le2)) + end + handle THM _ => NONE + + val basic_simpset = + simpset_of (put_simpset HOL_ss @{context} + addsimps @{thms field_simps} + addsimps 
[@{thm times_divide_eq_right}, @{thm times_divide_eq_left}] + addsimps @{thms arith_special} addsimps @{thms arith_simps} + addsimps @{thms rel_simps} + addsimps @{thms array_rules} + addsimps @{thms term_true_def} addsimps @{thms term_false_def} + addsimps @{thms z3div_def} addsimps @{thms z3mod_def} + addsimprocs [@{simproc numeral_divmod}] + addsimprocs [ + Simplifier.make_simproc @{context} "fast_int_arith" + {lhss = [@{term "(m::int) < n"}, @{term "(m::int) \ n"}, @{term "(m::int) = n"}], + proc = K Lin_Arith.simproc}, + Simplifier.make_simproc @{context} "antisym_le" + {lhss = [@{term "(x::'a::order) \ y"}], + proc = K prove_antisym_le}, + Simplifier.make_simproc @{context} "antisym_less" + {lhss = [@{term "\ (x::'a::linorder) < y"}], + proc = K prove_antisym_less}]) + + structure Simpset = Generic_Data + ( + type T = simpset + val empty = basic_simpset + val extend = I + val merge = Simplifier.merge_ss + ) +in + +fun add_simproc simproc context = + Simpset.map (simpset_map (Context.proof_of context) + (fn ctxt => ctxt addsimprocs [simproc])) context + +fun make_simpset ctxt rules = + simpset_of (put_simpset (Simpset.get (Context.Proof ctxt)) ctxt addsimps rules) + +end + +end diff --git a/src/test-gen/src/main/QuickCheckBackend.thy b/src/test-gen/src/main/QuickCheckBackend.thy new file mode 100644 index 0000000..4fb6091 --- /dev/null +++ b/src/test-gen/src/main/QuickCheckBackend.thy @@ -0,0 +1,113 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * TestGen.thy --- the core of the HOL-TestGen System. + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2010 ETH Zurich, Switzerland + * 2008-2013 Achim D. Brucker, Germany + * 2009-2013 Universite Paris-Sud, France + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ ******************************************************************************) +(* $Id:$ *) + +chapter {* The QuickCheck backend *} + +theory QuickCheckBackend +imports + HOL.HOL + HOL.Int + HOL.List + TestEnv + BackendUtils + +begin + + +ML{* + +structure QuickCheckBackend = +struct + +fun list_abs_var t [] = t + | list_abs_var t ((x as Var(_, T))::vars) = Abs(Name.internal Name.uu, T, abstract_over(x, list_abs_var t vars)) + +fun iterate f 0 = NONE + | iterate f k = case f () handle Match => NONE + of NONE => iterate f (k - 1) | SOME q => SOME q; + +fun quickcheck_tac' ctxt iters n thm = + let + val size = 15 + val thy = Proof_Context.theory_of ctxt + val vars = BackendUtils.premvars n thm + val prem = Logic.nth_prem(n, Thm.prop_of thm) + val neg = @{term Not} $ (HOLogic.dest_Trueprop prem) + val neg' = list_abs_var neg vars + + (* TODO: use new code generation + + val tester = Codegen.test_term ctxt n + + *) + fun tester x = NONE + fun with_tester k = iterate (fn () => tester k) iters + fun with_size k = if k > size then NONE + else + (case with_tester k + of NONE => with_size (k + 1) + | SOME q => SOME q); + in case with_size 1 of + SOME insts => let + val instantiated = Drule.instantiate_normalize + ([], BackendUtils.certify_pairs ctxt (vars ~~ insts)) thm + in + full_simp_tac ctxt n instantiated + end + | NONE => Seq.empty + end + +fun quickcheck_tac ctxt iters n thm = let + val tac = Object_Logic.full_atomize_tac ctxt THEN' (quickcheck_tac' ctxt iters) +in + (case (Seq.pull (tac n thm)) of + SOME (x, xq) => Seq.single x + | NONE => Seq.empty) + handle ERROR _ => Seq.empty (* Catch "unable to generate code" exceptions *) +end + +end + +*} + +end diff --git a/src/test-gen/src/main/RandomBackend.thy b/src/test-gen/src/main/RandomBackend.thy new file mode 100644 index 0000000..6cb0f4f --- /dev/null +++ b/src/test-gen/src/main/RandomBackend.thy @@ -0,0 +1,243 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * TestGen.thy --- the core of the HOL-TestGen System. + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2010 ETH Zurich, Switzerland + * 2008-2013 Achim D. Brucker, Germany + * 2009-2013 Universite Paris-Sud, France + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id:$ *) + +chapter {* The random solver *} + +theory RandomBackend +imports + HOL.HOL + HOL.Int + HOL.List + TestEnv + BackendUtils + +begin + + +ML{* + +structure RandomBackend = +struct + +open HOLogic + +(* Backported from Isabelle2011-1 module Pure/Library.ML *) +fun frequency xs = + let + val sum = Library.foldl op + (0, map fst xs); + fun pick n ((k: int, x) :: xs) = if n <= k then x else pick (n - k) xs + in pick (Random.random_range 1 sum) xs end; + + +val (trace, trace_setup) = Attrib.config_bool @{binding testgen_trace} (K false); + +fun calc_constr_list tgt descr = +let val recTs = Old_Datatype_Aux.get_rec_types descr; + val newTs = Library.take (length descr) recTs; + val (_,(_,insts,tgt_constrs)) = hd(filter (fn (_,(n,_,_)) => n = tgt) descr) + val T = hd(filter (fn (Type(n,_)) => n = tgt) newTs) + val typ_of_dtyp = Old_Datatype_Aux.typ_of_dtyp descr + val constr_decls = map (fn (cname, cargs) => + Const(cname, map typ_of_dtyp cargs ---> T)) + (tgt_constrs) +in (map Old_Datatype_Aux.dest_DtTFree insts, constr_decls) end; + + +(* Getting the information associated to an extended record type name *) +fun is_record thy s = + let + fun remove_prefix [] = [] + | remove_prefix (c::cs) = if c = #"." then cs else remove_prefix cs + fun remove_suffix s = String.implode (List.rev (remove_prefix (List.rev (String.explode s)))) + in + if String.isSuffix "_ext" s then + Record.get_info thy (remove_suffix s) + else + NONE + end + + +(* Random value generator for user-defined records + Note: it does not work for extended records *) +fun random_record w n max ctxt cod_term_tab i = + let + (* Generating random values for the fields *) + val fields = #fields(i) + val random_fields = List.map (fn (_,ty) => random_term' w (n+1) max ctxt cod_term_tab ty) fields + + (* Getting the record maker. 
Another way would be to generate a Const whose name is the same + as the name of the type *) + fun head (a $ _) = head a + | head t = t + val (_ $ app_make $ _) = Thm.concl_of (hd (#defs(i))) + val make = head app_make + + (* Building the record *) + val res = List.foldl (fn (f,h) => h $ f) make random_fields + in + res + end + +(* Random value generator for user-defined data-types *) +and random_datatype w n max ctxt cod_term_tab s S i = + let + val descr = #descr(i) + + val (insts,constrs) = calc_constr_list s descr + + val weighed_constrs = + let + fun ar args = (length (filter (fn t => + case t of + Old_Datatype_Aux.DtRec _ => true + | _ => false ) args) ) + val constr_arity_list = map (fn (f,args) => (f,(ar args))) + (maps (#3 o snd) descr) + in + map (fn (f,a) => if a = 0 then (1,f) else (a * w,f)) + constr_arity_list + end + + val weighed_constrs = if (n >= max) + then filter (fn (w,_) => w =1) weighed_constrs + else weighed_constrs + fun weight_of t = fst(hd ((filter (fn (_,ty) => ty=t)) weighed_constrs)) + + fun frequency' xs = + let + val max = List.foldl Int.max 0 (map fst xs); + val xs' = map (fn (x,a) => (max-x+1,a)) xs + in + frequency xs' + end + + (* the default is a random bias towards constants *) + val constr = frequency' weighed_constrs + val Const(h,ty) = hd (filter (fn Const(h,ty) => h = constr) constrs) + val w = weight_of h + val ty_binds = insts ~~ S + fun ty_inst s = the (AList.lookup (op =) ty_binds s) + val instantiated_ty = map_type_tfree ty_inst ty + val const_head = Const(h,instantiated_ty) + val arg_ty = binder_types instantiated_ty + in list_comb(const_head, + (map(random_term' w (n+1) max ctxt cod_term_tab)(arg_ty))) + end + +(* Random value generator for various types *) +and random_term' w n max ctxt cod_term_tab (Type(s,S)) = +(* w => the weight on the actual level, initial value 1 + n => level counter, inital value 0 + max => maximal allowed number of levels +*) + let val thy = Proof_Context.theory_of ctxt + in + (case Symtab.lookup cod_term_tab s of + NONE => (* default random term generator ... + TODO : should do also something for functions ... 
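+                (the cases below handle int, nat, set and function types
+                 directly; records and datatypes are dispatched to
+                 random_record and random_datatype)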
*) + (case Type(s,S) of + Type(@{type_name int},_) => mk_number intT (IntInf.fromInt((Random.random_range 0 20) - 10)) + | Type(@{type_name nat},_) => mk_nat (IntInf.fromInt((Random.random_range 0 40))) + | Type(@{type_name set},_) => Const(@{const_name set},dummyT) $ + (random_term' w n max ctxt cod_term_tab (Type(@{type_name list},S))) + | Type(@{type_name fun},[T, Type(@{type_name bool}, [])]) => + Const(@{const_name set}, Type(@{type_name fun}, [Type(@{type_name list}, [T]), Type(s,S)])) $ + (random_term' w n max ctxt cod_term_tab (Type(@{type_name list},[T]))) + | Type(@{type_name fun},[T,U]) => absdummy T (random_term' w n max ctxt cod_term_tab U) + | _ => + (case is_record thy s of + (* The type is a user-defined record *) + SOME i => random_record w n max ctxt cod_term_tab i + | NONE => + (case BNF_LFP_Compat.get_info thy [] s of + (* The type is a user-defined data-type *) + SOME i => random_datatype w n max ctxt cod_term_tab s S i + | NONE => error("Cannot generate random value for type:" ^s^"\nCan only generate random values for int, nat, set, fun, and user-defined records and datatypes") + ) + ) + ) + | SOME R => R S) + end + |random_term' _ _ _ _ _ _ = error "Internal error in random_term: type not ground"; + + +fun random_term thy cod_term_tab typ = random_term' 1 0 10000 thy cod_term_tab typ +(* test section: + +val ttt = [HOLogic.intT,HOLogic.unitT,HOLogic.boolT, + HOLogic.mk_setT HOLogic.intT, + HOLogic.listT HOLogic.intT]; +map (random_term Symtab.empty) ttt; + + *) + +fun random_insts ctxt cod_tab vars () = + map(fn(x as Var(s,t))=> + (Thm.cterm_of ctxt x,Thm.cterm_of ctxt (random_term ctxt cod_tab t))) vars + +fun single_rand_inst_tac ctxt vars thm = let + val te = TestEnv.get_testenv ctxt + val cod_tab = #cod_term_tab(TestEnv.rep_testenv te) + val to_var_index = (fn Var(s,t) => (s,t)) o Thm.term_of + val insts = map (fn(x,y)=> (to_var_index x,y)) (random_insts ctxt cod_tab vars ()) +in + Seq.single (Drule.instantiate_normalize ([], insts) thm) +end + +fun random_inst_tac ctxt iters n thm = let + val _ = if iters > 0 andalso Config.get ctxt trace then tracing ("Random solving subgoal "^Int.toString(n)) else () + val single_tac = (* print_tac "A" THEN *) + (single_rand_inst_tac ctxt (BackendUtils.premvars n thm)) THEN + (* print_tac "B" THEN *) + (BackendUtils.solve_by_simp_tac ctxt n) +in + (FIRST (replicate iters single_tac)) thm +end + +end + +*} + + + +end diff --git a/src/test-gen/src/main/SMT/z3_replay_util.ML b/src/test-gen/src/main/SMT/z3_replay_util.ML new file mode 100644 index 0000000..34419ec --- /dev/null +++ b/src/test-gen/src/main/SMT/z3_replay_util.ML @@ -0,0 +1,155 @@ +(* Title: HOL/Tools/SMT/z3_replay_util.ML + Author: Sascha Boehme, TU Muenchen + +Helper functions required for Z3 proof replay. 
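+This is the counterpart of Old_SMT/old_z3_proof_tools.ML for the current SMT
+module, restricted to the theorem-net, proof-combinator, COMP and simpset
+helpers needed during replay.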
+*) + +signature Z3_REPLAY_UTIL = +sig + (*theorem nets*) + val thm_net_of: ('a -> thm) -> 'a list -> 'a Net.net + val net_instances: (int * thm) Net.net -> cterm -> (int * thm) list + + (*proof combinators*) + val under_assumption: (thm -> thm) -> cterm -> thm + val discharge: thm -> thm -> thm + + (*a faster COMP*) + type compose_data = cterm list * (cterm -> cterm list) * thm + val precompose: (cterm -> cterm list) -> thm -> compose_data + val precompose2: (cterm -> cterm * cterm) -> thm -> compose_data + val compose: compose_data -> thm -> thm + + (*simpset*) + val add_simproc: Simplifier.simproc -> Context.generic -> Context.generic + val make_simpset: Proof.context -> thm list -> simpset +end; + +structure Z3_Replay_Util: Z3_REPLAY_UTIL = +struct + +(* theorem nets *) + +fun thm_net_of f xthms = + let fun insert xthm = Net.insert_term (K false) (Thm.prop_of (f xthm), xthm) + in fold insert xthms Net.empty end + +fun maybe_instantiate ct thm = + try Thm.first_order_match (Thm.cprop_of thm, ct) + |> Option.map (fn inst => Thm.instantiate inst thm) + +local + fun instances_from_net match f net ct = + let + val lookup = if match then Net.match_term else Net.unify_term + val xthms = lookup net (Thm.term_of ct) + fun select ct = map_filter (f (maybe_instantiate ct)) xthms + fun select' ct = + let val thm = Thm.trivial ct + in map_filter (f (try (fn rule => rule COMP thm))) xthms end + in (case select ct of [] => select' ct | xthms' => xthms') end +in + +fun net_instances net = + instances_from_net false (fn f => fn (i, thm) => Option.map (pair i) (f thm)) + net + +end + + +(* proof combinators *) + +fun under_assumption f ct = + let val ct' = SMT_Util.mk_cprop ct in Thm.implies_intr ct' (f (Thm.assume ct')) end + +fun discharge p pq = Thm.implies_elim pq p + + +(* a faster COMP *) + +type compose_data = cterm list * (cterm -> cterm list) * thm + +fun list2 (x, y) = [x, y] + +fun precompose f rule : compose_data = (f (Thm.cprem_of rule 1), f, rule) +fun precompose2 f rule : compose_data = precompose (list2 o f) rule + +fun compose (cvs, f, rule) thm = + discharge thm + (Thm.instantiate ([], map (dest_Var o Thm.term_of) cvs ~~ f (Thm.cprop_of thm)) rule) + + +(* simpset *) + +local + val antisym_le1 = mk_meta_eq @{thm order_class.antisym_conv} + val antisym_le2 = mk_meta_eq @{thm linorder_class.antisym_conv2} + val antisym_less1 = mk_meta_eq @{thm linorder_class.antisym_conv1} + val antisym_less2 = mk_meta_eq @{thm linorder_class.antisym_conv3} + + fun eq_prop t thm = HOLogic.mk_Trueprop t aconv Thm.prop_of thm + fun dest_binop ((c as Const _) $ t $ u) = (c, t, u) + | dest_binop t = raise TERM ("dest_binop", [t]) + + fun prove_antisym_le ctxt ct = + let + val (le, r, s) = dest_binop (Thm.term_of ct) + val less = Const (@{const_name less}, Term.fastype_of le) + val prems = Simplifier.prems_of ctxt + in + (case find_first (eq_prop (le $ s $ r)) prems of + NONE => + find_first (eq_prop (HOLogic.mk_not (less $ r $ s))) prems + |> Option.map (fn thm => thm RS antisym_less1) + | SOME thm => SOME (thm RS antisym_le1)) + end + handle THM _ => NONE + + fun prove_antisym_less ctxt ct = + let + val (less, r, s) = dest_binop (HOLogic.dest_not (Thm.term_of ct)) + val le = Const (@{const_name less_eq}, Term.fastype_of less) + val prems = Simplifier.prems_of ctxt + in + (case find_first (eq_prop (le $ r $ s)) prems of + NONE => + find_first (eq_prop (HOLogic.mk_not (less $ s $ r))) prems + |> Option.map (fn thm => thm RS antisym_less2) + | SOME thm => SOME (thm RS antisym_le2)) + end + handle THM _ => NONE 
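+  (* default simpset for Z3 proof replay: arithmetic and logical simp rules
+     plus the fast_int_arith and antisymmetry simprocs defined above; kept as
+     generic data so that add_simproc can extend it *)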
+ + val basic_simpset = + simpset_of (put_simpset HOL_ss @{context} + addsimps @{thms field_simps times_divide_eq_right times_divide_eq_left arith_special + arith_simps rel_simps array_rules z3div_def z3mod_def NO_MATCH_def} + addsimprocs [@{simproc numeral_divmod}, + Simplifier.make_simproc @{context} "fast_int_arith" + {lhss = [@{term "(m::int) < n"}, @{term "(m::int) \ n"}, @{term "(m::int) = n"}], + proc = K Lin_Arith.simproc}, + Simplifier.make_simproc @{context} "antisym_le" + {lhss = [@{term "(x::'a::order) \ y"}], + proc = K prove_antisym_le}, + Simplifier.make_simproc @{context} "antisym_less" + {lhss = [@{term "\ (x::'a::linorder) < y"}], + proc = K prove_antisym_less}]) + + structure Simpset = Generic_Data + ( + type T = simpset + val empty = basic_simpset + val extend = I + val merge = Simplifier.merge_ss + ) +in + +fun add_simproc simproc context = + Simpset.map (simpset_map (Context.proof_of context) + (fn ctxt => ctxt addsimprocs [simproc])) context + +fun make_simpset ctxt rules = + simpset_of (put_simpset (Simpset.get (Context.Proof ctxt)) ctxt addsimps rules) + +end + +end; diff --git a/src/test-gen/src/main/SMTBackend.thy b/src/test-gen/src/main/SMTBackend.thy new file mode 100644 index 0000000..c9df909 --- /dev/null +++ b/src/test-gen/src/main/SMTBackend.thy @@ -0,0 +1,450 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * TestGen.thy --- the core of the HOL-TestGen System. + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2010 ETH Zurich, Switzerland + * 2008-2013 Achim D. Brucker, Germany + * 2009-2013 Universite Paris-Sud, France + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ ******************************************************************************) +(* $Id:$ *) + +chapter {* The SMT backend *} + +theory SMTBackend +imports + clocks + BackendUtils + "smt_patch/Old_SMT_patch" + "new_smt_patch/SMT_patch" + +begin + + +ML{* + +signature SMTBACKEND = sig + (* The main tactic to call the SMT backend on a PO *) + val testgen_smt_tac : Proof.context -> int -> tactic +end + +*} + +(* Common part between the two interfaces *) + +lemma add_bound: "\ P \ (size x) <= k \ \ P" by(auto) + +ML{* + +(* Backported from Isabelle 13-2 *) +fun matches_subterm thy (pat, obj) = + let + fun msub bounds obj = Pattern.matches thy (pat, obj) orelse + (case obj of + Abs (x, T, t) => msub (bounds + 1) (snd (Term.dest_abs (Name.bound bounds, T, t))) + | t $ u => msub bounds t orelse msub bounds u + | _ => false) + in msub 0 obj end; + +*} + + +(* Old interface (deprecated, will be removed in next versions) *) + +declare [[old_smt_solver = z3]] +declare [[old_z3_options = "AUTO_CONFIG=false MBQI=false"]] +declare [[old_smt_oracle = true]] +declare [[old_z3_with_extensions = true]] +declare [[old_smt_datatypes = true]] + +(* abbreviation "trigger == Old_SMT_patch.trigger" +abbreviation "pat == Old_SMT_patch.pat" *) + +lemma old_trigger_intr: "(x = y) \ Old_SMT_patch.trigger [[Old_SMT_patch.pat (x)]] (x = y)" +by(simp add: Old_SMT_patch.trigger_def) + + +ML{* + +structure OldSMTBackend : SMTBACKEND = struct + +fun trig_thm thy thm = + let val is_trig = matches_subterm thy (@{const Old_SMT_patch.trigger}, Thm.prop_of thm) + orelse not (exists_subterm is_Var (Thm.prop_of thm)) + in + if is_trig then thm else thm RS @{thm "old_trigger_intr"} handle THM _ => thm + end + +fun pretty_insts ctxt msg insts = let + fun mkeq (x,y) = Const(@{const_name HOL.eq}, dummyT) $ x $ y + val pretty_insts = map (fn inst => (Syntax.pretty_term ctxt (mkeq inst))) insts +in + Pretty.string_of (Pretty.big_list (msg^":") pretty_insts) +end + +fun scan_def (Const (@{const_name HOL.eq}, _) $ t $ u) = (t,u) + | scan_def _ = error ("Unexpected SMT counterexample format") + +fun mkinsts thy vars ce tr = let + val defs = map scan_def ce + val _ = if tr then tracing (pretty_insts @{context} "Raw SMT counterexample" defs) else () + val defs' = defs @ (map swap defs) + + (* The defs that are directly usable *) + fun is_Free_or_Var t = (is_Free t) orelse (is_Var t) + val real_defs = filter (fn (x,_) => is_Free_or_Var x) defs' + val _ = if tr then tracing (pretty_insts @{context} "SMT counterexample after filtering real defs" real_defs) else () + + (* The defs of functions that we have to infer from their partial definition via equalities + (might want to change this by looking directly at ce) *) + fun insert (t1, t2, u) acc = case acc of + [] => [(t1, [t2], [u])] + | (t1', t2s, us)::acc => if t1' = t1 then (t1', t2::t2s, u::us)::acc else (t1', t2s, us)::(insert (t1, t2, u) acc) + fun aggregate_fun_defs acc l = case l of + [] => acc + | (t1 $ t2, u)::l => if is_Free_or_Var t1 then aggregate_fun_defs (insert (t1, t2, u) acc) l else (aggregate_fun_defs acc l) + | _::l => aggregate_fun_defs acc l + val aggregates = aggregate_fun_defs [] defs' + fun mk_body x Tx Tu t2s us = case t2s of + [t2] => (case us of [u] => u) + | t2::t2s => (case us of u::us => (Const (@{const_name HOL.If}, HOLogic.boolT --> Tu --> Tu --> Tu)) $ ((Const (@{const_name HOL.eq}, Tx --> Tx --> HOLogic.boolT)) $ x $ t2) $ u $ (mk_body x Tx Tu t2s us)) (* if x = t2 then u else mk_body x t2s us *) + fun mk_fun_defs l = case l of + [] => [] + | (t1, t2::t2s, 
u::us)::l => let + val Tx = fastype_of t2 + val name = "xMkFun" + val x = (name, Tx) + val Tu = fastype_of u + in (t1, absfree x (mk_body (Free x) Tx Tu (t2::t2s) (u::us)))::(mk_fun_defs l) end + val fun_defs = mk_fun_defs aggregates + val _ = if tr then tracing (pretty_insts @{context} "SMT counterexample with partial defs of functions" fun_defs) else () + + (* Generation of the instantiation *) + val all_defs = real_defs @ fun_defs + val var_insts = map (fn x => (x, subst_atomic all_defs x)) vars + val _ = if tr then tracing (pretty_insts @{context} "Generated instantiation from SMT counterexample" var_insts) else () +in + BackendUtils.certify_pairs thy var_insts +end + +val (profiling, profiling_setup) = Attrib.config_bool @{binding testgen_profiling} (K false); + +(* a wrapper around smt_tac' that fixes the provided counterexamples *) +fun smt_ce_tac ctxt rules = +Subgoal.FOCUS(fn {context, prems, schematics, ...} => (fn thm => +let + val prof_name = "SMT" + val start_tac = (if Config.get ctxt profiling + then Clocks.start_clock_tac prof_name + else all_tac) + val stop_tac = (if Config.get ctxt profiling + then Clocks.stop_clock_tac prof_name + else all_tac) +in + (start_tac + THEN (Old_SMT_patch_Solver.smt_tac' context (rules @ prems) 1) + THEN stop_tac) thm + |> Seq.hd + |> Seq.single + + handle Old_SMT_patch_Failure.SMT (Old_SMT_patch_Failure.Counterexample {is_real_cex = is_real_cex', + free_constraints, + const_defs}) => + let + val _ = if Config.get ctxt profiling then Clocks.stop_clock prof_name else () + val inv_insts = map swap (snd schematics) + val term_insts = BackendUtils.uncertify_pairs inv_insts + val free_constraints' = map (subst_atomic term_insts) free_constraints + val const_defs' = map (subst_atomic term_insts) const_defs + in + raise Old_SMT_patch_Failure.SMT + (Old_SMT_patch_Failure.Counterexample {is_real_cex = is_real_cex', + free_constraints = free_constraints', + const_defs = const_defs'}) + end + + | exc => let val _ = if Config.get ctxt profiling + then Clocks.stop_clock prof_name + else () + in + raise exc + end + +end)) ctxt + + +fun try_inst_tac ctxt insts n thm = let + val thm' = Drule.instantiate_normalize ([], insts) thm +in + BackendUtils.solve_by_simp_or_auto_tac ctxt n thm' +end + +fun smt_inst_tac ctxt rules n thm = let + val prem = Logic.nth_prem(n, Thm.prop_of thm) + val neg = @{const Pure.imp} + $ prem + $ (@{const "Trueprop"} $ @{const "False"}) + val goal = Goal.init (Thm.cterm_of ctxt neg) +in + (Seq.hd (smt_ce_tac ctxt rules 1 goal); Seq.empty) + handle Old_SMT_patch_Failure.SMT (Old_SMT_patch_Failure.Counterexample {free_constraints, const_defs, ...}) + => try_inst_tac ctxt (mkinsts ctxt (BackendUtils.premvars n thm) (free_constraints @ const_defs) (Config.get ctxt TestEnv.smt_model)) n thm + | Old_SMT_patch_Failure.SMT _ => Seq.empty +end + +(* fun add_bound_tac ctxt bound t = + Subgoal.FOCUS_PARAMS(fn {context, schematics, ...} => let + val thy = Proof_Context.theory_of context + val ct = Thm.instantiate_cterm schematics (cterm_of thy t) + val bound_term = cterm_of thy (HOLogic.mk_nat bound) + val xtype = ctyp_of thy (TVar (("'a",0), @{sort "Nat.size"})) + val tinst = (xtype, ctyp_of_term ct) + val k = cterm_of thy (Var (("k",0), @{typ "nat"})) + val x = cterm_of thy (Var (("x",0), type_of t)) + val inst1 = (k, bound_term) + val inst2 = (x, ct) + in + TacticPatch.res_terminst_tac [tinst] [inst1, inst2] @{thm add_bound} 1 + end) ctxt + +fun add_bounds_tac ctxt bound ts = let + fun next (t, tac) = tac THEN' (add_bound_tac ctxt bound t) +in 
+ List.foldl next (K all_tac) ts +end + +fun bounded_smt_tac ctxt bound rules = + Subgoal.FOCUS_PARAMS(fn {context, ...} => + (fn thm => let + val thy = Proof_Context.theory_of context + val datatype_vars = filter (fn x => isDataType thy (type_of x)) + (premvars 1 thm) + in + EVERY[add_bounds_tac context bound datatype_vars 1, + smt_inst_tac context rules 1, + ALLGOALS (full_simp_tac context)] thm + end)) ctxt *) + +fun unbounded_smt_tac ctxt rules = + Subgoal.FOCUS_PARAMS(fn {context, ...} => + (fn thm => + EVERY[smt_inst_tac context rules 1, + ALLGOALS (full_simp_tac context)] thm + )) ctxt + +fun testgen_smt_tac ctxt = + let + val thy = Proof_Context.theory_of ctxt + val smt_facts = map (trig_thm thy) (TestEnv.get_smt_facts ctxt) + in + unbounded_smt_tac ctxt smt_facts + (* bounded_smt_tac ctxt (Config.get ctxt TestEnv.depth) smt_facts *) + end + +end + +*} + + +(* New interface *) + +declare [[smt_solver = z3]] +declare [[z3_options = "AUTO_CONFIG=false smt.mbqi=false"]] +declare [[smt_oracle = true]] +declare [[z3_extensions = true]] + +lemma trigger_intr: "(x = y) \ SMT_patch.trigger (SMT_patch.Symb_Cons (SMT_patch.Symb_Cons (SMT_patch.pat (x)) SMT_patch.Symb_Nil) SMT_patch.Symb_Nil) (x = y)" +by(simp add: SMT_patch.trigger_def) + + +ML{* + +structure NewSMTBackend : SMTBACKEND = struct + +fun trig_thm thy thm = + let val is_trig = matches_subterm thy (@{const SMT_patch.trigger}, Thm.prop_of thm) + orelse not (exists_subterm is_Var (Thm.prop_of thm)) + in + if is_trig then thm else thm RS @{thm "trigger_intr"} handle THM _ => thm + end + +fun pretty_insts ctxt msg insts = let + fun mkeq (x,y) = Const(@{const_name HOL.eq}, dummyT) $ x $ y + val pretty_insts = map (fn inst => (Syntax.pretty_term ctxt (mkeq inst))) insts +in + Pretty.string_of (Pretty.big_list (msg^":") pretty_insts) +end + +fun mkinsts thy vars defs tr = let + val _ = if tr then tracing (pretty_insts @{context} "Raw SMT counterexample" defs) else () + val defs' = defs @ (map swap defs) + + (* The defs that are directly usable *) + fun is_Free_or_Var t = (is_Free t) orelse (is_Var t) + val real_defs = filter (fn (x,_) => is_Free_or_Var x) defs' + val _ = if tr then tracing (pretty_insts @{context} "SMT counterexample after filtering real defs" real_defs) else () + + (* The defs of functions that we have to infer from their partial definition via equalities + (might want to change this by looking directly at ce) *) + fun insert (t1, t2, u) acc = case acc of + [] => [(t1, [t2], [u])] + | (t1', t2s, us)::acc => if t1' = t1 then (t1', t2::t2s, u::us)::acc else (t1', t2s, us)::(insert (t1, t2, u) acc) + fun aggregate_fun_defs acc l = case l of + [] => acc + | (t1 $ t2, u)::l => if is_Free_or_Var t1 then aggregate_fun_defs (insert (t1, t2, u) acc) l else (aggregate_fun_defs acc l) + | _::l => aggregate_fun_defs acc l + val aggregates = aggregate_fun_defs [] defs' + fun mk_body x Tx Tu t2s us = case t2s of + [t2] => (case us of [u] => u) + | t2::t2s => (case us of u::us => (Const (@{const_name HOL.If}, HOLogic.boolT --> Tu --> Tu --> Tu)) $ ((Const (@{const_name HOL.eq}, Tx --> Tx --> HOLogic.boolT)) $ x $ t2) $ u $ (mk_body x Tx Tu t2s us)) (* if x = t2 then u else mk_body x t2s us *) + fun mk_fun_defs l = case l of + [] => [] + | (t1, t2::t2s, u::us)::l => let + val Tx = fastype_of t2 + val name = "xMkFun" + val x = (name, Tx) + val Tu = fastype_of u + in (t1, absfree x (mk_body (Free x) Tx Tu (t2::t2s) (u::us)))::(mk_fun_defs l) end + val fun_defs = mk_fun_defs aggregates + val _ = if tr then tracing (pretty_insts 
@{context} "SMT counterexample with partial defs of functions" fun_defs) else () + + (* Generation of the instantiation *) + val all_defs = real_defs @ fun_defs + val var_insts = map (fn x => (x, subst_atomic all_defs x)) vars + val _ = if tr then tracing (pretty_insts @{context} "Generated instantiation from SMT counterexample" var_insts) else () +in + BackendUtils.certify_pairs thy var_insts +end + +val (profiling, profiling_setup) = Attrib.config_bool @{binding testgen_profiling} (K false); + +(* a wrapper around smt_tac' that fixes the provided counterexamples *) +fun smt_ce_tac ctxt rules = +Subgoal.FOCUS(fn {context, prems, schematics, ...} => (fn thm => +let + val prof_name = "SMT" + val start_tac = (if Config.get ctxt profiling + then Clocks.start_clock_tac prof_name + else all_tac) + val stop_tac = (if Config.get ctxt profiling + then Clocks.stop_clock_tac prof_name + else all_tac) +in + (start_tac + THEN (SMT_patch_Solver.smt_get_model_tac context (rules @ prems) 1) + THEN stop_tac) thm + |> Seq.hd + |> Seq.single + + handle SMT_patch_Solver.SMT_Model {const_defs} => + let + val _ = if Config.get ctxt profiling then Clocks.stop_clock prof_name else () + val inv_insts = map swap (snd schematics) + val term_insts = BackendUtils.uncertify_pairs inv_insts + (* val free_constraints' = map (subst_atomic term_insts) free_constraints *) + val const_defs' = map (apply2 (subst_atomic term_insts)) const_defs + in + raise SMT_patch_Solver.SMT_Model {const_defs = const_defs'} + end + + | exc => let val _ = if Config.get ctxt profiling + then Clocks.stop_clock prof_name + else () + in + raise exc + end + +end)) ctxt + + +fun try_inst_tac ctxt insts n thm = let + val thm' = Drule.instantiate_normalize ([], insts) thm +in + BackendUtils.solve_by_simp_or_auto_tac ctxt n thm' +end + +fun smt_inst_tac ctxt rules n thm = let + val prem = Logic.nth_prem(n, Thm.prop_of thm) + val neg = @{const Pure.imp} + $ prem + $ (@{const "Trueprop"} $ @{const "False"}) + val goal = Goal.init (Thm.cterm_of ctxt neg) +in + (Seq.hd (smt_ce_tac ctxt rules 1 goal); Seq.empty) + handle SMT_patch_Solver.SMT_Model {const_defs} + => try_inst_tac ctxt (mkinsts ctxt (BackendUtils.premvars n thm) ((* free_constraints @ *) const_defs) (Config.get ctxt TestEnv.smt_model)) n thm +end + +fun unbounded_smt_tac ctxt rules = + Subgoal.FOCUS_PARAMS(fn {context, ...} => + (fn thm => + EVERY[smt_inst_tac context rules 1, + ALLGOALS (full_simp_tac context)] thm + )) ctxt + +fun testgen_smt_tac ctxt = + let + val thy = Proof_Context.theory_of ctxt + val smt_facts = map (trig_thm thy) (TestEnv.get_smt_facts ctxt) + in + unbounded_smt_tac ctxt smt_facts + end + +end + +*} + + +(* Choice of the interface *) + +ML{* + +structure SMTBackend : SMTBACKEND = struct + + fun testgen_smt_tac ctxt = + if Config.get ctxt TestEnv.SMT then + OldSMTBackend.testgen_smt_tac ctxt + else if Config.get ctxt TestEnv.SMT2 then + NewSMTBackend.testgen_smt_tac ctxt + else + K no_tac + +end + +*} + + + +end diff --git a/src/test-gen/src/main/SharedMemory.thy b/src/test-gen/src/main/SharedMemory.thy new file mode 100644 index 0000000..3ded9c4 --- /dev/null +++ b/src/test-gen/src/main/SharedMemory.thy @@ -0,0 +1,1389 @@ +chapter{* A Shared-Memory-Model*} + +theory SharedMemory +imports Main +begin +section {*Shared Memory Model\label{SharedMemoryThy}*} +subsection{* Prerequisites *} + +text{* Prerequisite: a generalization of @{thm [source] fun_upd_def}: @{thm fun_upd_def}. + It represents updating modulo a sharing equivalence, i.e. 
an equivalence relation
+ on parts of the domain of a memory. *}
+
+definition fun_upd_equivp :: "('\ \ '\ \ bool) \ ('\ \ '\) \ '\ \ '\ \ ('\ \ '\)"
+where "fun_upd_equivp eq f a b = (\x. if eq x a then b else f x)"
+
+
+--{*This lemma is the same as @{thm [source] Fun.fun_upd_same}: @{thm Fun.fun_upd_same}, applied
+ to our generalization @{thm fun_upd_equivp_def} of @{thm fun_upd_def}. It states that the
+ function @{term "fun_upd_equivp (op =) f x y"} is equal to @{term f} if and only if
+ @{term "f x = y"}.*}
+
+lemma fun_upd_equivp_iff: "((fun_upd_equivp (op =) f x y) = f) = (f x = y)"
+ by (simp add :fun_upd_equivp_def, safe, erule subst, auto)
+
+--{*Now we prove the same lemma for an arbitrary equivalence relation @{term "equivp eqv"}
+ instead of the equality @{term "op ="}. For this case, we split the lemma into two parts:
+ the lemma @{term "fun_upd_equivp_iff_part1"} proves the case
+ @{term "eq (f a) b \ eq (fun_upd_equivp eqv f a b z) (f z) "}, and the second part,
+ the lemma @{term "fun_upd_equivp_iff_part2"}, proves the case
+ @{term "equivp eqv \ fun_upd_equivp eqv f a b = f \ f a = b"}. *}
+
+lemma fun_upd_equivp_iff_part1:
+ assumes is_equivp: "equivp R"
+ and 2: "(\z. R x z \ R (f z) y) "
+ shows "R (fun_upd_equivp R f x y z) (f z)"
+ using assms
+ unfolding fun_upd_equivp_def
+ by (auto simp: Equiv_Relations.equivp_reflp Equiv_Relations.equivp_symp)
+
+lemma fun_upd_equivp_iff_part2:
+ assumes is_equivp: "equivp R"
+ shows "fun_upd_equivp R f x y = f \ f x = y"
+ using assms
+ apply (simp add :fun_upd_equivp_def, safe)
+ apply (erule subst, auto simp: Equiv_Relations.equivp_reflp)
+done
+
+--{*Just another way to formalise @{thm fun_upd_equivp_iff_part2} without using the strong equality.*}
+
+lemma
+ assumes is_equivp:"equivp R"
+ and 2: "(\z. R x z \ R (fun_upd_equivp R f x y z) (f z))"
+ shows "R y (f x)"
+ using assms
+ by (simp add: fun_upd_equivp_def Equiv_Relations.equivp_symp equivp_reflp)
+
+text{*This lemma is the same as @{thm fun_upd_equivp_iff_part1}, where @{term "(op =)"} is
+ generalized by another equivalence relation.*}
+
+lemma fun_upd_equivp_idem:
+ assumes image:"f x = y"
+ shows "(fun_upd_equivp (op =) f x y) = f"
+ using assms
+ by (simp only: fun_upd_equivp_iff)
+
+lemma fun_upd_equivp_triv :
+ "fun_upd_equivp (op =) f x (f x) = f "
+ by (simp only: fun_upd_equivp_iff)
+
+--{*This is the generalization of @{thm fun_upd_equivp_triv} to a given equivalence relation.*}
+
+lemma fun_upd_equivp_triv_part1 :
+ "equivp R \ (\z. R x z \fun_upd_equivp (R') f x (f x) z) \ f x "
+ apply (auto simp:fun_upd_equivp_def)
+ apply (metis equivp_reflp)
+done
+
+lemma fun_upd_equivp_triv_part2 :
+ "equivp R \ (\z.
R x z \ f z ) \ fun_upd_equivp (R') f x (f x) x " + by (simp add:fun_upd_equivp_def equivp_reflp split: if_split) + +lemma fun_upd_equivp_apply [simp]: + "(fun_upd_equivp (op =) f x y) z = (if z = x then y else f z)" + by (simp only: fun_upd_equivp_def) + +--{*This is the generalization of @{thm fun_upd_equivp_apply} with e given equivalence relation and + not only with @{term "op ="}*} + +lemma fun_upd_equivp_apply1 [simp]: + "equivp R \(fun_upd_equivp R f x y) z = (if R z x then y else f z)" + by (simp add: fun_upd_equivp_def) + +lemma fun_upd_equivp_same: + "(fun_upd_equivp (op =) f x y) x = y" + by (simp only: fun_upd_equivp_def)simp + +--{*This is the generalization of @{thm fun_upd_equivp_same} with a given equivalence relation*} + +lemma fun_upd_equivp_same1: + assumes is_equivp:"equivp R" + shows "(fun_upd_equivp R f x y) x = y" + using assms + by (simp add: fun_upd_equivp_def equivp_reflp) + + +text{* For the special case that {@term eq} is just the equality {@term "op ="}, sharing +update and classical update are identical.*} + +lemma fun_upd_equivp_vs_fun_upd: "(fun_upd_equivp (op =)) = fun_upd" + by(rule ext, rule ext, rule ext,simp add:fun_upd_def fun_upd_equivp_def) + + + +subsection{* Definition of the shared-memory type*} + +typedef ('\, '\) memory = "{(\::'\ \'\, R). equivp R \ (\x y. R x y \ \ x = \ y)}" +proof + show "(Map.empty, (op =)) \ ?memory" + by (auto simp: identity_equivp) +qed + +fun memory_inv :: "('\ \ '\ option) \ ('\ \ '\ \ bool) \ bool" +where "memory_inv (Pair f R) = (equivp R \ (\x y. R x y \ f x = f y))" + + + +lemma Abs_Rep_memory [simp]: + "Abs_memory (Rep_memory \) = \" + by (simp add:Rep_memory_inverse) + +lemma memory_invariant [simp]: + "memory_inv \_rep = (Rep_memory (Abs_memory \_rep) = \_rep)" + using Rep_memory [of "Abs_memory \_rep"] Abs_memory_inverse mem_Collect_eq + case_prodE case_prodI2 memory_inv.simps + by smt + +lemma Pair_code_eq : + "Rep_memory \ = Pair (fst (Rep_memory \)) (snd (Rep_memory \))" + by (simp add: Product_Type.surjective_pairing) + +lemma snd_memory_equivp [simp]: "equivp(snd(Rep_memory \))" + by(insert Rep_memory[of \], auto) + +subsection{* Operations on Shared-Memory *} + +setup_lifting type_definition_memory (*Mandatory for lift_definition*) + +abbreviation mem_init :: "('a \ 'b option) \ ('a \ 'a \ bool)" +where + "mem_init \ (Map.empty, (op =))" + +lemma memory_init_eq_sound: + "mem_init \ {(\, R). equivp R \ (\x y. R x y \ \ x = \ y)}" +proof - + obtain mem and R + where Pair: "(mem, R) =mem_init " and Eq: "equivp R" + using identity_equivp by auto + have D1: "R = (op =)" + and D2: "mem = Map.empty " + using Pair prod.inject + by auto + moreover have inv_part2: "\ x y . R x y \ mem x = mem y" + unfolding D1 D2 by auto + ultimately show ?thesis + using Eq Abs_memory_cases Pair_inject Rep_memory_cases Rep_memory_inverse + identity_equivp memory_inv.elims(3) memory_invariant + by auto +qed + +lift_definition init :: "('\, '\) memory" + is "mem_init :: ('\ \ '\ option) \ ('\ \ '\ \ bool)" + using memory_init_eq_sound by simp + +(*code generation test*) +value "init::(nat,int)memory" +value "map (\x. the (fst (Rep_memory init)x)) [1 .. 10]" +value "take (10) (map (Pair Map.empty) [(op =) ])" +value "replicate 10 init" +term "Rep_memory \" +term "[(\::nat \ int, R )<-xs . equivp R \ (\x y. R x y \ \ x = \ y)]" + +(* deprecated >>>> *) +definition init_mem_list :: "'\ list \ (nat, '\) memory" +where "init_mem_list s = Abs_memory (let h = zip (map nat [0 .. int(length s)]) s + in foldl (\x (y,z). 
fun_upd x y (Some z)) + Map.empty h, + op =)" + +(* <<<<<<<<<<<<<<<< *) + + +subsubsection{* Memory Read Operation*} + +definition lookup :: "('\, '\) memory \ '\ \ '\" (infixl "$" 100) +where "\ $ x = the (fst (Rep_memory \) x)" + +subsubsection{* Memory Update Operation*} + +fun Pair_upd_lifter:: "('\ \ '\ option) \ ('\ \ '\ \ bool) \ '\ \ '\ \ + ('\ \ '\ option) \ ('\ \ '\ \ bool)" + where "Pair_upd_lifter ((f, R)) x y = (fun_upd_equivp R f x (Some y), R)" + +lemma update\<^sub>_sound': + assumes "\ \ {(\, R). equivp R \ (\x y. R x y \ \ x = \ y)}" + shows "Pair_upd_lifter \ x y \ {(\, R). equivp R \ (\x y. R x y \ \ x = \ y)}" +proof - + obtain mem and R + where Pair: "(mem, R) = \" and Eq: "equivp R" and Mem: "\ x y . R x y \ mem x = mem y" + using assms equivpE by auto + obtain mem' and R' + where Pair': "(mem', R') = Pair_upd_lifter \ x y" + using surjective_pairing by metis + have Def1: "mem' = fun_upd_equivp R mem x (Some y)" + and Def2: "R' = R" + using Pair Pair' by auto + have Eq': "equivp R'" + using Def2 Eq by auto + moreover have "\ y z . R' y z \ mem' y = mem' z" + using Mem equivp_symp equivp_transp + unfolding Def1 Def2 by (metis Eq fun_upd_equivp_def) + ultimately show ?thesis + using Pair' by auto +qed + +lemma memory_inv_update_rep: + "memory_inv (Pair_upd_lifter (Rep_memory \) x y)" +proof - + have *:"(equivp o snd) (Pair_upd_lifter (Rep_memory \) x y)" + and **:"(\w z. snd (Pair_upd_lifter (Rep_memory \) x y) w z \ + fst (Pair_upd_lifter (Rep_memory \) x y) w = + fst (Pair_upd_lifter (Rep_memory \) x y) z)" + using update\<^sub>_sound'[OF Rep_memory,of \ x y] + by auto + have ***:"memory_inv (Pair_upd_lifter (Rep_memory \) x y) = + memory_inv (fst (Pair_upd_lifter (Rep_memory \) x y), + snd (Pair_upd_lifter (Rep_memory \) x y))" + using surjective_pairing[of "(Pair_upd_lifter (Rep_memory \) x y)"] + by simp + show ?thesis + apply (simp only: * ** *** memory_inv.simps) + using * ** + apply simp + done +qed + +lift_definition update :: " ('\, '\) memory \'\ \ '\ \ ('\, '\) memory" ("_ '(_ :=\<^sub>$ _')" 100) + is Pair_upd_lifter + using update\<^sub>_sound' + by simp + +lemma update': "\ (x :=\<^sub>$ y) = Abs_memory (fun_upd_equivp (snd (Rep_memory \)) + (fst (Rep_memory \)) x (Some y), (snd (Rep_memory \)))" + using Rep_memory_inverse surjective_pairing Pair_upd_lifter.simps update.rep_eq + by metis + +(*update on list*) + +fun update_list_rep :: "('\ \ '\) \ ('\ \ '\ \ bool) \ ('\ \ '\ )list \ + ('\ \ '\) \ ('\ \ '\ \ bool)" +where "update_list_rep (f, R) nlist = + (foldl (\(f, R)(addr,val). + Pair_upd_lifter (f, R) addr val) (f, R) nlist)" + +lemma update_list_rep_p: + assumes 1: "P \" + and 2: "\src dst \. P \ \ P (Pair_upd_lifter \ src dst)" + shows "P (update_list_rep \ list)" + using 1 2 + apply (induct "list" arbitrary: \) + apply (force,safe) + apply (simp del: Pair_upd_lifter.simps) + using surjective_pairing + apply simp +done + +lemma update_list_rep_sound: + assumes 1: "\ \ {(\, R). equivp R \ (\x y. R x y \ \ x = \ y)}" + shows "update_list_rep \ (nlist) \ {(\, R). equivp R \ (\x y. 
R x y \ \ x = \ y)}" + using 1 + apply (elim update_list_rep_p) + apply (erule update\<^sub>_sound') +done + +lift_definition update_list :: "('\, '\) memory \ ('\ \ '\ )list \ ('\, '\) memory" (infixl "'/:=\<^sub>$" 30) + is update_list_rep + using update_list_rep_sound by simp + +lemma update_list_Nil[simp]: "(\ /:=\<^sub>$ []) = \" + unfolding update_list_def + by(simp,subst surjective_pairing[of "Rep_memory \"], + subst update_list_rep.simps, simp) + +lemma update_list_Cons[simp] : "(\ /:=\<^sub>$ ((a,b)#S)) = (\(a :=\<^sub>$ b) /:=\<^sub>$ S)" + unfolding update_list_def + apply(simp,subst surjective_pairing[of "Rep_memory \"], + subst update_list_rep.simps, simp) + apply(subst surjective_pairing[of "Rep_memory (\ (a :=\<^sub>$ b))"], + subst update_list_rep.simps, simp) + apply(simp add: update_def) + apply(subst Abs_memory_inverse) + apply (metis (lifting, mono_tags) Rep_memory update\<^sub>_sound') + apply simp +done + +text{* Type-invariant: *} + +lemma update\<^sub>_sound: + assumes "Rep_memory \ = (\', eq)" + shows "(fun_upd_equivp eq \' x (Some y), eq) \ + {(\, R). equivp R \ (\x y. R x y \ \ x = \ y)}" + using assms insert Rep_memory[of "\"] + apply(auto simp: fun_upd_equivp_def) + apply(rename_tac "xa" "xb", erule contrapos_np) + apply(rule_tac R=eq and y=xa in equivp_transp,simp) + apply(erule equivp_symp, simp_all) + apply(rename_tac "xa" "xb", erule contrapos_np) + apply(rule_tac R=eq and y=xb in equivp_transp,simp_all) +done + +subsubsection{* Memory Transfer Based on Sharing Transformation*} + +(*ref: def by Oto Havle *) + +fun transfer_rep :: "('\ \ '\) \ ('\\'\ \ bool) \ '\ \ '\ \ ('\\'\) \ ('\\'\ \ bool)" +where "transfer_rep (m, r) src dst = + (m o (id (dst := src)), + (\ x y . r ((id (dst := src)) x) ((id (dst := src)) y)))" + +lemma transfer_rep_simp : + "transfer_rep X src dst = + ((fst X) o (id (dst := src)), + (\ x y . (snd X) ((id (dst := src)) x) ((id (dst := src)) y)))" + by(subst surjective_pairing[of "X"], + subst transfer_rep.simps, simp) + +(*ref: proof by Oto Havle *) + +lemma transfer_rep_sound: + assumes "\ \ {(\, R). equivp R \ (\x y. R x y \ \ x = \ y)}" + shows "transfer_rep \ src dst \ {(\, R). equivp R \ (\x y. R x y \ \ x = \ y)}" +proof - + obtain mem and R + where P: "(mem, R) = \" and E: "equivp R" and M: "\ x y . R x y \ mem x = mem y" + using assms equivpE by auto + obtain mem' and R' + where P': "(mem', R') = transfer_rep \ src dst" + by (metis surj_pair) + have D1: "mem' = (mem o (id (dst := src)))" + and D2: "R' = (\ x y . R ((id (dst := src)) x) ((id (dst := src)) y))" + using P P' by auto + have "equivp R'" + using E unfolding D2 equivp_def by metis + moreover have "\ y z . R' y z \ mem' y = mem' z" + using M unfolding D1 D2 by auto + ultimately show ?thesis + using P' by auto +qed + +lift_definition + transfer :: "('\,'\)memory \ '\ \ '\ \ ('\, '\)memory" ("_ '(_ \ _')" [0,111,111]110) + is transfer_rep + using transfer_rep_sound + by simp + +lemma transfer_rep_sound2 : + "transfer_rep (Rep_memory \) a b \ + {(\, R). equivp R \ (\x y. R x y \ \ x = \ y)}" + by (metis (lifting, mono_tags) Rep_memory transfer_rep_sound) + +(* the following share_list construction is pretty indirect and motivated by code-generation + principles; why not a definition which is more direct ? e.g. :*) + +fun share_list2 :: "('\, '\) memory \ ('\ \ '\ )list \ ('\, '\) memory" (infix "'/\" 60) +where "\ /\ S = (foldl (\ \ (a,b). 
(\ (a\b))) \ S)" + +lemma sharelist2_Nil[simp] : "\ /\ [] = \" by simp + +lemma sharelist2_Cons[simp] : "\ /\ ((a,b)#S) = (\(a\b) /\ S)" by simp + +(* deprecated ??? >>> *) + +fun share_list_rep :: "('\ \ '\) \ ('\ \ '\ \ bool) \ ('\ \ '\ )list \ + ('\ \ '\) \ ('\ \ '\ \ bool)" +where "share_list_rep (f, R) nlist = + (foldl (\(f, R) (src,dst). transfer_rep (f, R) src dst) (f, R) nlist)" + +fun share_list_rep' :: "('\ \ '\) \ ('\ \ '\ \ bool) \ ('\ \ '\)list \ + ('\ \ '\) \ ('\ \ '\ \ bool)" +where "share_list_rep' (f, R) [] = (f, R)" + | "share_list_rep' (f, R) (n#nlist) = share_list_rep' (transfer_rep(f,R)(fst n)(snd n)) nlist" + +lemma share_list_rep'_p: + assumes 1: "P \" + and 2: " \src dst \. P \ \ P (transfer_rep \ src dst)" + shows "P (share_list_rep' \ list)" + using 1 2 + apply (induct "list" arbitrary: \ P) + apply force + apply safe + apply (simp del: transfer_rep.simps) + using surjective_pairing + apply metis +done + +lemma foldl_preserve_p: + assumes 1: "P mem" + and 2: "\y z mem . P mem \ P (f mem y z)" + shows "P (foldl (\a (y, z). f mem y z) mem list)" + using 1 2 + apply (induct "list" arbitrary: f mem , auto) + apply metis +done + +lemma share_list_rep_p: + assumes 1: "P \" + and 2: "\src dst \. P \ \ P (transfer_rep \ src dst)" + shows "P (share_list_rep \ list)" + using 1 2 + apply (induct "list" arbitrary: \) + apply force + apply safe + apply (simp del: transfer_rep.simps) + using surjective_pairing + apply metis +done + +text{* The modification of the underlying equivalence relation on adresses is only defined + on very strong conditions --- which are fulfilled for the empty memory, but difficult to + establish on a non-empty-one. And of course, the given relation must be proven to + be an equivalence relation. So, the case is geared towards shared-memory scenarios + where the sharing is defined initially once and for all. *} + +definition update\<^sub>R :: "('\, '\)memory \ ('\ \ '\ \ bool) \ ('\, '\)memory" ("_ :=\<^sub>R _" 100) +where "\ :=\<^sub>R R \ Abs_memory (fst(Rep_memory \), R)" + +definition lookup\<^sub>R :: "('\, '\)memory \ ('\ \ '\ \ bool)" ("$\<^sub>R _" 100) +where "$\<^sub>R \ \ (snd(Rep_memory \))" + +lemma update\<^sub>R_comp_lookup\<^sub>R: +assumes equiv : "equivp R" + and sharing_conform : " \ x y. R x y \ fst(Rep_memory \) x = fst(Rep_memory \) y" +shows "($\<^sub>R (\ :=\<^sub>R R)) = R" +unfolding lookup\<^sub>R_def update\<^sub>R_def +by(subst Abs_memory_inverse, simp_all add: equiv sharing_conform) + +subsection{* Sharing Relation Definition*} + +definition sharing :: "'\ \ ('\, '\)memory \ '\ \ bool" + ("(_ shares()\<^bsub>_\<^esub>/ _)" [201, 0, 201] 200) +where "(x shares\<^bsub>\\<^esub> y) \ (snd(Rep_memory \) x y)" + +definition Sharing :: "'\ set \ ('\, '\)memory \ '\ set \ bool" + ("(_ Shares()\<^bsub>_\<^esub>/ _)" [201, 0, 201] 200) +where "(X Shares\<^bsub>\\<^esub> Y) \ (\ x\X. \ y\Y. x shares\<^bsub>\\<^esub> y)" + +subsection{* Properties on Sharing Relation*} + +lemma sharing_charn: + "equivp (snd (Rep_memory \))" + by auto + +lemma sharing_charn': + assumes 1: "(x shares\<^bsub>\\<^esub> y)" + shows" (\R. equivp R \ R x y)" + by (auto simp add: snd_def equivp_def) + +lemma sharing_charn2: + shows"\x y. 
(equivp (snd (Rep_memory \)) \ (snd (Rep_memory \)) x y) " + using sharing_charn [THEN equivp_reflp ] + by (simp)fast + +--{*Lemma to show that @{thm sharing_def} is reflexive*} +lemma sharing_refl: "(x shares\<^bsub>\\<^esub> x)" + using insert Rep_memory[of "\"] + by (auto simp: sharing_def elim: equivp_reflp) + +--{*Lemma to show that @{thm sharing_def} is symetric*} +lemma sharing_sym [sym]: + assumes 1: "x shares\<^bsub>\\<^esub> y" + shows "y shares\<^bsub>\\<^esub> x" + using 1 Rep_memory[of "\"] + by (auto simp: sharing_def elim: equivp_symp) + +lemma sharing_commute : "x shares\<^bsub>\\<^esub> y = (y shares\<^bsub>\\<^esub> x)" + by(auto intro: sharing_sym) + +--{*Lemma to show that @{thm sharing_def} is transitive*} + +lemma sharing_trans [trans]: + assumes 1: "x shares\<^bsub>\\<^esub> y" + and 2: "y shares\<^bsub>\\<^esub> z" + shows "x shares\<^bsub>\\<^esub> z" + using assms insert Rep_memory[of "\"] + by(auto simp: sharing_def elim: equivp_transp) + +lemma shares_result: + assumes 1: "x shares\<^bsub>\\<^esub> y" + shows "fst (Rep_memory \) x = fst (Rep_memory \) y" + using 1 + unfolding sharing_def + using Rep_memory[of "\"] + by auto + +lemma sharing_init: + assumes 1: "i \ k" + shows "\(i shares\<^bsub>init\<^esub> k)" + unfolding sharing_def init_def + using 1 + by (auto simp: Abs_memory_inverse identity_equivp) + +lemma shares_init[simp]: "(x shares\<^bsub>init\<^esub> y) = (x=y)" + unfolding sharing_def init_def + by (metis init_def sharing_init sharing_def sharing_refl) + +lemma sharing_init_mem_list: + assumes 1: "i \ k" + shows "\(i shares\<^bsub>init_mem_list S\<^esub> k)" + unfolding sharing_def init_mem_list_def + using 1 + by (auto simp: Abs_memory_inverse identity_equivp) + +(* experimental: a simultaneous update to None for all elements in X and their equivalents. *) +definition reset :: "('\, '\) memory \ '\ set\ ('\, '\)memory" ("_ '(reset _')" 100) +where "\ (reset X) = (let (\',eq) = Rep_memory \; + eq' = \ a b. eq a b \ (\x\X. eq a x \ eq b x) + in if X={} then \ + else Abs_memory (fun_upd_equivp eq' \' (SOME x. x\X) None, eq))" + +lemma reset_mt : "\ (reset {}) = \" + unfolding reset_def Let_def + by simp + +lemma reset_sh : +assumes * : "(x shares\<^bsub>\\<^esub> y)" + and **: "x \ X" +shows "\ (reset X) $ y = None" +oops + +subsection{* Memory Domain Definition*} + +definition Domain :: "('\, '\)memory \ '\ set" +where "Domain \ = dom (fst (Rep_memory \))" + +subsection{* Properties on Memory Domain*} + +lemma Domain_charn: + assumes 1:"x \ Domain \" + shows "\ y. Some y = fst (Rep_memory \) x" + using 1 + by(auto simp: Domain_def) + +lemma Domain_charn1: + assumes 1:"x \ Domain \" + shows "\ y. 
the (Some y) = \ $ x" + using 1 + by(auto simp: Domain_def lookup_def) + +--{*This lemma says that if @{term "x"} and @{term "y"} are quivalent this + means that they are in the same set of equivalent classes*} + +lemma shares_dom [code_unfold, intro]: + assumes 1:"x shares\<^bsub>\\<^esub> y" + shows "(x \ Domain \) = (y \ Domain \)" + using insert Rep_memory[of "\"] 1 + by (auto simp: sharing_def Domain_def) + +lemma Domain_mono: + assumes 1: "x \ Domain \" + and 2: "(x shares\<^bsub>\\<^esub> y)" + shows "y \ Domain \" + using 1 2 Rep_memory[of "\"] + by (auto simp add: sharing_def Domain_def ) + +corollary Domain_nonshares : + assumes 1: "x \ Domain \" + and 2: "y \ Domain \ " + shows "\(x shares\<^bsub>\\<^esub> y)" + using 1 2 Domain_mono + by fast + +lemma Domain_init[simp] : "Domain init = {}" + unfolding init_def Domain_def + by(simp_all add:identity_equivp Abs_memory_inverse) + +lemma Domain_update[simp] :"Domain (\ (x :=\<^sub>$ y)) = (Domain \) \ {y . y shares\<^bsub>\\<^esub> x}" +unfolding update_def Domain_def sharing_def +proof (simp_all) + have * : "Pair_upd_lifter (Rep_memory \) x y \ {(\, R). equivp R \ (\x y. R x y \ \ x = \ y)}" + by (simp, metis (lifting, mono_tags) Rep_memory mem_Collect_eq update\<^sub>_sound') + have ** : "snd (Rep_memory \) x x" + by(metis equivp_reflp sharing_charn2) + show "dom (fst (Rep_memory (Abs_memory (Pair_upd_lifter (Rep_memory \) x y)))) = + dom (fst (Rep_memory \)) \ {y. snd (Rep_memory \) y x}" + apply(simp_all add: Abs_memory_inverse[OF *] ) + apply(subst surjective_pairing [of "(Rep_memory \)"]) + apply(subst Pair_upd_lifter.simps, simp) + apply(auto simp: ** fun_upd_equivp_def) + done +qed + +lemma Domain_share1: +assumes 1 : "a \ Domain \" + and 2 : "b \ Domain \" +shows "Domain (\(a\b)) = Domain \" +proof(simp_all add:Set.set_eq_iff, rule allI) + fix x + have ***: "transfer_rep (Rep_memory \) (id a) (id b) \ + {(\, R). equivp R \ (\x y. R x y \ \ x = \ y)}" + by (metis (lifting, mono_tags) Rep_memory transfer_rep_sound) + show "(x \ Domain (\ (a \ b))) = (x \ Domain \)" + unfolding sharing_def Domain_def transfer_def map_fun_def o_def + apply(subst Abs_memory_inverse[OF ***]) + apply(insert 1 2, simp add: o_def transfer_rep_simp Domain_def ) + apply(auto split: if_split if_split_asm ) + done +qed + +lemma Domain_share_tgt : + assumes 1:"a \ Domain \" + shows " b \ Domain (\ (a \ b))" + using 1 + unfolding sharing_def Domain_def transfer_def map_fun_def o_def id_def + apply(subst Abs_memory_inverse[OF transfer_rep_sound2]) + unfolding sharing_def Domain_def transfer_def map_fun_def o_def id_def + apply(simp add: o_def transfer_rep_simp Domain_def ) + apply(auto split: if_split) + done + +lemma Domain_share2 : +assumes 1 : "a \ Domain \" + and 2 : "b \ Domain \" +shows "Domain (\(a\b)) = (Domain \ - {x. 
x shares\<^bsub>\\<^esub> b} \ {b})" +proof(simp_all add:Set.set_eq_iff, auto) + fix x + assume 3 : "x \ SharedMemory.Domain (\ (a \ b))" + and 4 : "x \ b" + show "x \ SharedMemory.Domain \" + apply(insert 3 4) + unfolding sharing_def Domain_def transfer_def map_fun_def o_def id_def + apply(subst (asm) Abs_memory_inverse[OF transfer_rep_sound2]) + apply(insert 1 , simp add: o_def transfer_rep_simp Domain_def ) + apply(auto split: if_split if_split_asm ) + done +next + fix x + assume 3 : "x \ Domain (\ (a \ b))" + and 4 : "x \ b" + and 5 : "x shares\<^bsub>\\<^esub> b" + have ** : "x \ Domain \" using "2" "5" Domain_mono by (fast ) + show "False" + apply(insert 3 4 5, erule contrapos_pp, simp) + unfolding sharing_def Domain_def transfer_def map_fun_def o_def id_def + apply(subst Abs_memory_inverse[OF transfer_rep_sound2]) + apply(insert 1 , simp add: o_def transfer_rep_simp Domain_def ) + apply(auto split: if_split if_split_asm ) + using "**" SharedMemory.Domain_def domI apply fast + done +next + show "b \ Domain (\ (a \ b))" + using 1 Domain_share_tgt by fast +next + fix x + assume 3 : "x \ Domain \" + and 4 : "\ x shares\<^bsub>\\<^esub> b " + show " x \ Domain (\ (a \ b))" + unfolding sharing_def Domain_def transfer_def map_fun_def o_def id_def + apply(subst Abs_memory_inverse[OF transfer_rep_sound2]) + apply(insert 1 , simp add: o_def transfer_rep_simp Domain_def ) + apply(auto split: if_split if_split_asm ) + using "3" SharedMemory.Domain_def domD + apply fast + done +qed + +lemma Domain_share3: +assumes 1 : "a \ Domain \" +shows "Domain (\(a\b)) = (Domain \ - {b}) " +proof(simp_all add:Set.set_eq_iff, auto) + fix x + assume 3: "x \ Domain (\ (a \ b))" + show "x \ Domain \" + apply(insert 3) + unfolding sharing_def Domain_def transfer_def map_fun_def o_def id_def + apply(subst (asm) Abs_memory_inverse[OF transfer_rep_sound2]) + apply(insert 1 , simp add: o_def transfer_rep_simp Domain_def ) + apply(auto split: if_split if_split_asm ) + done +next + assume 3: "b \ Domain (\ (a \ b))" + show False + apply(insert 1 3) + apply(erule contrapos_pp[of "b \ SharedMemory.Domain (\ (a \ b))"], simp) + unfolding sharing_def Domain_def transfer_def map_fun_def o_def id_def + apply(subst Abs_memory_inverse[OF transfer_rep_sound2]) + apply(insert 1 , simp add: o_def transfer_rep_simp Domain_def ) + apply(auto split: if_split ) + done +next + fix x + assume 3: "x \ Domain \ " + and 4: "x \ b" + show "x \ Domain (\ (a \ b))" + apply(insert 3 4) + unfolding sharing_def Domain_def transfer_def map_fun_def o_def id_def + apply(subst Abs_memory_inverse[OF transfer_rep_sound2]) + apply(insert 1 , simp add: o_def transfer_rep_simp Domain_def ) + apply(auto split: if_split if_split_asm ) + done +qed + +lemma Domain_transfer : +"Domain (\(a\b)) = (if a \ Domain \ + then (Domain \ - {b}) + else if b \ Domain \ + then (Domain \ - {x. 
x shares\<^bsub>\\<^esub> b} \ {b}) + else Domain \ )" + using Domain_share1 Domain_share2 Domain_share3 + by metis + +lemma Domain_transfer_approx: + "Domain (\(a\b)) \ Domain (\) \ {b}" + by(auto simp: Domain_transfer) + +lemma Domain_update1: + "add \ Domain (\(add :=\<^sub>$ val))" + by (simp add: sharing_refl) + +subsection{* Sharing Relation and Memory Update*} + +lemma sharing_upd: "x shares\<^bsub>(\(a :=\<^sub>$ b))\<^esub> y = x shares\<^bsub>\\<^esub> y" + using insert Rep_memory[of "\"] + by(auto simp: sharing_def update_def Abs_memory_inverse[OF update\<^sub>_sound]) + +--{*this lemma says that if we do an update on an adress @{term "x"} all the elements that are + equivalent of @{term "x"} are updated*} + + +lemma update'': + "\ (x :=\<^sub>$ y) = Abs_memory(fun_upd_equivp (\x y. x shares\<^bsub>\\<^esub> y) (fst (Rep_memory \)) x (Some y), + snd (Rep_memory \))" + unfolding update_def sharing_def + by (metis update' update_def) + +theorem update_cancel: +assumes "x shares\<^bsub>\\<^esub> x'" +shows "\(x :=\<^sub>$ y)(x' :=\<^sub>$ z) = (\(x' :=\<^sub>$ z))" + proof - + have * : "(fun_upd_equivp(snd(Rep_memory \))(fst(Rep_memory \)) x (Some y),snd (Rep_memory \)) + \ {(\, R). equivp R \ (\x y. R x y \ \ x = \ y)}" + unfolding fun_upd_equivp_def + by(rule update\<^sub>_sound[simplified fun_upd_equivp_def], simp) + have ** : "\ R \. equivp R \ R x x' \ + fun_upd_equivp R (fun_upd_equivp R \ x (Some y)) x' (Some z) + = fun_upd_equivp R \ x' (Some z)" + unfolding fun_upd_equivp_def + apply(rule ext) + apply(case_tac "R xa x'", auto) + apply(erule contrapos_np, erule equivp_transp, simp_all) + done + show ?thesis + apply(simp add: update') + apply(insert sharing_charn assms[simplified sharing_def]) + apply(simp add: Abs_memory_inverse [OF *] **) + done +qed + +theorem update_commute: + assumes 1:"\ (x shares\<^bsub>\\<^esub> x')" + shows "(\(x :=\<^sub>$ y))(x' :=\<^sub>$ z) = (\(x':=\<^sub>$ z)(x :=\<^sub>$ y))" + proof - + have * : "\ x y.(fun_upd_equivp(snd(Rep_memory \))(fst(Rep_memory \)) x (Some y),snd (Rep_memory \)) + \ {(\, R). equivp R \ (\x y. R x y \ \ x = \ y)}" + unfolding fun_upd_equivp_def + by(rule update\<^sub>_sound[simplified fun_upd_equivp_def], simp) + have ** : "\ R \. 
equivp R \ \ R x x' \ + fun_upd_equivp R (fun_upd_equivp R \ x (Some y)) x' (Some z) = + fun_upd_equivp R (fun_upd_equivp R \ x' (Some z)) x (Some y)" + unfolding fun_upd_equivp_def + apply(rule ext) + apply(case_tac "R xa x'", auto) + apply(erule contrapos_np) + apply(frule equivp_transp, simp_all) + apply(erule equivp_symp, simp_all) + done + show ?thesis + apply(simp add: update') + apply(insert assms[simplified sharing_def]) + apply(simp add: Abs_memory_inverse [OF *] **) + done +qed + +subsection{* Properties on lookup and update wrt the Sharing Relation*} + +lemma update_triv: + assumes 1: "x shares\<^bsub>\\<^esub> y" + and 2: "y \ Domain \" + shows "\ (x :=\<^sub>$ (\ $ y)) = \" +proof - + { + fix z + assume zx: "z shares\<^bsub>\\<^esub> x" + then have zy: "z shares\<^bsub>\\<^esub> y" + using 1 by (rule sharing_trans) + have F: "y \ Domain \ \ x shares\<^bsub>\\<^esub> y + \ Some (the (fst (Rep_memory \) x)) = fst (Rep_memory \) y" + by(auto simp: Domain_def dest: shares_result) + have "Some (the (fst (Rep_memory \) y)) = fst (Rep_memory \) z" + using zx and shares_result [OF zy] shares_result [OF zx] + using F [OF 2 1] + by simp + } note 3 = this + show ?thesis + unfolding update'' lookup_def fun_upd_equivp_def + by (simp add: 3 Rep_memory_inverse if_cong) +qed + +lemma update_idem' : + assumes 1: "x shares\<^bsub>\\<^esub> y" + and 2: "x \ Domain \" + and 3: "\ $ x = z" + shows "\(y:=\<^sub>$ z) = \" +proof - + have * : "y \ Domain \" + by(simp add: shares_dom[OF 1, symmetric] 2) + have **: "\ (x :=\<^sub>$ (\ $ y)) = \" + using 1 2 * + by (simp add: update_triv) + also have "(\ $ y) = \ $ x" + by (simp only: lookup_def shares_result [OF 1]) + finally show ?thesis + using 1 2 3 sharing_sym update_triv + by fast +qed + +lemma update_idem : + assumes 2: "x \ Domain \" + and 3: "\ $ x = z" + shows "\(x:=\<^sub>$ z) = \" +proof - + show ?thesis + using 2 3 sharing_refl update_triv + by fast +qed + +lemma update_apply: "(\(x :=\<^sub>$ y)) $ z = (if z shares\<^bsub>\\<^esub> x then y else \ $ z)" +proof - + have *: "(\z. if z shares\<^bsub>\\<^esub> x then Some y else fst (Rep_memory \) z, snd (Rep_memory \)) + \ {(\, R). equivp R \ (\x y. 
R x y \ \ x = \ y)}" + unfolding sharing_def + by(rule update\<^sub>_sound[simplified fun_upd_equivp_def], simp) + show ?thesis + proof (cases "z shares\<^bsub>\\<^esub> x") + case True + assume A: "z shares\<^bsub>\\<^esub> x" + show "\ (x :=\<^sub>$ y) $ z = (if z shares\<^bsub>\\<^esub> x then y else \ $ z)" + unfolding update'' lookup_def fun_upd_equivp_def + by(simp add: Abs_memory_inverse [OF *]) + next + case False + assume A: "\ z shares\<^bsub>\\<^esub> x " + show "\ (x :=\<^sub>$ y) $ z = (if z shares\<^bsub>\\<^esub> x then y else \ $ z)" + unfolding update'' lookup_def fun_upd_equivp_def + by(simp add: Abs_memory_inverse [OF *]) + qed +qed + +lemma update_share: + assumes "z shares\<^bsub>\\<^esub> x" + shows "\(x :=\<^sub>$ a) $ z = a" + using assms + by (simp only: update_apply if_True) + +lemma update_other: + assumes "\(z shares\<^bsub>\\<^esub> x)" + shows "\(x :=\<^sub>$ a) $ z = \ $ z" + using assms + by (simp only: update_apply if_False) + +lemma lookup_update_rep: + assumes 1: "(snd (Rep_memory \')) x y" + shows "(fst (Pair_upd_lifter (Rep_memory \') src dst)) x = + (fst (Pair_upd_lifter (Rep_memory \') src dst)) y" + using 1 shares_result sharing_def sharing_upd update.rep_eq + by (metis (hide_lams, no_types) ) + +lemma lookup_update_rep'': + assumes 1: "x shares\<^bsub>\\<^esub> y" + shows " (\ (src :=\<^sub>$ dst)) $ x = (\ (src :=\<^sub>$ dst)) $ y" + using 1 lookup_def lookup_update_rep sharing_def update.rep_eq + by metis + +theorem memory_ext : + assumes * : "\ x y. (x shares\<^bsub>\\<^esub> y) = (x shares\<^bsub>\'\<^esub> y)" + and ** : "Domain \ = Domain \'" + and *** : "\ x. \ $ x = \' $ x" + shows "\ = \'" +apply(subst Rep_memory_inverse[symmetric]) +apply(subst (3) Rep_memory_inverse[symmetric]) +apply(rule arg_cong[of _ _ "Abs_memory"]) +apply(auto simp:Product_Type.prod_eq_iff) +proof - + show "fst (Rep_memory \) = fst (Rep_memory \')" + apply(rule ext, insert ** ***, simp add: SharedMemory.lookup_def Domain_def) + apply (metis domIff option.expand) + done +next + show "snd (Rep_memory \) = snd (Rep_memory \')" + by(rule ext, rule ext, insert *, simp add: sharing_def) +qed + +text{* Nice connection between sharing relation, domain of the memory and content equaltiy + on the one hand and equality on the other; this proves that our memory model is fully + abstract in these three operations. *} +corollary memory_ext2: "(\ = \') = ((\ x y. (x shares\<^bsub>\\<^esub> y) = (x shares\<^bsub>\'\<^esub> y)) + \ Domain \ = Domain \' + \ (\ x. \ $ x = \' $ x))" +by(auto intro: memory_ext) + +subsection{* Rules On Sharing and Memory Transfer *} + +(*memory transfer*) + +lemma transfer_rep_inv_E: + assumes 1 : "\ \ {(\, R). equivp R \ (\x y. R x y \ \ x = \ y)}" + and 2 : "memory_inv (transfer_rep \ src dst) \ Q" + shows Q + using assms transfer_rep_sound[of \] + by (auto simp: Abs_memory_inverse) + +lemma transfer_rep_fst1: + assumes 1: "\ = fst(transfer_rep (Rep_memory \') src dst)" + shows "\x. x = dst \ \ x = (fst (Rep_memory \')) src" + using 1 unfolding transfer_rep_simp + by simp + +lemma transfer_rep_fst2: + assumes 1: "\ = fst(transfer_rep (Rep_memory \') src dst)" + shows "\x. 
x \ dst \ \ x = (fst (Rep_memory \')) (id x)" + using 1 unfolding transfer_rep_simp + by simp + +lemma lookup_transfer_rep': + "(fst (transfer_rep (Rep_memory \') src dst)) src = + (fst (transfer_rep (Rep_memory \') src dst)) dst" + using Rep_memory [of "\'"] + apply (erule_tac src= "src" and dst = "dst" in transfer_rep_inv_E) + apply (rotate_tac 1) + apply (subst (asm) surjective_pairing[of "(transfer_rep (Rep_memory \') src dst)"]) + unfolding memory_inv.simps + apply (erule conjE) + apply (erule allE)+ + apply (erule impE) + unfolding transfer_rep_simp + apply auto + using equivp_reflp snd_memory_equivp + apply metis +done + +theorem share_transfer: + "x shares\<^bsub>\(a \ b)\<^esub> y = ( (y = b \ (x = b + \ (x \ b \ x shares\<^bsub>\\<^esub> a))) \ + (y \ b \ ((x = b \ a shares\<^bsub>\\<^esub> y) + \ (x \ b \ x shares\<^bsub>\\<^esub> y))))" +unfolding sharing_def transfer_def +unfolding transfer_def map_fun_def o_def id_def +apply(subst Abs_memory_inverse[OF transfer_rep_sound2], + simp add: transfer_rep_simp) +apply (metis equivp_reflp sharing_charn2) +done + +lemma transfer_share:"a shares\<^bsub>\(a \ b)\<^esub> b" + by(simp add: share_transfer sharing_refl) + +lemma transfer_share_sym:"a shares\<^bsub>\ (b \ a)\<^esub> b" + by(simp add: share_transfer sharing_refl) + +lemma transfer_share_mono:"x shares\<^bsub>\\<^esub> y \ \(x shares\<^bsub>\\<^esub> b) \ (x shares\<^bsub>\ (a \ b)\<^esub> y)" + by(auto simp: share_transfer sharing_refl) + +lemma transfer_share_charn: + "\(x shares\<^bsub>\\<^esub> b) \ \(y shares\<^bsub>\\<^esub> b) \ x shares\<^bsub>\(a \ b)\<^esub> y = x shares\<^bsub>\\<^esub> y" + by(auto simp: share_transfer sharing_refl) + +lemma transfer_share_trans:"(a shares\<^bsub>\\<^esub> x) \ (x shares\<^bsub>\(a \ b)\<^esub> b)" + by(auto simp: share_transfer sharing_refl sharing_sym) + +lemma transfer_share_trans_sym:"(a shares\<^bsub>\\<^esub> y) \ (b shares\<^bsub>(\(a \ b))\<^esub> y)" + using transfer_share_trans sharing_sym + by fast + +lemma transfer_share_trans': "(a shares\<^bsub>(\(a \ b))\<^esub> z) \ (b shares\<^bsub>(\(a \ b))\<^esub> z)" + using transfer_share sharing_sym sharing_trans + by fast + +lemma transfer_tri : "x shares\<^bsub>\ (a \ b)\<^esub> y \ x shares\<^bsub>\\<^esub> b \ b shares\<^bsub>\\<^esub> y \ x shares\<^bsub>\\<^esub> y" +by (metis sharing_sym transfer_share_charn) + +lemma transfer_tri' : "\ x shares\<^bsub>\ (a \ b)\<^esub> y \ y shares\<^bsub>\\<^esub> b \ \ x shares\<^bsub>\\<^esub> y" +by (metis sharing_sym sharing_trans transfer_share_mono) + +lemma transfer_dest' : +assumes 1: "a shares\<^bsub>\ (a \ b)\<^esub> y" + and 2: "b \ y" + shows "a shares\<^bsub>\\<^esub> y" + using assms + by(auto simp: share_transfer sharing_refl sharing_sym) + +lemma transfer_dest : +assumes 1: "\(x shares\<^bsub>\\<^esub> a)" + and 2: "x \ b" + and 3: "x shares\<^bsub>\\<^esub> b" + shows "\(x shares\<^bsub>\ (a \ b)\<^esub> b)" + using assms + by(auto simp: share_transfer sharing_refl sharing_sym) + +lemma transfer_dest'':"x = b \ y shares\<^bsub>\\<^esub> a \ x shares\<^bsub>\(a \ b)\<^esub> y" +by (metis sharing_sym transfer_share_trans_sym) + +thm share_transfer (* the universal catch-all *) + transfer_share + transfer_share_sym + sharing_sym [THEN transfer_share_trans] + (* transfer_share_trans *) + sharing_sym [THEN transfer_share_trans_sym] + (* transfer_share_trans_sym *) + transfer_share_trans' + transfer_dest'' + transfer_dest' + transfer_tri' + transfer_share_mono + transfer_tri + transfer_share_charn + transfer_dest + 
+subsection{* Properties on Memory Transfer and Lookup *} + +lemma transfer_share_lookup1: "(\(x \ y)) $ x = \ $ x" + using lookup_transfer_rep' transfer_rep_fst1 + unfolding lookup_def transfer.rep_eq + by metis + +lemma transfer_share_lookup2: + "(\(x \ y)) $ y = \ $ x" + using transfer_rep_fst1 + unfolding transfer.rep_eq lookup_def + by metis + +lemma add\<^sub>e_not_share_lookup: + assumes 1: "\(x shares\<^bsub>\\<^esub> z)" + and 2: "\(y shares\<^bsub>\\<^esub> z)" + shows "\ (x \ y) $ z = \ $ z" + using assms + unfolding sharing_def lookup_def transfer.rep_eq + using id_def sharing_def sharing_refl transfer_rep_fst2 + by metis + +lemma transfer_share_dom: + assumes 1: "z \ Domain \" + and 2: "\(y shares\<^bsub>\\<^esub> z)" + shows "(\(x \ y)) $ z = \ $ z" + using assms + unfolding Domain_def sharing_def lookup_def + using 2 transfer.rep_eq id_apply sharing_refl transfer_rep_fst2 + by metis + +lemma shares_result': + assumes 1: "(x shares\<^bsub>\\<^esub> y)" + shows " \ $ x = \ $ y" + using assms lookup_def shares_result + by metis + +lemma transfer_share_cancel1: + assumes 1: "(x shares\<^bsub>\\<^esub> z)" + shows "(\(x \ y)) $ z = \ $ x" + using 1 transfer.rep_eq transfer_share_trans lookup_def + transfer_rep_fst1 shares_result + by (metis) + +subsection{* Test on Sharing and Transfer via smt ... *} + +(*test to see the needed lemmas by smt*) +lemma "\x y. x \ y \ \(x shares\<^bsub>\\<^esub> y) \ + \ $ x > \ $ y \ \(3 \ (4::nat))= \' \ + \'' = (\'(3 :=\<^sub>$ ((\' $ 4) + 2))) \ + x \ 3 \ x \ 4 \ y \ 3 \ y \ 4 \ \'' $ x > \'' $ y" +by (smt add\<^sub>e_not_share_lookup share_transfer update_apply) + +subsection{* Instrumentation of the smt Solver*} + +lemma transfer_share_charn_smt : + "\(i shares\<^bsub>\\<^esub> k') \ + \(k shares\<^bsub>\\<^esub> k') \ + i shares\<^bsub>\(i' \ k')\<^esub> k = i shares\<^bsub>\\<^esub> k" + using transfer_share_charn + by fast + +lemma add\<^sub>e_not_share_lookup_smt: + "\(x shares\<^bsub>\\<^esub> z)\ \(y shares\<^bsub>\\<^esub> z)\ (\ (x \ y) $ z) = (\ $ z)" + using add\<^sub>e_not_share_lookup + by auto + +lemma transfer_share_dom_smt: + "z \ Domain \ \ \(y shares\<^bsub>\\<^esub> z)\ (\(x \ y)) $ z = \ $ z" + using transfer_share_dom + by auto + +lemma transfer_share_cancel1_smt: + "(x shares\<^bsub>\\<^esub> z)\ (\(x \ y)) $ z = \ $ x" + using transfer_share_cancel1 + by auto + +lemma lookup_update_rep''_smt: + "x shares\<^bsub>\\<^esub> y\(\ (src :=\<^sub>$ dst)) $ x = (\ (src :=\<^sub>$ dst)) $ y" + using lookup_update_rep'' + by auto + +theorem update_commute_smt: + "\ (x shares\<^bsub>\\<^esub> x') \ ((\(x :=\<^sub>$ y))(x' :=\<^sub>$ z)) = (\(x':=\<^sub>$ z)(x :=\<^sub>$ y))" + using update_commute + by auto + +theorem update_cancel_smt: + "(x shares\<^bsub>\\<^esub> x')\ (\(x :=\<^sub>$ y)(x' :=\<^sub>$ z)) = (\(x' :=\<^sub>$ z))" + using update_cancel + by auto + +lemma update_other_smt: + "\(z shares\<^bsub>\\<^esub> x)\ (\(x :=\<^sub>$ a) $ z) = \ $ z" + using update_other + by auto + +lemma update_share_smt: + "(z shares\<^bsub>\\<^esub> x) \ (\(x :=\<^sub>$ a) $ z) = a" + using update_share + by auto + +lemma update_idem_smt : + "(x shares\<^bsub>\\<^esub> y)\ x \ Domain \ \ (\ $ x = z) \ (\(x:=\<^sub>$ z)) = \" + using update_idem + by fast + +lemma update_triv_smt: + "(x shares\<^bsub>\\<^esub> y) \ y \ Domain \ \ (\ (x :=\<^sub>$ (\ $ y))) = \" + using update_triv + by auto + +lemma shares_result_smt: + "x shares\<^bsub>\\<^esub> y\ \ $ x = \ $ y" + using shares_result' + by fast + +lemma shares_dom_smt : + "x 
shares\<^bsub>\\<^esub> y \ (x \ Domain \) = (y \ Domain \)" + using shares_dom by fast + +lemma sharing_sym_smt : + "x shares\<^bsub>\\<^esub> y\y shares\<^bsub>\\<^esub> x" + using sharing_sym + by auto + +lemma sharing_trans_smt: + "x shares\<^bsub>\\<^esub> y \ y shares\<^bsub>\\<^esub> z \ x shares\<^bsub>\\<^esub> z" + using sharing_trans + by auto + +lemma nat_0_le_smt: "0 \ z \ int (nat z) = z" + by transfer clarsimp + +lemma nat_le_0_smt: "0 > z \ int (nat z) = 0" + by transfer clarsimp + +lemma transfer_share_trans_smt: + "(x shares\<^bsub>\\<^esub> z) \(z shares\<^bsub>\(x \ y)\<^esub> y)" + using transfer_share_trans + by fast + +lemma transfer_share_mono_smt: + "(x shares\<^bsub>\\<^esub> y)\ \(x shares\<^bsub>\\<^esub> y')\ (x shares\<^bsub>\ (x' \ y')\<^esub> y)" + using transfer_share_mono + by fast + +lemma transfer_share_trans'_smt: + "(x shares\<^bsub>(\(x \ y))\<^esub> z)\(y shares\<^bsub>(\(x \ y))\<^esub> z) " + using transfer_share_trans' + by fast + +lemma transfer_share_old_new_trans_smt: + "(x shares\<^bsub>\\<^esub> z)\(y shares\<^bsub>(\(x \ y))\<^esub> z) " + using transfer_share_trans_sym + by fast + +lemma transfer_share_old_new_trans1_smt: + "a shares\<^bsub>\\<^esub> b \ a shares\<^bsub>\\<^esub> c \ + (c shares\<^bsub>(\ (a \ d))\<^esub> b ) " + using transfer_share_trans_smt sharing_sym_smt sharing_trans_smt + by metis + +lemma Domain_mono_smt: + "x \ Domain \ \ (x shares\<^bsub>\\<^esub> y)\y \ Domain \" + using Domain_mono + by fast + +lemma sharing_upd_smt: "x shares\<^bsub>(\(a :=\<^sub>$ b))\<^esub> y = x shares\<^bsub>\\<^esub> y" + using sharing_upd + by fast + +lemma sharing_init_mem_list_smt : + "i \ k \ \(i shares\<^bsub>init_mem_list S\<^esub> k)" + using sharing_init_mem_list + by fast + +lemma mem1_smt: + "(\(a\b) $ a) = (\(a\b) $ b)" + using transfer_share_lookup1 transfer_share_lookup2 + by metis + +lemmas sharing_smt = sharing_refl transfer_share + sharing_commute nat_le_0_smt + nat_0_le_smt sharing_sym_smt + transfer_share_lookup1 transfer_share_lookup2 + sharing_init_mem_list_smt sharing_upd_smt + shares_result_smt transfer_share_old_new_trans_smt + transfer_share_trans_smt mem1_smt + update_share_smt shares_dom_smt + Domain_mono_smt sharing_trans_smt + transfer_share_cancel1_smt transfer_share_trans'_smt + update_apply update_other_smt + update_cancel_smt transfer_share_old_new_trans1_smt + lookup_update_rep''_smt update_triv_smt + transfer_share_mono_smt update_commute_smt + transfer_share_dom_smt add\<^sub>e_not_share_lookup_smt + update_idem_smt transfer_share_charn_smt +(* @Chantal : if you want, you could add a generic smt config here ... *) + +subsection {*Tools for the initialization of the memory*} + +definition update_memory\<^sub>i\<^sub>n\<^sub>i\<^sub>t :: "'address list \ 'value list \ ('address, 'value)memory" +where "update_memory\<^sub>i\<^sub>n\<^sub>i\<^sub>t ADD VAL = + (foldl (\ m (x, y). (m (x:=\<^sub>$y))) init (zip ADD VAL))" + + +definition share_memory\<^sub>i\<^sub>n\<^sub>i\<^sub>t :: "'address list \ 'address list \ + ('address, 'value)memory \('address,'value)memory" +where "share_memory\<^sub>i\<^sub>n\<^sub>i\<^sub>t SRC DST m = + (foldl (\m (x, y). (m (x\y))) m (zip SRC DST))" + +definition memory\<^sub>i\<^sub>n\<^sub>i\<^sub>t :: "'address list \ 'value list \ 'address list \ + ('address,'value)memory" +where "memory\<^sub>i\<^sub>n\<^sub>i\<^sub>t SRC VAL DST = + foldl (\ m (SRC, DST). 
share_memory\<^sub>i\<^sub>n\<^sub>i\<^sub>t SRC DST m) + (update_memory\<^sub>i\<^sub>n\<^sub>i\<^sub>t SRC VAL) [(SRC, DST)]" + +lemmas sharing_refl_smt = sharing_refl (* legacy *) + + +subsection{* An Intrastructure for Global Memory Spaces *} +text{* Memory spaces are common concepts in Operating System (OS) design since it is + a major objective of OS kernels to separate logical, linear memory spaces + belonging to different processes (or in other terminologies such as PiKeOS: tasks) + from each other. We achieve this goal by modeling the adresses of memory spaces + as a \emph{pair} of a subject (e.g. process or task, denominated by a process-id or + task-id) and a location (a conventional adress). *} +text{* + Our model is still generic - we do not impose a particular type for subjects + or locations (which could be modeled in a concrete context by an enumeration type as well as + integers of bitvector representations); for the latter, however, we require that + they are instances of the type class @{typ "'\::comm_semiring_1"} assuring that there + is a minimum of infrastructure for address calculation: there must exist a + @{term 0}-element, a distinct @{term 1}-element and an addition operation with + the usual properties. +*} + +fun init\<^sub>g\<^sub>l\<^sub>o\<^sub>b\<^sub>a\<^sub>l\<^sub>m\<^sub>e\<^sub>m :: "(('sub\'loc::comm_semiring_1), '\) memory + \ ('sub\'loc) \ '\ list + \ (('sub\'loc), '\) memory" ("_ |> _ <| _" [60,60,60] 70) +where "\ |> start <| [] = \" + | "\ |> (sub,loc) <| (a # S) = ((\((sub,loc):=\<^sub>$ a)) |> (sub, loc+1)<| S)" + +lemma Domain_mem_init_Nil : "Domain(\ |> start <| []) = Domain \" +by simp + +subsubsection{* Example *} + +type_synonym task_id = int +type_synonym loc = int + +type_synonym global_mem = "((task_id\loc), int)memory" + +definition \\<^sub>0 :: "global_mem" +where "\\<^sub>0 \ init |> (0,0) <| [0,0,0,0] + |> (2,0) <| [0,0] + |> (4,0) <| [2,0]" + +(* why does this not work ? +value "(\\<^sub>0 ((4, 0)\(2, 1))) $ (4, 0)" +*) + +lemma \\<^sub>0_Domain: "Domain \\<^sub>0 = {(4, 1), (4, 0), (2, 1), (2, 0), (0, 3), (0, 2), (0, 1), (0, 0)}" +unfolding \\<^sub>0_def +by(simp add: sharing_upd) + +subsection{* Memory Transfer Based on Sharing Closure (Experimental) *} + +text{* One might have a fundamentally different understanding on memory transfer --- at least as +far as the sharing relation is concerned. The prior definition of sharing is based on the idea that +the overridden part is ``carved out'' of the prior equivalence. Instead of transforming the +equivalence relation, one might think of transfer as an operation where the to be shared memory is +synchronized and then the equivalence relation closed via reflexive-transitive closure. *} + +definition transfer' :: "('a,'b)memory \ 'a \ 'a \ ('a, 'b)memory" ("_ '(_ \\\ _')" [0,111,111]110) +where "\(i \\\ k) = + (\(i :=\<^sub>$ (\ $ k)) :=\<^sub>R (rtranclp(\x y. ($\<^sub>R \) x y \ (x=i \ y = k) \ (x=k \ y = i))))" + + +lemma transfer'_rep_sound: + "(fst(Rep_memory (\(i:=\<^sub>$(\ $ k)))),(\xa ya. ($\<^sub>R \) xa ya \ xa = x \ ya = y \ xa = y \ ya = x)\<^sup>*\<^sup>*) + \ + {(\, R). equivp R \ (\x y. R x y \ \ x = \ y)}" +unfolding update_def +proof(auto) + let ?R' = "((\xa ya. 
($\<^sub>R \) xa ya \ xa = x \ ya = y \ xa = y \ ya = x)\<^sup>*\<^sup>*)" + have E : "equivp ($\<^sub>R \)" unfolding lookup\<^sub>R_def by (metis snd_memory_equivp) + have fact1 : "symp ?R'" + unfolding symp_def + apply (auto) + apply (erule Transitive_Closure.rtranclp_induct,auto) + apply (drule E[THEN equivp_symp]) + by (metis (lifting, full_types) converse_rtranclp_into_rtranclp)+ + have fact2 : "transp ?R'" + unfolding transp_def + by (metis (lifting, no_types) rtranclp_trans) + have fact3 : "reflp ?R'" + unfolding reflp_def + by (metis (lifting) rtranclp.rtrancl_refl) + show "equivp (\xa ya. ($\<^sub>R \) xa ya \ xa = x \ ya = y \ xa = y \ ya = x)\<^sup>*\<^sup>*" + using fact1 fact2 fact3 equivpI by auto +next + fix xa ya + assume H : "(\xa ya. ($\<^sub>R \) xa ya \ xa = x \ ya = y \ xa = y \ ya = x)\<^sup>*\<^sup>* xa ya" + have * : "(fun_upd_equivp (snd (Rep_memory \)) (fst (Rep_memory \)) i (Some (\ $ k)), + snd (Rep_memory \)) + \ {(\, R). equivp R \ (\x y. R x y \ \ x = \ y)}" oops +(* + show "fst (Rep_memory (Abs_memory (Pair_upd_lifter (Rep_memory \) i (\ $ k)))) xa = + fst (Rep_memory (Abs_memory (Pair_upd_lifter (Rep_memory \) i (\ $ k)))) ya" + apply(subst surjective_pairing[of "(Rep_memory \)"]) + apply(subst Pair_upd_lifter.simps) + apply(subst (4)surjective_pairing[of "(Rep_memory \)"]) + apply(subst Pair_upd_lifter.simps) + apply(auto simp: Abs_memory_inverse[OF *]) + apply(simp add: SharedMemory.lookup_def) + apply(insert H, simp add: SharedMemory.lookup\<^sub>R_def) +oops +*) + +subsection{* Framing Conditions on Shared Memories (Experimental)*} + +text{* The Frame of an action --- or a monadic operation --- is the smallest possible subset of the +domain of a memory, in which the action has effect, i.e. it modifies only locations +in this set.*} + + +(* Experimental. Known problem: should run over all memory-maps, + but only one fixed sharing relation R, in which also the + equivs of x in R were collected... Frame\<^bsub>R\<^esub> A ? Fibered Framing ?*) +definition Frame :: "(('\, '\)memory \ ('\, '\)memory) \ '\ set" +where "Frame A \ Least(\X. \ \. (\(reset X)) = ((A \)(reset X)))" + +(* hard. *) +lemma Frame_update : "Frame (\\. \(x :=\<^sub>$ y)) = {x}" +oops + +(* hard *) +lemma Frame_compose : "Frame (A o B) \ Frame A \ Frame B" +oops + + +notation transfer ("add\<^sub>e") (* legacy *) +lemmas add\<^sub>e_def = transfer_def (* legacy *) +lemmas add\<^sub>e_rep_eq = transfer.rep_eq (* legacy, was add\<^sub>e.rep_eq *) +lemmas transfer_share_old_new_trans = transfer_share_trans_sym (* legacy *) +lemmas sharing_commute_smt = sharing_commute (*legacy *) +lemmas update_apply_smt = update_apply (* legacy *) +lemmas transfer_share_lookup2_smt = transfer_share_lookup2 (* legacy *) +lemmas transfer_share_lookup1_smt = transfer_share_lookup1 (* legacy *) +lemmas transfer_share_smt = SharedMemory.transfer_share (* legacy *) + + +end diff --git a/src/test-gen/src/main/Term_Tactics.thy b/src/test-gen/src/main/Term_Tactics.thy new file mode 100644 index 0000000..fcd632f --- /dev/null +++ b/src/test-gen/src/main/Term_Tactics.thy @@ -0,0 +1,334 @@ + +(***************************************************************************** + * HOL-OCL --- an interactive theorem-prover for for UML/OCL + * http://www.brucker.ch/projects/hol-ocl/ + * + * isabelle2009_kernel_patch.ML --- Isabelle kernel extensions + * This file is part of HOL-OCL. + * + * Copyright (c) 2003-2007 ETH Zurich, Switzerland + * 2008-2009 Achim D. Brucker, Germany + * + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id: isabelle2009_kernel_patch.ML 9289 2012-01-30 18:22:21Z krieger $ *) + + +theory Term_Tactics +imports Main +begin + + +(* Code for Isabelle2004/5-Kernel. Should go to Tactic - structure. *) +(* (up to make_elim_preserve, which is already there ... *) +(* >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> *) + +(* +Like lift_inst_rule but takes terms, not strings, where the terms may contain +Free variables referring to parameters of the subgoal (following the +conventions of the string-based version). + +insts: [...,(vj,tj),...] + +Both vj and tj must be type correct and have the same types as in the +string-based version (i.e. have the types *before lifting* over the +context of subgoal i. In particular, tj may not contain loose bound +variables. In order to use lift_inst_rule with subterms of the subgoal, +these have to be substituted by free variables before. + +NB: the types in insts must be correctly instantiated already, + i.e. Tinsts is not applied to insts. + + +An Example in HOL: +================== + +We assume st = [| [] @ y = y @ []; + !!a list. list @ y = y @ list ==> (a # list) @ y = y @ a # list |] + ==> (x @ y = y @ x)" : Thm.thm, +i = 2, rule = sym and a standard test-based substitution +sinsts = [("t","(a # list) @ y")]. + +Then standard lift_inst_rule (st, i, sinsts, rule) yields: + + "(!!a list. list @ y = y @ list ==> ?s1 a list = (a # list) @ y) + ==> (!!a list. list @ y = y @ list ==> (a # list) @ y = ?s1 a list)" + +i.e. a lifted version of 'sym'. + +Internally, the variables were set to: + + val params = [("a", "'a"), ("list", "'a List.list")]; + val inc = 1; + val used = ["'a"]:: string list; + val Tinsts = [(("'a", 0), "'a list")] : (Term.indexname * Thm.ctyp) list; + val insts = [("?t", "(a # list) @ y")] : (Thm.cterm * Thm.cterm) list; + +in this case. 
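+
+(Editorial aside, not from the original notes: with the certified interface
+ defined below, the same instantiation can be written directly as
+ (ctyp * ctyp) and (cterm * cterm) pairs.  The following lines are only an
+ illustrative, hypothetical sketch -- ctxt is an assumed Proof.context, and
+ the rule sym with subgoal 2 merely mirror the example above:
+
+   val tinsts = [(Thm.ctyp_of ctxt (TVar (("'a", 0), @{sort type})),
+                  Thm.ctyp_of ctxt @{typ "'a list"})];
+   val insts  = [(Thm.cterm_of ctxt (Var (("t", 0), @{typ "'a list"})),
+                  Thm.cterm_of ctxt @{term "(a # list) @ y"})];
+   val tac    = Term_Tactics.res_terminst_tac ctxt tinsts insts @{thm sym} 2;
+
+ The sort annotation @{sort type} is an assumption here; the original example
+ leaves the sort of 'a implicit.)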
+ +Now we emulate the effect of "lift_inst_rule" by "term_lift_inst_rule", +we simply have to convert the substitutions: + + val Tinsts'= map (fn(x,y) => (x,#T(rep_ctyp y))) Tinsts; + (*val Tinst' = [(("'a", 0), "'a List.list")]:(Term.indexname*Term.typ)list*) + val insts' = map (fn(x,y)=>(dest_Var(term_of x), term_of y)) insts; + (*[((("t", 0), "'a List.list"), + Const ("List.op @", "['a List.list, 'a List.list] => 'a List.list") + $(Const("List.list.Cons","['a, 'a List.list] => 'a List.list") $ + Free ("a", "'a") $ Free ("list", "'a List.list")) $ + Free ("y", "'a List.list"))] + :((Term.indexname * Term.typ) * Term.term) list *) + +Thus, we get: + + lift_inst_rule (st, i, sinsts, rule) + = term_lift_inst_rule (st, i, Tinsts', insts', rule) + + +where (Tinsts', insts') = read_insts_in_state (st, i, sinsts, rule). +This explains the connection between string- and term-based +versions. + +Unfortunately, the term_lift_inst_rule exported from the +the structure Tactics (in Isabelle/src/Pure/tactic.ML) +DOES NOT satisfy the desired equality - in subtle special +cases related to paramaters of a subgoal in st, it behaves +different. Therefore, a re-implementation based on +lift_inst_rule-code is done here. + +On top of this, the definition of term based substitution +tactic variants for res_inst_tac, eres_inst_tac, dres_inst_tac is +straigt forward. + +COULD BE RealIZED BY MORE GENERAL VERSION OF gen_compose_inst_tac, TOO. + +*) +ML{* +signature TERM_TACTICS = +sig +val params_of_state : thm -> int -> (string * typ) list +(* +val read_insts_in_state : thm * int * (indexname * string) list * thm + -> (ctyp * ctyp) list * (cterm * cterm) list +*) +val term_lift_inst_rule : Proof.context + -> thm * int * (ctyp * ctyp) list * (cterm * cterm) list * thm -> thm +val compose_terminst_tac: Proof.context + -> (ctyp * ctyp) list + -> (cterm * cterm) list -> bool * thm * int -> int -> tactic +val res_terminst_tac : Proof.context + -> (ctyp * ctyp) list + -> (cterm * cterm) list -> thm -> int -> tactic +val eres_terminst_tac : Proof.context + -> (ctyp * ctyp) list + -> (cterm * cterm) list -> thm -> int -> tactic +val make_elim_preserve : Proof.context -> thm -> thm +val cut_terminst_tac : Proof.context + ->(ctyp * ctyp) list + -> (cterm * cterm) list -> thm -> int -> tactic +val forw_terminst_tac : Proof.context + ->(ctyp * ctyp) list + -> (cterm * cterm) list -> thm -> int -> tactic +val dres_terminst_tac : Proof.context + -> (ctyp * ctyp) list + -> (cterm * cterm) list -> thm -> int -> tactic +(* +val convert_tinsts : ((indexname * sort) * typ) list -> theory -> (ctyp * ctyp) list +val convert_substs : ((indexname * typ) * term) list -> theory -> (cterm * cterm) list +*) +val subgoal_terminst_tac: Proof.context + -> (ctyp * ctyp) list + -> term -> int -> tactic +end; + +*} + + +ML{* +structure Term_Tactics : TERM_TACTICS = +struct + +open Thm; +(* copied code from Isabelle/src/Pure/tactic.ML, + essentially for debugging purposes ... 
(version 2005) + >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> + *) + +(*Determine print names of goal parameters (reversed)*) +fun innermost_params i st = (fn goal => + Term.rename_wrt_term goal (Logic.strip_params goal)) (Logic.get_goal (Thm.prop_of st) i); +(*params of subgoal i as they are printed*) +fun params_of_state st i = rev (innermost_params i st); +fun cterm_fun f ct = Thm.global_cterm_of (Thm.theory_of_cterm ct) (f (Thm.term_of ct)); +(********* + +(*read instantiations with respect to subgoal i of proof state st*) + fun read_insts_in_state (st, i, sinsts, rule) = + let val thy = Thm.theory_of_thm st + and params = params_of_state st i + and rts = Drule.types_sorts rule and (types,sorts) = Drule.types_sorts st + fun types'(a, ~1) = (case AList.lookup (op =) params a of NONE => types (a, ~1) | sm => sm) + | types' ixn = types ixn; + val used = Drule.add_used rule (Drule.add_used st []); + in read_insts thy rts (types',sorts) used sinsts end; + +*************) + + + +(* copied code from Isabelle/src/Pure/tactic.ML, + but modified. (deletion of its first line + and expansion of the parameters) ... (version 2005) + >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> + *) + +fun term_lift_inst_rule ctxt (st, i, Tinsts, insts, rule) = +let (*val {maxidx,...} = rep_thm st*) + val maxidx = Thm.maxidx_of st + and params = params_of_state st i + val paramTs = map #2 params + and inc = maxidx+1 + fun ctyp_fun f cT = Thm.ctyp_of ctxt (f (Thm.typ_of cT)); + fun liftvar (Var ((a,j), T)) = Var((a, j+inc), paramTs---> Logic.incr_tvar inc T) + | liftvar t = raise TERM("Variable expected", [t]); + fun liftterm t + = fold_rev absfree params (Logic.incr_indexes([],paramTs,inc) t) + (*Lifts instantiation pair over params*) + (*fun liftpair (cv,ct) = (cterm_fun liftvar cv, cterm_fun liftterm ct)*) + + val to_var_index = (fn Var(s,t) => (s,t)) o Thm.term_of + val to_tvar_index = (fn TVar(s,t) => (s,t)) o Thm.typ_of + + fun liftpair (cv,ct) = ((to_var_index o (cterm_fun liftvar)) cv, + cterm_fun liftterm ct) + fun lifttvar (c,tt) = ((to_tvar_index o ctyp_fun (Logic.incr_tvar inc)) c, + ctyp_fun (Logic.incr_tvar inc) tt) +in Drule.instantiate_normalize (map lifttvar Tinsts, map liftpair insts) + (Thm.lift_rule (Thm.cprem_of st i) rule) +end; + + +(* copied code from Isabelle/src/Pure/tactic.ML, (gen_compose_inst_tac) + but modified. (definition unfolding, exchange of lifting function, + adoption of parameters) ... (version 2005) + >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> + *) + +fun compose_terminst_tac ctxt Tinsts insts (bires_flg, rule, nsubgoal) i st = + if i > nprems_of st then no_tac st + else st |> + (compose_tac ctxt + (bires_flg, term_lift_inst_rule ctxt (st, i, Tinsts, insts, rule), nsubgoal) + i + handle TERM (msg,_) => (warning msg; no_tac) + | THM (msg,_,_) => (warning msg; no_tac)); + + +(*"Resolve" version. Note: res_inst_tac cannot behave sensibly if the + terms that are substituted contain (term or type) unknowns from the + goal, because it is unable to instantiate goal unknowns at the same time. + + The type checker is instructed not to freeze flexible type vars that + were introduced during type inference and still remain in the term at the + end. This increases flexibility but can introduce schematic type vars in + goals. +*) + +(* copied code from Isabelle/src/Pure/tactic.ML, (res_inst_tac etc.) + but modified. (definition unfolding, exchange of lifting function, + adoption of parameters) ... 
(version 2005) + >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> + *) + +fun res_terminst_tac ctxt Tinsts insts rule i = + compose_terminst_tac ctxt Tinsts insts (false, rule, nprems_of rule) i; + +(*eresolve elimination version*) +fun eres_terminst_tac ctxt Tinsts insts rule i = + compose_terminst_tac ctxt Tinsts insts (true, rule, nprems_of rule) i; + + +(*For forw_inst_tac and dres_inst_tac. Preserve Var indexes of rl; + increment revcut_rl instead. *) +(* COPIED FROM TACTIC STRUCTURE. SUPERFLUOUS HERE IF IT WOULD BE EXPORTED !!! *) + +fun make_elim_preserve rl = Rule_Insts.make_elim_preserve rl + +(* +fun make_elim_preserve rl = + let val {maxidx,...} = rep_thm rl + val thy = Thm.theory_of_thm rl + fun cvar ixn = cterm_of (thy) (Var(ixn,propT)); + val revcut_rl' = + Drule.instantiate_normalize ([], [(cvar("V",0), cvar("V",maxidx+1)), + (cvar("W",0), cvar("W",maxidx+1))]) revcut_rl + val arg = (false, rl, nprems_of rl) + val [th] = Seq.list_of (Thm.bicompose false arg 1 revcut_rl') + in th end + handle Bind => raise THM("make_elim_preserve", 1, [rl]); +*) + +(*instantiate and cut -- for a FACT, anyway...*) +fun cut_terminst_tac ctxt Tinsts insts rule = res_terminst_tac ctxt Tinsts insts (make_elim_preserve ctxt rule); + +(*forward tactic applies a RULE to an assumption without deleting it*) +fun forw_terminst_tac ctxt Tinsts insts rule = cut_terminst_tac ctxt Tinsts insts rule THEN' assume_tac ctxt; + +(*dresolve tactic applies a RULE to replace an assumption*) +fun dres_terminst_tac ctxt Tinsts insts rule = eres_terminst_tac ctxt Tinsts insts (make_elim_preserve ctxt rule); + +(* conversions to handle depricated versions of this module : + >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> *) + +fun convert_tinsts Tinsts thy = map (fn(x,y) => (Thm.ctyp_of thy (TVar x), Thm.ctyp_of thy y)) Tinsts; +fun convert_substs Subst thy = map (fn(x,y) => (Thm.cterm_of thy (Var x), Thm.cterm_of thy y)) Subst; + +(* Of course, some code duplication can be can be avoided by introducing + higher-order variants. *) + + +fun subgoal_terminst_tac ctxt insts sprop goal st = + (DETERM o (res_terminst_tac ctxt insts) + (convert_substs [((("psi",0),propT), sprop)] ctxt) cut_rl THEN' + SUBGOAL (fn (prop, _) => + let val concl' = Logic.strip_assums_concl prop in + if null (Term.add_tvars concl' []) then () + else warning"Type variables in new subgoal: add a type constraint?"; + all_tac + end)) goal st; +end; + +*} + + + +ML{* Term_Tactics.subgoal_terminst_tac *} +end diff --git a/src/test-gen/src/main/TestEnv.ML b/src/test-gen/src/main/TestEnv.ML new file mode 100644 index 0000000..ce89494 --- /dev/null +++ b/src/test-gen/src/main/TestEnv.ML @@ -0,0 +1,458 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * TestEnv.ML --- environment for configuration parameters + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2007 ETH Zurich, Switzerland + * 2008-2013 Achim D. Brucker, Germany + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) + +signature TESTENV = +sig + type test_derivation_rule = Proof.context -> int -> thm -> (int * tactic) list + + (* Core data structure for the test environment. + Contains tables, parameters, all sorts of "screws" + to control the test generation process. *) + type testinfo = + {test_thm_tab : thm Symtab.table, (* name to testthm *) + + type_candicates : typ list, (* type grounding list *) + + cod_term_tab : (typ list -> term) Symtab.table, + (* random generators for terms, + tab assigns to each type constuctor + name a generator that gets the + instances of type constructor as args.*) + abstest_data_tab: (thm list) Symtab.table, + (* assigns name to list of + abstract (local) test data statements *) + test_data_tab : (thm list) Symtab.table, + + pre_safe_tac : Proof.context -> tactic, + pre_normalize_TNF_tac : Proof.context -> tactic, + pre_minimize_TNF_tac : Proof.context -> tactic, + test_derivation_rules : test_derivation_rule Unsynchronized.ref list + }; + + val prelude : bool Config.T + val wrapper : bool Config.T + val toString : string Config.T + val setup_code : string Config.T + val dataconv_code : string Config.T + + val depth : int Config.T (* data structure depth *) + val breadth : int Config.T (* no of free's in testspec *) + + val bound : int Config.T (* global bound for data statements *) + val case_breadth : int Config.T (* no of test data per case, + weakening uniformity *) + val iterations : int Config.T (* number of random attempts to solve a case *) + + val type_range_bound : int Config.T (* effectively used elements + type grounding list *) + + val SMT : bool Config.T + val get_smt_facts : Proof.context -> thm list + + type testenv + val rep_testenv : testenv -> testinfo + + val get_data : Proof.context -> testenv + val map_data : (testenv -> testenv) -> Context.generic -> Context.generic + val get_data_global : theory -> testenv + val map_data_global : (testenv -> testenv) -> theory -> theory + val print_data : Proof.context -> unit + + + val add_test_case : string * thm -> testenv -> testenv + (* add test theorem of "name". + The test-theorem is assumed to consist of either clauses in TNF or + of Test Hypotheses. 
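+
+     A sketch of typical use from ML, purely illustrative: the suite name
+     "my_suite" and the theorem value test_thm below are placeholders, not
+     part of this interface.
+
+         thy |> TestEnv.map_data_global
+                  (TestEnv.add_test_case ("my_suite", test_thm))
+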
*) + + val del_test_case : string -> testenv -> testenv + val add_test_data : string * thm -> testenv -> testenv + (* used to insert abstract test data registrated under + "string" into the test environment; abstract test data + (or "local test theorems") were used in gen_test_data *) + val del_test_data : string -> testenv -> testenv + (* erase abstract test data from test environment *) + + val thm_tab : testenv -> thm Symtab.table + val absdata_tab : testenv -> thm list Symtab.table + val jdmt_tab : testenv -> thm list Symtab.table + + val thm_tab_update : thm Symtab.table -> testenv -> testenv + val jdmt_tab_update : thm list Symtab.table -> testenv -> testenv + + val get_pre_safe_tac : testenv -> Proof.context -> tactic + val get_pre_normalize_TNF_tac : testenv -> Proof.context -> tactic + val get_pre_minimize_TNF_tac : testenv -> Proof.context -> tactic + + val get_test_derivation_rules : testenv -> test_derivation_rule list + val add_test_derivation_rule : test_derivation_rule -> testenv -> testenv + + val type_candidates_update : typ list -> testenv -> testenv + val pre_safe_tac_update : (Proof.context -> tactic) -> testenv -> testenv + val pre_normalize_TNF_tac_update : (Proof.context -> tactic) -> testenv -> testenv + val pre_minimize_TNF_tac_update : (Proof.context -> tactic) -> testenv -> testenv + + val setup: theory -> theory +end + +structure TestEnv : TESTENV = +struct + +open HOLogic; + +type test_derivation_rule = Proof.context -> int -> thm -> (int * tactic) list + +type testinfo ={test_thm_tab : thm Symtab.table, (* name to testthm *) + + type_candicates : typ list, (* type grounding list *) + + cod_term_tab : (typ list -> term) Symtab.table, + (* random generators for terms, + tab assigns to each type constuctor + name a generator that gets the + instances of type constructor as args.*) + abstest_data_tab: (thm list) Symtab.table, + (* assigns name to list of + data statements *) + test_data_tab : (thm list) Symtab.table, + pre_safe_tac : Proof.context -> tactic, + pre_normalize_TNF_tac : Proof.context -> tactic, + pre_minimize_TNF_tac : Proof.context -> tactic, + test_derivation_rules : test_derivation_rule Unsynchronized.ref list + }; + +val (prelude, prelude_setup) = Attrib.config_bool @{binding testgen_prelude} (K true); +val (wrapper, wrapper_setup) = Attrib.config_bool @{binding testgen_wrapper} (K true); +val (toString, toString_setup) = Attrib.config_string @{binding testgen_toString} (K ""); +val (setup_code, setup_code_setup) = Attrib.config_string @{binding testgen_setup_code} (K ""); +val (dataconv_code, dataconv_code_setup) = Attrib.config_string @{binding testgen_dataconv_code} (K ""); +val (depth, depth_setup) = Attrib.config_int @{binding testgen_depth} (K 3); +val (breadth, breadth_setup) = Attrib.config_int @{binding testgen_breadth} (K 1); +val (bound, bound_setup) = Attrib.config_int @{binding testgen_bound} (K 200); +val (case_breadth, case_breadth_setup) = Attrib.config_int @{binding testgen_case_breadth} (K 1); +val (iterations, iterations_setup) = Attrib.config_int @{binding testgen_iterations} (K 25); +val (type_range_bound, type_range_bound_setup) = Attrib.config_int @{binding testgen_type_range_bound} (K 1); +val (SMT, SMT_setup) = Attrib.config_bool @{binding testgen_SMT} (K false); + +structure TestGen_SMT + = Named_Thms (val name = @{binding "testgen_smt_facts"} val description = "Facts for HOL-TestGen SMT solving"); + +val get_smt_facts = TestGen_SMT.get; + +datatype testenv = Testenv of testinfo + +fun rep_testenv (Testenv X) = X; + + 
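+(* The Config.T parameters declared above are ordinary Isabelle configuration
+   attributes (registered by "setup" below).  A minimal, purely illustrative
+   sketch of adjusting and reading them, with arbitrary example values:
+
+       declare [[testgen_iterations = 50, testgen_depth = 5]]
+
+   or, from ML,  Config.get ctxt TestEnv.iterations. *)
+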
+val initial_testenv = Testenv + {test_thm_tab = Symtab.empty, + + type_candicates = [HOLogic.intT,HOLogic.unitT, + HOLogic.boolT, + HOLogic.mk_setT HOLogic.intT, + HOLogic.listT HOLogic.intT], + cod_term_tab = Symtab.empty, + abstest_data_tab= Symtab.empty, + test_data_tab = Symtab.empty, + pre_safe_tac = fn ctxt => all_tac, + pre_normalize_TNF_tac = fn ctxt => all_tac, + pre_minimize_TNF_tac = fn ctxt => all_tac, + test_derivation_rules = [] +}; + +fun merge_testenv + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}, + Testenv{test_thm_tab=tttab', + type_candicates=tc', + cod_term_tab=ctt', abstest_data_tab=tdt', + test_data_tab=tjt',pre_safe_tac=pst',pre_normalize_TNF_tac=pnt',pre_minimize_TNF_tac=pmt', + test_derivation_rules = tdr'}) = + + Testenv{test_thm_tab = Symtab.merge (Thm.eq_thm) (tttab,tttab'), + type_candicates = distinct (op=) (tc@tc'), + cod_term_tab = Symtab.empty, + (* don't know what a senseful + definition of override could + be; therefore constraint to the + simplest and most conservative + one *) + abstest_data_tab = Symtab.merge (fn (t1, t2) => List.all Thm.eq_thm (map2 (fn a => fn b => (a, b)) t1 t2)) (tdt,tdt'), + test_data_tab = Symtab.merge (fn (t1, t2) => List.all Thm.eq_thm (map2 (fn a => fn b => (a, b)) t1 t2)) (tjt,tjt'), + pre_safe_tac = pst, + pre_normalize_TNF_tac = pnt, + pre_minimize_TNF_tac = pmt, + test_derivation_rules = distinct (op=) (tdr@tdr')}; + + +(* Context data *) + +structure Data = Generic_Data +( + type T = testenv + val empty = initial_testenv + val extend = I + val merge = merge_testenv +); + +val get_data = Data.get o Context.Proof; +val map_data = Data.map; + +val get_data_global = Data.get o Context.Theory; +val map_data_global = Context.theory_map o map_data; + +fun print_data ctxt = + let val {test_thm_tab, + type_candicates, + cod_term_tab, abstest_data_tab,test_data_tab,pre_safe_tac, + pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules} = + rep_testenv (get_data ctxt) + val depth = Config.get ctxt depth + val breadth = Config.get ctxt breadth + val bound = Config.get ctxt bound + val iterations = Config.get ctxt iterations + fun H (n,thm) = [Pretty.str (n^":"), Display.pretty_thm ctxt thm] + fun H2(n,thms)= + [Pretty.str (n^":"), Pretty.str "======"] @ + map (Display.pretty_thm ctxt) thms @ [Pretty.str "======"] + in [Pretty.str ">>> Testenvironment >>>>>>>>>>>>>>>>>", + Pretty.str "+++ Control Data: +++++++++++++++++++", + Pretty.str ("*** default depth: "^Int.toString(depth)), + Pretty.str ("*** default breadth: "^Int.toString(breadth)), + Pretty.str ("*** bound: "^Int.toString(bound)), + Pretty.str ("*** iterations: "^Int.toString(iterations)), + Pretty.str "+++ Testtheoremtable: +++++++++++++++"] @ + maps H (Symtab.dest test_thm_tab) @ + [Pretty.str "+++ Testjudgements: +++++++++++++++++"] @ + maps H2 (Symtab.dest test_data_tab) @ + [Pretty.str "+++ Testdatatable: ++++++++++++++++++"] @ + maps H2 (Symtab.dest abstest_data_tab) @ + [Pretty.str "<<< Testenvironment <<<<<<<<<<<<<<<<<"] + end |> Pretty.chunks |> Pretty.writeln; + + +fun add_test_case (name,thm) + (Testenv {test_thm_tab, + type_candicates, + cod_term_tab, abstest_data_tab,test_data_tab,pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, + test_derivation_rules}) = + Testenv({test_thm_tab=Symtab.update(name,thm)test_thm_tab, + type_candicates=type_candicates, + 
cod_term_tab=cod_term_tab, + abstest_data_tab=abstest_data_tab, + test_data_tab=test_data_tab, pre_safe_tac=pre_safe_tac,pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + +fun del_test_case name (Testenv {test_thm_tab, + type_candicates,cod_term_tab, abstest_data_tab, + test_data_tab,pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, + test_derivation_rules}) = + Testenv({test_thm_tab=Symtab.delete_safe name test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, abstest_data_tab=abstest_data_tab, + test_data_tab=test_data_tab, pre_safe_tac=pre_safe_tac,pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + + + +fun add_test_data (name,thm) + (Testenv {test_thm_tab, + type_candicates, + cod_term_tab, abstest_data_tab,test_data_tab,pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, + test_derivation_rules}) = + Testenv({test_thm_tab=test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, + abstest_data_tab=Symtab.cons_list (name,thm) abstest_data_tab, + test_data_tab=test_data_tab,pre_safe_tac=pre_safe_tac,pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + +fun del_test_data name + (Testenv {test_thm_tab, + type_candicates, + cod_term_tab,abstest_data_tab,test_data_tab,pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, + test_derivation_rules}) = + Testenv({test_thm_tab=test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, + abstest_data_tab=Symtab.delete_safe name abstest_data_tab, + test_data_tab=test_data_tab,pre_safe_tac=pre_safe_tac,pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + +fun thm_tab te = #test_thm_tab (rep_testenv te) +fun absdata_tab te = #abstest_data_tab(rep_testenv te) +fun jdmt_tab te = #test_data_tab(rep_testenv te) +(* In the sequel we also use the term "test judgement" synonymously + for "test data statement". 
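+
+   For example, the judgements recorded for a suite can be looked up with
+   Symtab.lookup (jdmt_tab env) "my_suite", where "my_suite" is a placeholder
+   name; the lookup yields NONE if no test data has been generated yet.
+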
*) + +fun thm_tab_update thm_tab + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) = + (Testenv{test_thm_tab=thm_tab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}); + +fun jdmt_tab_update jdmt_tab + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) = + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=jdmt_tab,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}); + + +fun type_candidates_update (type_candidates) + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) + = + (Testenv{test_thm_tab=tttab,type_candicates=type_candidates, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}); + +fun pre_safe_tac_update (pre_safe_tat) + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) + = + (Testenv{test_thm_tab=tttab,type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pre_safe_tat,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}); + + +fun pre_normalize_TNF_tac_update (pre_normalize_TNF_tac) + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) + = + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}); + + +fun pre_minimize_TNF_tac_update (pre_minimize_TNF_tac) + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt, + pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) + = + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = tdr}); + + + + +fun get_pre_safe_tac (Testenv{pre_safe_tac=pst, ...}) = pst; +fun get_pre_normalize_TNF_tac (Testenv{pre_normalize_TNF_tac=pnt, ...}) = pnt; +fun get_pre_minimize_TNF_tac (Testenv{pre_minimize_TNF_tac=pmt, ...}) = pmt; + +fun get_test_derivation_rules (Testenv{test_derivation_rules = tdr, ...}) = map (op !) 
tdr; + +fun add_test_derivation_rule (tdr) + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt, + pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdrs}) + = + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt, + pre_minimize_TNF_tac=pmt, + test_derivation_rules = (Unsynchronized.ref tdr)::tdrs}); + +(* module setup *) + +val setup = + prelude_setup #> + wrapper_setup #> + toString_setup #> + setup_code_setup #> + dataconv_code_setup #> + depth_setup #> + breadth_setup #> + bound_setup #> + case_breadth_setup #> + iterations_setup #> + SMT_setup #> + TestGen_SMT.setup; + +end (*struct *); + + diff --git a/src/test-gen/src/main/TestEnv.thy b/src/test-gen/src/main/TestEnv.thy new file mode 100644 index 0000000..e5ad3c7 --- /dev/null +++ b/src/test-gen/src/main/TestEnv.thy @@ -0,0 +1,726 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * TestEnv.ML --- environment for configuration parameters + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2007 ETH Zurich, Switzerland + * 2008-2013 Achim D. Brucker, Germany + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +theory TestEnv +imports Main +begin +ML{* + +(* TODO : not correctly implemented : no ABSTRACT testcases. *) + +signature TESTENV = +sig + type test_derivation_rule = Proof.context -> int -> thm -> (int * tactic) list + + (* Core data structure for the test environment. + Contains tables, parameters, all sorts of "screws" + to control the test generation process. 
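+
+     From a proof context the current record instance can be obtained via
+     rep_testenv (get_testenv ctxt), an access sketch using the operations
+     declared below; the individual fields are listed next.
+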
*) + type testinfo = + {test_thm_tab : thm Symtab.table, (* name to testthm *) + + test_thm_inst_tab : (thm list) Symtab.table, (* instantiated testthm *) + + type_candicates : typ list, (* type grounding list *) + + cod_term_tab : (typ list -> term) Symtab.table, + (* random generators for terms, + tab assigns to each type constuctor + name a generator that gets the + instances of type constructor as args.*) + abstest_data_tab : (thm list) Symtab.table, + (* assigns name to list of + abstract (local) test data statements given + by the user *) + + test_data_tab : (cterm list) Symtab.table, + (* concrete test data table *) + unsolved_PO_tab : (cterm list) Symtab.table, + (* proof obligations left unresolved in selection phase *) + test_thyps_tab : (cterm list) Symtab.table, + (* test-hypothesis *) + + + pre_safe_tac : Proof.context -> tactic, + pre_normalize_TNF_tac : Proof.context -> tactic, + pre_minimize_TNF_tac : Proof.context -> tactic, + test_derivation_rules : test_derivation_rule Unsynchronized.ref list + }; + + val prelude : bool Config.T + val wrapper : bool Config.T + val toString : string Config.T + val setup_code : string Config.T + val dataconv_code : string Config.T + + val depth : int Config.T (* data structure depth *) + val steps : int Config.T (* no of iterations of splittings *) + + val bound : int Config.T (* global bound for data statements *) + val case_steps : int Config.T (* no of test data per case, + weakening uniformity *) + val iterations : int Config.T (* number of random attempts to solve a case *) + + val type_range_bound : int Config.T (* effectively used elements + type grounding list *) + + val SMT : bool Config.T (* use the SMT backend (default: false) *) + val SMT2 : bool Config.T (* use the SMT2 backend (default: false ; false if SMT is set to true) *) + val get_smt_facts : Proof.context -> thm list + + val pon : int Config.T (* test data generation for only the chosen PO; + 0 for all the POs *) + val smt_model : bool Config.T + + val use_metis : bool Config.T (* if true, use metis to check SMT models *) + val use_smt : bool Config.T (* if true, use smt to check SMT models *) + + type testenv + val rep_testenv : testenv -> testinfo + + val get_testenv : Proof.context -> testenv + val map_data : (testenv -> testenv) -> Context.generic -> Context.generic + val get_testenv_global : theory -> testenv + val map_data_global : (testenv -> testenv) -> theory -> theory + val print_testenv : string -> Proof.context -> unit + val get_test_data : Proof.context -> string -> thm list option + + val get_test_data_tab : testenv -> cterm list Symtab.table + val get_unsolved_PO_tab: testenv -> cterm list Symtab.table + val get_test_thyps_tab : testenv -> cterm list Symtab.table + + val add_test_data_tab : string * cterm -> testenv -> testenv + val add_unsolved_PO_tab: string * cterm -> testenv -> testenv + val add_test_thyps_tab : string * cterm -> testenv -> testenv + + val set_test_data_tab : string * (cterm list) -> testenv -> testenv + val set_unsolved_PO_tab: string * (cterm list) -> testenv -> testenv + val set_test_thyps_tab : string * (cterm list) -> testenv -> testenv + + val add_test_case : string * thm -> testenv -> testenv + (* add test theorem of "name". + The test-theorem is assumed to consist of either clauses in TNF or + of Test Hypotheses. 
*) + + val del_test_case : string -> testenv -> testenv + val add_abstest_data : string * thm -> testenv -> testenv + (* used to insert abstract test data registrated under + "string" into the test environment; abstract test data + (or "local test theorems") were used in gen_test_data *) + val del_abstest_data : string -> testenv -> testenv + (* erase abstract test data from test environment *) + + val get_test_thm_tab : testenv -> thm Symtab.table + val get_absdata_tab : testenv -> thm list Symtab.table + val get_test_thm_inst_tab : testenv -> thm list Symtab.table + + val test_thm_tab_update : thm Symtab.table -> testenv -> testenv + val test_thm_inst_tab_update : thm list Symtab.table -> testenv -> testenv + + val get_pre_safe_tac : testenv -> Proof.context -> tactic + val get_pre_normalize_TNF_tac : testenv -> Proof.context -> tactic + val get_pre_minimize_TNF_tac : testenv -> Proof.context -> tactic + + val get_test_derivation_rules : testenv -> test_derivation_rule list + val add_test_derivation_rule : test_derivation_rule -> testenv -> testenv + + val type_candidates_update : typ list -> testenv -> testenv + val pre_safe_tac_update : (Proof.context -> tactic) -> testenv -> testenv + val pre_normalize_TNF_tac_update : (Proof.context -> tactic) -> testenv -> testenv + val pre_minimize_TNF_tac_update : (Proof.context -> tactic) -> testenv -> testenv + + val print_test_case : Proof.context -> string -> int option -> string + val print_test_data : Proof.context -> string -> int option -> string + val print_test_hyps : Proof.context -> string -> int option -> string + val print_unsolved_pos : Proof.context -> string -> int option -> string + + val setup: theory -> theory +end +*} + +ML{* + +structure TestEnv : TESTENV = +struct + +open HOLogic; + +type test_derivation_rule = Proof.context -> int -> thm -> (int * tactic) list + +type testinfo ={test_thm_tab : thm Symtab.table, (* name to testthm *) + + test_thm_inst_tab : (thm list) Symtab.table, + + type_candicates : typ list, (* type grounding list *) + + cod_term_tab : (typ list -> term) Symtab.table, + (* random generators for terms, + tab assigns to each type constuctor + name a generator that gets the + instances of type constructor as args.*) + abstest_data_tab : (thm list) Symtab.table, + (* assigns name to list of data statements *) + test_data_tab : (cterm list) Symtab.table, + unsolved_PO_tab : (cterm list) Symtab.table, + test_thyps_tab : (cterm list) Symtab.table, + pre_safe_tac : Proof.context -> tactic, + pre_normalize_TNF_tac : Proof.context -> tactic, + pre_minimize_TNF_tac : Proof.context -> tactic, + test_derivation_rules : test_derivation_rule Unsynchronized.ref list + }; + +val (prelude, prelude_setup) = Attrib.config_bool @{binding testgen_prelude} (K true); +val (wrapper, wrapper_setup) = Attrib.config_bool @{binding testgen_wrapper} (K true); +val (toString, toString_setup) = Attrib.config_string @{binding testgen_toString} (K ""); +val (setup_code, setup_code_setup) = Attrib.config_string @{binding testgen_setup_code} (K ""); +val (dataconv_code, dataconv_code_setup) = Attrib.config_string @{binding testgen_dataconv_code} (K ""); +val (depth, depth_setup) = Attrib.config_int @{binding testgen_depth} (K 3); +val (steps, steps_setup) = Attrib.config_int @{binding testgen_steps} (K 1); +val (bound, bound_setup) = Attrib.config_int @{binding testgen_bound} (K 200); +val (case_steps, case_steps_setup) = Attrib.config_int @{binding testgen_case_steps} (K 1); +val (iterations, iterations_setup) = Attrib.config_int 
@{binding testgen_iterations} (K 25); +val (type_range_bound, type_range_bound_setup) = Attrib.config_int @{binding testgen_type_range_bound} (K 1); +val (SMT, SMT_setup) = Attrib.config_bool @{binding testgen_SMT} (K false); +val (SMT2, SMT2_setup) = Attrib.config_bool @{binding testgen_SMT2} (K false); +val (pon, pon_setup) = Attrib.config_int @{binding testgen_pon} (K 0); +val (smt_model, smt_model_setup) = Attrib.config_bool @{binding testgen_smt_model} (K false); +val (use_metis, use_metis_setup) = Attrib.config_bool @{binding testgen_use_metis} (K false); +val (use_smt, use_smt_setup) = Attrib.config_bool @{binding testgen_use_smt} (K false); + +structure TestGen_SMT + = Named_Thms (val name = @{binding "testgen_smt_facts"} + val description = "Facts for HOL-TestGen SMT solving"); + +val get_smt_facts = TestGen_SMT.get; + +datatype testenv = Testenv of testinfo + +fun rep_testenv (Testenv X) = X; + + +val initial_testenv = Testenv + {test_thm_tab = Symtab.empty, + type_candicates = [HOLogic.intT,HOLogic.unitT, + HOLogic.boolT, + HOLogic.mk_setT HOLogic.intT, + HOLogic.listT HOLogic.intT], + cod_term_tab = Symtab.empty, + abstest_data_tab= Symtab.empty, + test_thm_inst_tab = Symtab.empty, + test_data_tab = Symtab.empty, + unsolved_PO_tab = Symtab.empty, + test_thyps_tab = Symtab.empty, + + pre_safe_tac = fn ctxt => all_tac, + pre_normalize_TNF_tac = fn ctxt => all_tac, + pre_minimize_TNF_tac = fn ctxt => all_tac, + test_derivation_rules = [] + }; + +fun merge_testenv + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_data_tab = ttdt, + unsolved_PO_tab = uPOt, + test_thyps_tab = ttht, + test_thm_inst_tab=tjt, pre_safe_tac=pst, + pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}, + Testenv{test_thm_tab=tttab', + type_candicates=tc', + cod_term_tab=ctt', abstest_data_tab=tdt', + test_data_tab = ttdt', + unsolved_PO_tab = uPOt', + test_thyps_tab = ttht', + test_thm_inst_tab=tjt',pre_safe_tac=pst', + pre_normalize_TNF_tac=pnt',pre_minimize_TNF_tac=pmt', + test_derivation_rules = tdr'}) = + + Testenv{test_thm_tab = Symtab.merge (Thm.eq_thm) (tttab,tttab'), + type_candicates = distinct (op=) (tc@tc'), + cod_term_tab = Symtab.empty, + (* don't know what a senseful + definition of override could + be; therefore constraint to the + simplest and most conservative + one *) + abstest_data_tab = Symtab.merge (fn (t1, t2) => List.all Thm.eq_thm (map2 (fn a => fn b => (a, b)) t1 t2)) (tdt,tdt'), + test_thm_inst_tab = Symtab.merge (fn (t1, t2) => List.all Thm.eq_thm (map2 (fn a => fn b => (a, b)) t1 t2)) (tjt,tjt'), + test_data_tab = Symtab.merge (fn (t1, t2) => List.all Thm.aconvc (map2 (fn a => fn b => (a, b)) t1 t2)) (ttdt,ttdt'), + unsolved_PO_tab = Symtab.merge (fn (t1, t2) => List.all Thm.aconvc (map2 (fn a => fn b => (a, b)) t1 t2)) (uPOt,uPOt'), + test_thyps_tab = Symtab.merge (fn (t1, t2) => List.all Thm.aconvc (map2 (fn a => fn b => (a, b)) t1 t2)) (ttht,ttht'), + pre_safe_tac = pst, + pre_normalize_TNF_tac = pnt, + pre_minimize_TNF_tac = pmt, + test_derivation_rules = distinct (op=) (tdr@tdr')}; + + +structure Data = Generic_Data +( + type T = testenv + val empty = initial_testenv + val extend = I + val merge = merge_testenv +); + +val get_testenv = Data.get o Context.Proof; +val map_data = Data.map; + +val get_testenv_global = Data.get o Context.Theory; +val map_data_global = Context.theory_map o map_data; + +fun print_testenv envname ctxt = + let val {test_thm_tab, type_candicates, + cod_term_tab, 
abstest_data_tab,test_thm_inst_tab,pre_safe_tac, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules} = + rep_testenv (get_testenv ctxt) + val depth = Config.get ctxt depth + val steps = Config.get ctxt steps + val bound = Config.get ctxt bound + val iterations = Config.get ctxt iterations + fun H (n,thm) = if n = envname orelse envname = "" + then [Pretty.str (n^":"), Thm.pretty_thm ctxt thm] + else [] + fun H2(n,thms)= if n = envname orelse envname = "" + then [Pretty.str (n^":"), Pretty.str "======"] @ + map (Thm.pretty_thm ctxt) thms @ + [Pretty.str "======"] + else [] + in [Pretty.str ">>> Testenvironment >>>>>>>>>>>>>>>>>", + Pretty.str "+++ Control Data: +++++++++++++++++++", + Pretty.str ("*** default depth: "^Int.toString(depth)), + Pretty.str ("*** default steps: "^Int.toString(steps)), + Pretty.str ("*** bound: "^Int.toString(bound)), + Pretty.str ("*** iterations: "^Int.toString(iterations)), + Pretty.str "+++ Testtheoremtable: +++++++++++++++"] @ +(* TODO bu: add cterm construction ... *) + maps H (Symtab.dest test_thm_tab) @ + [Pretty.str "+++ Testjudgements: +++++++++++++++++"] @ + maps H2 (Symtab.dest test_thm_inst_tab) @ + [Pretty.str "+++ Testdatatable: ++++++++++++++++++"] @ + maps H2 (Symtab.dest abstest_data_tab) @ + [Pretty.str "<<< Testenvironment <<<<<<<<<<<<<<<<<"] + end |> Pretty.chunks |> Pretty.writeln; + + +fun add_test_case (name,thm) + (Testenv {test_thm_tab, type_candicates, cod_term_tab, abstest_data_tab,test_thm_inst_tab, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules}) = + Testenv({test_thm_tab=Symtab.update(name,thm)test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, + abstest_data_tab=abstest_data_tab, + test_thm_inst_tab=test_thm_inst_tab, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_safe_tac=pre_safe_tac, + pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + +fun del_test_case name (Testenv {test_thm_tab, + type_candicates,cod_term_tab, abstest_data_tab, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + test_thm_inst_tab,pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, + test_derivation_rules}) = + Testenv({test_thm_tab=Symtab.delete_safe name test_thm_tab, type_candicates=type_candicates, + cod_term_tab=cod_term_tab, abstest_data_tab=abstest_data_tab, + test_thm_inst_tab=test_thm_inst_tab, pre_safe_tac=pre_safe_tac, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + + + +fun add_abstest_data (name,thm) + (Testenv {test_thm_tab, type_candicates, + cod_term_tab, abstest_data_tab,test_thm_inst_tab,pre_safe_tac, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_normalize_TNF_tac,pre_minimize_TNF_tac, + test_derivation_rules}) + = Testenv({test_thm_tab=test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, + abstest_data_tab=Symtab.cons_list (name,thm) abstest_data_tab, + test_thm_inst_tab=test_thm_inst_tab,pre_safe_tac=pre_safe_tac, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_normalize_TNF_tac=pre_normalize_TNF_tac, + 
pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + +fun del_abstest_data name + (Testenv {test_thm_tab, + type_candicates, + cod_term_tab,abstest_data_tab, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + test_thm_inst_tab,pre_safe_tac, + pre_normalize_TNF_tac,pre_minimize_TNF_tac, + test_derivation_rules}) + = Testenv({test_thm_tab=test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, + abstest_data_tab=Symtab.delete_safe name abstest_data_tab, + test_thm_inst_tab=test_thm_inst_tab,pre_safe_tac=pre_safe_tac, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + +fun get_test_thm_tab te = #test_thm_tab (rep_testenv te) +fun get_absdata_tab te = #abstest_data_tab(rep_testenv te) +fun get_test_thm_inst_tab te = #test_thm_inst_tab(rep_testenv te) + + + +fun get_test_data_tab te = #test_data_tab (rep_testenv te) +fun get_unsolved_PO_tab te = #unsolved_PO_tab (rep_testenv te) +fun get_test_thyps_tab te = #test_thyps_tab(rep_testenv te) + + +fun add_test_data_tab (name,ct) + (Testenv {test_thm_tab, type_candicates, cod_term_tab, abstest_data_tab,test_thm_inst_tab, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules}) = + Testenv({test_thm_tab=test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, + abstest_data_tab=abstest_data_tab, + test_thm_inst_tab=test_thm_inst_tab, + test_data_tab = Symtab.cons_list(name,ct) test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_safe_tac=pre_safe_tac, + pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + +fun add_unsolved_PO_tab (name,ct) + (Testenv {test_thm_tab, type_candicates, cod_term_tab, abstest_data_tab,test_thm_inst_tab, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules}) = + Testenv({test_thm_tab=test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, + abstest_data_tab=abstest_data_tab, + test_thm_inst_tab=test_thm_inst_tab, + test_data_tab = test_data_tab, + unsolved_PO_tab = Symtab.cons_list(name,ct) unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_safe_tac=pre_safe_tac, + pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + +fun add_test_thyps_tab (name,ct) + (Testenv {test_thm_tab, type_candicates, cod_term_tab, abstest_data_tab,test_thm_inst_tab, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules}) = + Testenv({test_thm_tab=test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, + abstest_data_tab=abstest_data_tab, + test_thm_inst_tab=test_thm_inst_tab, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = Symtab.cons_list(name,ct) test_thyps_tab, + pre_safe_tac=pre_safe_tac, + pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + +fun set_test_data_tab (name,ctl) + (Testenv {test_thm_tab, type_candicates, cod_term_tab, 
abstest_data_tab,test_thm_inst_tab, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules}) = + Testenv({test_thm_tab=test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, + abstest_data_tab=abstest_data_tab, + test_thm_inst_tab=test_thm_inst_tab, + test_data_tab = Symtab.update(name,ctl) test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_safe_tac=pre_safe_tac, + pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + +fun set_unsolved_PO_tab (name,ctl) + (Testenv {test_thm_tab, type_candicates, cod_term_tab, abstest_data_tab,test_thm_inst_tab, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules}) = + Testenv({test_thm_tab=test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, + abstest_data_tab=abstest_data_tab, + test_thm_inst_tab=test_thm_inst_tab, + test_data_tab = test_data_tab, + unsolved_PO_tab = Symtab.update(name,ctl) unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_safe_tac=pre_safe_tac, + pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + +fun set_test_thyps_tab (name,ctl) + (Testenv {test_thm_tab, type_candicates, cod_term_tab, abstest_data_tab,test_thm_inst_tab, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules}) = + Testenv({test_thm_tab=test_thm_tab, + type_candicates=type_candicates, + cod_term_tab=cod_term_tab, + abstest_data_tab=abstest_data_tab, + test_thm_inst_tab=test_thm_inst_tab, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = Symtab.update(name,ctl) test_thyps_tab, + pre_safe_tac=pre_safe_tac, + pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = test_derivation_rules}); + + +(* In the sequel we also use the term "test judgement" synonymously for "test data statement". 
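+
+   The concrete test data recorded for a suite can be pretty-printed, e.g.
+   print_test_data ctxt "my_suite" NONE  for all entries, or with  SOME 0
+   for the first one; the suite name "my_suite" is a placeholder.
+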
*) + +fun test_thm_tab_update thm_tab + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, + abstest_data_tab=tdt, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + test_thm_inst_tab=tjt,pre_safe_tac=pst, + pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) = + (Testenv{test_thm_tab=thm_tab, + type_candicates=tc, + cod_term_tab=ctt, + abstest_data_tab=tdt, + test_thm_inst_tab=tjt,pre_safe_tac=pst, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}); + +fun test_thm_inst_tab_update jdmt_tab + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, + abstest_data_tab=tdt, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + test_thm_inst_tab=tjt,pre_safe_tac=pst, + pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) = + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, + abstest_data_tab=tdt, + test_thm_inst_tab=jdmt_tab, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_safe_tac=pst,pre_normalize_TNF_tac=pnt, + pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}); + + +fun type_candidates_update (type_candidates) + (Testenv{test_thm_tab=tttab, + type_candicates=tc, + cod_term_tab=ctt, + abstest_data_tab=tdt, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + test_thm_inst_tab=tjt,pre_safe_tac=pst, + pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) + =(Testenv{test_thm_tab=tttab,type_candicates=type_candidates, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_thm_inst_tab=tjt,pre_safe_tac=pst, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}); + +fun pre_safe_tac_update (pre_safe_tat) + (Testenv{test_thm_tab=tttab, type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_thm_inst_tab=tjt,pre_safe_tac=pst, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) + =(Testenv{test_thm_tab=tttab,type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_thm_inst_tab=tjt, pre_safe_tac=pre_safe_tat, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}); + + +fun pre_normalize_TNF_tac_update (pre_normalize_TNF_tac) + (Testenv{test_thm_tab=tttab, type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_thm_inst_tab=tjt,pre_safe_tac=pst, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) + = (Testenv{test_thm_tab=tttab, type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_thm_inst_tab=tjt, pre_safe_tac=pst, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_normalize_TNF_tac=pre_normalize_TNF_tac, + pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}); + + +fun pre_minimize_TNF_tac_update (pre_minimize_TNF_tac) + (Testenv{test_thm_tab=tttab, type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_thm_inst_tab=tjt, pre_safe_tac=pst, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + 
pre_normalize_TNF_tac=pnt, pre_minimize_TNF_tac=pmt, + test_derivation_rules = tdr}) + =(Testenv{test_thm_tab=tttab, type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_thm_inst_tab=tjt, pre_safe_tac=pst,pre_normalize_TNF_tac=pnt, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_minimize_TNF_tac=pre_minimize_TNF_tac, + test_derivation_rules = tdr}); + + + + +fun get_pre_safe_tac (Testenv{pre_safe_tac=pst, ...}) = pst; +fun get_pre_normalize_TNF_tac (Testenv{pre_normalize_TNF_tac=pnt, ...}) = pnt; +fun get_pre_minimize_TNF_tac (Testenv{pre_minimize_TNF_tac=pmt, ...}) = pmt; + +fun get_test_derivation_rules (Testenv{test_derivation_rules = tdr, ...}) = map (op !) tdr; + +fun get_test_data ctxt name = let val Testenv {test_thm_inst_tab,...} = (get_testenv ctxt) + in Symtab.lookup test_thm_inst_tab name end + +fun add_test_derivation_rule (tdr) + (Testenv{test_thm_tab=tttab, type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_thm_inst_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt, + test_data_tab, unsolved_PO_tab, test_thyps_tab, + pre_minimize_TNF_tac=pmt, test_derivation_rules = tdrs}) + = (Testenv{test_thm_tab=tttab, type_candicates=tc, + cod_term_tab=ctt, abstest_data_tab=tdt, + test_thm_inst_tab=tjt, pre_safe_tac=pst, pre_normalize_TNF_tac=pnt, + test_data_tab = test_data_tab, + unsolved_PO_tab = unsolved_PO_tab, + test_thyps_tab = test_thyps_tab, + pre_minimize_TNF_tac=pmt, + (* OH MY GOD --- WHAT CRAP IS THIS >>>>> bu *) + test_derivation_rules = (Unsynchronized.ref tdr)::tdrs}); + +(* module setup *) + + + +fun print_test_data_generic f ctxt name index_opt = + let val ctrms = case (Symtab.lookup (f (get_testenv ctxt)) name) of + SOME k => k + | NONE => error "No such test suite" + val print_cterm = Pretty.string_of o (Syntax.pretty_term ctxt) o Thm.term_of + in case index_opt of + NONE => String.concatWith "\n" (map print_cterm ctrms) + |SOME ind => print_cterm (nth ctrms ind) + end + + +val print_test_data = print_test_data_generic get_test_data_tab; +val print_test_case = print_test_data_generic get_test_data_tab; (* temporary hack by bu - + should be own implem with own + table*) + +val print_test_hyps = print_test_data_generic get_test_thyps_tab; + +val print_unsolved_pos = print_test_data_generic get_unsolved_PO_tab; + + +val setup = + prelude_setup #> + wrapper_setup #> + toString_setup #> + setup_code_setup #> + dataconv_code_setup #> + depth_setup #> + steps_setup #> + bound_setup #> + case_steps_setup #> + iterations_setup #> + SMT_setup #> + SMT2_setup #> + pon_setup #> + smt_model_setup #> + use_metis_setup #> + use_smt_setup #> + TestGen_SMT.setup; + +end (*struct *); + +*} + +end diff --git a/src/test-gen/src/main/TestGen.thy b/src/test-gen/src/main/TestGen.thy new file mode 100644 index 0000000..8d6d18e --- /dev/null +++ b/src/test-gen/src/main/TestGen.thy @@ -0,0 +1,1704 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * TestGen.thy --- the core of the HOL-TestGen System. + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2010 ETH Zurich, Switzerland + * 2008-2015 Achim D. Brucker, Germany + * 2009-2017 Universite Paris-Sud, France + * 2015-2017 The University of Sheffield, UK + * + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id:$ *) + +chapter {* The TestGen Core*} + +theory TestGen +imports + Complex_Main + Term_Tactics (*ML*) + clocks (*ML*) + log (* ML *) + TestEnv (* ML *) + BackendUtils + RandomBackend + QuickCheckBackend + (*SMTBackend*) + + +keywords "mk_test_suite" :: "qed_global" + and "test_spec" :: "thy_goal" + and "gen_test_data" :: "thy_decl_block" (*"thy_script"*) (* really ? *) + and "print_testenv" :: diag + and "print_conc_tests" :: diag + and "print_abs_tests" :: diag + and "print_thyps" :: diag + and "print_upos" :: diag + and "set_pre_safe_tac" :: diag + and "set_pre_normalize_TNF_tac" :: diag + and "set_pre_minimize_TNF_tac" :: diag +begin + + +section{*General Framework for Unit-Test*} + + +definition PO :: "bool \ bool" + where "PO x = x" + +lemma PO_norm1 [simp]: "PO(PO A) = PO A" by(simp add: PO_def) +lemma PO_norm2 [simp]: "(\ PO A) = (PO(\ A))" by(simp add: PO_def) +lemma PO_norm3 [simp]: "(A \ PO B) = (PO(A \ B))" by(simp add: PO_def) +lemma PO_norm4 [simp]: "(PO A \ B) = (PO(A \ B))" by(simp add: PO_def) +lemma PO_norm5 [simp]: "(A \ PO B) = (PO(A \ B))" by(simp add: PO_def) +lemma PO_norm6 [simp]: "(PO A \ B) = (PO(A \ B))" by(simp add: PO_def) +lemma PO_norm7 [simp]: "(A \ (PO B)) = (PO(A \ B))" by(simp add: PO_def) +lemma PO_norm8 [simp]: "((PO A) \ B) = (PO(A \ B))" by(simp add: PO_def) +lemma PO_norm9 [simp]: "(A = (PO B)) = (PO(A = B))" by(simp add: PO_def) +lemma PO_norm10 [simp]: "((PO A) = B) = (PO(A = B))" by(simp add: PO_def) +lemma PO_norm11 [simp]: "(\x. (PO (A x))) = (PO(\x. A x))" by(simp add: PO_def) +lemma PO_norm12 [simp]: "(\x. (PO (A x))) = (PO(\x. 
A x))" by(simp add: PO_def) + +lemma PO_I : "(PO P) \ Q \ P \ Q" by(simp add: PO_def) (* internal use only *) +lemma PO_rev[elim!]: "\ PO False \ PO False \ False" by auto +lemma PO_grow : "PO(P \ Q) \ R \ (PO P) \ (Q \ R)" by(simp add: PO_def) (* internal use only *) + + +definition THYP :: "bool \ bool" + where "THYP x = x" + +lemma THYP_triv [simp]: "THYP True = True" by(simp add: THYP_def) + +lemma THYP_I : "A \ THYP A" by(simp add: THYP_def) + +lemma THYP_D : "THYP A \ A" by(simp add: THYP_def) + +lemma THYP_E : "\ THYP A; (A \ R) \ \ R" by(simp add: THYP_def) + +lemma THYP_spec : "THYP(\x. P x) \ THYP(P x)" by(simp add: THYP_def) + +lemma THYP_app1 : "\ THYP(A \ B); A \ \ B" by(simp add: THYP_def) + (* to be used with etac in order to apply uniformity hyp *) + +lemma THYP_app1_rev : "\ A; THYP(A \ B) \ \ B" by(simp add: THYP_def) + (* to be used with etac in order to apply uniformity hyp *) + +lemma THYP_app2 : "\ THYP(A); A \ B \ \ B" by(simp add: THYP_def) + (* to be used with RS in order to apply regularity hyp *) + + +lemma THYP_E_reg : "\ size x \ k \ B; THYP((k < size x) \ B)\ \ B" + by(simp add: THYP_def, auto) + (* to be used with rtac in order to introduce regularity hyp *) + + +definition RSF :: "bool" + where "RSF = True" -- {* just to denote random solver failures *} + +lemma RSF_E : "\ RSF \ P \ \ P" + by(auto simp: RSF_def) + + +section {* Tool Setup *} + + +ML{* +infix 1 |$> + +infix 6 DOWNTO + +infix 0 COLLECT_SS + +signature TESTGEN = +sig + val mt_testenv : TestEnv.testenv + + val get_test_thm : theory -> Symtab.key -> thm list + + val trace : bool Config.T + (* switches on debug information *) + + val no_uniformity : bool Config.T + (* switches off uniformity generation *) + + val profiling : bool Config.T + (* switches on timer profiler *) + + val no_finalize : bool Config.T + (* don't generate constraints (POs) *) + + val is_neither_thyp_nor_PO : term -> bool + + val |$> : thm * tactic -> thm + val TRY' : (int -> tactic) -> int -> tactic + val COND' : (int -> thm -> bool) -> (int -> tactic) -> (int -> tactic) -> int -> tactic + val DOWNTO : int * int -> (int -> tactic) -> tactic + val DEPTH_SPLIT : (int -> tactic) -> int -> tactic + val REPEAT' : ('a -> tactic) -> 'a -> tactic + val ALLCASES : (int -> tactic) -> tactic + val to_TNF : Proof.context -> string list -> int -> thm -> thm Seq.seq + val uniformityI_tac : Proof.context -> string list -> int -> tactic + val CLOSURE : (int -> tactic) -> int -> thm -> thm Seq.seq + val case_tac_typ : Proof.context -> string list -> int -> thm -> thm Seq.seq + + val ONLY_POS : (int -> tactic) -> int -> tactic + + val finalize_tac : Proof.context -> string list -> tactic + + val abs_data_tac : Proof.context -> thm list -> int -> tactic + + val mp_fy : Proof.context -> int -> tactic + val all_ify : Proof.context -> string list -> int -> tactic + val thyp_ify : Proof.context -> int -> tactic + + val var_hyp_subst_tac : Proof.context -> int -> tactic + + val gen_test_case_tac : Proof.context -> string list -> tactic + (* "gen_test_case_tac ctxt prognames" is a + tactic that converts all subgoals of current proof state into + test-normal form - i.e. a conjunctive normal form + (instead of DNF), where the conclusions have the + form "P(progname x1 .. xn)", where progname does + not occur in the premisses (the "constraints") + of the test. As a prerequisite, a data-separation + theorem of depth k is resolved for up to >steps< number + of free variables in the current proof-state. 
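+
+        A minimal sketch of invoking it inside a test specification
+        proof (the program name "prog" is just a placeholder for the
+        program under test):
+
+            apply(tactic {* TestGen.gen_test_case_tac @{context} ["prog"] *})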
+ + NOTE: At present, the generation of data-separation + lemmas is limited to "real" free variables and will + not work for parameters of a test specification. Further, + the proof-state is assumed to have only one goal. + *) + + (* Included for complete backward compatibility *) + val gen_test_case_old_tac : Proof.context -> string list -> tactic + + val gen_test_data : string -> Context.generic -> Context.generic + (* "gen_test_data name context" constructs ground type instances of + test theorems associated to "name". In a further step, + "abstract" or local test data were resolved into the resulting test + theorems (if available in the testenv under "name"), remaining + meta variables were instantiated by a "random_solver" + that attempts to generate an instance and tries to solve + the constraints of a goal up to "iterations" times. + + The resulting simplified and "solved" test data theorems + were stored again in the test environment. + *) + + (* TODO: remove these three functions *) + val get_test_data : theory -> string -> thm list + (* get_test_data thy name extracts the test judgements + (i.e. the premisses of test data statement, that are not + test hypothesis or proof obligation.). *) + + val get_test_hyps : theory -> string -> thm list + (* get_test_hyps thy name extracts the test hypothesis + of a !!test data statement!!, not a test theorem. *) + + val get_pos : theory -> string -> thm list + (* extract remaining constraints (proof obligations) *) + + val animate : theory -> string -> unit + (* ground test data statements may be animated, if "P(progname x1 .. xn)" + is executable. "animate" presents test data statements by + unfolding "progname x1 .. xn" and simplifying on a stepwise + basis. (* for the moment, it's just asm_simp_tac with the + standard evaluation trace, in the future its more or less + the same except with slightly improved output *) + *) + val discharge_POs : string -> Proof.context -> tactic + + val setup: theory -> theory +end +*} + + +ML{* + +structure TestGen : TESTGEN = +struct + +open Thm; +open HOLogic; +open BNF_LFP_Compat ; +open Old_Datatype_Aux +(* open TestEnv; *) +(* open Clocks; *) +(* open BNF_LFP_Compat *) +open Int; +open List; +open Ctr_Sugar_Util ; (* rtac dtac etc. *) +open Rule_Insts; (* res_inst_tac etc *) + + + +val (trace, trace_setup) = Attrib.config_bool @{binding testgen_trace} (K false); +val (no_uniformity, no_uniformity_setup) = Attrib.config_bool @{binding no_uniformity} (K false); +val (profiling, profiling_setup) = Attrib.config_bool @{binding testgen_profiling} (K false); +val (no_finalize, no_finalize_setup) = Attrib.config_bool @{binding testgen_no_finalize} (K false); + + +(* ************************************************************************* *) +(* access to datatype package ... *) +(* ************************************************************************* *) + +fun isDataType thy (Type(tn,_)) = is_some (BNF_LFP_Compat.get_info thy [] tn) (* CHECK ME: empty preference list ok ? *) + |isDataType _ _ = false; + + +fun get_case_split_of_dt thy type_name = #exhaust (BNF_LFP_Compat.the_info thy [] type_name); + +fun get_nchotomy_of thy type_name = #nchotomy (BNF_LFP_Compat.the_info thy [] type_name); + + +(* ************************************************************************* *) +(* access to record package ... 
*) +(* ************************************************************************* *) + +fun isRecordType thy (Type(tn,_)) = is_some (Record.get_info thy tn) + |isRecordType _ _ = false; + +fun get_case_split_of_record thy type_name = #cases(the (Record.get_info thy type_name)); + + +(* ************************************************************************* *) +(* term computations ... *) +(* ************************************************************************* *) + +fun max_depth (Free h) = (0,[h]) + |max_depth (Const h)= (0,[]) + |max_depth (Abs(s,tt,t)) = max_depth t + |max_depth (s $ t) = let val (k,S) = max_depth (s) + val (k',S') = max_depth (t) + in if k > k'+1 then (k,S) + else if k = k'+1 then (k, S @ S') + else (k'+1, S') + end + |max_depth (_) = (0,[]); + +(* relies on definitions in Testing.thy ... *) +val is_thyp = exists_Const (fn (@{const_name THYP},_) => true | _ => false) + +fun is_po (Const (@{const_name Trueprop},_) $ (Const (@{const_name PO},_) $ _)) = true + |is_po _ = false; + +fun is_neither_thyp_nor_PO t = (not (is_thyp t)) andalso (not (is_po t)) + +fun is_test_data t = (is_neither_thyp_nor_PO t) andalso not (exists_subterm is_Var t) + +fun dest_TruepropAll (Const ("all", _) $ Abs(s,ty, t)) = + (Const (@{const_name "HOL.All"}, (ty --> HOLogic.boolT) --> HOLogic.boolT) + $ Abs(s,ty , dest_TruepropAll t)) + |dest_TruepropAll t = HOLogic.dest_Trueprop t; + + +fun exists_prog prog = Term.exists_subterm + (fn Free (x, _) => member (op =) prog x + | Const (c, _) => member (op =) prog (Long_Name.base_name c) (* FIXME proper internal name!? *) + | _ => false); + +(* complex type instantiations ... *) + +fun groundT_thm context bound candTs thm = + let val ctxt = Context.proof_of context + val thy = Proof_Context.theory_of ctxt + val thm = Thm.varifyT_global thm + val tvars = Term.add_tvars (list_comb(Thm.concl_of thm,Thm.prems_of thm)) [] + fun ctyp_of_sort x so t = if Sign.of_sort thy (t,so) + then (SOME((x,so), Thm.ctyp_of ctxt t) + handle _ => NONE) (* FIXME avoid handle _ *) + else NONE + fun prodS [] _ = [] + |prodS (a::R) B = (map(fn x => a::x) B) @ (prodS R B) + + fun compute_instsT [] = [[]] + |compute_instsT ((x,so)::R) = prodS (List.mapPartial (ctyp_of_sort x so) candTs) + (compute_instsT R) + in + map (fn x => Drule.instantiate_normalize (x,[]) thm) + (Library.take bound (compute_instsT tvars)) + end; + +(* test + + val thm' = groundT_thm thy + (#type_range_bound(rep_testenv te)) + (#type_candicates(rep_testenv te)) + thm + + *) + + + +(* *********************************************************************** *) +(* *) +(* SOME more universal tacticals -> tctical.ML *) +(* *) +(* *********************************************************************** *) + +fun thm |$> tac = + case Seq.pull (tac thm) of + SOME (m,_) => m + |_ => error"|$> : TACTIC FAILED" + +fun TRY' tac n = TRY(tac n) (* standard combinator, + not implemented so far in Isabelle *) + +fun REPEAT' tac n = REPEAT (tac n) + +fun COND' testfun thenf elsef x prf = + if testfun x prf then thenf x prf else elsef x prf; + + +fun (k DOWNTO (n:int)) = fn tac => (fn st => + let fun doall x = if x < n + then all_tac + else ((tac x) THEN doall(x-1)) + val m = Thm.nprems_of st + in if k <= 0 orelse k > m + then all_tac st + else doall k st + end) + + +fun DEPTH_SPLIT tac n st = +(* PRE tac should only exhausting - i.e. increase + the number of subgoals finitely many times + POST if tac never fails, DEPTH_SPLIT tac will not fail. 
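+      Example (editorial note): if (tac n) replaces subgoal n by k+1 new
+      subgoals, DEPTH_SPLIT re-applies itself to each of the subgoals
+      n .. n+k (processed from n+k down to n), so splitting continues
+      until tac no longer increases the number of subgoals.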
+ *) + let val m = Thm.nprems_of st + val res = st |$> (tac n) + val m' = Thm.nprems_of res + in if m' > m + then ((n + m' - m) DOWNTO n)(DEPTH_SPLIT tac) (res) + else all_tac res + end + +fun SOLVE_ASMS ctxt tac n state = + let val goal = Logic.get_goal (Thm.prop_of state) n + val params = Logic.strip_params goal + val prems = map HOLogic.dest_Trueprop (rev(Logic.strip_imp_prems goal)) + val eqTrueI2 = @{lemma "[|P;P |] ==> True" by auto} + fun decomp thm n = let val mm = (thm RS conjunct1) + in (dtac ctxt (mm RS eqTrueI2) n) THEN + (etac ctxt @{thm TrueE} n) THEN + (decomp (thm RS conjunct2) n) end + handle THM _ => (dtac ctxt (thm RS eqTrueI2) n) THEN + (etac ctxt @{thm TrueE} n) + + in case prems of + [] => all_tac state + |a::r => let val prem = List.foldl HOLogic.mk_conj a r + val prem'= (Term.subst_bounds ((rev(map Free params)),prem)) + val thm = Goal.prove ctxt [] [] (HOLogic.mk_Trueprop prem') tac + in (decomp thm n) state end + handle _ => no_tac state (* could not solve asms ... *) (* FIXME avoid handle _ *) + end + + +fun auto_solver thms {prems: thm list, context: Proof.context} state = + let val thm' = state |$> (TRY(safe_tac (context delrules [notI]))) + val goals = Thm.nprems_of thm' + fun solve_thm thm = FIRSTGOAL (fn n => (rtac context thm n) THEN + (IF_UNSOLVED (SOLVE(auto_tac context)))) + fun solve_thms [] = no_tac + |solve_thms (a::R) = solve_thm a ORELSE solve_thms R + in if goals > 0 then (solve_thms thms) thm' else all_tac thm' + end + + +(* *********************************************************************** *) +(* *) +(* SOME Testing specific tacticals *) +(* *) +(* *********************************************************************** *) + +(* PRE n>0 and n <= nprems_of thm *) +fun is_THYP n thm = is_thyp(Logic.nth_prem(n, Thm.prop_of thm)) + +fun is_PO n thm = is_po(Logic.nth_prem(n, Thm.prop_of thm)) + +fun is_TEST n thm = is_neither_thyp_nor_PO(Logic.nth_prem(n, Thm.prop_of thm)) + +fun ONLY_CASES tac = COND' is_TEST tac (K all_tac) + +fun ALLCASES_before k tac = (k DOWNTO 1) (ONLY_CASES tac) + +fun ALLCASES tac thm = (ALLCASES_before (Thm.nprems_of thm) tac thm) + +fun ONLY_POS tac = COND' is_PO tac (K all_tac) + + +fun unfold ctxt rules = asm_full_simp_tac (put_simpset HOL_basic_ss ctxt addsimps rules) + +(* Optimization potential: safe_tac against something more + specialized should do ... *) +fun if_splitter ctxt cs = asm_full_simp_tac (fold Splitter.add_split @{thms if_splits} (put_simpset HOL_basic_ss ctxt)) + THEN' TRY' (fn n => SELECT_GOAL (safe_tac cs) n) + +(* *********************************************************************** *) +(* *) +(* basic tactics ... *) +(* *) +(* *********************************************************************** *) + +fun strip_tac ctxt i = REPEAT (resolve_tac ctxt [impI, allI] i); (* declared Legacy in 2013-2*) + +(* Pre : A1 ==> .. ==> An ==> An+1 + Post: A1 --> .. 
--> An --> An+1 *) +fun mp_fy ctxt n = (REPEAT(etac ctxt rev_mp n)); (* Version replaced by more efficient one *) +fun mp_fy ctxt n = (REPEAT(ematch_tac ctxt [rev_mp] n)); + +fun all_ify ctxt excpt n state = + let val subgoal = Logic.nth_prem(n, Thm.prop_of state) + val params = Logic.strip_params subgoal + val frees = Term.add_frees subgoal [] + fun sbs(n,t) = res_inst_tac ctxt [((("x",0), Position.none), n)] [] spec + val vars = filter_out (member (op =) excpt o fst) (params @ frees) + in (if null vars + then all_tac + else (List.foldr(fn(x,ta) => (sbs x n) THEN ta) (prune_params_tac ctxt) vars) + (* NEW PATCH rev 12197*) + THEN + all_ify ctxt excpt n (* sbs can fail to eliminate *all* variables + due to alpha-renaming of parameters in + res_inst_tac. This recursive call makes + all_ify exhaustive.*) + (* END NEW PATCH rev 12197*) + ) + (state) + end + + +(*FIXME Free's are actually local consts, and cannot be closed in general + without disrupting Isar locality*) +fun allify_frees_tac ctxt excpt = SUBGOAL (fn (subgoal, i) => + let val thy = Proof_Context.theory_of ctxt + val a = (("'a", 0), @{sort type}) + fun spec_tac (x, T) = + rtac ctxt (Thm.instantiate ([(a, Thm.ctyp_of ctxt T)], [((("x", 0), T), Thm.cterm_of ctxt (Free (x, T)))]) + (Thm.transfer thy @{thm spec})) i + val vars = filter_out (member (op =) excpt o fst) (Term.add_frees subgoal []) + in + if null vars then all_tac + else EVERY (map spec_tac vars) + end) + +fun thyp_ify ctxt n = + EVERY[mp_fy ctxt n, all_ify ctxt [] n, rtac ctxt (@{thm THYP_def} RS @{thm iffD1}) n] + +fun vars_to_frees t = let + val termvars = Term.add_vars t [] + val freenames = Term.add_free_names t [] + val varnames = map (fn ((s, a),_) => s^(Int.toString a)) termvars + val newfreenames = Name.variant_list freenames varnames + val insts = map (fn (x as (_, T), freename) => (Var x, Free (freename, T))) (termvars ~~ newfreenames) +in + Term.subst_atomic insts t +end + +fun convert_goals_to_metahyps (test:(term -> bool)) thm = + let val thy = Thm.theory_of_thm thm + in map ((fn prop => Thm.forall_elim_vars 0 (Thm.assume (Thm.global_cterm_of thy prop))) o vars_to_frees) + (filter test (Thm.prems_of thm)) + end; + + +fun instantiate_tac ctxt insts n thm = + let val goal = Thm.global_cterm_of (Thm.theory_of_thm thm) (Logic.nth_prem(n, Thm.prop_of thm)) + in (rtac ctxt (Drule.instantiate_normalize ([],insts)(Thm.trivial goal)) n +) thm end + +fun var_hyp_subst_tac ctxt no state = + let val prms = Logic.strip_imp_prems(Logic.nth_prem(no, Thm.prop_of state)); + fun is_eq (Const (@{const_name HOL.eq}, _) $ (Var _) $ _) = true + |is_eq (Const (@{const_name HOL.eq}, _) $ _ $ (Var _)) = true + |is_eq _ = false + val i = Library.find_index(fn (Const(@{const_name Trueprop},_)$X) => is_eq X) + (prms) + fun mks thm = if i >= 0 + then ((rotate_tac i no) THEN etac ctxt @{thm thin_refl} no) thm + else no_tac thm + in ((REPEAT mks) + THEN + (full_simp_tac ctxt no)) state + end; + + +(* Code seems to be superseeded by Goals.PARALLEL_GOALS in Isabelle2013-2 ... 
*) +(* parallel tactic application, without any sanity checks *) + +local exception FAILED of unit in + +fun PARALLEL tac st = + let + fun try_tac g = + (case SINGLE tac g of + NONE => raise FAILED () + | SOME g' => g'); + val goals = Drule.strip_imp_prems (Thm.cprop_of st); + val results = Par_List.map (try_tac o Goal.init) goals; + in all_tac st (* ALLGOALS (fn i => retrofit i 1 (nth results (i - 1))) st *) + end + handle FAILED () => Seq.empty; + +end; + + + +(* A specialized map-reduce pattern for constraint-solving in TestGen. The + PO's were extracted, attempts to their solution parallized, and the retrofit + of the results into the test-theorem is again sequential, since it instantiates + the meta-variables. *) +fun PARALLEL_TRYSOLVE_POs ctxt test_solve_tac st = + let + val pon = Config.get ctxt TestEnv.pon + fun filter_pos filter S = + let fun h _ _ [] = [] + | h f n (a::S) = if f a then (n,a)::(h f (n+1) S) else (h f (n+1) S) + fun h_pon _ _ _ [] = [] + | h_pon f n k (a::S) = if f a then (if k = pon then (n,a):: nil + else (h_pon f (n+1) (k+1) S)) + else (h_pon f (n+1) k S) + in if pon = 0 then h filter 1 S else h_pon filter 1 1 S end; + fun try_solve ct = ((Option.map (Goal.finish ctxt)) o + (SINGLE (test_solve_tac ctxt))) + (Goal.init ct); + val goals = Drule.strip_imp_prems (Thm.cprop_of st); + val po_trms = (filter_pos (is_po o Thm.term_of) goals); + val jdmts = Par_List.map (fn(n,ct) => (n, try_solve ct)) po_trms + + in Seq.succeed(foldr (fn ((k, SOME a),thm) => (a RSN (k, thm)) + |((_, NONE),thm) => thm) st jdmts) + end + + (* val PARALLEL = PARALLEL_GOALS *) (* Experiment. See above. bu *) + + +(* applies exhaustively tactic tac ctxt i on subgoal i and all its descendants. + tac can solve current goal i. + PRE: tac must eventually terminate, also in all descendants ( otherwise CLOSURE loops). + PRE: current goal i must be in {1 .. nprems_of st}. + PRE: ctxt must be the ProofContext of st. 
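+   Usage sketch (editorial illustration; the type name is a placeholder):
+       CLOSURE (case_tac_typ ctxt ["MyThy.mytype"]) 1 st
+   case-splits subgoal 1 and then revisits every subgoal produced by the
+   split; goals on which the tactic fails are simply skipped.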
+*) +fun CLOSURE tac i st = + let fun closure i k st = if i > k then all_tac st + else let val m = Thm.nprems_of st + in case SINGLE (tac i) st of + NONE => closure (i+1) k st + | SOME st' => let val m' = Thm.nprems_of st' + in if m' = m-1 (* tac eliminated subgoal *) + then closure i (k-1) st' + else closure i (k+(m'-m)) st' + (* superfluous case distinct *) + end + end + in closure i i st end + + + +(* *********************************************************************** *) +(* *) +(* special tactics for TestGen *) +(* *) +(* *********************************************************************** *) + +fun cpat ctxt str = Thm.cterm_of ctxt (Proof_Context.read_term_pattern ctxt str); + +(*PRE : Must not be THYP or PO *) +(* Known Bug: does not work for arbirary mixtures between HOL and Pure Quantifications *) +fun uniformityI_tac ctxt prog = + (if Config.get ctxt no_uniformity + then (fn _ => all_tac) + else SELECT_GOAL + let + val cert = Thm.cterm_of ctxt; + + val core_tac = + SUBGOAL (fn (term, i) => + let fun conv_All (Const (@{const_name All}, ty) $ Abs(n, ty', tt)) = + (Const (@{const_name Ex}, ty) $ Abs (n, ty', conv_All tt)) + |conv_All (Const (@{const_name HOL.implies}, _) $ P $ Q) = + (Const (@{const_name HOL.conj}, [boolT,boolT]--->boolT) $ P $ conv_All Q) + | conv_All tt = tt; + (* val tt = dest_TruepropAll term; *) + val tt = dest_Trueprop term; + val thyp = mk_Trueprop (@{const THYP} $ (@{const HOL.implies} $ conv_All tt $ tt)); + in rtac ctxt (Thm.instantiate ([], [((("psi",0),propT), cert thyp)]) cut_rl) i end); + + val x = cpat ctxt "?x::?'a"; + val T = Thm.typ_of_cterm x; + val count = Unsynchronized.ref 0; + fun new_var () = cert (Var (("?X" ^ Int.toString (Unsynchronized.inc count) ^ "X", 0), T)); + fun exI_tac i st = rtac ctxt (Thm.instantiate ([], [((("x",0),T), new_var ())]) @{thm exI}) i st; + fun check_tac' (t,i) = case t of + @{term Trueprop} $ t' => check_tac' (t',i) + | @{term HOL.conj} $ u $ _ => if exists_prog prog u then + no_tac + else all_tac + + val check_tac = SUBGOAL check_tac' + val intro_PO = (rtac ctxt @{thm PO_I} 1) + THEN (check_tac 1) + THEN REPEAT_DETERM ((rtac ctxt @{thm PO_grow} 1) + THEN (check_tac 1)) + + val finish_tac = (intro_PO THEN (rtac ctxt @{thm "conjI"} 1) THEN (strip_tac ctxt 2)) ORELSE (strip_tac ctxt 1) + in + EVERY [mp_fy ctxt 1, all_ify ctxt prog 1, + core_tac 1, etac ctxt @{thm THYP_app1} 1, + (REPEAT_DETERM o exI_tac) 1, + finish_tac] + end) + + +(* ************************************************************************* *) +(* data separation lemma generation ... *) +(* ************************************************************************* *) + + +fun refine_data_separation_lemma changed ctxt bal prem_no cands thm = +(* - bal controls of the refinement is in balancing mode + (only parameters whose occurrence in the term is + has strictly less than maximum height) + or not. + - prem_no indicates the number of the premise + - cands is the global list of candidates. + In balancing mode, parameters in maximal depth + position were eliminated. 
+ - PRE maxidx is 0 for all mvars + - POST maxidx is 0 for all mvars + + *) +let + val thy = Proof_Context.theory_of ctxt + val prem = Logic.nth_prem(prem_no, Thm.prop_of thm) + val params = Logic.strip_params(prem) + val cands' = if bal then + let val term = HOLogic.dest_Trueprop(hd + (Logic.strip_assums_hyp prem)) + val (k,mp) = max_depth(Term.subst_bounds + ((rev(map Free params)),term)) + in filter(fn p => forall(fn p'=> not(p=p')) (mp)) cands end + else cands; + + val var_no = find_index (fn(s,t)=> exists(fn (name,ty)=> + s = name andalso t = ty) cands') + params + val (name,ty) = if var_no > ~1 then List.nth(params,var_no) + else raise (ERROR("no param found")) + val Type(tn,_) = ty; + val m = get_case_split_of_dt thy tn; + val n = m RSN (prem_no, thm); + (* pre: all occurrences of ?indname t1 ... tn have the same + arguments and types and occur in a critical pattern : + ?indname t1 ... tn = C x1 ... xn + ... + The Thm.adjust_maxidx_thm reflects the fact, that only one + MVar y should have been left - the others are erased due to + hypsubst internally + *) + fun find_mvar_pat indname goal_no thm = + let val goal = Logic.get_goal thm goal_no + val params = Logic.strip_params goal + fun cc(Const(@{const_name Pure.imp}, _) + $ (Const(@{const_name Trueprop}, _) + $ (Const(@{const_name HOL.eq},t) $ A $ B)) + $ C) = (case strip_comb(subst_bounds( + rev(map Free params), A)) of + (Var(s,_),S) =>(if s = indname + then (map dest_Free S, head_of B) + else cc(C)) + | (_, _) => cc C) + | cc _ = error("find_mvar_pat: No pattern found") + in cc (strip_all_body goal) end + fun lifter name S = + let fun ll k [] = if 0 <=k then Bound(k) + else error"find_mvar:impossible lifter" + |ll k ((s,t)::R) = + if s = name then Abs(s,t, ll 0 R) + else Abs(s,t, ll (k+1) R) + in ll (~(length S)) S end + fun typ_of_lifter (name:string) S = + let fun tll S [] = error"find_mvar:impossible type for lifter" + |tll S ((s,t)::R) = + if name = s + then ((rev S)@(t::(map snd R)), t) + else tll (t::S) R + in (op --->)(tll [] S) end + val (lifter_params, constr) = find_mvar_pat ("y",1) prem_no (Thm.prop_of n) + (* ("y",1) is a fact from the invariant maxidx=0 (see below) *) + val subst0 = lifter name lifter_params + val ty_lift = fastype_of subst0 + val ty_constr = fastype_of constr + val subst = Thm.cterm_of ctxt subst0 + val ty_lift_body = body_type ty_lift + val ty_lift_constr = body_type ty_constr + val tyenv = Sign.typ_match thy (ty_lift_constr,ty_lift_body) Vartab.empty + handle Type.TYPE_MATCH => Vartab.empty + (* Hack that works for cases, where non-recursive positions in a type + were GROUND types (like color in RBT_tree). *) + fun tysubs2ctyps(x,(s,t)) = ((x,s),Thm.ctyp_of ctxt t) (*(Thm.ctyp_of ctxt (TVar(x,s)),Thm.ctyp_of ctxt t) *) + val tsubst = map tysubs2ctyps (Vartab.dest tyenv) + val () = (changed:=true) + val var = (("y",1),ty_lift) + val result = (Drule.instantiate_normalize (tsubst, [(var,subst)]) n) + |$> (ALLGOALS(fn n => TRY(hyp_subst_tac_thin true ctxt n))) + |$> prune_params_tac ctxt +in Thm.adjust_maxidx_thm 0 result + (* establishes POST and makes this fact therefore invariant. *) + (* More safe, but also more expensive variant: standard result. *) +end + +(* This is the core procedure of the data_separation_lemma generation, and, + in a way, the core of the test case generator itself. + + It proceeds as follows: in a given theorem thm (usually an exhaustion lemma + itself), the premisses (having the form !!x1..xn. A x1 .. xn ==> B in + general) were selected and "refined" in a "sweep". 
This means that in a + sweep, for each premisse an xi is selected (which must have a data-type, + say ti) and the exhaustion theorem of ti is forward resolved into this + premisse; cleaning it up (by prems_hyp_subst_tac) also erases xi as + parameter of this premisse. + In a next sweep, another xj may be selected and so forth, up to the moment + were no parameters remained in the premisse that existed before the initial + sweep (this the list parameters is the "cand" (candidates) x1 .. xj for + all premisses which precomputed.) + + Then, the candidate list may be recomputed and the process repeated up to + n times. Since any group of sweeps erases one generation of parameters, + the parameter n corresponds to "depth+1" of the data term occuring in the + premisse increases, i.e. from: + + [| ?y = Leaf ==> ?P; + !!a tree1 tree2. ?y = Node a tree1 tree2 ==> ?P |] + ==> ?P + + we get: + + [| ?y = Leaf ==> ?P; !!a. ?y = Node a Leaf Leaf ==> ?P; + !!a aa tree1 tree2a. ?y = Node a Leaf (Node aa tree1 tree2a) ==> ?P; + !!a aa tree1a tree2a. ?y = Node a (Node aa tree1a tree2a) Leaf ==> ?P; + !!a aa tree1a tree2a ab tree1 tree2b. + ?y = Node a (Node aa tree1a tree2a) (Node ab tree1 tree2b) ==> ?P + |] ==> ?P + + This picture is slightly complicated by the fact that Isabelle does not + change the parameters, but may reintroduce parameter erased by a previous + sweep during the forward resolution step. Thus, the procedure may loop. + + Instead of not erasing the parameters during lemma-refinement (this + leads to very large liftings of the meta-variables over these + parameter lists and is thus expensive), we chose to use + a particular test for "maximal" parameters in order to avoid this problem. + + *) +fun nrefine_data_separation_lemma changed ctxt 0 thm = thm + | nrefine_data_separation_lemma changed ctxt n thm = + let + val thy = Proof_Context.theory_of ctxt + val isaDataType = fn(_,ty)=> isDataType thy ty + val thm_params = maps Logic.strip_params (Thm.prems_of thm) + val cands = distinct (op=) (filter isaDataType thm_params) + fun rds b(prno,t) = refine_data_separation_lemma changed ctxt b prno cands t + handle _ => t (* FIXME avoid handle _ *) + val () = (changed:=false) + fun sweep b th = List.foldr (rds b) th (1 upto (length(Thm.prems_of th))) + val thm1 = sweep false thm (* debalancing phase *) + fun repeat_till_stable thm = let val () = (changed:=false) + val thm' = sweep true thm + in if !changed + then repeat_till_stable thm' + else thm' + end + in nrefine_data_separation_lemma changed ctxt + (n-1) + (if !changed + then thm1 (* debalancing phase had no effect *) + else repeat_till_stable thm1) (* balancing phase *) + end; + +(* NOTE: Currently, this core routine is restricted to + "real free" variables; parameters will not be + considered as a simple means to avoid looping ... 
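+
+   Editorial summary of the function below: data_separation_tac reads the
+   configured depth, instantiates the depth-refined case-split theorem of the
+   variable's datatype for the given variable, introduces the regularity test
+   hypothesis THYP_E_reg (i.e. THYP (depth < size x --> _)) for data deeper
+   than the bound, and finally thins out the introduced equation on the
+   split variable.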
*) +fun data_separation_tac ctxt var no state = + let + val thy = Proof_Context.theory_of ctxt + val depth = Config.get ctxt TestEnv.depth + val (na,Type(tn,_)) = var + fun ds_thm tn = nrefine_data_separation_lemma (Unsynchronized.ref false) + ctxt depth + (get_case_split_of_dt thy tn) + + val data_sep_thm = Drule.export_without_context (ds_thm tn) + val brchs = length(Thm.prems_of data_sep_thm) + in ((res_inst_tac ctxt [((("y",0),Position.none),na)] [] data_sep_thm no) + THEN + (res_inst_tac ctxt [((("x",0),Position.none),na), + ((("k",0),Position.none),Int.toString depth)] + [] + @{thm THYP_E_reg} + (no + brchs - 1)) + THEN (thin_tac ctxt (na^"= _") [] (no+brchs))) state + end; + +(* Copied from Isabelle2011 old_term.ML sources, for providing old variable order *) +(*Accumulates the Frees in the term, suppressing duplicates.*) +fun add_term_frees (t, frees: term list) = case t of + Free _ => Ord_List.insert Term_Ord.term_ord t frees + | Abs (_,_,body) => add_term_frees(body,frees) + | f$t => add_term_frees (f, add_term_frees(t, frees)) + | _ => frees; + +fun term_frees t = add_term_frees(t,[]); + +(* Included for complete backward compatibility *) +fun data_separation_old_tac ctxt depth no state = + let + val thy = Proof_Context.theory_of ctxt + val prem = Logic.nth_prem(no, Thm.prop_of state) + val params = Logic.strip_params(prem) + val term = HOLogic.dest_Trueprop(Logic.strip_assums_concl prem) + val frees = map dest_Free (term_frees term) +(* val cands = filter (fn (_,ty) => isDataType thy ty) (params@frees) *) + val cands = filter (fn (_,ty) => isDataType thy ty) (frees) + fun ds_thm tn = + nrefine_data_separation_lemma (Unsynchronized.ref false) ctxt depth (get_case_split_of_dt thy tn) + in (case cands of + [] => no_tac + | (na,Type(tn,_))::_ => + let val data_sep_thm = Drule.export_without_context (ds_thm tn) + val brchs = length(Thm.prems_of data_sep_thm) + in (res_inst_tac ctxt [((("y",0),Position.none),na)] [] data_sep_thm no) + THEN (res_inst_tac ctxt [((("x",0),Position.none),na), + ((("k",0),Position.none),Int.toString depth)] [] + @{thm THYP_E_reg} + (no + brchs - 1)) + THEN (thin_tac ctxt (na^"= _") [] (no+brchs)) + end) + (state) +end + + +(* "Small", non-recursive Version of gen_test_cases: + (TODO for future releases: Integration into the testgen-Kernel). + searches parameter in goal no with typename tname and performs case-split on this + parameter. Note that the type-name must be the full name. + PRE: current goal i must be in {1 .. nprems_of st}. + PRE: ctxt must be the ProofContext of st. *) + (* op member : ('a * 'b -> bool) -> 'b list -> 'a -> bool *) + + fun case_tac_typ ctxt tnames no thm = + let + val prem = Logic.nth_prem(no, Thm.prop_of thm) + val params = Logic.strip_params(prem) + val max = Thm.nprems_of thm + val params = params (* @ free variables TODO !!! *) + fun ty_search (_,Type(s,_)) = member (op =) (tnames) (s) + |ty_search (_,_) = false + fun REP tac st = let val max' = Thm.nprems_of st + in (max'-max+no DOWNTO no) tac st end + in case List.find ty_search params of + SOME (X,_) => EVERY[Induct_Tacs.case_tac ctxt X [] NONE no , + REP (hyp_subst_tac_thin true ctxt), + unfold_tac ctxt [@{thm triv_forall_equality}] ] thm + (* probably not the most efficient - try simp only *) + | NONE => no_tac thm + end; + + + +(* ************************************************************************* *) +(* normal form computation ... 
*) +(* *************************** ********************************************** *) + +fun to_TNF ctxt prog no state = + let + val thy = Proof_Context.theory_of ctxt; + val term = Thm.prop_of state + val prems = Logic.prems_of_goal term no + val concl = Logic.concl_of_goal term no + fun term_string_of_goal_no () = (Pretty.unformatted_string_of o (Syntax.pretty_term ctxt)) + (Logic.list_implies(prems,concl)) + val prems' = filter (exists_prog prog) prems; + val concl' = filter (exists_prog prog) [concl]; + fun subst t = (Thm.cterm_of ctxt (Var(("P",0),HOLogic.boolT)), + Thm.cterm_of ctxt t); + (* swap_tac : tactic * term -> tactic; takes one term containing + "prog" and swap it into the conclusion as disjoint *) + val swap_accumulate = @{lemma "[| P ; ~P | Q |] ==> Q" by auto} + fun swap_tac (tm,tc) = tc THEN + (Term_Tactics.eres_terminst_tac + ctxt + [] + [subst (HOLogic.dest_Trueprop tm)] + swap_accumulate + no) + (* rev_tac swaps term "a" into conclusion *) + fun rev_tac thy a = Term_Tactics.eres_terminst_tac + ctxt + [] + [subst (HOLogic.dest_Trueprop a)] + @{thm rev_notE} + no + in ((case (prems',concl') of + ([],[]) => error ("Subgoal " ^Int.toString(no) + ^" can not be converted into TNF: \n \n" ^ + term_string_of_goal_no()) + (* case that conclusion does not contain "prog", but several + assumptions do: pick one of them, revert first *) + |(a::R,[]) =>(rev_tac thy a + THEN + (List.foldl swap_tac all_tac R)) + |(R,_) => (List.foldl swap_tac all_tac R) + ) + THEN + (simp_tac (put_simpset HOL_ss ctxt) no)) state + end; + + + +fun normalize_TNF ctxt prog top = + List.foldl (fn (arg,t) => t THEN to_TNF ctxt prog arg) + all_tac + ((length (prems_of top)) downto 1) + top + + + +(* TODO: adb *) +fun minimize_TNF ctxt = distinct_subgoals_tac +(* This should do: + - sorting assumptions in all subgoals + - sorting parameters of subgoals + - sortieren der clauses - + weniger asms zuerst (defer_tac) + - eliminieren von assumptions subsumed in other clauses + [| P;R |] ==> A; + [| P;Q;R |] ==> A + ------------------ + [| P;R |] ==> A; + [| P;R |] ==> A + durch thin_tac + - eliminieren von subsumed or's, + a particular weakness of our procedure so far. + [| P;R |] ==> A; + [| ~P;Q |] ==> B + [| R; Q|] ==> A|B; + ------------------ + [| P;R |] ==> A; + [| ~P;Q |] ==> B + - finally distinct_subgoals_tac +*) + + +(* orig version *) +(* +fun gen_test_case_tac thy clasimp depth steps prog = + EVERY[ALLGOALS (asm_full_simp_tac (snd clasimp)), + safe_tac ((global_claset_of thy) addss (global_simpset_of thy)), + normalize_TNF thy prog, + minimize_TNF, + ALLGOALS (fn n => TRY(uniformityI_tac prog n))]; +*) + + + +(* ************************************************************************* *) +(* test case generation ... 
*) +(* ************************************************************************* *) + +fun SORT ctxt rules n thm = let + val matches = maps (fn rule => rule ctxt n thm) rules + val sorted = map (fn (i,tac) => tac) + (sort (fn (x,y) => rev_order (int_ord (fst x, fst y))) matches) +in + (EVERY sorted) thm +end + + + +fun EXEC ctxt tac str = EVERY[if Config.get ctxt profiling + then Clocks.start_clock_tac str + else all_tac, + if Config.get ctxt trace + then print_tac ctxt ("\n:"^str^"\n") + else all_tac, + tac, + if Config.get ctxt profiling + then Clocks.stop_clock_tac str + else all_tac] + +fun EXEC' ctxt tac str n = EXEC ctxt (tac n) str + +fun PRINT_THM ctxt no = + (fn state => (if Config.get ctxt trace + then let val prem = Logic.nth_prem(no, prop_of state) + val str = (Pretty.string_of o (Syntax.pretty_term ctxt)) prem + in print_tac ctxt ("\n goal "^Int.toString(no)^"::"^str^"\n") end + else all_tac) state) + +fun finalize_tac ctxt prog = + EXEC ctxt (ALLCASES(TRY'(uniformityI_tac ctxt prog))) "main uniformity NF" + +fun gen_test_case_tac ctxt prog = let + val steps = Config.get ctxt TestEnv.steps + val testenv = TestEnv.get_testenv ctxt + val rules = TestEnv.get_test_derivation_rules testenv + + val begin_profiling_tac = if Config.get ctxt profiling + then Clocks.start_clock_tac "unnamed_test_thm" THEN Clocks.start_clock_tac "gen_test_cases" + else all_tac + + val end_profiling_tac = if Config.get ctxt profiling + then Clocks.stop_clock_tac "gen_test_cases" THEN Clocks.stop_clock_tac "unnamed_test_thm" + else all_tac + + fun pre_normalize_TNF_tac ctxt = TestEnv.get_pre_normalize_TNF_tac (TestEnv.get_testenv ctxt) ctxt + + val sort_tac = SELECT_GOAL (SORT ctxt rules 1) +in + EVERY[begin_profiling_tac, + EXEC ctxt (ALLCASES((asm_full_simp_tac ctxt))) "pre-simplification", + EXEC ctxt (REPEAT_DETERM_N steps (ALLCASES sort_tac)) "main completed", + EXEC ctxt (pre_normalize_TNF_tac ctxt) "pre_norm", + EXEC ctxt (normalize_TNF ctxt prog) "TNF", + if Config.get ctxt no_finalize + then all_tac + else finalize_tac ctxt prog, + end_profiling_tac] +end + +fun data_separation_rule ctxt no state = +let + val thy = Proof_Context.theory_of ctxt + val prem = Logic.nth_prem(no, prop_of state) + (*val params = Logic.strip_params(prem) *) + val term = HOLogic.dest_Trueprop(Logic.strip_assums_concl prem) + val frees = Term.add_frees term [] + (*val cands = filter (fn (_,ty) => isDataType thy ty) (params@frees) + : LOOPS AT PRESENT SINCE gen_test_cases active on frees *) + val cands = filter (fn (_,ty) => isDataType thy ty) (frees) + fun tac cand = TRY (CHANGED_PROP (ALLCASES (data_separation_tac ctxt cand)) + THEN (ALLCASES (TRY' (hyp_subst_tac_thin true ctxt)))) + THEN (if Config.get ctxt trace + then print_tac ctxt "After data separation" + else all_tac) +in + map (fn cand => (100, tac cand)) cands +end + +fun to_HCNF_rule ctxt no state = + [(50, EXEC ctxt (TestEnv.get_pre_safe_tac (TestEnv.get_testenv ctxt) ctxt) "Simp" + THEN EXEC ctxt (TRY (safe_tac ctxt)) "HCN")] + +fun minimize_rule ctxt no state = + [(10, EXEC ctxt (TestEnv.get_pre_minimize_TNF_tac (TestEnv.get_testenv ctxt) ctxt) "pre_minimize" + THEN EXEC ctxt (minimize_TNF ctxt) "MinimTNF" + THEN (ALLCASES(asm_full_simp_tac ctxt)) + THEN (TRY (safe_tac ctxt)))] + +(* Included for complete backward compatibility *) +fun gen_test_case_old_tac ctxt prog = + let val testenv = TestEnv.get_testenv ctxt + val depth = Config.get ctxt TestEnv.depth + val steps = Config.get ctxt TestEnv.steps + + val prep_HCNF = TestEnv.get_pre_safe_tac testenv + val 
to_HCNF = TRY (safe_tac ctxt) + val pre_normalize_TNF_tac = TestEnv.get_pre_normalize_TNF_tac testenv + val pre_minimize_TNF_tac = TestEnv.get_pre_minimize_TNF_tac testenv + fun trace_tac b str = (if Config.get ctxt trace + then print_tac ctxt ( "\n"^b^":"^str^"\n") + else all_tac) + fun exec phase b str = EVERY[if Config.get ctxt profiling + then Clocks.start_clock_tac str + else all_tac, + phase, trace_tac b str, + if Config.get ctxt profiling + then Clocks.stop_clock_tac str + else all_tac] + + fun standard k b = + EVERY[if Config.get ctxt trace then print_tac ctxt "Enter" else all_tac, + exec (prep_HCNF ctxt) b "Simp", + exec (to_HCNF) b "HCN", + exec (pre_normalize_TNF_tac ctxt) b "pre_norm", + exec (normalize_TNF ctxt prog) b "TNF", + exec (pre_minimize_TNF_tac ctxt) b "pre_minimize", + exec (minimize_TNF ctxt) b "MinimTNF"] + val dst = data_separation_old_tac ctxt depth + + fun pairself f (x, y) = (f x, f y); + + (*True if the two theorems have the same prop field, ignoring hyps, der, etc.*) + val eq_thm_prop = op aconv o pairself Thm.full_prop_of; + + fun main k st =(if k < steps (* not yet all frees per goal + less steps exhausted*) + then (ALLCASES(fn n => TRY(dst n) )) THEN + (* try exhaustion per goal *) + (COND(fn st' => not (eq_thm_prop(st, st'))) + (* something 's changed *) + ( (ALLCASES (fn n => TRY((hyp_subst_tac_thin true ctxt) n) ) ) (* REALLY ? *) + THEN (standard k "1") + THEN (main (k+1)) + ) + (standard k "2")) + else standard k "0" (* run out of steps *)) (st) + val begin_profiling_tac = if Config.get ctxt profiling then + Clocks.start_clock_tac "unnamed_test_thm" THEN Clocks.start_clock_tac "gen_test_cases" + else + all_tac + + val end_profiling_tac = if Config.get ctxt profiling then + Clocks.stop_clock_tac "gen_test_cases" THEN Clocks.stop_clock_tac "unnamed_test_thm" + else + all_tac + in EVERY[begin_profiling_tac, + exec (ALLCASES((asm_full_simp_tac ctxt))) "" "pre-simplification", + exec (main 0) "" "main completed", + exec (ALLCASES(TRY'(uniformityI_tac ctxt prog))) "" "main uniformity NF", + end_profiling_tac] + end; + + +(* ************************************************************************* *) +(* *) +(* testdata generation ... 
*) +(* *) +(* ************************************************************************* *) + + +fun abs_data_tac ctxt atdata = + let fun single_abs_data_tac thm = + Subgoal.FOCUS_PARAMS(fn {context, ...} => + SOLVE (Method.insert_tac context [thm] 1 THEN auto_tac context)) ctxt + in + FIRST' (map single_abs_data_tac atdata) + end + + +fun test_solve_tac ctxt atdata = + let val thy = Proof_Context.theory_of ctxt + val remove_po = EqSubst.eqsubst_tac ctxt [0] [@{thm PO_def}] + val total_iterations = Config.get ctxt TestEnv.iterations + val max_simple_iterations = 50 + val simple_iterations = Int.min(total_iterations, max_simple_iterations) + val further_iterations = Int.max(total_iterations - max_simple_iterations,0) + in + remove_po + THEN' PRINT_THM ctxt + THEN' (FIRST'[EXEC' ctxt (abs_data_tac ctxt atdata) "abs_data", + EXEC' ctxt (RandomBackend.random_inst_tac ctxt simple_iterations) "random_inst", + EXEC' ctxt (QuickCheckBackend.quickcheck_tac ctxt further_iterations) "quickcheck", + EXEC' ctxt (SMTBackend.testgen_smt_tac ctxt) "smt"]) + end + + + +fun is_solved n thm = + let fun is_unsolved_po i = not (null (inter (op =) (BackendUtils.premvars n thm) (BackendUtils.premvars i thm))) + andalso i <> n + in not (exists is_unsolved_po (1 upto (nprems_of thm))) + end + +(* Instantiate the remaining variables in test cases +with random terms *) + +fun finalize_test_tac ctxt n thm = let +in + (COND (is_solved n) (RandomBackend.single_rand_inst_tac ctxt (BackendUtils.premvars n thm)) all_tac) thm +end + + + +fun solve_all ctxt atdata state = + let (* adding a let expression.. in order to check if all free vars were instatiated by smt.. + if true then error code else solve_all*) + val PARALLEL_TRYSOLVE = PARALLEL_TRYSOLVE_POs ctxt (fn ctxt => test_solve_tac ctxt atdata 1) state + + val term = PARALLEL_TRYSOLVE |> Seq.list_of |> + (List.map prop_of) |> HOLogic.mk_list @{typ "term"} + val use_smt = Config.get ctxt TestEnv.use_smt; +in + if (use_smt andalso (Term.exists_subterm (Term.is_Var) term)) + then error("One or more free variables were not instantiated by the solver!") + + else state |$> PARALLEL_TRYSOLVE_POs ctxt (fn ctxt => test_solve_tac ctxt atdata 1) + |$> ALLCASES (finalize_test_tac ctxt) (* cannot parallelize here *) + +end + + + (* *********************************************************************** *) + (* Normalizer *) + (* *********************************************************************** *) + (* motivation: type instantiation may make + constraints of test-cases unsolvable. + Unsolvable cases should be removed before random-solving. + Since constraints with Mvars were not considered + by simptac and arith_tac, they were substituted + against arbitrary free vars aforehand. + +fun norm_tac ctxt n thm = + let val thy = Proof_Context.theory_of ctxt + val m = Thm.nprems_of thm + val prem = Logic.nth_prem(n, prop_of thm) + val k = length (Logic.strip_imp_prems(prem)) + val goal = Thm.trivial(cterm_of thy prem) + val tvars = OldTerm.term_vars prem + val insts = map(fn(x as Var((s,a),t))=> + (cterm_of thy x, + cterm_of thy (Free("XXX"^s^(Int.toString a),t)))) + (tvars) + val cleanup = List.foldr (fn (_,tac) => (atac (n)) THEN tac) + all_tac (1 upto k) + in ( + (rtac (instantiate ([],insts) goal) n) + THEN + (full_simp_tac (simpset_of ctxt) n) (* may erase goal n. + --> test case unsolvable. 
*) + THEN + (fn thm' => if (nprems_of thm' < m+k) (* goal erased *) + then cleanup thm' + else no_tac thm )(* report failure *) + )(thm) + end *) + +fun gen_test_data name context = + let val ctxt = Context.proof_of context + val te = TestEnv.get_testenv ctxt + val bound = Config.get ctxt TestEnv.type_range_bound + val candTs= #type_candicates(TestEnv.rep_testenv te) + val type_grounds = groundT_thm context bound candTs + val testthm = (the(Symtab.lookup(TestEnv.get_test_thm_tab te)name) + handle Option.Option => error("No test theorem: "^name^" available!")) + val atdata = (the(Symtab.lookup(TestEnv.get_absdata_tab te)name) + handle Option.Option => []) + val jdmts = map (solve_all ctxt atdata) (type_grounds testthm) + val te1 = TestEnv.test_thm_inst_tab_update (Symtab.update(name,jdmts)(TestEnv.get_test_thm_inst_tab te)) te + val prems = List.concat (map Thm.prems_of jdmts) + val data = map (Thm.cterm_of ctxt) (filter is_test_data prems) + val te2 = TestEnv.set_test_data_tab (name,data) te1 + val hyps = map (Thm.cterm_of ctxt) (filter is_thyp prems) + val te3 = TestEnv.set_test_thyps_tab (name,hyps) te2 + val pos = map (Thm.cterm_of ctxt) (filter is_po prems) + val te4 = TestEnv.set_unsolved_PO_tab (name,pos) te3 + + val t = LogThy.get_td_delta () + val _ = writeln (String.concat ["Test theorem (gen_test_data) '",name,"': ", + Int.toString (List.length data)," test cases in ", + Time.toString t, " seconds"]) + val _ = if not (null pos) + then writeln (String.concat ["Warning: There were ", Int.toString (List.length pos) , " unsolved POs."]) + else () + val _ = LogThy.append (String.concat [ + Context.theory_name (Proof_Context.theory_of ctxt), ", " ,name, ", ", + "test data, " ,Int.toString (List.length data), + ", " ,Time.toString t,"\n"]) + + in TestEnv.map_data (K te4) context end; + +fun select_goals pred thy name = let + val te = TestEnv.get_testenv_global thy +in + maps (convert_goals_to_metahyps pred) + (the(Symtab.lookup(TestEnv.get_test_thm_inst_tab te)name) + handle Option.Option => error("No data statement: "^name^" available!")) +end + +val get_test_data = select_goals is_test_data + +val get_test_hyps = select_goals is_thyp + +val get_pos = select_goals is_po + +fun get_test_thm thy name = let + val te = TestEnv.get_testenv_global thy +in + (the(Symtab.lookup(TestEnv.get_test_thm_inst_tab te)name) + handle Option.Option => error("No data statement: "^name^" available!")) +end + +fun discharge_POs name context = + let fun use_test_instances_tac name ctxt = + let val te = TestEnv.get_testenv ctxt + val atdata = (the(Symtab.lookup(TestEnv.get_absdata_tab te)name) + handle Option.Option => []) + in (TRY' ( EqSubst.eqsubst_tac ctxt [0] [@{thm PO_def}] + THEN' abs_data_tac ctxt atdata)) + end + in ALLGOALS(ONLY_POS (use_test_instances_tac name context)) end + +(* *********************************************************************** *) +(* *) +(* Initial test environment *) +(* *) +(* *********************************************************************** *) + +val _ = Context.>> (TestEnv.map_data + ((TestEnv.pre_safe_tac_update (fn ctxt => ALLCASES (asm_full_simp_tac ctxt))) + #> (TestEnv.add_test_derivation_rule data_separation_rule) + #> (TestEnv.add_test_derivation_rule to_HCNF_rule) + #> (TestEnv.add_test_derivation_rule minimize_rule))); + +val mt_testenv = TestEnv.get_testenv (Context.the_local_context ()); + +val _ = + Outer_Syntax.command @{command_keyword "print_testenv"} "print test environment" + (Parse.name >> (fn name => Toplevel.keep ((TestEnv.print_testenv name) o 
Toplevel.context_of))); + +val options = Scan.optional (Parse.$$$ "(" |-- Parse.!!! (Scan.option Parse.nat--| Parse.$$$ ")")) (NONE); + +val _ = let fun print name opts ctxt = writeln (TestEnv.print_test_data ctxt name opts) in + Outer_Syntax.command @{command_keyword "print_conc_tests"} "print concrete tests" + (options -- Parse.name >> (fn (opts, name) => + Toplevel.keep ((print name opts) o Toplevel.context_of))) + end; + +val _ = let fun print name opts ctxt = writeln (TestEnv.print_test_case ctxt name opts) in + Outer_Syntax.command @{command_keyword "print_abs_tests"} "print abstract tests" + (options -- Parse.name >> (fn (opts, name) => + Toplevel.keep ((print name opts) o Toplevel.context_of))) + end; + +val _ = let fun print name opts ctxt = writeln (TestEnv.print_test_hyps ctxt name opts) in + Outer_Syntax.command @{command_keyword "print_thyps"} "print test hypothesis" + (options -- Parse.name >> (fn (opts, name) => + Toplevel.keep ((print name opts) o Toplevel.context_of))) + end; + +val _ = let fun print name opts ctxt = writeln (TestEnv.print_unsolved_pos ctxt name opts) in + Outer_Syntax.command @{command_keyword "print_upos"} "print unsolved proof obligations" + (options -- Parse.name >> (fn (opts, name) => + Toplevel.keep ((print name opts) o Toplevel.context_of))) + end; + + +(* ************************************************************************* *) +(* micellaneous functions ... *) +(* ************************************************************************* *) + + +fun animate thy name = (* HACK - very simplistic implementation *) + let val ctxt = Proof_Context.init_global thy |> Config.put simp_trace true + fun animate_data_statement thm = + (Output.writeln ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"; + Pretty.writeln (Thm.pretty_thm ctxt thm); + Output.writeln "============================================"; + (* asm_full_simplify (global_simpset_of(theory_of_thm(thm))) thm; *) + asm_full_simplify ctxt thm; + Output.writeln "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<"); + in + List.app animate_data_statement (get_test_data thy name) + end; + +(* module setup *) + +val setup = + trace_setup #> + no_uniformity_setup #> + profiling_setup #> + no_finalize_setup; + +end (*struct *); + +*} + +setup {* TestEnv.setup #> TestGen.setup *} +text{* A key concept of HOL/TestGen are \emph{test suites}. These can be seen as a kind of +container data-structure that relate to a test-specification a number of data, namely the +test theorem (the TNF), abstract and concrete test data, and specific configuration data. +Test-suites were created at the end of a test-specification statement and initial test-refinement. +Later on they were enriched and finally used to generate test drivers and test executions. + +So, a test suite with name "X" will be finally run at a test-execution executed outside HOL/TestGen +started by the shell-command "run_X". 
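+
+A typical usage sketch (editorial illustration; the property sorted, the
+program under test sort and the suite name sort_suite are placeholders, not
+part of the distribution):
+\begin{verbatim}
+  test_spec "sorted (sort l)"
+    apply(gen_test_cases "sort")
+    mk_test_suite "sort_suite"
+
+  gen_test_data "sort_suite"
+  print_conc_tests sort_suite
+\end{verbatim}
+The first three steps store the test theorem in the suite "sort_suite",
+gen_test_data computes and adds concrete test data to it, and
+print_conc_tests displays the resulting tests.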
+*} + +ML{* + +fun mk_test_suite name = Toplevel.end_proof (fn _ => fn state => + let + val ctxt = Proof.context_of state; + val _ = if Config.get ctxt TestGen.profiling + then (Clocks.start_clock "unnamed_test_thm"; + Clocks.rename_clock "unnamed_test_thm" name; + Clocks.stop_clock name) + else () + + val _ = Proof.assert_backward state; + val {context = goal_ctxt, goal, ...} = Proof.goal state + fun up_thy thy = + let + fun add_to_dm name thm thy = + let + val thy = TestEnv.map_data_global (TestEnv.add_test_case (name,thm)) thy + val t = LogThy.get_tc_delta () + val num_of_tc = List.length + (List.filter (fn tc => TestGen.is_neither_thyp_nor_PO tc) (Thm.prems_of thm)) + val _ = writeln (String.concat ["Test theorem (gen_test_cases) '", + name,"': ", Int.toString num_of_tc, + " test cases in ",Time.toString t, + " seconds"]) + + val _ = LogThy.append (String.concat + [Context.theory_name thy, ", ", + name, ", " ,"test case, " ,Int.toString num_of_tc, + ", ", Time.toString t,"\n"]) + in + thy + end + in + thy |> Sign.add_path (space_implode "_" [name]) + |> (Global_Theory.add_thms [((@{binding test_thm}, goal), [])]) + |> snd + |> Sign.parent_path + |> add_to_dm (space_implode "_" [name]) goal + |> Clocks.flush_clocks + end + + in goal_ctxt |> Local_Theory.background_theory up_thy end); + + +val _ = Outer_Syntax.command + @{command_keyword mk_test_suite} + "store test state (theorem)" + (Parse.name >> mk_test_suite); + + + +(**********************) +fun gen_test_dataT name thy = + let val _ = LogThy.start_td_timer () + val _ = if Config.get_global thy TestGen.profiling + then (Clocks.init_clocks thy;Clocks.start_clock name;Clocks.start_clock "gen_test_data") + else () + + val thy = Context.theory_map (TestGen.gen_test_data name) thy (* call of the core-routine*) + + val t = LogThy.get_td_delta () + val thy = if Config.get_global thy TestGen.profiling + then (Clocks.stop_clock "gen_test_data"; Clocks.stop_clock name; Clocks.flush_clocks thy) + else thy + + val thy = Sign.add_path (space_implode "_" [name]) thy; + val thm = TestGen.get_test_thm thy name + val thy = snd(Global_Theory.add_thmss [((@{binding test_thm_inst},thm),[])] (thy)) + + val thy = Sign.parent_path thy; + in + thy + end + +val _ = Outer_Syntax.command + (*@{command_spec "gen_test_data"}*) + @{command_keyword gen_test_data} + "generate test data" + (Parse.name >> (Toplevel.theory o gen_test_dataT)); + +(**********************) + +val general_statement = + Parse_Spec.statement >> (fn x => ([], Element.Shows x)) || + Parse_Spec.long_statement; + +val _ = Outer_Syntax.local_theory_to_proof + @{command_keyword test_spec} + "define test specification" + (Scan.optional (Parse_Spec.opt_thm_name ":" --| + Scan.ahead (Parse_Spec.includes >> K "" || Parse_Spec.long_statement_keyword)) + Binding.empty_atts -- + Scan.optional Parse_Spec.includes [] -- + general_statement >> + (fn ((a, includes), (elems, concl)) => fn lthy => + let val _ = if Config.get lthy TestGen.profiling + then Clocks.init_clocks (Proof_Context.theory_of lthy) + else () + in + Specification.schematic_theorem_cmd true + "test_spec" NONE (K I) a includes elems concl false lthy + end)); + +*} +ML{* +fun set_wrapper str H src = + let val txt = Input.text_of src + val range = Input.range_of src + val txt' = str ^ "(" ^ txt ^ ")" + in H (Input.source true txt' range) end; + +(* BU : TO BE REVISED BY BU TO 2016. WAS SORT OF A HACK ANYWAY. 
+ Chantal: did revise it; please double check*) +val _ = Outer_Syntax.command + (*@{command_spec "set_pre_safe_tac"}*) + @{command_keyword set_pre_safe_tac} + "configure gen_test_gen: set pre_safe_tac" + (Parse.ML_source >> (Toplevel.theory o + (set_wrapper + "(TestEnv.map_data_global o TestEnv.pre_safe_tac_update)" + Isar_Cmd.setup))); + + +val _ = Outer_Syntax.command + (*@{command_spec "set_pre_normalize_TNF_tac"}*) + @{command_keyword set_pre_normalize_TNF_tac} + "configure gen_test_gen: pre_normalize_TNF_tac" + (Parse.ML_source >> (Toplevel.theory o + (set_wrapper + "(TestEnv.map_data_global o TestEnv.pre_normalize_TNF_tac_update)" + Isar_Cmd.setup))) + +val _ = Outer_Syntax.command + (*@{command_spec "set_pre_minimize_TNF_tac"}*) + @{command_keyword set_pre_minimize_TNF_tac} + "configure gen_test_gen: set pre_minimize_TNF_tac" + (Parse.ML_source >> (Toplevel.theory o + (set_wrapper + "(TestEnv.map_data_global o TestEnv.pre_minimize_TNF_tac_update)" + Isar_Cmd.setup))) + + +fun pretty_cterm_style_generic f ctxt (style, (name,pos:Position.T)) = + let val termS = case (Symtab.lookup (f (TestEnv.get_testenv ctxt)) name) of + SOME k => List.map (style o Thm.term_of) k (* (nth(k,0)) *) + | NONE => error "No such test suite" + in Pretty.big_list "\\\\" (List.map (Thy_Output.pretty_term ctxt) ( termS)) end + +val pretty_thyp_style = pretty_cterm_style_generic TestEnv.get_test_thyps_tab +val pretty_conctest_style = pretty_cterm_style_generic TestEnv.get_test_data_tab +val pretty_uPO_style = pretty_cterm_style_generic TestEnv.get_unsolved_PO_tab + +(* code copy from Thy_Output (2016) since this is not exported there ... *) + +fun basic_entities name scan pretty = + Thy_Output.antiquotation name scan (fn {source, context = ctxt, ...} => + Thy_Output.output ctxt o Thy_Output.maybe_pretty_source pretty ctxt source); + +fun basic_entity name scan = basic_entities name (scan >> single); + +(* end copy *) + +val _ = Theory.setup +( (basic_entity @{binding thyps} (Term_Style.parse -- (Scan.lift (Parse.position Args.name))) + pretty_thyp_style) #> + (basic_entity @{binding uPOs} (Term_Style.parse -- (Scan.lift (Parse.position Args.name))) + pretty_uPO_style) #> + (basic_entity @{binding conctests} (Term_Style.parse -- (Scan.lift (Parse.position Args.name))) + pretty_conctest_style)) + ; +*} + + +attribute_setup test_inst = + {* Scan.lift Args.name >> (fn name => + Thm.declaration_attribute (fn thm => + TestEnv.map_data (TestEnv.add_abstest_data (name, thm)))) *} + "declare theorems for test case generation" + +attribute_setup testgen_type_candidates = + {* Scan.repeat1 Args.typ >> + (fn Ts => + Thm.declaration_attribute (fn _ => + TestEnv.map_data (TestEnv.type_candidates_update Ts))) *} + "declare testgen type candidates" + + +method_setup discharge_POs = + {* Scan.lift (Scan.option(Parse.string) >> + (fn params => fn ctxt => + let val _ = LogThy.start (); + val name = (case params of + NONE => "" + | SOME (s) => s); + in Method.SIMPLE_METHOD (CHANGED_PROP + (TestGen.discharge_POs name ctxt)) + end)) *} + "eliminate POs by test instances" + +method_setup gen_test_cases = + {* Scan.lift (Scan.option (Parse.nat -- Parse.nat) + -- Scan.repeat1 (Scan.unless (Scan.first clasimp_modifiers) Args.name)) + --| Method.sections clasimp_modifiers >> + (fn (params, prog) => fn ctxt => + let val _ = LogThy.start (); + val ctxt' = (case params of + NONE => ctxt + | SOME (depth, steps) => ctxt + |> Config.put TestEnv.depth depth + |> Config.put TestEnv.steps steps); + in Method.SIMPLE_METHOD (CHANGED_PROP + 
(TestGen.gen_test_case_tac ctxt' prog)) + end) *} + "generate symbolic test cases" + +method_setup gen_test_cases_old = + {* Scan.lift (Scan.option (Parse.nat -- Parse.nat) -- + Scan.repeat1 (Scan.unless (Scan.first clasimp_modifiers) Args.name)) --| + Method.sections clasimp_modifiers >> + (fn (params, prog) => fn ctxt => + let val _ = LogThy.start (); + val ctxt' = (case params of + NONE => ctxt + | SOME (depth, steps) => ctxt + |> Config.put TestEnv.depth depth + |> Config.put TestEnv.steps steps); + in Method.SIMPLE_METHOD (CHANGED_PROP + (TestGen.gen_test_case_old_tac ctxt' prog)) + end) + *} + "old variant of gen_test_cases" + +ML{* fn ctxt => HEADGOAL o (K(TestGen.thyp_ify ctxt ))*} +ML{* fn ctxt => METHOD (HEADGOAL o (K(TestGen.thyp_ify ctxt )))*} + + + +ML{* +val _ = + Theory.setup + (Method.setup @{binding thyp_ify} + (Attrib.thms >> (fn _ => fn ctxt => METHOD (HEADGOAL o (K(TestGen.thyp_ify ctxt ))))) + "making testhyps explicit" #> + Method.setup @{binding mp_ify} + (Attrib.thms >> (fn _ => fn ctxt => METHOD (HEADGOAL o (K(TestGen.mp_fy ctxt )) ))) + "fast destruction matching" #> + Method.setup @{binding all_ify} + (Attrib.thms >> (fn _ => fn ctxt => METHOD (HEADGOAL o (K(TestGen.all_ify ctxt [])) ))) + "replace all free variables by quantifiers") +*} + +end diff --git a/src/test-gen/src/main/TestLib.thy b/src/test-gen/src/main/TestLib.thy new file mode 100644 index 0000000..b3ccc0a --- /dev/null +++ b/src/test-gen/src/main/TestLib.thy @@ -0,0 +1,57 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * Testing.thy --- wiring everything together. + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2007 ETH Zurich, Switzerland + * 2008-2012 Achim D. Brucker, Germany + * 2009-2012 Université Paris-Sud, France + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ ******************************************************************************) +(* $Id:$ *) + +chapter {* Summing up the Library *} + +theory + TestLib +imports + Monads + Automata + Interleaving + Observers + EFSM_Toolkit + TestRefinements +begin + +end diff --git a/src/test-gen/src/main/TestRefinements.thy b/src/test-gen/src/main/TestRefinements.thy new file mode 100644 index 0000000..4fa18cf --- /dev/null +++ b/src/test-gen/src/main/TestRefinements.thy @@ -0,0 +1,248 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * TestRefinements.thy --- for sequence testing. + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2007, ETH Zurich, Switzerland + * 2009 B. Wolff, Univ. Paris-Sud, France + * 2009 Achim D. Brucker, Germany + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id:$ *) + +chapter {* Test-Refinements (IOCO and Friends) *} + +theory TestRefinements imports Monads Automata +begin + + + +text{* converts infinite trace sets to prefix-closed sets of finite traces, +reconciling the most common different concepts of traces ... *} + +consts cnv :: "(nat \ '\) \ '\ list" +(* NAIV !!! *) + +(* I WOULD PREFER A SYMMETRIC NOTION OF REFINEMENT ... However, this makes the use +of mbind less clear and evident. Sufficient conditions when these two things +are equivalent need to be explored ... *) + + +definition input_refine :: + "[('\,'o,'\) det_io_atm,('\,'o,'\) ndet_io_atm] \ bool" +where + input_refine_def: + "input_refine I SP = + (({det_io_atm.init I} = ndet_io_atm.init SP) \ + (\ t \ cnv ` (in_trace `(etraces SP)). 
+ (det_io_atm.init I) + \ (os \ (mbind t (det_io_atm.step I)) ; + return(length t = length os))))" +notation input_refine ("(_/ \\<^sub>I _)" [51, 51] 50) + +text{* This testing notion essentially says: whenever we can run an +input sequence succesfully on the PUT (the program does not throw an exception), +it is ok. *} + +definition input_output_refine :: + "[('\,'o,'\) det_io_atm,('\,'o,'\) ndet_io_atm] \ bool" +where + input_output_refine_def: + "input_output_refine i s \ + ({det_io_atm.init i} = ndet_io_atm.init s) \ + (\ t \ prefixes (etraces s). + (det_io_atm.init i) + \ (os \ (mbind (map fst t) (det_io_atm.step i)); + return((map snd t) = os)))" + +notation input_output_refine ("(_/ \\<^sub>IO _)" [51, 51] 50) +text{* Our no-frills-approach to I/O conformance testing: +no quiescense, and strict alternation between input and output. *} + +text{* PROBLEM : Tretmanns / Belinfantes notion of ioco is inherently +different from Gaudels, which is refusal based. +See definition in accompanying IOCO.thy file: +\begin{verbatim} + +definition ioco :: "[('\,'o option,'\)IO_TS,('\,'o option,'\)IO_TS] \ bool" + (infixl "ioco" 200) + "i ioco s \ (\ t \ Straces(s). + out i (i after t) \ out s (s after t))" + +\end{verbatim} + +*} + + +definition after :: "[('\, 'o, '\) ndet_io_atm, ('\ \ 'o) list] \ '\ set" + (infixl "after" 100) +where "atm after l = {\' . \ t \ rtraces atm. (\' = fst(t (length l)) \ + (\ n \ {0 .. (length l) - 1}. l!n = fst(snd(t n))))}" + +definition out :: "[('\, 'o, '\) ndet_io_atm,'\ set, '\] \ 'o set" +where "out atm ss \ = {a. \\ \ ss. \\'. (a,\') \ ndet_io_atm.step atm \ \}" + +(* OLD: +definition ioco :: "[('\, 'o, '\) det_io_atm,('\, 'o, '\) ndet_io_atm] \ bool" + (infixl "ioco" 200) +where "i ioco s = (({det_io_atm.init i} = ndet_io_atm.init s) \ + (\ t \ cnv ` (etraces s). + \ \ \. + ((det_io_atm.init i) + \ (os \ (mbind (map fst t @ [\]) (det_io_atm.step i)) ; + return(os = map snd t @ [\]))) + \ + \ \ out s (s after t)))" *) + +definition ready :: "[('\, 'o, '\) ndet_io_atm,'\ set] \ '\ set" +where "ready atm ss = {\. \\ \ ss. ndet_io_atm.step atm \ \ \ {}}" + +definition ioco :: "[('\,'o,'\)ndet_io_atm, ('\,'o,'\)ndet_io_atm] \ bool" + (infixl "ioco" 200) +where "i ioco s = (\ t \ prefixes(etraces s). + \ \ \ ready s (s after t). out i (i after t) \ \ out s (s after t) \)" + +definition oico :: "[('\,'o,'\)ndet_io_atm,('\,'o,'\)ndet_io_atm] \ bool" + (infixl "oico" 200) +where "i oico s = (\ t \ prefixes(etraces s). + ready i (i after t) \ ready s (s after t))" + +definition ioco2 :: "[('\,'o,'\)ndet_io_atm, ('\,'o,'\)ndet_io_atm] \ bool" + (infixl "ioco2" 200) +where "i ioco2 s = (\ t \ eprefixes (ndet_io_atm.step s) (rtraces s). + \ \ \ ready s (s after t). + out i (i after t) \ \ out s (s after t) \)" + +definition ico :: "[('\, 'o, '\) det_io_atm,('\, 'o, '\) ndet_io_atm] \ bool" + (infixl "ico" 200) +where "i ico s = (\ t \ prefixes(etraces s). + let i' = det2ndet i in ready i' (i' after t) \ ready s (s after t))" + +lemma full_det_refine: "s = det2ndet s' \ + (det2ndet i) ioco s \ (det2ndet i) oico s \ input_output_refine i s" +apply(safe) +oops + +definition ico2 :: "[('\,'o,'\)ndet_io_atm,('\,'o,'\)ndet_io_atm] \ bool" + (infixl "ico2" 200) +where "i ico2 s \ \ t \ eprefixes (ndet_io_atm.step s) (rtraces s). + ready i (i after t) \ ready s (s after t)" + + +text{* There is lots of potential for optimization. 
+\begin{itemize} +\item only maximal prefixes +\item draw the $\omega$ tests inside the return +\item compute the $\omega$ by the $\ioprog$, not quantify over it. +\end{itemize} +*} + +section{* Generic Monadic Test Refinement Notions *} + +text{* Generic Notion *} + +definition monadic_test_refinement :: + "('\ \ ('o, '\)MON\<^sub>S\<^sub>E) \ + '\ set \ (* state invariant *) + '\ list set \ (* coverage or test-purpose *) + ('\ list \ 'o list \ '\ \ bool) \ (* post-cond *) + ('\ \ ('o, '\)MON\<^sub>S\<^sub>E) \ bool" + ("_ \\<^sub>\_,_,_\<^sub>\ _" [100,100,100,100,100]101) +where + + + "(MODEL \\<^sub>\Init,CovCrit,conf\<^sub>\ SUT) = + (\ \\<^sub>0 \ Init. \ \s \ CovCrit. \ res. + (\\<^sub>0 \ (os \ mbind \s MODEL; return (conf \s os res))) + \ + (\\<^sub>0 \ (os \ mbind \s SUT; return (conf \s os res))))" + +subsection{* Inclusion Test Refinements *} + +definition incl_ref :: "('\ \ ('o, '\)MON\<^sub>S\<^sub>E) \ + '\ set \ (* state invariant *) + '\ list set \ (* coverage or test-purpose *) + ('\ \ ('o, '\)MON\<^sub>S\<^sub>E) \ bool" + ("_ \\<^sub>I\<^sub>\_,_\<^sub>\ _" [100,100,100,100]101) +where "(S \\<^sub>I\<^sub>\Init,CC\<^sub>\ I) = (S \\<^sub>\Init,CC,(\ is os x. length is = length os \ os=x)\<^sub>\ I)" + + +lemma inclusion_test_I : + "(\\\<^sub>0 \s res. + \\<^sub>0 \ Init \ + \s \ CC \ + \\<^sub>0 \ ( os \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e \s S; unit\<^sub>S\<^sub>E (length \s = length os \ os = res)) + \ + \\<^sub>0 \ ( os \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e \s I; unit\<^sub>S\<^sub>E (length \s = length os \ os = res))) + \ (S \\<^sub>I\<^sub>\Init,CC\<^sub>\ I)" +unfolding incl_ref_def monadic_test_refinement_def +by auto + +lemma inclusion_test_I_opt : + "(\\\<^sub>0 \s res. + \\<^sub>0 \ Init \ + \s \ CC \ + \\<^sub>0 \ ( os \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p \s S; unit\<^sub>S\<^sub>E (os = res)) + \ + \\<^sub>0 \ ( os \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p \s I; unit\<^sub>S\<^sub>E (os = res))) + \ (S \\<^sub>I\<^sub>\Init,CC\<^sub>\ I)" +apply(rule inclusion_test_I) +apply(subst Monads.mbindFSave_vs_mbindFStop) +apply(subst (asm) Monads.mbindFSave_vs_mbindFStop) +by(auto) + + + +subsection{* Inclusion Test Refinements with Abort *} + +text{* The abort scenario allows operations to fail, so violations + to input-enabledness are permitted. The model assumes that a particular + abort-semantics is respected: the operation is assumed to fail explicitly, + \ie{} observably by the tester, and to leave the state unchanged. *} + + +definition incl_aborts_ref :: "('\ \ ('o, '\)MON\<^sub>S\<^sub>E) \ + '\ set \ (* state invariant *) + '\ list set \ (* coverage or test-purpose *) + ('\ \ ('o, '\)MON\<^sub>S\<^sub>E) \ bool" + ("_ \\<^sub>I\<^sub>A\<^sub>\_,_\<^sub>\ _" [100,100,100,100]101) +where "(S \\<^sub>I\<^sub>A\<^sub>\Inv,IS\<^sub>\ I) = (S \\<^sub>\Inv,IS,(\ is os x. 
os=x)\<^sub>\ I)" + + +(* TODO : Deadlock Refinement *) + +(* TODO : Monadic IOCO *) + + +end diff --git a/src/test-gen/src/main/TestScript.thy b/src/test-gen/src/main/TestScript.thy new file mode 100644 index 0000000..6cf2e42 --- /dev/null +++ b/src/test-gen/src/main/TestScript.thy @@ -0,0 +1,164 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * TestScript.thy --- Test Script and Test Data exporter + * This file is part of HOL-TestGen. + * + * Copyright (c) 2011-2015 Achim D. Brucker, Germany + * 2016-2017 The University of Sheffield, UK + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id: TestScript.thy 13147 2017-08-18 19:42:12Z brucker $ *) + + +theory TestScript +imports TestGen + "codegen_fsharp/code_fsharp" +keywords "generate_test_script" :: thy_decl + and "export_test_data" :: diag +begin + +code_printing constant "RSF" \ + (SML) "TestHarness.rsf" + and (Fsharp) "TestHarness.rsf" + and (Scala) "TestHarness.rsf" + +datatype Lazy = "lazy" bool + +code_printing constant "lazy" \ + (SML) "fn () => (_)" + and (Fsharp) "lazy ((_))" + and (Scala) "() => ((_))" + +code_printing type_constructor Lazy \ + (SML) "(unit -> bool)" + and (Fsharp) "Lazy" + and (Scala) "() => Boolean" + +code_printing code_module "Header" \ (SML) +{* + (********************************************************) + (* This file is generated automatically. 
Do not edit *) + (********************************************************) +*} + +ML{* +open List; + fun gen_ts_def dataname thy = let + + val tsname = "test_script" + val fqtsname = (Context.theory_name thy)^"."^dataname^"."^tsname + val tsbnd = Binding.name tsname + val tstype = HOLogic.listT (HOLogic.mk_tupleT[HOLogic.listT @{typ "Lazy"},@{typ "Lazy"}]) + + val (td,_) = (TestGen.get_test_data thy dataname,thy); + + fun def const bnd term thy = snd (Global_Theory.add_defs false [((bnd, + (Logic.mk_equals (const, term))),[Code.add_default_eqn_attribute Code.Equation])] thy) + + val prop_terms = List.map (fn p => let val _ $ ret = Thm.concl_of p in ret end) td; + val prems_terms = map (fn data => (map (fn p => let val _ $ ret = p in ret end) + (Thm.prems_of data))) td; + val spec = map2 (fn x => fn y => (x,y)) prems_terms prop_terms + + fun mk_abs t = Const("TestScript.Lazy.lazy",@{typ "bool"} --> @{typ "Lazy"})$t + + val ts_term = HOLogic.mk_list (HOLogic.mk_tupleT[HOLogic.listT @{typ "Lazy"},@{typ "Lazy"}]) + (map (fn (a,b) => HOLogic.mk_tuple [HOLogic.mk_list @{typ "Lazy"} + (map mk_abs a), mk_abs b]) spec) + in + thy |> (Sign.add_path (space_implode "_" [dataname])) + |> Sign.add_consts [(tsbnd, tstype, NoSyn)] + |> def (Const(fqtsname,tstype)) (tsbnd) ts_term + |> (Sign.parent_path) + end + +*} + + +ML {* (Outer_Syntax.command (("generate_test_script"), Position.none) + "generate code equation representing test script" + (Parse.name >> (fn name => Toplevel.theory (gen_ts_def name)))) +*} + +ML{* +fun export_test_data ctxt filename dataname = + let + val filename = Path.explode filename + val thy = Proof_Context.theory_of ctxt + val master_dir = Resources.master_directory thy + val td = TestGen.get_test_data thy dataname + val today = (Date.toString(Date.fromTimeUniv (Time.now())))^" (UTC)"; + val abs_filename = if (Path.is_absolute filename) then filename else Path.append master_dir filename + val thyname = Context.theory_name thy + (* fun string_of_term ctxt = Pretty.unformatted_string_of o (Syntax.unparse_term ctxt) *) + val string_of_term = Sledgehammer_Util.hackish_string_of_term + fun generate () = + let + val num = Int.toString(length td) + fun string_of_prems ctxt [] = "" + | string_of_prems ctxt (p::ps) = ("\n" + ^" "^(string_of_term ctxt p) ^ "\n" + ^" \n")^(string_of_prems ctxt ps) + val thmsstrings = String.concat (map (fn t => "\n" + ^(string_of_prems ctxt (Thm.prems_of t)) + ^" \n" + ^" "^string_of_term ctxt (Thm.prop_of t) ^ "\n" + ^" \n" + ^"\n" + ) td) + in + "\n\n" + ^thmsstrings + end + + val test_data_str = Print_Mode.setmp [] generate (); + val _ = File.write (abs_filename) test_data_str + handle (IO.Io{name=name,...}) => warning ("Could not write \""^(name)^"\".") + in () end +*} + +ML {* + Outer_Syntax.command (("export_test_data"), Position.none) + "export test data to an external file" + (Parse.name -- Parse.name + >> (fn (filename,name) => + (Toplevel.keep (fn state => export_test_data (Toplevel.context_of state) filename name)))); + + *} + +end diff --git a/src/test-gen/src/main/TestSequence.thy b/src/test-gen/src/main/TestSequence.thy new file mode 100644 index 0000000..861e8df --- /dev/null +++ b/src/test-gen/src/main/TestSequence.thy @@ -0,0 +1,1001 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * Monads.thy --- a base testing theory for sequential computations. + * This file is part of HOL-TestGen. 
+ * + * Copyright (c) 2005-2007, ETH Zurich, Switzerland + * 2009 B. Wolff, Univ. Paris-Sud, France + * 2009, 2012 Achim D. Brucker, Germany + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id:$ *) + +chapter {* Basic Monad Theory for Sequential Computations *} + +theory Monads imports Main +begin + + +section{*General Framework for Monad-based Sequence-Test *} +text{* As such, Higher-order Logic as a purely functional specification + formalism has no built-in mechanism for state and state-transitions. + Forms of testing involving state require therefore explicit mechanisms + for their treatment inside the logic; a well-known technique to model + states inside purely functional languages are \emph{monads} made popular + by Wadler and Moggi and extensively used in Haskell. \HOL is powerful + enough to represent the most important standard monads; + however, it is not possible to represent monads as such due to well-known + limitations of the Hindley-Milner type-system. *} + +text{* Here is a variant for state-exception monads, that models precisely + transition functions with preconditions. Next, we declare the + state-backtrack-monad. In all of them, our concept of i/o stepping + functions can be formulated; these are functions mapping input + to a given monad. Later on, we will build the usual concepts of: + \begin{enumerate} + \item deterministic i/o automata, + \item non-deterministic i/o automata, and + \item labelled transition systems (LTS) + \end{enumerate} +*} + +subsection{* Standard State Exception Monads *} +type_synonym ('o, '\) MON\<^sub>S\<^sub>E = "'\ \ ('o \ '\)" (* = '\ \ ('o \ '\)option *) + + +definition bind_SE :: "('o,'\)MON\<^sub>S\<^sub>E \ ('o \ ('o','\)MON\<^sub>S\<^sub>E) \ ('o','\)MON\<^sub>S\<^sub>E" +where "bind_SE f g = (\\. 
case f \ of None \ None + | Some (out, \') \ g out \')" + +notation bind_SE ("bind\<^sub>S\<^sub>E") + +syntax (xsymbols) + "_bind_SE" :: "[pttrn,('o,'\)MON\<^sub>S\<^sub>E,('o','\)MON\<^sub>S\<^sub>E] \ ('o','\)MON\<^sub>S\<^sub>E" + ("(2 _ \ _; _)" [5,8,8]8) +translations + "x \ f; g" == "CONST bind_SE f (% x . g)" + + +definition unit_SE :: "'o \ ('o, '\)MON\<^sub>S\<^sub>E" ("(return _)" 8) +where "unit_SE e = (\\. Some(e,\))" +notation unit_SE ("unit\<^sub>S\<^sub>E") + +definition fail_SE :: "('o, '\)MON\<^sub>S\<^sub>E" +where "fail_SE = (\\. None)" +notation fail_SE ("fail\<^sub>S\<^sub>E") + +definition assert_SE :: "('\ \ bool) \ (bool, '\)MON\<^sub>S\<^sub>E" +where "assert_SE P = (\\. if P \ then Some(True,\) else None)" +notation assert_SE ("assert\<^sub>S\<^sub>E") + +definition assume_SE :: "('\ \ bool) \ (unit, '\)MON\<^sub>S\<^sub>E" +where "assume_SE P = (\\. if \\ . P \ then Some((), SOME \ . P \) else None)" +notation assume_SE ("assume\<^sub>S\<^sub>E") + +text{* The standard monad theorems about unit and associativity: *} + +lemma bind_left_unit [simp]: "(x \ return c; P x) = P c" + by (simp add: unit_SE_def bind_SE_def) + + +lemma bind_left_fail_SE[simp] : "(x \ fail\<^sub>S\<^sub>E; P x) = fail\<^sub>S\<^sub>E" + by (simp add: fail_SE_def bind_SE_def) + + +lemma bind_right_unit[simp]: "(x \ m; return x) = m" + apply (simp add: unit_SE_def bind_SE_def) + apply (rule ext) + apply (case_tac "m \", simp_all) + done + +lemma bind_assoc[simp]: "(y \ (x \ m; k x); h y) = (x \ m; (y \ k x; h y))" + apply (simp add: unit_SE_def bind_SE_def, rule ext) + apply (case_tac "m \", simp_all) + apply (case_tac "a", simp_all) + done + + +text{* The bind-operator in the state-exception monad yields already + a semantics for the concept of an input sequence on the meta-level: *} +lemma syntax_test: "(o1 \ f1 ; o2 \ f2; return (post o1 o2)) = X" +oops + + + +subsection {* "Pipe-free" - variant of the bind. *} + +definition seq_SE :: "[('\, '\)MON\<^sub>S\<^sub>E, ('\, '\)MON\<^sub>S\<^sub>E] \ ('\, '\)MON\<^sub>S\<^sub>E" (infixl ";-" 65) +where "f ;- g = (_ \ f ; g)" + + + +subsection {* Monadic If *} + +definition if_SE :: "['\ \ bool, ('\, '\)MON\<^sub>S\<^sub>E, ('\, '\)MON\<^sub>S\<^sub>E] \ ('\, '\)MON\<^sub>S\<^sub>E" +where "if_SE c E F = (\\. if c \ then E \ else F \)" + +syntax (xsymbols) + "_if_SE" :: "['\ \ bool,('o,'\)MON\<^sub>S\<^sub>E,('o','\)MON\<^sub>S\<^sub>E] \ ('o','\)MON\<^sub>S\<^sub>E" + ("(if\<^sub>S\<^sub>E _ then _ else _fi)" [5,8,8]8) +translations + "(if\<^sub>S\<^sub>E cond then T1 else T2 fi)" == "CONST if_SE cond T1 T2" + + + +subsubsection{* Theory of a Monadic While *} + +text{* First Step : Establishing an embedding between partial functions and relations *} +(* plongement *) +definition Mon2Rel :: "(unit, '\)MON\<^sub>S\<^sub>E \ ('\ \ '\) set" +where "Mon2Rel f = {(x, y). (f x = Some((), y))}" +(* ressortir *) +definition Rel2Mon :: " ('\ \ '\) set \ (unit, '\)MON\<^sub>S\<^sub>E " +where "Rel2Mon S = (\ \. if \\'. (\, \') \ S then Some((), SOME \'. (\, \') \ S) else None)" + +lemma Mon2Rel_Rel2Mon_id: assumes det:"single_valued R" shows "(Mon2Rel \ Rel2Mon) R = R" +apply (simp add: comp_def Mon2Rel_def Rel2Mon_def,auto) +apply (case_tac "\\'. 
(a, \') \ R", auto) +apply (subst some_eq_ex) back +apply (insert det[simplified single_valued_def]) +apply (auto) +done + +lemma Rel2Mon_Id: "(Rel2Mon \ Mon2Rel) x = x" +apply (rule ext) +apply (auto simp: comp_def Mon2Rel_def Rel2Mon_def) +apply (erule contrapos_pp, drule HOL.not_sym, simp) +done + +lemma single_valued_Mon2Rel: "single_valued (Mon2Rel B)" +by (auto simp: single_valued_def Mon2Rel_def) + +text{* Second Step : Proving an induction principle allowing to establish that lfp remains + deterministic *} + + +(* Due to Tobias Nipkow *) +definition chain :: "(nat => 'a set) => bool" +where "chain S = (\i. S i \ S(Suc i))" + +lemma chain_total: "chain S ==> S i \ S j \ S j \ S i" +by (metis chain_def le_cases lift_Suc_mono_le) + +definition cont :: "('a set => 'b set) => bool" +where "cont f = (\S. chain S \ f(UN n. S n) = (UN n. f(S n)))" + +lemma mono_if_cont: fixes f :: "'a set => 'b set" + assumes "cont f" shows "mono f" +proof + fix a b :: "'a set" assume "a \ b" + let ?S = "\n::nat. if n=0 then a else b" + have "chain ?S" using `a \ b` by(auto simp: chain_def) + hence "f(UN n. ?S n) = (UN n. f(?S n))" + using assms by(simp add: cont_def) + moreover have "(UN n. ?S n) = b" using `a \ b` by (auto split: if_splits) + moreover have "(UN n. f(?S n)) = f a \ f b" by (auto split: if_splits) + ultimately show "f a \ f b" by (metis Un_upper1) +qed + +lemma chain_iterates: fixes f :: "'a set => 'a set" + assumes "mono f" shows "chain(\n. (f^^n) {})" +proof- + { fix n have "(f ^^ n) {} \ (f ^^ Suc n) {}" using assms + by(induction n) (auto simp: mono_def) } + thus ?thesis by(auto simp: chain_def) +qed + +theorem lfp_if_cont: + assumes "cont f" shows "lfp f = (UN n. (f^^n) {})" (is "_ = ?U") +proof + show "lfp f \ ?U" + proof (rule lfp_lowerbound) + have "f ?U = (UN n. (f^^Suc n){})" + using chain_iterates[OF mono_if_cont[OF assms]] assms + by(simp add: cont_def) + also have "\ = (f^^0){} \ \" by simp + also have "\ = ?U" + by(auto simp del: funpow.simps) (metis not0_implies_Suc) + finally show "f ?U \ ?U" by simp + qed +next + { fix n p assume "f p \ p" + have "(f^^n){} \ p" + proof(induction n) + case 0 show ?case by simp + next + case Suc + from monoD[OF mono_if_cont[OF assms] Suc] `f p \ p` + show ?case by simp + qed + } + thus "?U \ lfp f" by(auto simp: lfp_def) +qed + + +lemma single_valued_UN_chain: + assumes "chain S" "(!!n. single_valued (S n))" + shows "single_valued(UN n. S n)" +proof(auto simp: single_valued_def) + fix m n x y z assume "(x, y) \ S m" "(x, z) \ S n" + with chain_total[OF assms(1), of m n] assms(2) + show "y = z" by (auto simp: single_valued_def) +qed + +lemma single_valued_lfp: +fixes f :: "('a \ 'a) set => ('a \ 'a) set" +assumes "cont f" "\r. single_valued r \ single_valued (f r)" +shows "single_valued(lfp f)" +unfolding lfp_if_cont[OF assms(1)] +proof(rule single_valued_UN_chain[OF chain_iterates[OF mono_if_cont[OF assms(1)]]]) + fix n show "single_valued ((f ^^ n) {})" + by(induction n)(auto simp: assms(2)) +qed + + +text{* Third Step: Definition of the Monadic While *} +definition \ :: "['\ \ bool,('\ \ '\) set] \ (('\ \ '\) set \ ('\ \ '\) set)" +where "\ b cd = (\cw. {(s,t). 
if b s then (s, t) \ cd O cw else s = t})" + + +definition while_SE :: "['\ \ bool, (unit, '\)MON\<^sub>S\<^sub>E] \ (unit, '\)MON\<^sub>S\<^sub>E" +where "while_SE c B \ (Rel2Mon(lfp(\ c (Mon2Rel B))))" + +syntax (xsymbols) + "_while_SE" :: "['\ \ bool, (unit, '\)MON\<^sub>S\<^sub>E] \ (unit, '\)MON\<^sub>S\<^sub>E" + ("(while\<^sub>S\<^sub>E _ do _ od)" [8,8]8) +translations + "while\<^sub>S\<^sub>E c do b od" == "CONST while_SE c b" + +lemma cont_\: "cont (\ c b)" +by (auto simp: cont_def \_def) + +text{* The fixpoint theory now allows us to establish that the lfp constructed over + @{term Mon2Rel} remains deterministic *} + +theorem single_valued_lfp_Mon2Rel: "single_valued (lfp(\ c (Mon2Rel B)))" +apply(rule single_valued_lfp, simp_all add: cont_\) +apply(auto simp: \_def single_valued_def) +apply(metis single_valued_Mon2Rel[of "B"] single_valued_def) +done + + +lemma Rel2Mon_if: + "Rel2Mon {(s, t). if b s then (s, t) \ Mon2Rel c O lfp (\ b (Mon2Rel c)) else s = t} \ = + (if b \ then Rel2Mon (Mon2Rel c O lfp (\ b (Mon2Rel c))) \ else Some ((), \))" +by (simp add: Rel2Mon_def) + +lemma Rel2Mon_homomorphism: + assumes determ_X: "single_valued X" and determ_Y: "single_valued Y" + shows "Rel2Mon (X O Y) = (Rel2Mon X) ;- (Rel2Mon Y)" +proof - + have relational_partial_next_in_O: "!!x E F. (\y. (x, y) \ (E O F)) + \ (\y. (x, y) \ E)" by (auto) +show ?thesis +apply (simp add: Rel2Mon_def seq_SE_def bind_SE_def) +apply (rule ext) +apply (case_tac " \\'. (\, \') \ X O Y") +apply (simp only: HOL.if_True) +apply (frule relational_partial_next_in_O) +apply (auto) +apply (insert determ_X determ_Y) + +defer 1 + +apply (rule_tac x=z in exI) +apply (rule someI2) +apply (assumption)+ +apply (simp add: single_valued_def) +apply (metis) + +apply (erule contrapos_pp) +apply (simp) +apply (rule_tac x=\' in exI) + apply (subgoal_tac "(SOME \'. (\, \') \ X) = \''") + apply (auto) + apply (auto simp: single_valued_def) +apply (fold single_valued_def) + + apply (subgoal_tac "(SOME \'. (x, \') \ X) = y") + apply (simp) + apply (subgoal_tac "(SOME \'. (y, \') \ Y) = z") + apply (simp) + apply (subgoal_tac "(SOME \'. (x, \') \ X O Y) = z") + apply (simp) + apply (auto simp: single_valued_def) + (* TODO IMPORTANT *) + + apply (rule someI2) + apply (assumption) + apply (rule someI2) + apply (auto) +oops +(* qed *) + +text{* Putting everything together, the theory of embedding and the invariance of + determinism of the while-body, gives us the usual unfold-theorem: *} +theorem "(while\<^sub>S\<^sub>E b do c od) = (if\<^sub>S\<^sub>E b then (c ;- (while\<^sub>S\<^sub>E b do c od)) else return () fi)" +apply (simp add: if_SE_def seq_SE_def while_SE_def unit_SE_def) +apply (subst lfp_unfold [OF mono_if_cont, OF cont_\]) +apply (rule ext) +apply (subst \_def) +apply (auto simp: Rel2Mon_if Rel2Mon_homomorphism seq_SE_def Rel2Mon_Id [simplified comp_def] + single_valued_Mon2Rel single_valued_lfp_Mon2Rel ) +done + + + +subsection{* Multi-binds *} + +text{* In order to express test-sequences also on the object-level and +to make our theory amenable to formal reasoning over test-sequences, +we represent them as lists of input and generalize the bind-operator +of the state-exception monad accordingly. The approach is straightforward, +but comes with a price: we have to encapsulate all input and output data +into one type. Assume that we have a typed interface to a module with +the operations $op_1$, $op_2$, \ldots, $op_n$ with the inputs +$\iota_1$, $\iota_2$, \ldots, $\iota_n$ (outputs are treated analogously). 
+Then we can encode for this interface the general input - type: +\begin{displaymath} +\texttt{datatype}\ \texttt{in}\ =\ op_1\ ::\ \iota_1\ |\ ...\ |\ \iota_n +\end{displaymath} +Obviously, we loose some type-safety in this approach; we have to express +that in traces only \emph{corresponding} input and output belonging to the +same operation will occur; this form of side-conditions have to be expressed +inside \HOL. From the user perspective, this will not make much difference, +since junk-data resulting from too weak typing can be ruled out by adopted +front-ends. +*} + +text{* Note that the subsequent notion of a test-sequence allows the io stepping +function (and the special case of a program under test) to stop execution +\emph{within} the sequence; such premature terminations are characterized by an +output list which is shorter than the input list. *} + +fun mbind :: "'\ list \ ('\ \ ('o,'\) MON\<^sub>S\<^sub>E) \ ('o list,'\) MON\<^sub>S\<^sub>E" +where "mbind [] iostep \ = Some([], \)" | + "mbind (a#H) iostep \ = + (case iostep a \ of + None \ Some([], \) + | Some (out, \') \ (case mbind H iostep \' of + None \ Some([out],\') + | Some(outs,\'') \ Some(out#outs,\'')))" + +notation mbind ("mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e") (* future name: mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e *) + +text{* This definition is fail-safe; in case of an exception, the current state is maintained, + the computation as a whole is marked as success. + Compare to the fail-strict variant @{text "mbind'"}: *} + +lemma mbind_unit [simp]: + "mbind [] f = (return [])" + by(rule ext, simp add: unit_SE_def) + +text{* The characteristic property of @{term mbind} --- which distinguishes it from + @{text mbind'} defined in the sequel --- is that it never fails; it ``swallows'' internal + errors occuring during the computation. 
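+For instance, assuming a hypothetical step function \texttt{step} with
+$\texttt{step}\ a\ \sigma = \texttt{Some}(out_a,\sigma_a)$ and
+$\texttt{step}\ c\ \sigma_a = \texttt{None}$ (the names $a$, $b$, $c$, $out_a$, $\sigma_a$ are
+purely illustrative), the definition above yields
+\begin{displaymath}
+  \texttt{mbind}\ [a,c,b]\ \texttt{step}\ \sigma \;=\; \texttt{Some}([out_a],\ \sigma_a),
+\end{displaymath}
+i.e.\ the output collected before the failing step is kept, the state reached at that point is
+returned, and the remaining inputs are silently dropped.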
*} +lemma mbind_nofailure [simp]: + "mbind S f \ \ None" + apply(rule_tac x=\ in spec) + apply(induct S, auto simp:unit_SE_def) + apply(case_tac "f a x", auto) + apply(erule_tac x="b" in allE) + apply(erule exE, erule exE, simp) + done + + +fun mbind' :: "'\ list \ ('\ \ ('o,'\) MON\<^sub>S\<^sub>E) \ ('o list,'\) MON\<^sub>S\<^sub>E" +where "mbind' [] iostep \ = Some([], \)" | + "mbind' (a#S) iostep \ = + (case iostep a \ of + None \ None + | Some (out, \') \ (case mbind' S iostep \' of + None \ None (* fail-strict *) + | Some(outs,\'') \ Some(out#outs,\'')))" +notation mbind' ("mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p") (* future name: mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p *) + +lemma mbind'_unit [simp]: + "mbind' [] f = (return [])" + by(rule ext, simp add: unit_SE_def) + +lemma mbind'_bind [simp]: + "(x \ mbind' (a#S) F; M x) = (a \ (F a); (x \ mbind' S F; M (a # x)))" + by(rule ext, rename_tac "z",simp add: bind_SE_def split: option.split) + +declare mbind'.simps[simp del] (* use only more abstract definitions *) + + +fun mbind'' :: "'\ list \ ('\ \ ('o,'\) MON\<^sub>S\<^sub>E) \ ('o list,'\) MON\<^sub>S\<^sub>E" +where "mbind'' [] iostep \ = Some([], \)" | + "mbind'' (a#S) iostep \ = + (case iostep a \ of + None \ mbind'' S iostep \ + | Some (out, \') \ (case mbind'' S iostep \' of + None \ None (* does not occur *) + | Some(outs,\'') \ Some(out#outs,\'')))" + +notation mbind'' ("mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e") (* future name: mbind\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e\<^sub>F\<^sub>a\<^sub>i\<^sub>l *) +declare mbind''.simps[simp del] (* use only more abstract definitions *) + + +text{* mbind' as failure strict operator can be seen as a foldr on bind - + if the types would match \ldots *} + +definition try_SE :: "('o,'\) MON\<^sub>S\<^sub>E \ ('o option,'\) MON\<^sub>S\<^sub>E" ("try\<^sub>S\<^sub>E") +where "try\<^sub>S\<^sub>E ioprog = (\\. case ioprog \ of + None \ Some(None, \) + | Some(outs, \') \ Some(Some outs, \'))" +text{* In contrast, mbind as a failure safe operator can roughly be seen + as a foldr on bind - try: + m1 ; try m2 ; try m3; ... Note, that the rough equivalence only holds for + certain predicates in the sequence - length equivalence modulo None, + for example. However, if a conditional is added, the equivalence + can be made precise: *} + +lemma mbind_try: + "(x \ mbind (a#S) F; M x) = + (a' \ try\<^sub>S\<^sub>E(F a); + if a' = None + then (M []) + else (x \ mbind S F; M (the a' # x)))" +apply(rule ext) +apply(simp add: bind_SE_def try_SE_def) +apply(case_tac "F a x", auto) +apply(simp add: bind_SE_def try_SE_def) +apply(case_tac "mbind S F b", auto) +done + +text{* On this basis, a symbolic evaluation scheme can be established + that reduces mbind-code to try\_SE\_code and ite-cascades. *} + +definition alt_SE :: "[('o, '\)MON\<^sub>S\<^sub>E, ('o, '\)MON\<^sub>S\<^sub>E] \ ('o, '\)MON\<^sub>S\<^sub>E" (infixl "\\<^sub>S\<^sub>E" 10) +where "(f \\<^sub>S\<^sub>E g) = (\ \. 
case f \ of None \ g \ + | Some H \ Some H)" + +definition malt_SE :: "('o, '\)MON\<^sub>S\<^sub>E list \ ('o, '\)MON\<^sub>S\<^sub>E" +where "malt_SE S = foldr alt_SE S fail\<^sub>S\<^sub>E" +notation malt_SE ("\\<^sub>S\<^sub>E") + +lemma malt_SE_mt [simp]: "\\<^sub>S\<^sub>E [] = fail\<^sub>S\<^sub>E" +by(simp add: malt_SE_def) + +lemma malt_SE_cons [simp]: "\\<^sub>S\<^sub>E (a # S) = (a \\<^sub>S\<^sub>E (\\<^sub>S\<^sub>E S))" +by(simp add: malt_SE_def) + + +subsection{* State Backtrack Monads *} +text{*This subsection is still rudimentary and as such an interesting +formal analogue to the previous monad definitions. It is doubtful that it is +interesting for testing and as a cmputational stucture at all. +Clearly more relevant is ``sequence'' instead of ``set,'' which would +rephrase Isabelle's internal tactic concept. *} + +type_synonym ('o, '\) MON\<^sub>S\<^sub>B = "'\ \ ('o \ '\) set" + +definition bind_SB :: "('o, '\)MON\<^sub>S\<^sub>B \ ('o \ ('o', '\)MON\<^sub>S\<^sub>B) \ ('o', '\)MON\<^sub>S\<^sub>B" +where "bind_SB f g \ = \ ((\(out, \). (g out \)) ` (f \))" +notation bind_SB ("bind\<^sub>S\<^sub>B") + +definition unit_SB :: "'o \ ('o, '\)MON\<^sub>S\<^sub>B" ("(returns _)" 8) +where "unit_SB e = (\\. {(e,\)})" +notation unit_SB ("unit\<^sub>S\<^sub>B") + +syntax (xsymbols) + "_bind_SB" :: "[pttrn,('o,'\)MON\<^sub>S\<^sub>B,('o','\)MON\<^sub>S\<^sub>B] \ ('o','\)MON\<^sub>S\<^sub>B" + ("(2 _ := _; _)" [5,8,8]8) +translations + "x := f; g" == "CONST bind_SB f (% x . g)" + + + +lemma bind_left_unit_SB : "(x := returns a; m x) = m a" + by (rule ext,simp add: unit_SB_def bind_SB_def) + +lemma bind_right_unit_SB: "(x := m; returns x) = m" + by (rule ext, simp add: unit_SB_def bind_SB_def) + + +lemma bind_assoc_SB: "(y := (x := m; k x); h y) = (x := m; (y := k x; h y))" + by (rule ext, simp add: unit_SB_def bind_SB_def split_def) + + +subsection{* State Backtrack Exception Monad (vulgo: Boogie-PL) *} +text{* The following combination of the previous two Monad-Constructions +allows for the semantic foundation of a simple generic assertion language +in the style of Schirmers Simpl-Language or Rustan Leino's Boogie-PL language. +The key is to use the exceptional element None for violations of +the assert-statement. *} +type_synonym ('o, '\) MON\<^sub>S\<^sub>B\<^sub>E = "'\ \ (('o \ '\) set) option" + + +definition bind_SBE :: "('o,'\)MON\<^sub>S\<^sub>B\<^sub>E \ ('o \ ('o','\)MON\<^sub>S\<^sub>B\<^sub>E) \ ('o','\)MON\<^sub>S\<^sub>B\<^sub>E" +where "bind_SBE f g = (\\. case f \ of None \ None + | Some S \ (let S' = (\(out, \'). g out \') ` S + in if None \ S' then None + else Some(\ (the ` S'))))" + +syntax (xsymbols) + "_bind_SBE" :: "[pttrn,('o,'\)MON\<^sub>S\<^sub>B\<^sub>E,('o','\)MON\<^sub>S\<^sub>B\<^sub>E] \ ('o','\)MON\<^sub>S\<^sub>B\<^sub>E" + ("(2 _ :\ _; _)" [5,8,8]8) +translations + "x :\ f; g" == "CONST bind_SBE f (% x . g)" + +definition unit_SBE :: "'o \ ('o, '\)MON\<^sub>S\<^sub>B\<^sub>E" ("(returning _)" 8) +where "unit_SBE e = (\\. Some({(e,\)}))" + +definition assert_SBE :: "('\ \ bool) \ (unit, '\)MON\<^sub>S\<^sub>B\<^sub>E" +where "assert_SBE e = (\\. if e \ then Some({((),\)}) + else None)" +notation assert_SBE ("assert\<^sub>S\<^sub>B\<^sub>E") + +definition assume_SBE :: "('\ \ bool) \ (unit, '\)MON\<^sub>S\<^sub>B\<^sub>E" +where "assume_SBE e = (\\. if e \ then Some({((),\)}) + else Some {})" +notation assume_SBE ("assume\<^sub>S\<^sub>B\<^sub>E") + + +definition havoc_SBE :: " (unit, '\)MON\<^sub>S\<^sub>B\<^sub>E" +where "havoc_SBE = (\\. Some({x. 
True}))" +notation havoc_SBE ("havoc\<^sub>S\<^sub>B\<^sub>E") + + +lemma bind_left_unit_SBE : "(x :\ returning a; m x) = m a" + apply (rule ext,simp add: unit_SBE_def bind_SBE_def) + apply (case_tac "m a x",auto) + done + +lemma bind_right_unit_SBE: "(x :\ m; returning x) = m" + apply (rule ext, simp add: unit_SBE_def bind_SBE_def) + apply (case_tac "m x", simp_all add:Let_def) + apply (rule HOL.ccontr, simp add: Set.image_iff) + done + + +lemmas aux = trans[OF HOL.neq_commute,OF Option.not_None_eq] + +lemma bind_assoc_SBE: "(y :\ (x :\ m; k); h y) = (x :\ m; (y :\ k; h y))" +proof (rule ext, rename_tac z, simp add: unit_SBE_def bind_SBE_def, + case_tac "m z", simp_all add: Let_def Set.image_iff, safe) + case goal1 then show ?case + by(rule_tac x="(a, b)" in bexI, simp_all) +next + case goal2 then show ?case + apply(rule_tac x="(aa,b)" in bexI, simp_all add:split_def) + apply(erule_tac x="(aa,b)" in ballE) + apply(auto simp: aux image_def split_def intro!: rev_bexI) + done +next + case goal3 then show ?case + by(rule_tac x="(a, b)" in bexI, simp_all) +next + case goal4 then show ?case + apply(erule_tac Q="None = ?X" in contrapos_pp) + apply(erule_tac x="(aa,b)" and P="\ x. None \ split (\out. k) x" in ballE) + apply(auto simp: aux Option.not_None_eq image_def split_def intro!: rev_bexI) + done +next + case goal5 then show ?case + apply simp apply((erule_tac x="(ab,ba)" in ballE)+) + apply(simp_all add: aux Option.not_None_eq, (erule exE)+, simp add:split_def) + apply(erule rev_bexI,case_tac "None\(\p. h (fst p) (snd p))`y",auto simp:split_def) + done + +next + case goal6 then show ?case + apply simp apply((erule_tac x="(a,b)" in ballE)+) + apply(simp_all add: aux Option.not_None_eq, (erule exE)+, simp add:split_def) + apply(erule rev_bexI, case_tac "None\(\p. h(fst p)(snd p))`y",auto simp:split_def) + done +qed + + + +(* TODO: IF THEN ELSE and WHILE + Monadic Laws + Operational Rules. *) + + + +section{* Valid Execution Sequences in the State Exception Monad *} +text{* This is still an unstructured merge of executable monad concepts +and specification oriented high-level properties initiating test procedures. *} + +definition valid_SE :: "'\ \ (bool,'\) MON\<^sub>S\<^sub>E \ bool" (infix "\" 15) +where "(\ \ m) = (m \ \ None \ fst(the (m \)))" +text{* This notation consideres failures as valid -- a definition +inspired by I/O conformance. BUG: It is not possible to define +this concept once and for all in a Hindley-Milner type-system. +For the moment, we present it only for the state-exception +monad, although for the same definition, this notion is applicable +to other monads as well. *} + + +lemma exec_unit_SE [simp]: "(\ \ (return P)) = (P)" +by(auto simp: valid_SE_def unit_SE_def) + +lemma exec_unit_SE' [simp]: "(\\<^sub>0 \ (\\. Some (f \, \))) = (f \\<^sub>0)" +by(simp add: valid_SE_def ) + +lemma exec_fail_SE [simp]: "(\ \ fail\<^sub>S\<^sub>E) = False" +by(auto simp: valid_SE_def fail_SE_def) + + +lemma exec_fail_SE'[simp]: "\(\\<^sub>0 \ (\\. None))" +by(simp add: valid_SE_def ) + +lemma exec_bind_SE_failure: +"A \ = None \ \(\ \ ((s \ A ; M s)))" +by(simp add: valid_SE_def unit_SE_def bind_SE_def) + +lemma exec_bind_SE_success: +"A \ = Some(b,\') \ (\ \ ((s \ A ; M s))) = (\' \ (M b))" +by(simp add: valid_SE_def unit_SE_def bind_SE_def ) + +lemma exec_bind_SE_success': (* atomic boolean Monad "Query Functions" *) +"M \ = Some(f \,\) \ (\ \ M) = f \" +by(simp add: valid_SE_def unit_SE_def bind_SE_def ) + + + +lemma exec_bind_SE_success'': +"\ \ ((s \ A ; M s)) \ \ v \'. 
the(A \) = (v,\') \ \' \ (M v)" +apply(auto simp: valid_SE_def unit_SE_def bind_SE_def) +apply(cases "A \", simp_all) +apply(simp add: Product_Type.prod_case_unfold) +apply(drule_tac x="A \" and f=the in arg_cong, simp) +apply(rule_tac x="fst aa" in exI) +apply(rule_tac x="snd aa" in exI, auto) +done + + +lemma exec_bind_SE_success''': +"\ \ ((s \ A ; M s)) \ \ a. (A \) = Some a \ (snd a) \ (M (fst a))" +apply(auto simp: valid_SE_def unit_SE_def bind_SE_def) +apply(cases "A \", simp_all) +apply(simp_all add: Product_Type.prod_case_unfold + split: prod.splits) +apply(drule_tac x="A \" and f=the in arg_cong, simp) +apply(rule_tac x="fst aa" in exI) +apply(rule_tac x="snd aa" in exI, auto) +done + + +lemma exec_bind_SE_success'''' : +"\ \ ((s \ A ; M s)) \ \ v \'. A \ = Some(v,\') \ \' \ (M v)" +apply(auto simp: valid_SE_def unit_SE_def bind_SE_def) +apply(cases "A \", simp_all) +apply(simp add: Product_Type.prod_case_unfold) +apply(drule_tac x="A \" and f=the in arg_cong, simp) +apply(rule_tac x="fst aa" in exI) +apply(rule_tac x="snd aa" in exI, auto) +done + + +text{* Recall \verb+mbind_unit+ for the base case. *} + +lemma exec_mbindFSave_failure: +"ioprog a \ = None \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (a#S) ioprog ; M s)) = (\ \ (M []))" +by(simp add: valid_SE_def unit_SE_def bind_SE_def) + +lemma exec_mbindFStop_failure: +"ioprog a \ = None \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (a#S) ioprog ; M s)) = (False)" +by(simp add: exec_bind_SE_failure) + +lemma exec_mbindFPurge_failure: +"ioprog a \ = None \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e (a#S) ioprog ; M s)) = (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e (S) ioprog ; M s))" +by(simp add: valid_SE_def unit_SE_def bind_SE_def mbind''.simps) + + +lemma exec_mbindFSave_success : +"ioprog a \ = Some(b,\') \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (a#S) ioprog ; M s)) = + (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S ioprog ; M (b#s)))" +unfolding valid_SE_def unit_SE_def bind_SE_def +by(cases "mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S ioprog \'", auto) + +lemma exec_mbindFStop_success : +"ioprog a \ = Some(b,\') \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (a#S) ioprog ; M s)) = + (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S ioprog ; M (b#s)))" +unfolding valid_SE_def unit_SE_def bind_SE_def +by(cases "mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S ioprog \'", auto simp: mbind'.simps) + +lemma exec_mbindFPurge_success : +"ioprog a \ = Some(b,\') \ + (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e (a#S) ioprog ; M s)) = + (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e S ioprog ; M (b#s)))" +unfolding valid_SE_def unit_SE_def bind_SE_def +by(cases "mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e S ioprog \'", auto simp: mbind''.simps) + +lemma exec_mbindFSave: +"(\ \ (s \ mbind (a#S) ioprog ; return (P s))) = + (case ioprog a \ of + None \ (\ \ (return (P []))) + | Some(b,\') \ (\' \ (s \ mbind S ioprog ; return (P (b#s)))))" +apply(case_tac "ioprog a \") +apply(auto simp: exec_mbindFSave_failure exec_mbindFSave_success split: prod.splits) 
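+(* after the case distinction on the outcome of ioprog a, the remaining subgoals concern the
+   Some-case; they are closed by rewriting with the conditional rule exec_mbindFSave_success,
+   first in the assumptions and then in the conclusion, its premise being discharged by
+   assumption *)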
+apply(subst (asm) exec_mbindFSave_success,assumption,simp_all) +by(subst exec_mbindFSave_success,assumption,simp)+ + + +text{* Universal splitting and symbolic execution rule *} +lemma exec_mbindFSave_E: +assumes seq : "(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (a#S) ioprog ; (P s)))" + and none: "ioprog a \ = None \ (\ \ (P [])) \ Q" + and some: "\ b \'. ioprog a \ = Some(b,\') \ (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S ioprog;(P (b#s)))) \ Q " +shows "Q" +using seq +proof(cases "ioprog a \") + case None assume ass:"ioprog a \ = None" show "Q" + apply(rule none[OF ass]) + apply(insert ass, erule_tac ioprog1=ioprog in exec_mbindFSave_failure[THEN iffD1],rule seq) + done +next + case (Some aa) assume ass:"ioprog a \ = Some aa" show "Q" + apply(insert ass,cases "aa",simp, rename_tac "out" "\'") + apply(erule some) + apply(insert ass,simp) + apply(erule_tac ioprog1=ioprog in exec_mbindFSave_success[THEN iffD1],rule seq) + done +qed + +text{* The next rule reveals the particular interest in deduction; + as an elimination rule, it allows for a linear conversion of a validity judgement + @{term "mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p"} over an input list @{term "S"} into a constraint system; without any + branching ... Symbolic execution can even be stopped tactically whenever + @{term "ioprog a \ = Some(b,\')"} comes to a contradiction. *} +lemma exec_mbindFStop_E: +assumes seq : "(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (a#S) ioprog ; (P s)))" + and some: "\b \'. ioprog a \ = Some(b,\') \ (\'\ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S ioprog;(P(b#s)))) \ Q" +shows "Q" +using seq +proof(cases "ioprog a \") + case None assume ass:"ioprog a \ = None" show "Q" + apply(insert ass seq) + apply(drule_tac \=\ and S=S and M=P in exec_mbindFStop_failure, simp) + done +next + case (Some aa) assume ass:"ioprog a \ = Some aa" show "Q" + apply(insert ass,cases "aa",simp, rename_tac "out" "\'") + apply(erule some) + apply(insert ass,simp) + apply(erule_tac ioprog1=ioprog in exec_mbindFStop_success[THEN iffD1],rule seq) + done +qed + + +lemma exec_mbindFPurge_E: +assumes seq : "(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e (a#S) ioprog ; (P s)))" + and none: "ioprog a \ = None \ (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e S ioprog;(P (s)))) \ Q" + and some: "\ b \'. 
ioprog a \ = Some(b,\') \ (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e S ioprog;(P (b#s)))) \ Q " +shows "Q" +using seq +proof(cases "ioprog a \") + case None assume ass:"ioprog a \ = None" show "Q" + apply(rule none[OF ass]) + apply(insert ass, erule_tac ioprog1=ioprog in exec_mbindFPurge_failure[THEN iffD1],rule seq) + done +next + case (Some aa) assume ass:"ioprog a \ = Some aa" show "Q" + apply(insert ass,cases "aa",simp, rename_tac "out" "\'") + apply(erule some) + apply(insert ass,simp) + apply(erule_tac ioprog1=ioprog in exec_mbindFPurge_success[THEN iffD1],rule seq) + done +qed + + +lemma assert_disch1 :" P \ \ (\ \ (x \ assert\<^sub>S\<^sub>E P; M x)) = (\ \ (M True))" +by(auto simp: bind_SE_def assert_SE_def valid_SE_def) + +lemma assert_disch2 :" \ P \ \ \ (\ \ (x \ assert\<^sub>S\<^sub>E P ; M s))" +by(auto simp: bind_SE_def assert_SE_def valid_SE_def) + +lemma assert_disch3 :" \ P \ \ \ (\ \ (assert\<^sub>S\<^sub>E P))" +by(auto simp: bind_SE_def assert_SE_def valid_SE_def) + +lemma assert_D : "(\ \ (x \ assert\<^sub>S\<^sub>E P; M x)) \ P \ \ (\ \ (M True))" +by(auto simp: bind_SE_def assert_SE_def valid_SE_def split: HOL.split_if_asm) + +lemma assume_D : "(\ \ (x \ assume\<^sub>S\<^sub>E P; M x)) \ \ \. (P \ \ \ \ (M ()))" +apply(auto simp: bind_SE_def assume_SE_def valid_SE_def split: HOL.split_if_asm) +apply(rule_tac x="Eps P" in exI, auto) +apply(rule_tac x="True" in exI, rule_tac x="b" in exI) +apply(subst Hilbert_Choice.someI,assumption,simp) +apply(subst Hilbert_Choice.someI,assumption,simp) +done +text{* These two rule prove that the SE Monad in connection with the notion of valid sequence +is actually sufficient for a representation of a Boogie-like language. The SBE monad with explicit +sets of states --- to be shown below --- is strictly speaking not necessary (and will therefore +be discontinued in the development). *} + +term "if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi" + +lemma if_SE_D1 : "P \ \ (\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)) = (\ \ B\<^sub>1)" +by(auto simp: if_SE_def valid_SE_def) + +lemma if_SE_D2 : "\ P \ \ (\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)) = (\ \ B\<^sub>2)" +by(auto simp: if_SE_def valid_SE_def) + +lemma if_SE_split_asm : " (\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)) = ((P \ \ (\ \ B\<^sub>1)) \ (\ P \ \ (\ \ B\<^sub>2)))" +by(cases "P \",auto simp: if_SE_D1 if_SE_D2) + +lemma if_SE_split : " (\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)) = ((P \ \ (\ \ B\<^sub>1)) \ (\ P \ \ (\ \ B\<^sub>2)))" +by(cases "P \", auto simp: if_SE_D1 if_SE_D2) + + +lemma if_SE_execE: + assumes A: "\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)" + and B: "P \ \ \ \ B\<^sub>1 \ Q" + and C: "\ P \\ \ \ B\<^sub>2 \ Q" + shows "Q" +by(insert A [simplified if_SE_split],cases "P \", simp_all, auto elim: B C) + + +lemma [code]: + "(\ \ m) = (case (m \) of None \ False | (Some (x,y)) \ x)" + apply(simp add: valid_SE_def) + apply(cases "m \ = None", simp_all) + apply(insert not_None_eq, auto) +done + + +text{* Test-Refinements will be stated in terms of the failsave @{term mbind}, opting + more generality. 
The following lemma allows for an optimization both in + test execution as well as in symbolic execution for an important special case of + the post-codition: Whenever the latter has the constraint that the length of input and + output sequence equal each other (that is to say: no failure occured), failsave mbind + can be reduced to failstop mbind ... *} +lemma mbindFSave_vs_mbindFStop : + "(\ \ (os \ (mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e \s ioprog); return(length \s = length os \ P \s os))) = + (\ \ (os \ (mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p \s ioprog); return(P \s os)))" + apply(rule_tac x=P in spec) + apply(rule_tac x=\ in spec) + proof(induct "\s") + case Nil show ?case by(simp_all add: mbind_try try_SE_def del: Monads.mbind.simps) + case (Cons a \s) show ?case + apply(rule allI, rename_tac "\",rule allI, rename_tac "P") + apply(insert Cons.hyps) + apply(case_tac "ioprog a \") + apply(simp only: exec_mbindFSave_failure exec_mbindFStop_failure, simp) + apply(simp add: split_paired_all del: Monads.mbind.simps ) + apply(rename_tac "\'") + apply(subst exec_mbindFSave_success, assumption) + apply(subst (2) exec_bind_SE_success, assumption) + apply(erule_tac x="\'" in allE) + apply(erule_tac x="\\s s. P (a # \s) (aa # s)" in allE) (* heureka ! *) + apply(simp) + done + qed + + +lemma mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e_vs_mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p: +assumes A: "\ \ \. ioprog \ \ \ None" +shows "(\ \ (os \ (mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e \s ioprog); P os)) = + (\ \ (os \ (mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p \s ioprog); P os))" +proof(induct "\s") print_cases + case Nil show ?case by simp +next + case (Cons a \s) + from Cons.hyps + have B:"\ S f \. mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S f \ \ None " by simp + have C:"\\. mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p \s ioprog \ = mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e \s ioprog \" + apply(induct \s, simp) + apply(rule allI,rename_tac "\") + apply(simp add: Monads.mbind'.simps(2)) + apply(insert A, erule_tac x="a" in allE) + apply(erule_tac x="\" and P="\\ . ioprog a \ \ None" in allE) + apply(auto split:option.split) + done + show ?case + apply(insert A,erule_tac x="a" in allE,erule_tac x="\" in allE) + apply(simp, elim exE) + apply(rename_tac "out" "\'") + apply(insert B, erule_tac x=\s in allE, erule_tac x=ioprog in allE, erule_tac x=\' in allE) + apply(subst(asm) not_None_eq, elim exE) + apply(subst Monads.exec_bind_SE_success) + apply(simp split: option.split, auto) + apply(rule_tac s="(\ a b c. a # (fst c)) out \' (aa, b)" in trans, simp,rule refl) + apply(rule_tac s="(\ a b c. (snd c)) out \' (aa, b)" in trans, simp,rule refl) + apply(simp_all) + apply(subst Monads.exec_bind_SE_success, assumption) + apply(subst Monads.exec_bind_SE_success) + apply(rule_tac s="Some (aa, b)" in trans,simp_all add:C) + apply(subst(asm) Monads.exec_bind_SE_success, assumption) + apply(subst(asm) Monads.exec_bind_SE_success) + apply(rule_tac s="Some (aa, b)" in trans,simp_all add:C) + done +qed + + +section{* Valid Test Sequences in the State Exception Backtrack Monad *} +text{* This is still an unstructured merge of executable monad concepts +and specification oriented high-level properties initiating test procedures. 
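+As an illustration of the intended use, the subsequent lemma \texttt{assume\_assert}
+reduces the validity of a sequenced assumption and assertion to the familiar
+verification condition
+\begin{displaymath}
+  (\sigma \models_{SBE} (\_ :\equiv \mathtt{assume}_{SBE}\ P;\ \mathtt{assert}_{SBE}\ Q))
+  \;=\; (P\ \sigma \longrightarrow Q\ \sigma),
+\end{displaymath}
+i.e.\ such a program fragment is valid in a state exactly when the assumption implies the
+assertion there.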
*} + +definition valid_SBE :: "'\ \ ('a,'\) MON\<^sub>S\<^sub>B\<^sub>E \ bool" (infix "\\<^sub>S\<^sub>B\<^sub>E" 15) +where "\ \\<^sub>S\<^sub>B\<^sub>E m \ (m \ \ None)" +text{* This notation consideres all non-failures as valid. *} + + +lemma assume_assert: "(\ \\<^sub>S\<^sub>B\<^sub>E ( _ :\ assume\<^sub>S\<^sub>B\<^sub>E P ; assert\<^sub>S\<^sub>B\<^sub>E Q)) = (P \ \ Q \)" + by(simp add: valid_SBE_def assume_SBE_def assert_SBE_def bind_SBE_def) + +lemma assert_intro: "Q \ \ \ \\<^sub>S\<^sub>B\<^sub>E (assert\<^sub>S\<^sub>B\<^sub>E Q)" + by(simp add: valid_SBE_def assume_SBE_def assert_SBE_def bind_SBE_def) + +lemma assume_dest: + "\ \ \\<^sub>S\<^sub>B\<^sub>E (x :\ assume\<^sub>S\<^sub>B\<^sub>E Q; M x); Q \' \ \ \ \\<^sub>S\<^sub>B\<^sub>E M ()" + apply(auto simp: valid_SBE_def assume_SBE_def assert_SBE_def bind_SBE_def) + apply(cases "Q \",simp_all) + oops + +text{* This still needs work. What would be needed is a kind + of wp - calculus that comes out of that. So far: nope. *} + +subsection{* Legacy Bindings *} + + +lemma valid_true[simp]: (* legacy: special case *) + "(\ \ (s \ return x ; return (P s))) = P x" by simp + + +(* +lemmas valid_both = exec_mbindFSave (* legacy *) +lemmas valid_success = exec_mbindFSave_success (* legacy *) +lemmas valid_success'' = exec_mbindFSave_success(* legacy *) +lemmas valid_success' = exec_bind_SE_success (* legacy *) +lemmas valid_failure = exec_mbindFSave_failure (* legacy : *) +lemmas valid_failure' = exec_bind_SE_failure (* legacy *) +lemmas valid_failure''=valid_failure (* legacy : *) +lemmas valid_failure''' = exec_mbindFStop_failure (* legacy : *) +lemmas valid_propagate_fail = exec_fail_SE (* legacy *) +lemmas valid_propagate_fail' = exec_fail_SE' (* legacy *) +lemmas valid_propoagate_3' = valid_propagate_fail' (* legacy *) +lemmas valid_propagate_2 = exec_bind_SE_success''(* legacy *) +lemmas valid_propagate_1 = exec_unit_SE (* legacy *) +lemmas valid_successElem = exec_bind_SE_success' (* legacy *) +lemmas valid_propagate_2' = exec_bind_SE_success'''(* legacy *) +lemmas valid_propagate_2'' = exec_bind_SE_success'''' (* legacy *) +lemmas valid_propoagate_3 = exec_unit_SE' (* legacy *) + *) +(* legacy ?: +lemma valid_success'': +"ioprog a \ = Some(b,\') \ + (\ \ (s \ mbind (a#S) ioprog ; return (P s))) = + (\' \ (s \ mbind S ioprog ; return (P (b#s))))" +unfolding valid_SE_def unit_SE_def bind_SE_def +by(cases "mbind S ioprog \'", auto) +*) + +end diff --git a/src/test-gen/src/main/Testing.thy b/src/test-gen/src/main/Testing.thy new file mode 100644 index 0000000..94c6ff0 --- /dev/null +++ b/src/test-gen/src/main/Testing.thy @@ -0,0 +1,57 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * Testing.thy --- wiring everything together. + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2007 ETH Zurich, Switzerland + * 2008-2015 Achim D. Brucker, Germany + * 2009-2016 Université Paris-Sud, France + * 2016 The University of Sheffield, UK + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id:$ *) + +chapter {* Summing up *} + +theory + Testing +imports + TestGen + TestLib + TestScript + "codegen_fsharp/code_fsharp" +begin + + +end diff --git a/src/test-gen/src/main/clocks.ML b/src/test-gen/src/main/clocks.ML new file mode 100644 index 0000000..0c506cd --- /dev/null +++ b/src/test-gen/src/main/clocks.ML @@ -0,0 +1,352 @@ + +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * clocks.ML --- time measurements + * This file is part of HOL-TestGen. + * + * Copyright (c) 2010 University Paris-Sud, France + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ ******************************************************************************) + +signature CLOCKENV = +sig + type clockenv + + val mt_clockenv : clockenv + val merge_clockenv : clockenv * clockenv -> clockenv + + val text_stats : clockenv -> string + val latex_stats : clockenv -> string + + val start_clock : string -> clockenv -> clockenv + val stop_clock : string -> clockenv -> clockenv + val next_clock : clockenv -> clockenv + + val rename_clock : string -> string -> clockenv -> clockenv +end + +structure ClockEnv : CLOCKENV = +struct + +structure IDtab = + Table(type key = (string * int) list val ord = list_ord (prod_ord fast_string_ord int_ord)); + +val next_time_id = Unsynchronized.ref 0 + +fun add_time tab key time = let + val fresh_id = !next_time_id + val _ = next_time_id := !next_time_id + 1 +in + IDtab.map_default (key, Inttab.empty) (fn stats => Inttab.update_new (fresh_id, time) stats) tab +end + +fun sum_times tab = let + fun fold_tab stats = Inttab.fold (fn (_,time) => fn sum => time + sum) stats Time.zeroTime + val tab' = IDtab.map (K fold_tab) tab +in + IDtab.dest tab' +end + +type clockinfo = { + timer_stack : Timer.real_timer list, + id_stack : (string * int) list, + timetab : Time.time Inttab.table IDtab.table, + error_occurred : bool +} + +datatype clockenv = Clockenv of clockinfo + +fun rep_clockenv (Clockenv X) = X; +fun mk_clockenv (X:clockinfo)= Clockenv X + +val mt_clockenv = Clockenv{timer_stack = [], + id_stack = [], + timetab = IDtab.empty, + error_occurred = false}; + +fun merge_clockenv + (Clockenv{timer_stack = ts, + id_stack = ids, + timetab = tt, + error_occurred = e}, + Clockenv{timer_stack = ts', + id_stack = ids', + timetab = tt', + error_occurred = e'}) = let + + fun merge_stats tab1 tab2 = Inttab.join (fn time_id => fn (x, y) => x) (tab1, tab2) + (* here we always have x = y for the same time_id *) + +in + Clockenv{timer_stack = [], + id_stack = [], + timetab = IDtab.join (fn key => fn (x, y) => merge_stats x y) (tt, tt'), + error_occurred = e orelse e'} +end + +fun clean str = String.translate (fn #" " => "_" | ch => String.str ch) str + +fun string_of_id (name,n) = + if n = 0 then + clean name + else + (clean name) ^ "_" ^ (Int.toString n) + +fun text_stats' (Clockenv{timetab = tt,...}) = let + fun string_of_ids ids = String.concatWith "/" (map string_of_id (rev ids)) + val maxlen = List.foldl Int.max 0 (map (size o string_of_ids) (IDtab.keys tt)) + val string_of_ids' = (StringCvt.padRight #" " maxlen) o string_of_ids + fun string_of_entry (ids, time) = "Total time spent in " ^ (string_of_ids' ids) ^": " ^ (Time.toString time) +in + String.concatWith "\n" (map string_of_entry (sum_times tt)) +end + +fun latex_tab entries = let + fun string_of_ids ids = "\\protect\\path{" ^ (String.concatWith "/" (map string_of_id (rev ids))) ^ "}" + fun string_of_entry (ids, time) = (string_of_ids ids) ^ " & " ^ (Time.toString time) ^ "\\\\ \n" + val inner = String.concat (map string_of_entry entries) + val headers = "Location & Time\\\\ \\hline\n" +in + "\\begin{tabular}{l|r}\n" ^ headers ^ inner ^ "\\end{tabular}\n" +end + +fun latex_stats' (Clockenv{timetab = tt,...}) = let + val entries = sum_times tt + fun toplevel_name entry = (fst o List.last o fst) entry + val toplevel_names = distinct (op =) (map toplevel_name entries) + fun has_name name entry = (toplevel_name entry) = name + val sorted_entries = map (fn name => (name, filter (has_name name) entries)) toplevel_names + fun latex_unit (name, entries) = "\\begin{table}\n\\centering"^ (latex_tab entries) + 
^"\\caption{Time consumed by \\protect\\path{" ^ (clean name) ^ "}" + ^"\\label{tab:" ^ (clean name) ^ "}}\n\\end{table}\n%%%\n" + +in + (String.concatWith "\n%%%\n" (map latex_unit sorted_entries))^"\n" +end + +fun check_error f clockenv = + if #error_occurred (rep_clockenv clockenv) then + "An error occurred during profiling." + else + f clockenv + +val text_stats = check_error text_stats' +val latex_stats = check_error latex_stats' + +fun start_id (Clockenv{timer_stack = ts, id_stack = ids, timetab= tt, error_occurred = e}) newid = + Clockenv{timer_stack = (Timer.startRealTimer ()) :: ts, + id_stack = newid :: ids, + timetab = tt, + error_occurred = e}; + +fun stop_id (Clockenv{timer_stack = ts, id_stack = ids, timetab= tt, error_occurred = e}) = let + val (timer::timers) = ts + val elapsed = Timer.checkRealTimer timer + val (id::remaining_ids) = ids +in + Clockenv{timer_stack = timers, + id_stack = remaining_ids, + timetab = add_time tt ids elapsed, + error_occurred = e} +end + +fun rewrite_list find replace x = let + val r = rev x + val prefix = List.take (r, Int.min(length find, length r)) +in + if prefix = find then + rev (replace @ (List.drop (r, length find))) + else + x +end + +fun rename_id str (Clockenv{timer_stack = ts, id_stack = ids, timetab= tt, error_occurred = e}) = let + val ((name, n)::remaining_ids) = ids + val new_ids = (str,n)::remaining_ids + fun rewrite_entry (key, value) = (rewrite_list ids new_ids key, value) +in + Clockenv{timer_stack = ts, + id_stack = new_ids, + timetab = IDtab.make (map rewrite_entry (IDtab.dest tt)), + error_occurred = e} +end + + +fun add_error (Clockenv{timer_stack = ts, id_stack = ids, timetab= tt, error_occurred = e}) = + Clockenv{timer_stack = ts, + id_stack = ids, + timetab = tt, + error_occurred = true}; + +fun start_clock clockname clockenv = start_id clockenv (clockname, 0) + +fun stop_clock clockname clockenv = + if null (#timer_stack (rep_clockenv clockenv)) then + add_error clockenv + else let + val ((idname,_)::ids) = #id_stack (rep_clockenv clockenv) + in + if idname = clockname then + stop_id clockenv + else + add_error clockenv + end + +fun next_clock clockenv = + if null (#timer_stack (rep_clockenv clockenv)) then + add_error clockenv + else let + val ((clockname,n)::ids) = #id_stack (rep_clockenv clockenv) + in + start_id (stop_id clockenv)(clockname,n+1) + end + +fun rename_clock oldname newname clockenv = + if null (#timer_stack (rep_clockenv clockenv)) then + add_error clockenv + else let + val ((idname,_)::ids) = #id_stack (rep_clockenv clockenv) + in + if idname = oldname then + rename_id newname clockenv + else + add_error clockenv + end + +end; + +structure Clocks_DataManagement = Generic_Data +( + type T = ClockEnv.clockenv + val empty = ClockEnv.mt_clockenv + fun extend T = T + val merge = ClockEnv.merge_clockenv +); + +(* FIXME ignored + fun print sg ce = + (writeln "Runtime statistics:"; + writeln (ClockEnv.text_stats ce)); +*) + +signature CLOCKS = +sig + val init_clocks : theory -> unit + val flush_clocks : theory -> theory + + val start_clock : string -> unit + val stop_clock : string -> unit + val next_clock : unit -> unit + + val start_clock_tac : string -> tactic + val stop_clock_tac : string -> tactic + val next_clock_tac : unit -> tactic + + val string_of_clocks: theory -> string + val write_clocks : theory -> string -> unit + + val rename_clock : string -> string -> unit +end + +structure Clocks : CLOCKS = +struct + +val env_ref = Unsynchronized.ref ClockEnv.mt_clockenv + +fun init_clocks thy = env_ref 
:= Clocks_DataManagement.get(Context.Theory thy) + +fun flush_clocks thy = Context.theory_of (Clocks_DataManagement.put (!env_ref) (Context.Theory thy)) + +fun start_clock name = env_ref := ClockEnv.start_clock name (!env_ref) + +fun stop_clock name = env_ref := ClockEnv.stop_clock name (!env_ref) + +fun next_clock () = env_ref := ClockEnv.next_clock (!env_ref) + +fun start_clock_tac clockname thm = (start_clock clockname; all_tac thm) + +fun stop_clock_tac clockname thm = (stop_clock clockname; all_tac thm) + +fun next_clock_tac () thm = (next_clock (); all_tac thm) + +fun rename_clock oldname newname = env_ref := ClockEnv.rename_clock oldname newname (!env_ref) + +fun write_clocks thy fname = let + val _ = init_clocks thy + val to_write = ClockEnv.latex_stats (!env_ref) + val _ = File.write (Path.explode fname) to_write; +in + () +end + +fun string_of_clocks thy = (init_clocks thy; ClockEnv.text_stats (!env_ref)) + +fun start_clock_command clockname thy = let + val result = Context.theory_of (Clocks_DataManagement.map (ClockEnv.start_clock clockname) (Context.Theory thy)) +in + (init_clocks result; result) +end + +val _ = Outer_Syntax.command @{command_spec "start_clock"} "starts a clock for measuring runtime" + (Parse.string >> (Toplevel.theory o start_clock_command)); + +fun stop_clock_command clockname thy = let + val result = Context.theory_of (Clocks_DataManagement.map (ClockEnv.stop_clock clockname) (Context.Theory thy)) +in + (init_clocks result; result) +end + +val _ = Outer_Syntax.command @{command_spec "stop_clock"} "stops a clock for measuring runtime" + (Parse.string >> (Toplevel.theory o stop_clock_command)); + +fun next_clock_command thy = let + val result = Context.theory_of (Clocks_DataManagement.map ClockEnv.next_clock (Context.Theory thy)) +in + (init_clocks result; result) +end + +val _ = Outer_Syntax.command @{command_spec "next_clock"} "increments the ID of the current clock" + (Scan.succeed (Toplevel.theory next_clock_command)); + +fun print_clocks_command thy = (writeln (string_of_clocks thy); init_clocks thy; thy) + +val _ = Outer_Syntax.command @{command_spec "print_clocks"} "print runtime statistics" + (Scan.succeed (Toplevel.theory print_clocks_command)); + +fun write_clocks_command fname thy = (write_clocks thy fname; init_clocks thy; thy) + +val _ = Outer_Syntax.command @{command_spec "write_clocks"} "write a table with the total runtimes measured to a file" + (Parse.string >> (Toplevel.theory o write_clocks_command)); + +end; diff --git a/src/test-gen/src/main/clocks.thy b/src/test-gen/src/main/clocks.thy new file mode 100644 index 0000000..7a61495 --- /dev/null +++ b/src/test-gen/src/main/clocks.thy @@ -0,0 +1,370 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * clocks.ML --- time measurements + * This file is part of HOL-TestGen. + * + * Copyright (c) 2010 University Paris-Sud, France + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) + +theory clocks +imports Main +keywords "start_clock" "stop_clock" "next_clock" "print_clocks" "write_clocks" :: thy_decl +begin + + +ML{* +signature CLOCKENV = +sig + type clockenv + + val mt_clockenv : clockenv + val merge_clockenv : clockenv * clockenv -> clockenv + + val text_stats : clockenv -> string + val latex_stats : clockenv -> string + + val start_clock : string -> clockenv -> clockenv + val stop_clock : string -> clockenv -> clockenv + val next_clock : clockenv -> clockenv + + val rename_clock : string -> string -> clockenv -> clockenv +end +*} + + +ML{* + +structure ClockEnv : CLOCKENV = +struct + +structure IDtab = + Table(type key = (string * int) list val ord = list_ord (prod_ord fast_string_ord int_ord)); + +val next_time_id = Unsynchronized.ref 0 + +fun add_time tab key time = let + val fresh_id = !next_time_id + val _ = next_time_id := !next_time_id + 1 +in + IDtab.map_default (key, Inttab.empty) (fn stats => Inttab.update_new (fresh_id, time) stats) tab +end + +fun sum_times tab = let + fun fold_tab stats = Inttab.fold (fn (_,time) => fn sum => time + sum) stats Time.zeroTime + val tab' = IDtab.map (K fold_tab) tab +in + IDtab.dest tab' +end + +type clockinfo = { + timer_stack : Timer.real_timer list, + id_stack : (string * int) list, + timetab : Time.time Inttab.table IDtab.table, + error_occurred : bool +} + +datatype clockenv = Clockenv of clockinfo + +fun rep_clockenv (Clockenv X) = X; +fun mk_clockenv (X:clockinfo)= Clockenv X + +val mt_clockenv = Clockenv{timer_stack = [], + id_stack = [], + timetab = IDtab.empty, + error_occurred = false}; + +fun merge_clockenv + (Clockenv{timer_stack = ts, + id_stack = ids, + timetab = tt, + error_occurred = e}, + Clockenv{timer_stack = ts', + id_stack = ids', + timetab = tt', + error_occurred = e'}) = let + + fun merge_stats tab1 tab2 = Inttab.join (fn time_id => fn (x, y) => x) (tab1, tab2) + (* here we always have x = y for the same time_id *) + +in + Clockenv{timer_stack = [], + id_stack = [], + timetab = IDtab.join (fn key => fn (x, y) => merge_stats x y) (tt, tt'), + error_occurred = e orelse e'} +end + +fun clean str = String.translate (fn #" " => "_" | ch => String.str ch) str + +fun string_of_id (name,n) = + if n = 0 then + 
clean name + else + (clean name) ^ "_" ^ (Int.toString n) + +fun text_stats' (Clockenv{timetab = tt,...}) = let + fun string_of_ids ids = String.concatWith "/" (map string_of_id (rev ids)) + val maxlen = List.foldl Int.max 0 (map (size o string_of_ids) (IDtab.keys tt)) + val string_of_ids' = (StringCvt.padRight #" " maxlen) o string_of_ids + fun string_of_entry (ids, time) = "Total time spent in " ^ (string_of_ids' ids) ^": " ^ (Time.toString time) +in + String.concatWith "\n" (map string_of_entry (sum_times tt)) +end + +fun latex_tab entries = let + fun string_of_ids ids = "\\protect\\path{" ^ (String.concatWith "/" (map string_of_id (rev ids))) ^ "}" + fun string_of_entry (ids, time) = (string_of_ids ids) ^ " & " ^ (Time.toString time) ^ "\\\\ \n" + val inner = String.concat (map string_of_entry entries) + val headers = "Location & Time\\\\ \\hline\n" +in + "\\begin{tabular}{l|r}\n" ^ headers ^ inner ^ "\\end{tabular}\n" +end + +fun latex_stats' (Clockenv{timetab = tt,...}) = let + val entries = sum_times tt + fun toplevel_name entry = (fst o List.last o fst) entry + val toplevel_names = distinct (op =) (map toplevel_name entries) + fun has_name name entry = (toplevel_name entry) = name + val sorted_entries = map (fn name => (name, filter (has_name name) entries)) toplevel_names + fun latex_unit (name, entries) = "\\begin{table}\n\\centering"^ (latex_tab entries) + ^"\\caption{Time consumed by \\protect\\path{" ^ (clean name) ^ "}" + ^"\\label{tab:" ^ (clean name) ^ "}}\n\\end{table}\n%%%\n" + +in + (String.concatWith "\n%%%\n" (map latex_unit sorted_entries))^"\n" +end + +fun check_error f clockenv = + if #error_occurred (rep_clockenv clockenv) then + "An error occurred during profiling." + else + f clockenv + +val text_stats = check_error text_stats' +val latex_stats = check_error latex_stats' + +fun start_id (Clockenv{timer_stack = ts, id_stack = ids, timetab= tt, error_occurred = e}) newid = + Clockenv{timer_stack = (Timer.startRealTimer ()) :: ts, + id_stack = newid :: ids, + timetab = tt, + error_occurred = e}; + +fun stop_id (Clockenv{timer_stack = ts, id_stack = ids, timetab= tt, error_occurred = e}) = let + val (timer::timers) = ts + val elapsed = Timer.checkRealTimer timer + val (id::remaining_ids) = ids +in + Clockenv{timer_stack = timers, + id_stack = remaining_ids, + timetab = add_time tt ids elapsed, + error_occurred = e} +end + +fun rewrite_list find replace x = let + val r = rev x + val prefix = List.take (r, Int.min(length find, length r)) +in + if prefix = find then + rev (replace @ (List.drop (r, length find))) + else + x +end + +fun rename_id str (Clockenv{timer_stack = ts, id_stack = ids, timetab= tt, error_occurred = e}) = let + val ((name, n)::remaining_ids) = ids + val new_ids = (str,n)::remaining_ids + fun rewrite_entry (key, value) = (rewrite_list ids new_ids key, value) +in + Clockenv{timer_stack = ts, + id_stack = new_ids, + timetab = IDtab.make (map rewrite_entry (IDtab.dest tt)), + error_occurred = e} +end + + +fun add_error (Clockenv{timer_stack = ts, id_stack = ids, timetab= tt, error_occurred = e}) = + Clockenv{timer_stack = ts, + id_stack = ids, + timetab = tt, + error_occurred = true}; + +fun start_clock clockname clockenv = start_id clockenv (clockname, 0) + +fun stop_clock clockname clockenv = + if null (#timer_stack (rep_clockenv clockenv)) then + add_error clockenv + else let + val ((idname,_)::ids) = #id_stack (rep_clockenv clockenv) + in + if idname = clockname then + stop_id clockenv + else + add_error clockenv + end + +fun next_clock clockenv = 
+ if null (#timer_stack (rep_clockenv clockenv)) then + add_error clockenv + else let + val ((clockname,n)::ids) = #id_stack (rep_clockenv clockenv) + in + start_id (stop_id clockenv)(clockname,n+1) + end + +fun rename_clock oldname newname clockenv = + if null (#timer_stack (rep_clockenv clockenv)) then + add_error clockenv + else let + val ((idname,_)::ids) = #id_stack (rep_clockenv clockenv) + in + if idname = oldname then + rename_id newname clockenv + else + add_error clockenv + end + +end; +*} + + +ML{* +structure Clocks_DataManagement = Generic_Data +( + type T = ClockEnv.clockenv + val empty = ClockEnv.mt_clockenv + fun extend T = T + val merge = ClockEnv.merge_clockenv +); + +(* FIXME ignored + fun print sg ce = + (writeln "Runtime statistics:"; + writeln (ClockEnv.text_stats ce)); +*) + +signature CLOCKS = +sig + val init_clocks : theory -> unit + val flush_clocks : theory -> theory + + val start_clock : string -> unit + val stop_clock : string -> unit + val next_clock : unit -> unit + + val start_clock_tac : string -> tactic + val stop_clock_tac : string -> tactic + val next_clock_tac : unit -> tactic + + val string_of_clocks: theory -> string + val write_clocks : theory -> string -> unit + + val rename_clock : string -> string -> unit +end +*} + +ML{* +structure Clocks : CLOCKS = +struct + +val env_ref = Unsynchronized.ref ClockEnv.mt_clockenv + +fun init_clocks thy = env_ref := Clocks_DataManagement.get(Context.Theory thy) + +fun flush_clocks thy = Context.theory_of (Clocks_DataManagement.put (!env_ref) (Context.Theory thy)) + +fun start_clock name = env_ref := ClockEnv.start_clock name (!env_ref) + +fun stop_clock name = env_ref := ClockEnv.stop_clock name (!env_ref) + +fun next_clock () = env_ref := ClockEnv.next_clock (!env_ref) + +fun start_clock_tac clockname thm = (start_clock clockname; all_tac thm) + +fun stop_clock_tac clockname thm = (stop_clock clockname; all_tac thm) + +fun next_clock_tac () thm = (next_clock (); all_tac thm) + +fun rename_clock oldname newname = env_ref := ClockEnv.rename_clock oldname newname (!env_ref) + +fun write_clocks thy fname = let + val _ = init_clocks thy + val to_write = ClockEnv.latex_stats (!env_ref) + val _ = File.write (Path.explode fname) to_write; +in + () +end + +fun string_of_clocks thy = (init_clocks thy; ClockEnv.text_stats (!env_ref)) + +fun start_clock_command clockname thy = let + val result = Context.theory_of (Clocks_DataManagement.map (ClockEnv.start_clock clockname) (Context.Theory thy)) +in + (init_clocks result; result) +end + +val _ = Outer_Syntax.command @{command_keyword start_clock} "starts a clock for measuring runtime" + (Parse.string >> (Toplevel.theory o start_clock_command)); + +fun stop_clock_command clockname thy = let + val result = Context.theory_of (Clocks_DataManagement.map (ClockEnv.stop_clock clockname) (Context.Theory thy)) +in + (init_clocks result; result) +end + +val _ = Outer_Syntax.command @{command_keyword stop_clock} "stops a clock for measuring runtime" + (Parse.string >> (Toplevel.theory o stop_clock_command)); + +fun next_clock_command thy = let + val result = Context.theory_of (Clocks_DataManagement.map ClockEnv.next_clock (Context.Theory thy)) +in + (init_clocks result; result) +end + +val _ = Outer_Syntax.command @{command_keyword next_clock} "increments the ID of the current clock" + (Scan.succeed (Toplevel.theory next_clock_command)); + +fun print_clocks_command thy = (writeln (string_of_clocks thy); init_clocks thy; thy) + +val _ = Outer_Syntax.command @{command_keyword print_clocks} 
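+(* Usage sketch (illustration only; the clock name "codegen" is arbitrary):
+   the commands declared in this block are meant to bracket the part of a
+   theory whose runtime is of interest, e.g.
+
+     start_clock "codegen"
+       ...
+     stop_clock "codegen"
+     print_clocks
+
+   while Clocks.start_clock_tac and Clocks.stop_clock_tac perform the same
+   bookkeeping as tactics, so an individual proof step can be timed by
+   composing them with the tactic in question. *)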
"print runtime statistics" + (Scan.succeed (Toplevel.theory print_clocks_command)); + +fun write_clocks_command fname thy = (write_clocks thy fname; init_clocks thy; thy) + +val _ = Outer_Syntax.command @{command_keyword write_clocks} "write a table with the total runtimes measured to a file" + (Parse.string >> (Toplevel.theory o write_clocks_command)); + +end; +*} + +end diff --git a/src/test-gen/src/main/codegen_C_pthread/Code_C_pthread.thy b/src/test-gen/src/main/codegen_C_pthread/Code_C_pthread.thy new file mode 100644 index 0000000..5d2bb3c --- /dev/null +++ b/src/test-gen/src/main/codegen_C_pthread/Code_C_pthread.thy @@ -0,0 +1,86 @@ +theory Code_C_pthread +imports Main "../TestLib" +keywords "gen_C_pthread" :: "qed_global" +begin + +subsection {*C pthread Header term*} + +ML {* + +val next_line = @{term "''~''"}; + +val stdio_term = @{term "''#include ''"}; +val stdlib_term = @{term "''#include ''"}; +val pthread_term = @{term "''#include ''"} + +val C_pthread_header = stdio_term $ stdlib_term $ pthread_term; + +*} + +subsection {*C instructions term*} +(*A C instruction can be a variable declaration or a call to another existing function or + affectation or conditional or a loop*) + +subsection {*C functions term*} + +ML {* +val next_instr = @{term "'';''"}; +val open_bracket = @{term "''{''"}; +val close_bracket = @{term "''}''"}; +val next_arg = @{term "'',''"} +val open_par = @{term "''(''"}; +val close_par = @{term "'')''"}; + + +fun C_function_header fun_type fun_name fun_args = fun_type $ fun_name $ fun_args; + +fun discharge_intrs [] = @{term"''/**/''"} +| discharge_intrs [C_instr] = C_instr $ next_instr $ next_line +| discharge_intrs (C_instr::C_instrs) = C_instr $ next_instr $ next_line $ + discharge_intrs C_instrs; + +fun discharge_args [] = @{term"''/**/''"} +| discharge_args [C_arg] = C_arg $ next_arg +| discharge_args (C_arg::C_args) = C_arg $ next_arg $ + discharge_args C_args; + +fun C_function fun_type fun_name fun_args C_instrs = + C_function_header fun_type fun_name (open_par $ discharge_args fun_args $ close_par) $ next_line $ + open_bracket $ next_line $ + discharge_intrs C_instrs $ next_line $ + close_bracket $ next_line ; + +fun C_void_function fun_name fun_args C_instrs = + C_function @{term"''void''"} fun_name fun_args C_instrs; + +fun C_int_function fun_name fun_args C_instrs = + C_function @{term"''int''"} fun_name fun_args C_instrs; + +fun C_string_function fun_name fun_args C_instrs = + C_function @{term"''string''"} fun_name fun_args C_instrs; + +*} + + +subsection {*C File term*} + +ML {* + +fun discharge_funs [] = @{term"''/**/''"} +| discharge_funs [C_fun] = C_fun $ next_line +| discharge_funs (C_fun::C_funs) = C_fun $ next_line $ + discharge_funs C_funs; + +fun C_file C_header C_funs = C_header $ discharge_funs C_funs ; + +*} + +subsection {*Jump to the next line*} + +ML {* fun replace_next_line nil = [] + | replace_next_line (x::xs) = (if x = #"~" + then replace_next_line (#"\n"::xs) + else x::replace_next_line xs); + *} + +end diff --git a/src/test-gen/src/main/codegen_fsharp/Code_Char_Fsharp.thy b/src/test-gen/src/main/codegen_fsharp/Code_Char_Fsharp.thy new file mode 100644 index 0000000..2b33d07 --- /dev/null +++ b/src/test-gen/src/main/codegen_fsharp/Code_Char_Fsharp.thy @@ -0,0 +1,103 @@ +(* Title: HOL/Library/Code_Char.thy + Author: Florian Haftmann +*) + +chapter {* Code generation of pretty characters (and strings) *} + +theory Code_Char_Fsharp +imports Main (* Char_ord *) +begin + +code_printing + type_constructor char \ + (SML) "char" + 
and (OCaml) "char" + and (Haskell) "Prelude.Char" + and (Scala) "Char" + +setup {* + fold String_Code.add_literal_char ["SML", "OCaml", "Haskell", "Scala"] + #> String_Code.add_literal_list_string "Haskell" +*} + +code_printing + class_instance char :: equal \ + (Haskell) - +| constant "HOL.equal :: char \ char \ bool" \ + (SML) "!((_ : char) = _)" + and (OCaml) "!((_ : char) = _)" + and (Haskell) infix 4 "==" + and (Scala) infixl 5 "==" +| constant "Code_Evaluation.term_of :: char \ term" \ + (Eval) "HOLogic.mk'_char/ (IntInf.fromInt/ (Char.ord/ _))" + +code_reserved SML + char + +code_reserved OCaml + char + +code_reserved Scala + char + +definition implode :: "string \ String.literal" where + "implode = STR" + +code_reserved SML String + +code_printing + constant String.implode \ + (SML) "String.implode" + and (OCaml) "!(let l = _ in let res = String.create (List.length l) in let rec imp i = function | [] -> res | c :: l -> String.set res i c; imp (i + 1) l in imp 0 l)" + and (Haskell) "_" + and (Scala) "!(\"\" ++/ _)" +| constant String.explode \ + (SML) "String.explode" + and (OCaml) "!(let s = _ in let rec exp i l = if i < 0 then l else exp (i - 1) (String.get s i :: l) in exp (String.length s - 1) [])" + and (Haskell) "_" + and (Scala) "!(_.toList)" + + +definition integer_of_char :: "char \ integer" +where + "integer_of_char = integer_of_nat o nat_of_char" + +definition char_of_integer :: "integer \ char" +where + "char_of_integer = char_of_nat \ nat_of_integer" + +lemma [code]: + "nat_of_char = nat_of_integer o integer_of_char" + by (simp add: integer_of_char_def fun_eq_iff) + +lemma [code]: + "char_of_nat = char_of_integer o integer_of_nat" + by (simp add: char_of_integer_def fun_eq_iff) + + (* +code_printing + constant integer_of_char \ + (SML) "!(IntInf.fromInt o Char.ord)" + and (OCaml) "Big'_int.big'_int'_of'_int (Char.code _)" + and (Haskell) "Prelude.toInteger (Prelude.fromEnum (_ :: Prelude.Char))" + and (Scala) "BigInt(_.toInt)" +| constant char_of_integer \ + (SML) "!(Char.chr o IntInf.toInt)" + and (OCaml) "Char.chr (Big'_int.int'_of'_big'_int _)" + and (Haskell) "!(let chr k | (0 <= k && k < 256) = Prelude.toEnum k :: Prelude.Char in chr . Prelude.fromInteger)" + and (Scala) "!((k: BigInt) => if (BigInt(0) <= k && k < BigInt(256)) k.charValue else error(\"character value out of range\"))" +| constant "Orderings.less_eq :: char \ char \ bool" \ + (SML) "!((_ : char) <= _)" + and (OCaml) "!((_ : char) <= _)" + and (Haskell) infix 4 "<=" + and (Scala) infixl 4 "<=" + and (Eval) infixl 6 "<=" +| constant "Orderings.less :: char \ char \ bool" \ + (SML) "!((_ : char) < _)" + and (OCaml) "!((_ : char) < _)" + and (Haskell) infix 4 "<" + and (Scala) infixl 4 "<" + and (Eval) infixl 6 "<" +*) +end + diff --git a/src/test-gen/src/main/codegen_fsharp/Code_Char_chr_Fsharp.thy b/src/test-gen/src/main/codegen_fsharp/Code_Char_chr_Fsharp.thy new file mode 100644 index 0000000..74271fd --- /dev/null +++ b/src/test-gen/src/main/codegen_fsharp/Code_Char_chr_Fsharp.thy @@ -0,0 +1,83 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * Code_Char_chr_Fsharp.thy --- Isar setup for HOL-TestGen + * This file is part of HOL-TestGen. + * + * Copyright (c) 2010-2012 ETH Zurich, Switzerland + * 2010-2013 Achim D. Brucker, Germany + * 2016 The University of Sheffield, UK + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id: Code_Char_chr_Fsharp.thy 12648 2016-06-17 09:26:51Z brucker $ *) + +theory Code_Char_chr_Fsharp +imports +(* "~~/src/HOL/Library/Char_ord" *) + Code_Char_Fsharp + code_fsharp +begin + +definition + "int_of_char = int o nat_of_char" + +lemma [code]: + "nat_of_char = nat o int_of_char" + unfolding int_of_char_def by (simp add: fun_eq_iff) + +definition + "char_of_int = char_of_nat o nat" + +lemma [code]: + "char_of_nat = char_of_int o int" + unfolding char_of_int_def by (simp add: fun_eq_iff) + + +code_printing + constant "Unity" \ + (Fsharp) "()" + +code_printing + constant int_of_char \ + (SML) "!(IntInf.fromInt o Char.ord)" and + (OCaml) "Big'_int.big'_int'_of'_int (Char.code _)" and + (Fsharp) "Big'_int.big'_int'_of'_int (Char.code _)" and + (Haskell) "toInteger (fromEnum (_ :: Char))" and + (Scala) "BigInt(_.toInt)" +| constant char_of_int \ + (SML) "!(Char.chr o IntInf.toInt)" and + (OCaml) "Char.chr (Big'_int.int'_of'_big'_int _)" and + (Fsharp) "Char.chr (Big'_int.int'_of'_big'_int _)" and + (Haskell) "!(let chr k | (0 <= k && k < 256) = toEnum k :: Char in chr . fromInteger)" and + (Scala) "!((k: BigInt) => if (BigInt(0) <= k && k < BigInt(256)) k.charValue else error(\"character value out of range\"))" + +end diff --git a/src/test-gen/src/main/codegen_fsharp/Code_Integer_Fsharp.thy b/src/test-gen/src/main/codegen_fsharp/Code_Integer_Fsharp.thy new file mode 100644 index 0000000..1ce7463 --- /dev/null +++ b/src/test-gen/src/main/codegen_fsharp/Code_Integer_Fsharp.thy @@ -0,0 +1,101 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * Code_Integer_Fsharp.thy --- + * This file is part of HOL-TestGen. + * + * Copyright (c) 2010-2012 ETH Zurich, Switzerland + * 2010-2012 Achim D. 
Brucker, Germany + * 2016 The University of Sheffield, UK + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id: Code_Integer_Fsharp.thy 12803 2016-09-01 08:38:47Z brucker $ *) + + +theory Code_Integer_Fsharp +imports + code_fsharp +begin +text {* + Representation-ignorant code equations for conversions. +*} + +text {* + HOL numeral expressions are mapped to integer literals + in target languages, using predefined target language + operations for abstract integer operations. 
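+  As an illustration only (a sketch of the intended effect, not output produced
+  by this theory): under the printings below, a HOL equation such as
+
+      f x = x + (1::integer)
+
+  is expected to appear in exported Fsharp code roughly as
+
+      let f x : int = x + 1
+
+  since the abstract integer type is identified with the native Fsharp int type
+  and the arithmetic and comparison operations are mapped to the built-in infix
+  operators.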
+*} + +code_printing + type_constructor integer \ + (Fsharp) "int" + +code_printing + constant "0::integer" \ + (Fsharp) "0" + +setup \ + fold (fn target => + Numeral.add_code @{const_name Code_Numeral.Pos} I Code_Printer.literal_numeral target + #> Numeral.add_code @{const_name Code_Numeral.Neg} (op ~) Code_Printer.literal_numeral target) + ["SML", "OCaml", "Haskell", "Scala", "Fsharp"] +\ + +code_printing + constant "plus :: integer \ _ \ _" \ + (Fsharp) infixl 8 "+" +| constant "uminus :: integer \ _" \ + (Fsharp) "-/ _" +| constant "minus :: integer \ _" \ + (Fsharp) infixl 8 "-" +| constant Code_Numeral.dup \ + (Fsharp) "failwith/ \"dup\"" +| constant Code_Numeral.sub \ + (Fsharp) "failwith/ \"sub\"" +| constant "times :: integer \ _ \ _" \ + (Fsharp) infixl 9 "*" +| constant Code_Numeral.divmod_abs \ + (SML) "IntInf.divMod/ (IntInf.abs _,/ IntInf.abs _)" +| constant "HOL.equal :: integer \ _ \ bool" \ + (Fsharp) infixl 6 "=" +| constant "less_eq :: integer \ _ \ bool" \ + (Fsharp) infixl 6 "<=" +| constant "less :: integer \ _ \ bool" \ + (Fsharp) infixl 6 "<" + + +code_identifier + code_module Int \ (SML) Arith and (OCaml) Arith and (Haskell) Arith + and (Fsharp) + +end diff --git a/src/test-gen/src/main/codegen_fsharp/Code_String_Fsharp.thy b/src/test-gen/src/main/codegen_fsharp/Code_String_Fsharp.thy new file mode 100644 index 0000000..2590967 --- /dev/null +++ b/src/test-gen/src/main/codegen_fsharp/Code_String_Fsharp.thy @@ -0,0 +1,68 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * Code_String_Fsharp.thy --- + * This file is part of HOL-TestGen. + * + * Copyright (c) 2010-2012 ETH Zurich, Switzerland + * 2010-2012 Achim D. Brucker, Germany + * 2016 The University of Sheffield, UK + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ ******************************************************************************) +(* $Id: Code_String_Fsharp.thy 12984 2017-01-06 09:39:05Z brucker $ *) + + +chapter {* Character and string types *} + +theory Code_String_Fsharp +imports + code_fsharp +begin + + +subsection {* Code generator *} + +code_printing type_constructor "String.literal" \ + (Fsharp) "string" + +setup \ + fold String_Code.add_literal_string ["SML", "OCaml", "Haskell", "Scala", "Fsharp"] +\ + +code_printing + constant "HOL.equal :: String.literal \ String.literal \ bool" \ + (Fsharp) "!((_ : string) = _)" + +code_printing constant Code.abort \ + (Fsharp) "failwith" +end diff --git a/src/test-gen/src/main/codegen_fsharp/code_fsharp.ML b/src/test-gen/src/main/codegen_fsharp/code_fsharp.ML new file mode 100644 index 0000000..5dacaef --- /dev/null +++ b/src/test-gen/src/main/codegen_fsharp/code_fsharp.ML @@ -0,0 +1,618 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * code_fsharp.ML --- main file + * This file is part of HOL-TestGen. + * + * Copyright (c) 2010-2012 ETH Zurich, Switzerland + * Copyright (c) 2010-2013 Achim D. Brucker, Germany + * Copyright (c) 2016 The University of Sheffield, UK + * + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id: code_fsharp.ML 9990 2013-11-16 16:30:56Z brucker $ *) + +(* + This implementation is based the OCaml code generator that is part + of the Isabelle distribution. 
+*) + +signature CODE_FSharp = +sig + val target_Fsharp: string +end; + +structure Code_FSharp : CODE_FSharp = +struct + +open Basic_Code_Symbol; +open Basic_Code_Thingol; +open Code_Printer; + +infixr 5 @@; +infixr 5 @|; + + +(** generic **) + +open Code_ML; + +val target_Fsharp = "Fsharp"; + + +fun string_of_int i = + if i < 0 then ("-"^(Int.toString (~1*i))) + else Int.toString i; + +datatype ml_binding = + ML_Function of string * (typscheme * ((iterm list * iterm) * (thm option * bool)) list) + | ML_Instance of (string * class) * { class: class, tyco: string, vs: (vname * sort) list, + superinsts: (class * dict list list) list, + inst_params: ((string * (const * int)) * (thm * bool)) list, + superinst_params: ((string * (const * int)) * (thm * bool)) list }; + +datatype ml_stmt = + ML_Exc of string * (typscheme * int) + | ML_Val of ml_binding + | ML_Funs of (Code_Namespace.export * ml_binding) list * Code_Symbol.T list + | ML_Datas of (string * (vname list * ((string * vname list) * itype list) list)) list + | ML_Class of string * (vname * ((class * class) list * (string * itype) list)); + +fun print_product _ [] = NONE + | print_product print [x] = SOME (print x) + | print_product print xs = (SOME o enum " *" "" "") (map print xs); + +fun tuplify _ _ [] = NONE + | tuplify print fxy [x] = SOME (print fxy x) + | tuplify print _ xs = SOME (enum "," "(" ")" (map (print NOBR) xs)); + +(** Fsharp serializer **) + + +fun print_fsharp_stmt tyco_syntax const_syntax reserved is_constr deresolve = + let + val deresolve_const = deresolve o Constant; + val deresolve_class = deresolve o Type_Class; + val deresolve_classrel = deresolve o Class_Relation; + val deresolve_inst = deresolve o Class_Instance; + fun print_tyco_expr (sym, []) = (str o deresolve) sym + | print_tyco_expr (sym, [ty]) = + concat [print_typ BR ty, (str o deresolve) sym] + | print_tyco_expr (sym, tys) = + concat [enum "," "(" ")" (map (print_typ BR) tys), (str o deresolve) sym] + and print_typ fxy (tyco `%% tys) = (case tyco_syntax tyco + of NONE => print_tyco_expr (Type_Constructor tyco, tys) + | SOME (_, print) => print print_typ fxy tys) + | print_typ fxy (ITyVar v) = str ("'" ^ v); + fun print_dicttyp (class, ty) = print_tyco_expr (Type_Class class, [ty]); + fun print_typscheme_prefix (vs, p) = enum " ->" "" "" + (map_filter (fn (v, sort) => + (print_product (fn class => print_dicttyp (class, ITyVar v)) sort)) vs @| p); + fun print_typscheme (vs, ty) = print_typscheme_prefix (vs, print_typ NOBR ty); + fun print_dicttypscheme (vs, class_ty) = print_typscheme_prefix (vs, print_dicttyp class_ty); + val print_classrels = + fold_rev (fn classrel => fn p => Pretty.block [p, str ".", (str o deresolve_classrel) classrel]) + fun print_dict is_pseudo_fun fxy (Dict (classrels, x)) = + print_plain_dict is_pseudo_fun fxy x + |> print_classrels classrels + and print_plain_dict is_pseudo_fun fxy (Dict_Const (inst, dss)) = + brackify BR ((str o deresolve_inst) inst :: + (if is_pseudo_fun (Class_Instance inst) then [str "()"] + else map_filter (print_dicts is_pseudo_fun BR) dss)) + | print_plain_dict is_pseudo_fun fxy (Dict_Var {var = v, index = i, length = k, unique = u, class = c}) = + str (if k = 1 then "_" ^ Name.enforce_case true v + else "_" ^ Name.enforce_case true v ^ string_of_int (i+1)) + and print_dicts is_pseudo_fun = tuplify (print_dict is_pseudo_fun); + val print_dict_args = map_filter (fn (v, sort) => print_dicts (K false) BR + (map_index (fn (i, c) => Dict ([], Dict_Var {var = v, index = i, length = length sort, unique = 
false, class = c})) sort)); + fun print_term is_pseudo_fun some_thm vars fxy (IConst const) = + print_app is_pseudo_fun some_thm vars fxy (const, []) + | print_term is_pseudo_fun some_thm vars fxy (IVar NONE) = + str "_" + | print_term is_pseudo_fun some_thm vars fxy (IVar (SOME v)) = + str (lookup_var vars v) + | print_term is_pseudo_fun some_thm vars fxy (t as t1 `$ t2) = + (case Code_Thingol.unfold_const_app t + of SOME app => print_app is_pseudo_fun some_thm vars fxy app + | NONE => brackify fxy [print_term is_pseudo_fun some_thm vars NOBR t1, + print_term is_pseudo_fun some_thm vars BR t2]) + | print_term is_pseudo_fun some_thm vars fxy (t as _ `|=> _) = + let + val (binds, t') = Code_Thingol.unfold_pat_abs t; + val (ps, vars') = fold_map (print_bind is_pseudo_fun some_thm BR o fst) binds vars; + in brackets (str "fun" :: ps @ str "->" @@ print_term is_pseudo_fun some_thm vars' NOBR t') end + | print_term is_pseudo_fun some_thm vars fxy (ICase case_expr) = + (case Code_Thingol.unfold_const_app (#primitive case_expr) + of SOME (app as ({ sym = Constant const, ... }, _)) => + if is_none (const_syntax const) + then print_case is_pseudo_fun some_thm vars fxy case_expr + else print_app is_pseudo_fun some_thm vars fxy app + | NONE => print_case is_pseudo_fun some_thm vars fxy case_expr) + and print_app_expr is_pseudo_fun some_thm vars (app as ({ sym, dicts = dss, dom = dom, ... }, ts)) = + if is_constr sym then + let val k = length dom in + if length ts = k + then (str o deresolve) sym + :: the_list (tuplify (print_term is_pseudo_fun some_thm vars) BR ts) + else [print_term is_pseudo_fun some_thm vars BR (Code_Thingol.eta_expand k app)] + end + else if is_pseudo_fun sym + then (str o deresolve) sym @@ str "()" + else (str o deresolve) sym :: map_filter (print_dicts is_pseudo_fun BR) dss + @ map (print_term is_pseudo_fun some_thm vars BR) ts + and print_app is_pseudo_fun some_thm vars = gen_print_app (print_app_expr is_pseudo_fun) + (print_term is_pseudo_fun) const_syntax some_thm vars + and print_bind is_pseudo_fun = gen_print_bind (print_term is_pseudo_fun) + and print_case is_pseudo_fun some_thm vars fxy { clauses = [], ... } = + (concat o map str) ["failwith", "\"empty case\""] + | print_case is_pseudo_fun some_thm vars fxy (case_expr as { clauses = [_], ... }) = + let + val (binds, body) = Code_Thingol.unfold_let (ICase case_expr); + fun print_let ((pat, _), t) vars = + vars + |> print_bind is_pseudo_fun some_thm NOBR pat + |>> (fn p => concat + [str "let", p, str "=", print_term is_pseudo_fun some_thm vars NOBR t, str "in"]) + val (ps, vars') = fold_map print_let binds vars; + in + brackets [Pretty.chunks ps, print_term is_pseudo_fun some_thm vars' NOBR body] + end + | print_case is_pseudo_fun some_thm vars fxy { term = t, typ = ty, clauses = clauses, ... 
} = + let + fun print_select delim (pat, body) = + let + val (p, vars') = print_bind is_pseudo_fun some_thm NOBR pat vars; + in concat [str delim, p, str "->", print_term is_pseudo_fun some_thm vars' NOBR body] end; + in + brackets ( + str "match" + :: print_term is_pseudo_fun some_thm vars NOBR t + :: str "with\n" + :: map (print_select "|") clauses + ) + end + | print_case is_pseudo_fun some_thm vars fxy ({clauses=[], ...}) = + (concat o map str) ["failwith", "\"empty case\""]; + fun print_val_decl print_typscheme (sym, typscheme) = concat + [str "val", str (deresolve sym), str ":", print_typscheme typscheme]; + fun print_datatype_decl definer (tyco, (vs, cos)) = + let + fun print_co ((co, _), []) = str (deresolve_const co) + | print_co ((co, _), tys) = concat [str (deresolve_const co), str "of", + enum " *" "" "" (map (print_typ (INFX (2, X))) tys)]; + fun separateByList l (x::y::xs) = (x :: l) @ separateByList l (y::xs) + | separateByList l x = x + in + concat ( + str definer + :: print_tyco_expr (Type_Constructor tyco, map ITyVar vs) + :: str "=" + :: [Pretty.blk (0, + separateByList [Pretty.brk 1, str "| "] (map print_co cos) + )] + ) + end; + fun print_def is_pseudo_fun needs_typ definer + (ML_Function (const, (vs_ty as (vs, ty), eqs))) = + let + fun print_eqn ((ts, t), (some_thm, _)) = + let + val vars = reserved + |> intro_base_names_for (is_none o const_syntax) + deresolve (t :: ts) + |> intro_vars ((fold o Code_Thingol.fold_varnames) + (insert (op =)) ts []); + in concat [ + (Pretty.block o commas) + (map (print_term is_pseudo_fun some_thm vars NOBR) ts), + str "->", + print_term is_pseudo_fun some_thm vars NOBR t + ] end; + fun print_eqns is_pseudo [((ts, t), (some_thm, _))] = + let + val vars = reserved + |> intro_base_names_for (is_none o const_syntax) + deresolve (t :: ts) + |> intro_vars ((fold o Code_Thingol.fold_varnames) + (insert (op =)) ts []); + in + concat ( + (if is_pseudo then [str "()"] + else map (print_term is_pseudo_fun some_thm vars BR) ts) + @ str "=" + @@ print_term is_pseudo_fun some_thm vars NOBR t + ) + end + | print_eqns _ eqs = + let + val vars = reserved + |> intro_base_names_for (is_none o const_syntax) + deresolve (map (snd o fst) eqs) + val dummy_parms = (map str o aux_params vars o map (fst o fst)) eqs; + in + Pretty.block ( + Pretty.breaks dummy_parms + @ Pretty.brk 1 + :: str "=" + :: Pretty.brk 1 + :: Pretty.blk (2, + str "match" + :: Pretty.brk 1 + :: (Pretty.block o commas) dummy_parms + :: Pretty.brk 1 + :: str "with" + :: maps (append [Pretty.fbrk, str "|", Pretty.brk 1] + o single o print_eqn) eqs + ) + :: [] + ) + end; + val prolog = if needs_typ then + concat [str definer, (str o deresolve_const) const, str ":", print_typ NOBR ty] + else (concat o map str) [definer, deresolve_const const]; + in pair + (print_val_decl print_typscheme (Constant const, vs_ty)) + (concat ( + prolog + :: print_dict_args vs + @| print_eqns (is_pseudo_fun (Constant const)) eqs + )) + end + | print_def is_pseudo_fun _ definer + (ML_Instance (inst as (tyco, class), { vs, superinsts, inst_params, ... 
})) = + let + fun print_super_instance (super_class, x) = + concat [ + (str o deresolve_classrel) (class, super_class), + str "=", + print_dict is_pseudo_fun NOBR (Dict ([], Dict_Const ((tyco, super_class), x))) + ]; + fun print_classparam_instance ((classparam, (const, _)), (thm, _)) = + concat [ + (str o deresolve_const) classparam, + str "=", + print_app (K false) (SOME thm) reserved NOBR (const, []) + ]; + in pair + (print_val_decl print_dicttypscheme + (Class_Instance inst, (vs, (class, tyco `%% map (ITyVar o fst) vs)))) + (concat ( + str definer + :: (str o deresolve_inst) inst + :: (if is_pseudo_fun (Class_Instance inst) then [str "()"] + else print_dict_args vs) + @ str "=" + @@ brackets [ + enum_default "()" ";" "{" "}" (map print_super_instance superinsts + @ map print_classparam_instance inst_params), + str ":", + print_dicttyp (class, tyco `%% map (ITyVar o fst) vs) + ] + )) + end; + fun print_stmt _ (ML_Exc (const, (vs_ty, n))) = pair + [print_val_decl print_typscheme (Constant const, vs_ty)] + ((doublesemicolon o map str) ( + "let" + :: deresolve_const const + :: replicate n "_" + @ "=" + :: "failwith" + @@ ML_Syntax.print_string const + )) + | print_stmt _ (ML_Val binding) = + let + val (sig_p, p) = print_def (K false) true "let" binding + in pair + [sig_p] + (doublesemicolon [p]) + end + | print_stmt _ (ML_Funs ((export, binding) :: exports_bindings, pseudo_funs)) = + let + val print_def' = print_def (member (op =) pseudo_funs) false; + fun print_pseudo_fun sym = concat [ + str "let", + (str o deresolve) sym, + str "=", + (str o deresolve) sym, + str "();;" + ]; + val (sig_ps, (ps, p)) = (apsnd split_last o split_list) + (print_def' "let rec" binding :: map (print_def' "and" o snd) exports_bindings); + val pseudo_ps = map print_pseudo_fun pseudo_funs; + in pair + (map_filter (fn (export, p) => if Code_Namespace.not_private export then SOME p else NONE) + ((export :: map fst exports_bindings) ~~ sig_ps)) + (Pretty.chunks (ps @ doublesemicolon [p] :: pseudo_ps)) + end + | print_stmt _ (ML_Datas [(tyco, (vs, []))]) = + let + val ty_p = print_tyco_expr (Type_Constructor tyco, map ITyVar vs); + in + pair + [concat [str "type", ty_p]] + (doublesemicolon [str "type", ty_p, str "=", str "EMPTY__"]) + end + | print_stmt export (ML_Datas (data :: datas)) = + let + val decl_ps = print_datatype_decl "type" data + :: map (print_datatype_decl "and") datas; + val (ps, p) = split_last decl_ps; + in pair + (if Code_Namespace.is_public export + then decl_ps + else map (fn (tyco, (vs, _)) => + concat [str "type", print_tyco_expr (Type_Constructor tyco, map ITyVar vs)]) + (data :: datas)) + (Pretty.chunks (ps @| doublesemicolon [p])) + end + | print_stmt export (ML_Class (class, (v, (classrels, classparams)))) = + let + fun print_field s p = concat [str s, str ":", p]; + fun print_super_class_field (classrel as (_, super_class)) = + print_field (deresolve_classrel classrel) (print_dicttyp (super_class, ITyVar v)); + fun print_classparam_decl (classparam, ty) = + print_val_decl print_typscheme + (Constant classparam, ([(v, [class])], ty)); + fun print_classparam_field (classparam, ty) = + print_field (deresolve_const classparam) (print_typ NOBR ty); + val w = "_" ^ Name.enforce_case true v; + fun print_classparam_proj (classparam, _) = + (concat o map str) ["let", deresolve_const classparam, w, "=", + w ^ "." 
^ deresolve_const classparam ^ ";;"]; + val type_decl_p = concat [ + str "type", + print_dicttyp (class, ITyVar v), + str "=", + enum_default "unit" ";" "{" "}" ( + map print_super_class_field classrels + @ map print_classparam_field classparams + ) + ]; + in pair + (if Code_Namespace.is_public export + then type_decl_p :: map print_classparam_decl classparams + else [concat [str "type", print_dicttyp (class, ITyVar v)]]) + (Pretty.chunks ( + doublesemicolon [type_decl_p] + :: map print_classparam_proj classparams + )) + end; + in print_stmt end; + +fun print_fsharp_module name some_decls body = + Pretty.chunks2 ( + str ("module " ^ name) + :: body + ); + +val literals_fsharp = let + fun chr i = + let + val xs = string_of_int i; + val ys = replicate_string (3 - length (raw_explode xs)) "0"; + in "\\" ^ ys ^ xs end; + fun char_fsharp c = + let + val i = ord c; + val s = if i < 32 orelse i = 34 orelse i = 39 orelse i = 92 orelse i > 126 + then chr i else c + in s end; + fun numeral_fsharp k = string_of_int k +(* + fun numeral_fsharp k = if k < 0 + then "(Big_int.minus_big_int " ^ numeral_fsharp (~ k) ^ ")" + else if k <= 1073741823 + then "(Big_int.big_int_of_int " ^ string_of_int k ^ ")" + else "(Big_int.big_int_of_string " ^ quote (string_of_int k) ^ ")" +*) +in Literals { + literal_char = Library.enclose "'" "'" o char_fsharp, + literal_string = quote o translate_string char_fsharp, + literal_numeral = numeral_fsharp, + literal_list = enum ";" "[" "]", + infix_cons = (6, "::") +} end; + + + + +(* +val serializer_fsharp : Code_Target.serializer = + Code_Target.parse_args (Scan.optional (Args.$$$ "no_signatures" >> K false) true + >> (fn with_signatures => serialize_ml print_fsharp_module print_fsharp_stmt with_signatures)); +*) + +fun ml_program_of_program ctxt module_name reserved identifiers = + let + fun namify_const upper base (nsp_const, nsp_type) = + let + val (base', nsp_const') = Name.variant (Name.enforce_case upper base) nsp_const + in (base', (nsp_const', nsp_type)) end; + fun namify_type base (nsp_const, nsp_type) = + let + val (base', nsp_type') = Name.variant (Name.enforce_case false base) nsp_type + in (base', (nsp_const, nsp_type')) end; + fun namify_stmt (Code_Thingol.Fun _) = namify_const false + | namify_stmt (Code_Thingol.Datatype _) = namify_type + | namify_stmt (Code_Thingol.Datatypecons _) = namify_const true + | namify_stmt (Code_Thingol.Class _) = namify_type + | namify_stmt (Code_Thingol.Classrel _) = namify_const false + | namify_stmt (Code_Thingol.Classparam _) = namify_const false + | namify_stmt (Code_Thingol.Classinst _) = namify_const false; + fun ml_binding_of_stmt (sym as Constant const, (export, Code_Thingol.Fun ((tysm as (vs, ty), raw_eqs), _))) = + let + val eqs = filter (snd o snd) raw_eqs; + val (eqs', some_sym) = if null (filter_out (null o snd) vs) then case eqs + of [(([], t), some_thm)] => if (not o null o fst o Code_Thingol.unfold_fun) ty + then ([(([IVar (SOME "x")], t `$ IVar (SOME "x")), some_thm)], NONE) + else (eqs, SOME (sym, member (op =) (Code_Thingol.add_constsyms t []) sym)) + | _ => (eqs, NONE) + else (eqs, NONE) + in ((export, ML_Function (const, (tysm, eqs'))), some_sym) end + | ml_binding_of_stmt (sym as Class_Instance inst, (export, Code_Thingol.Classinst (stmt as { vs, ... 
}))) = + ((export, ML_Instance (inst, stmt)), + if forall (null o snd) vs then SOME (sym, false) else NONE) + | ml_binding_of_stmt (sym, _) = + error ("Binding block containing illegal statement: " ^ + Code_Symbol.quote ctxt sym) + fun modify_fun (sym, (export, stmt)) = + let + val ((export', binding), some_value_sym) = ml_binding_of_stmt (sym, (export, stmt)); + val ml_stmt = case binding + of ML_Function (const, ((vs, ty), [])) => + ML_Exc (const, ((vs, ty), + (length o filter_out (null o snd)) vs + (length o fst o Code_Thingol.unfold_fun) ty)) + | _ => case some_value_sym + of NONE => ML_Funs ([(export', binding)], []) + | SOME (sym, true) => ML_Funs ([(export, binding)], [sym]) + | SOME (sym, false) => ML_Val binding + in SOME (export, ml_stmt) end; + fun modify_funs stmts = single (SOME + (Code_Namespace.Opaque, ML_Funs (map_split ml_binding_of_stmt stmts |> (apsnd o map_filter o Option.map) fst))) + fun modify_datatypes stmts = + map_filter + (fn (Type_Constructor tyco, (export, Code_Thingol.Datatype stmt)) => SOME (export, (tyco, stmt)) | _ => NONE) stmts + |> split_list + |> apfst Code_Namespace.join_exports + |> apsnd ML_Datas + |> SOME + |> single; + fun modify_class stmts = + the_single (map_filter + (fn (Type_Class class, (export, Code_Thingol.Class stmt)) => SOME (export, (class, stmt)) | _ => NONE) stmts) + |> apsnd ML_Class + |> SOME + |> single; + fun modify_stmts ([stmt as (_, (_, stmt' as Code_Thingol.Fun _))]) = + if Code_Thingol.is_case stmt' then [] else [modify_fun stmt] + | modify_stmts ((stmts as (_, (_, Code_Thingol.Fun _)) :: _)) = + modify_funs (filter_out (Code_Thingol.is_case o snd o snd) stmts) + | modify_stmts ((stmts as (_, (_, Code_Thingol.Datatypecons _)) :: _)) = + modify_datatypes stmts + | modify_stmts ((stmts as (_, (_, Code_Thingol.Datatype _)) :: _)) = + modify_datatypes stmts + | modify_stmts ((stmts as (_, (_, Code_Thingol.Class _)) :: _)) = + modify_class stmts + | modify_stmts ((stmts as (_, (_, Code_Thingol.Classrel _)) :: _)) = + modify_class stmts + | modify_stmts ((stmts as (_, (_, Code_Thingol.Classparam _)) :: _)) = + modify_class stmts + | modify_stmts ([stmt as (_, (_, Code_Thingol.Classinst _))]) = + [modify_fun stmt] + | modify_stmts ((stmts as (_, (_, Code_Thingol.Classinst _)) :: _)) = + modify_funs stmts + | modify_stmts stmts = error ("Illegal mutual dependencies: " ^ + (Library.commas o map (Code_Symbol.quote ctxt o fst)) stmts); + in + Code_Namespace.hierarchical_program ctxt { + module_name = module_name, reserved = reserved, identifiers = identifiers, + empty_nsp = (reserved, reserved), namify_module = pair, namify_stmt = namify_stmt, + cyclic_modules = false, class_transitive = true, + class_relation_public = true, empty_data = (), + memorize_data = K I, modify_stmts = modify_stmts } + end; + +fun serialize_ml print_ml_module print_ml_stmt ctxt + { module_name, reserved_syms, identifiers, includes, + class_syntax, tyco_syntax, const_syntax } exports program = + let + + (* build program *) + val { deresolver, hierarchical_program = ml_program } = + ml_program_of_program ctxt module_name (Name.make_context reserved_syms) + identifiers exports program; + + (* print statements *) + fun print_stmt prefix_fragments (_, (export, stmt)) = print_ml_stmt + tyco_syntax const_syntax (make_vars reserved_syms) + (Code_Thingol.is_constr program) (deresolver prefix_fragments) export stmt + |> apfst (fn decl => if Code_Namespace.not_private export then SOME decl else NONE); + + (* print modules *) + fun print_module _ base _ xs = + let + val 
(raw_decls, body) = split_list xs; + val decls = maps these raw_decls + in (NONE, print_ml_module base decls body) end; + + (* serialization *) + val p = Pretty.chunks2 (map snd includes + @ map snd (Code_Namespace.print_hierarchical { + print_module = print_module, print_stmt = print_stmt, + lift_markup = apsnd } ml_program)); + fun write width NONE = writeln o format [] width + | write width (SOME p) = File.write p o format [] width; + fun prepare syms width p = ([("", format syms width p)], try (deresolver [])); + in + Code_Target.serialization write prepare p + end; + + +val serializer_fsharp : Code_Target.serializer = + Code_Target.parse_args (Scan.succeed ()) #> K (serialize_ml print_fsharp_module print_fsharp_stmt); + +(** Isar setup **) + +fun fun_syntax print_typ fxy [ty1, ty2] = + brackify_infix (1, R) fxy ( + print_typ (INFX (1, X)) ty1, + str "->", + print_typ (INFX (1, R)) ty2 + ); + + + +val _ = Theory.setup + (Code_Target.add_language + (target_Fsharp, { serializer = serializer_fsharp, literals = literals_fsharp, + check = { env_var = "EXEC_FSHARP", make_destination = fn p => Path.append p (Path.explode "ROOT.fsharp"), + make_command = fn fsharp => fsharp ^ " -w pu nums.cma ROOT.fsharp" } }) + #> Code_Target.set_printings (Code_Symbol.Type_Constructor ("fun", [(target_Fsharp, SOME (2, fun_syntax))])) +(* Source: http://msdn.microsoft.com/en-us/library/dd233249.aspx *) + #> fold (Code_Target.add_reserved target_Fsharp) [ + "abstract", "and", "as", "asr", "assert", "atomic", "base", "begin", "break", + "class", "checked", "component", "const", "constraint", "constructor", "continue", + "default", "delegate", "do", "done", "downcast", "downto", "eager", "elif", + "else", "end", "event", "exception", "extern", "external", "false", "fixed", + "for", "fun", "function", "functor", "global", "if", "in", "include", "inherit", + "inline", "int", "interface", "internal", "land", "lazy", "let", "lor", "lsl", "lsr", + "lxor", "match", "member", "method", "mixin", "mod", "module", "mutable", + "namespace", "new", "not", "null", "object", "of", "open", "or", "override", + "parallel", "private", "process", "protected", "public", "pure", "rec", + "return", "sealed", "sig", "static", "string", "struct", "tailcall", "then", "to", + "trait", "true", "try", "type", "upcast", "use", "val", "virtual", "void", + "volatile", "when", "while", "with", "yield" + ]); + +end; (*struct*) diff --git a/src/test-gen/src/main/codegen_fsharp/code_fsharp.thy b/src/test-gen/src/main/codegen_fsharp/code_fsharp.thy new file mode 100644 index 0000000..e16d4ca --- /dev/null +++ b/src/test-gen/src/main/codegen_fsharp/code_fsharp.thy @@ -0,0 +1,130 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * code_fsharp.thy --- + * This file is part of HOL-TestGen. + * + * Copyright (c) 2010-2012 ETH Zurich, Switzerland + * 2010-2013 Achim D. Brucker, Germany + * 2016 The University of Sheffield, UK + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id: code_fsharp.thy 12648 2016-06-17 09:26:51Z brucker $ *) + +theory code_fsharp +imports + Main +begin + +ML_file "code_fsharp.ML" + +(* In file HOL/HOL.thy *) + +code_printing + type_constructor bool \ + (Fsharp) "bool" + +code_printing + constant True \ + (Fsharp) "true" +| constant False \ + (Fsharp) "false" +| constant Not \ + (Fsharp) "not" +| constant HOL.conj \ + (Fsharp) infixl 3 "&&" +| constant HOL.disj \ + (Fsharp) infixl 2 "||" +| constant HOL.implies \ + (Fsharp) "!(if (_)/ then (_)/ else true)" +| constant If \ + (Fsharp) "!(if (_)/ then (_)/ else (_))" + +code_reserved Fsharp + bool + +code_printing + constant undefined \ + (Fsharp) "failwith/ \"undefined\"" + + +(* In file HOL/Option.thy *) +code_printing + type_constructor option \ + (Fsharp) "_ option" + +code_printing + constant None \ + (Fsharp) "None" +| constant Some \ + (Fsharp) "Some _" + +code_reserved Fsharp + option None Some + +(* In file HOL/List.thy *) +code_printing + type_constructor list \ + (Fsharp) "_ list" +| constant Nil \ + (Fsharp) "[]" +| constant Cons \ + (Fsharp) "(_ ::/ _)" + +code_reserved Fsharp + list + +code_printing + constant "op @" \ + (Fsharp) infixr 6 "@" + +code_printing + type_constructor "unit" \ + (Fsharp) "unit" + +code_printing + constant "Unity" \ + (Fsharp) "()" + +code_reserved Fsharp + unit + +code_printing + type_constructor prod \ + (Fsharp) infix 2 "*" + +code_printing + constant "Pair" \ + (Fsharp) "!((_),/ (_))" + +end diff --git a/src/test-gen/src/main/codegen_fsharp/examples/AQ.thy b/src/test-gen/src/main/codegen_fsharp/examples/AQ.thy new file mode 100755 index 0000000..c5b70f0 --- /dev/null +++ b/src/test-gen/src/main/codegen_fsharp/examples/AQ.thy @@ -0,0 +1,71 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * AQ.thy thy --- + * This file is part of HOL-TestGen. + * + * Copyright (c) 2010-2012 ETH Zurich, Switzerland + * 2010-2012 Achim D. Brucker, Germany + * + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id: AQ.thy 9263 2011-12-25 15:49:36Z brucker $ *) + + +theory AQ +imports code_fsharp +begin + +datatype 'a queue = AQueue "'a list" "'a list" + +definition empty :: "'a queue" where + "empty = AQueue [] []" + +primrec enqueue :: "'a \ 'a queue \ 'a queue" where + "enqueue x (AQueue xs ys) = AQueue (x # xs) ys" + +fun dequeue :: "'a queue \ 'a option \ 'a queue" where + "dequeue (AQueue [] []) = (None, AQueue [] [])" + | "dequeue (AQueue xs (y # ys)) = (Some y, AQueue xs ys)" + | "dequeue (AQueue xs []) = + (case rev xs of y # ys \ (Some y, AQueue [] ys))" + +fun not :: "bool \ bool" where + "not True = False" + | "not False = True" + +fun head2 :: "('b list) list \ 'b option" where + "head2 [] = None" + | "head2 (x # xs) = (case x of [] \ None | (y # ys) \ Some y)" + +export_code empty dequeue enqueue not head2 in Fsharp + module_name Example file "test.fs" diff --git a/src/test-gen/src/main/codegen_fsharp/examples/SemiG.thy b/src/test-gen/src/main/codegen_fsharp/examples/SemiG.thy new file mode 100755 index 0000000..c70368d --- /dev/null +++ b/src/test-gen/src/main/codegen_fsharp/examples/SemiG.thy @@ -0,0 +1,92 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * SemiG.thy --- + * This file is part of HOL-TestGen. + * + * Copyright (c) 2010-2012 ETH Zurich, Switzerland + * 2010-2012 Achim D. Brucker, Germany + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id: SemiG.thy 9263 2011-12-25 15:49:36Z brucker $ *) + +theory SemiG +imports Main +begin + +class semigroup = + fixes mult :: "'a \ 'a \ 'a" (infixl "\" 70) + assumes assoc: "(x \ y) \ z = x \ (y \ z)" + +class monoid = semigroup + + fixes neutral :: "'a" ("\") + assumes neutl: "x \ \ = x" + and neutr: "\ \ x = x" + +instantiation nat :: monoid +begin + primrec mult_nat where + "0 \ n = (0::nat)" + | "Suc m \ n = n + m \ n" + + definition neutral_nat where + "\ = Suc 0" + + lemma add_mult_distrib: + fixes n m q :: nat + shows "(n + m) \ q = n \ q + m \ q" + by (induct n) simp_all + + instance proof + fix m n q :: nat + show "m \ n \ q = m \ (n \ q)" + by (induct m) (simp_all add: add_mult_distrib) + show "\ \ n = n" + by (simp add: neutral_nat_def) + show "m \ \ = m" + by (induct m) (simp_all add: neutral_nat_def) + qed +end + +primrec (in monoid) pow :: "nat \ 'a \ 'a" where + "pow 0 a = \" +| "pow (Suc n) a = a \ pow n a" + +definition bexp :: "nat \ nat" where + "bexp n = pow n (Suc (Suc 0))" + +export_code pow bexp in OCaml + module_name SemiG file "SemiG.ml" + +end + diff --git a/src/test-gen/src/main/codegen_fsharp/upstream/code_ml.ML b/src/test-gen/src/main/codegen_fsharp/upstream/code_ml.ML new file mode 100644 index 0000000..6f0e004 --- /dev/null +++ b/src/test-gen/src/main/codegen_fsharp/upstream/code_ml.ML @@ -0,0 +1,898 @@ +(* Title: Tools/Code/code_ml.ML + Author: Florian Haftmann, TU Muenchen + +Serializer for SML and OCaml. 
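+
+This copy appears to be the stock Isabelle serializer, vendored here under
+codegen_fsharp/upstream/ for reference: the CODE_ML signature below and the
+Theory.setup registration at the end are commented out, presumably so that it does
+not clash with the serializers already registered by the Isabelle distribution.
+The F# backend in code_fsharp.ML follows the same overall structure of statement
+printer, module printer, literals record and serialize_ml driver.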
+*) + +(* +signature CODE_ML = +sig + val target_SML: string + val target_OCaml: string +end; +*) + +structure Code_ML (*: CODE_ML *) = +struct + +open Basic_Code_Symbol; +open Basic_Code_Thingol; +open Code_Printer; + +infixr 5 @@; +infixr 5 @|; + + +(** generic **) + +val target_SML = "SML"; +val target_OCaml = "OCaml"; + +datatype ml_binding = + ML_Function of string * (typscheme * ((iterm list * iterm) * (thm option * bool)) list) + | ML_Instance of (string * class) * { class: class, tyco: string, vs: (vname * sort) list, + superinsts: (class * dict list list) list, + inst_params: ((string * (const * int)) * (thm * bool)) list, + superinst_params: ((string * (const * int)) * (thm * bool)) list }; + +datatype ml_stmt = + ML_Exc of string * (typscheme * int) + | ML_Val of ml_binding + | ML_Funs of (Code_Namespace.export * ml_binding) list * Code_Symbol.T list + | ML_Datas of (string * (vname list * ((string * vname list) * itype list) list)) list + | ML_Class of string * (vname * ((class * class) list * (string * itype) list)); + +fun print_product _ [] = NONE + | print_product print [x] = SOME (print x) + | print_product print xs = (SOME o enum " *" "" "") (map print xs); + +fun tuplify _ _ [] = NONE + | tuplify print fxy [x] = SOME (print fxy x) + | tuplify print _ xs = SOME (enum "," "(" ")" (map (print NOBR) xs)); + + +(** SML serializer **) + +fun print_char_any_ml s = + if Symbol.is_char s then ML_Syntax.print_char s else "\\092" ^ unprefix "\\" s; + +val print_string_any_ml = quote o implode o map print_char_any_ml o Symbol.explode; + +fun print_sml_stmt tyco_syntax const_syntax reserved is_constr deresolve = + let + val deresolve_const = deresolve o Constant; + val deresolve_class = deresolve o Type_Class; + val deresolve_classrel = deresolve o Class_Relation; + val deresolve_inst = deresolve o Class_Instance; + fun print_tyco_expr (sym, []) = (str o deresolve) sym + | print_tyco_expr (sym, [ty]) = + concat [print_typ BR ty, (str o deresolve) sym] + | print_tyco_expr (sym, tys) = + concat [enum "," "(" ")" (map (print_typ BR) tys), (str o deresolve) sym] + and print_typ fxy (tyco `%% tys) = (case tyco_syntax tyco + of NONE => print_tyco_expr (Type_Constructor tyco, tys) + | SOME (_, print) => print print_typ fxy tys) + | print_typ fxy (ITyVar v) = str ("'" ^ v); + fun print_dicttyp (class, ty) = print_tyco_expr (Type_Class class, [ty]); + fun print_typscheme_prefix (vs, p) = enum " ->" "" "" + (map_filter (fn (v, sort) => + (print_product (fn class => print_dicttyp (class, ITyVar v)) sort)) vs @| p); + fun print_typscheme (vs, ty) = print_typscheme_prefix (vs, print_typ NOBR ty); + fun print_dicttypscheme (vs, class_ty) = print_typscheme_prefix (vs, print_dicttyp class_ty); + fun print_classrels fxy [] ps = brackify fxy ps + | print_classrels fxy [classrel] ps = brackify fxy [(str o deresolve_classrel) classrel, brackify BR ps] + | print_classrels fxy classrels ps = + brackify fxy [enum " o" "(" ")" (map (str o deresolve_classrel) classrels), brackify BR ps] + fun print_dict is_pseudo_fun fxy (Dict (classrels, x)) = + print_classrels fxy classrels (print_plain_dict is_pseudo_fun fxy x) + and print_plain_dict is_pseudo_fun fxy (Dict_Const (inst, dss)) = + ((str o deresolve_inst) inst :: + (if is_pseudo_fun (Class_Instance inst) then [str "()"] + else map_filter (print_dicts is_pseudo_fun BR) dss)) + | print_plain_dict is_pseudo_fun fxy (Dict_Var (v, (i, k))) = + [str (if k = 1 then Name.enforce_case true v ^ "_" + else Name.enforce_case true v ^ string_of_int (i+1) ^ "_")] + and 
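+  (* a sort with several class constraints is passed as a tuple of dictionaries; symbols
+     treated as pseudo functions get a dummy "()" argument instead of dictionary arguments *)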
print_dicts is_pseudo_fun = tuplify (print_dict is_pseudo_fun); + val print_dict_args = map_filter (fn (v, sort) => print_dicts (K false) BR + (map_index (fn (i, _) => Dict ([], Dict_Var (v, (i, length sort)))) sort)); + fun print_term is_pseudo_fun some_thm vars fxy (IConst const) = + print_app is_pseudo_fun some_thm vars fxy (const, []) + | print_term is_pseudo_fun some_thm vars fxy (IVar NONE) = + str "_" + | print_term is_pseudo_fun some_thm vars fxy (IVar (SOME v)) = + str (lookup_var vars v) + | print_term is_pseudo_fun some_thm vars fxy (t as t1 `$ t2) = + (case Code_Thingol.unfold_const_app t + of SOME app => print_app is_pseudo_fun some_thm vars fxy app + | NONE => brackify fxy [print_term is_pseudo_fun some_thm vars NOBR t1, + print_term is_pseudo_fun some_thm vars BR t2]) + | print_term is_pseudo_fun some_thm vars fxy (t as _ `|=> _) = + let + val (binds, t') = Code_Thingol.unfold_pat_abs t; + fun print_abs (pat, ty) = + print_bind is_pseudo_fun some_thm NOBR pat + #>> (fn p => concat [str "fn", p, str "=>"]); + val (ps, vars') = fold_map print_abs binds vars; + in brackets (ps @ [print_term is_pseudo_fun some_thm vars' NOBR t']) end + | print_term is_pseudo_fun some_thm vars fxy (ICase case_expr) = + (case Code_Thingol.unfold_const_app (#primitive case_expr) + of SOME (app as ({ sym = Constant const, ... }, _)) => + if is_none (const_syntax const) + then print_case is_pseudo_fun some_thm vars fxy case_expr + else print_app is_pseudo_fun some_thm vars fxy app + | NONE => print_case is_pseudo_fun some_thm vars fxy case_expr) + and print_app_expr is_pseudo_fun some_thm vars (app as ({ sym, dicts = dss, dom = dom, ... }, ts)) = + if is_constr sym then + let val k = length dom in + if k < 2 orelse length ts = k + then (str o deresolve) sym + :: the_list (tuplify (print_term is_pseudo_fun some_thm vars) BR ts) + else [print_term is_pseudo_fun some_thm vars BR (Code_Thingol.eta_expand k app)] + end + else if is_pseudo_fun sym + then (str o deresolve) sym @@ str "()" + else (str o deresolve) sym :: map_filter (print_dicts is_pseudo_fun BR) dss + @ map (print_term is_pseudo_fun some_thm vars BR) ts + and print_app is_pseudo_fun some_thm vars = gen_print_app (print_app_expr is_pseudo_fun) + (print_term is_pseudo_fun) const_syntax some_thm vars + and print_bind is_pseudo_fun = gen_print_bind (print_term is_pseudo_fun) + and print_case is_pseudo_fun some_thm vars fxy { clauses = [], ... } = + (concat o map str) ["raise", "Fail", "\"empty case\""] + | print_case is_pseudo_fun some_thm vars fxy (case_expr as { clauses = [_], ... }) = + let + val (binds, body) = Code_Thingol.unfold_let (ICase case_expr); + fun print_match ((pat, _), t) vars = + vars + |> print_bind is_pseudo_fun some_thm NOBR pat + |>> (fn p => semicolon [str "val", p, str "=", + print_term is_pseudo_fun some_thm vars NOBR t]) + val (ps, vars') = fold_map print_match binds vars; + in + Pretty.chunks [ + Pretty.block [str "let", Pretty.fbrk, Pretty.chunks ps], + Pretty.block [str "in", Pretty.fbrk, print_term is_pseudo_fun some_thm vars' NOBR body], + str "end" + ] + end + | print_case is_pseudo_fun some_thm vars fxy { term = t, typ = ty, clauses = clause :: clauses, ... 
} = + let + fun print_select delim (pat, body) = + let + val (p, vars') = print_bind is_pseudo_fun some_thm NOBR pat vars; + in + concat [str delim, p, str "=>", print_term is_pseudo_fun some_thm vars' NOBR body] + end; + in + brackets ( + str "case" + :: print_term is_pseudo_fun some_thm vars NOBR t + :: print_select "of" clause + :: map (print_select "|") clauses + ) + end; + fun print_val_decl print_typscheme (sym, typscheme) = concat + [str "val", str (deresolve sym), str ":", print_typscheme typscheme]; + fun print_datatype_decl definer (tyco, (vs, cos)) = + let + fun print_co ((co, _), []) = str (deresolve_const co) + | print_co ((co, _), tys) = concat [str (deresolve_const co), str "of", + enum " *" "" "" (map (print_typ (INFX (2, X))) tys)]; + in + concat ( + str definer + :: print_tyco_expr (Type_Constructor tyco, map ITyVar vs) + :: str "=" + :: separate (str "|") (map print_co cos) + ) + end; + fun print_def is_pseudo_fun needs_typ definer + (ML_Function (const, (vs_ty as (vs, ty), eq :: eqs))) = + let + fun print_eqn definer ((ts, t), (some_thm, _)) = + let + val vars = reserved + |> intro_base_names_for (is_none o const_syntax) + deresolve (t :: ts) + |> intro_vars ((fold o Code_Thingol.fold_varnames) + (insert (op =)) ts []); + val prolog = if needs_typ then + concat [str definer, (str o deresolve_const) const, str ":", print_typ NOBR ty] + else (concat o map str) [definer, deresolve_const const]; + in + concat ( + prolog + :: (if is_pseudo_fun (Constant const) then [str "()"] + else print_dict_args vs + @ map (print_term is_pseudo_fun some_thm vars BR) ts) + @ str "=" + @@ print_term is_pseudo_fun some_thm vars NOBR t + ) + end + val shift = if null eqs then I else + map (Pretty.block o single o Pretty.block o single); + in pair + (print_val_decl print_typscheme (Constant const, vs_ty)) + ((Pretty.block o Pretty.fbreaks o shift) ( + print_eqn definer eq + :: map (print_eqn "|") eqs + )) + end + | print_def is_pseudo_fun _ definer + (ML_Instance (inst as (tyco, class), { vs, superinsts, inst_params, ... 
})) = + let + fun print_super_instance (super_class, x) = + concat [ + (str o Long_Name.base_name o deresolve_classrel) (class, super_class), + str "=", + print_dict is_pseudo_fun NOBR (Dict ([], Dict_Const ((tyco, super_class), x))) + ]; + fun print_classparam_instance ((classparam, (const, _)), (thm, _)) = + concat [ + (str o Long_Name.base_name o deresolve_const) classparam, + str "=", + print_app (K false) (SOME thm) reserved NOBR (const, []) + ]; + in pair + (print_val_decl print_dicttypscheme + (Class_Instance inst, (vs, (class, tyco `%% map (ITyVar o fst) vs)))) + (concat ( + str definer + :: (str o deresolve_inst) inst + :: (if is_pseudo_fun (Class_Instance inst) then [str "()"] + else print_dict_args vs) + @ str "=" + :: enum "," "{" "}" + (map print_super_instance superinsts + @ map print_classparam_instance inst_params) + :: str ":" + @@ print_dicttyp (class, tyco `%% map (ITyVar o fst) vs) + )) + end; + fun print_stmt _ (ML_Exc (const, (vs_ty, n))) = pair + [print_val_decl print_typscheme (Constant const, vs_ty)] + ((semicolon o map str) ( + (if n = 0 then "val" else "fun") + :: deresolve_const const + :: replicate n "_" + @ "=" + :: "raise" + :: "Fail" + @@ print_string_any_ml const + )) + | print_stmt _ (ML_Val binding) = + let + val (sig_p, p) = print_def (K false) true "val" binding + in pair + [sig_p] + (semicolon [p]) + end + | print_stmt _ (ML_Funs ((export, binding) :: exports_bindings, pseudo_funs)) = + let + val print_def' = print_def (member (op =) pseudo_funs) false; + fun print_pseudo_fun sym = concat [ + str "val", + (str o deresolve) sym, + str "=", + (str o deresolve) sym, + str "();" + ]; + val (sig_ps, (ps, p)) = (apsnd split_last o split_list) + (print_def' "fun" binding :: map (print_def' "and" o snd) exports_bindings); + val pseudo_ps = map print_pseudo_fun pseudo_funs; + in pair + (map_filter (fn (export, p) => if Code_Namespace.not_private export then SOME p else NONE) + ((export :: map fst exports_bindings) ~~ sig_ps)) + (Pretty.chunks (ps @ semicolon [p] :: pseudo_ps)) + end + | print_stmt _ (ML_Datas [(tyco, (vs, []))]) = + let + val ty_p = print_tyco_expr (Type_Constructor tyco, map ITyVar vs); + in + pair + [concat [str "type", ty_p]] + (semicolon [str "datatype", ty_p, str "=", str "EMPTY__"]) + end + | print_stmt export (ML_Datas (data :: datas)) = + let + val decl_ps = print_datatype_decl "datatype" data + :: map (print_datatype_decl "and") datas; + val (ps, p) = split_last decl_ps; + in pair + (if Code_Namespace.is_public export + then decl_ps + else map (fn (tyco, (vs, _)) => + concat [str "type", print_tyco_expr (Type_Constructor tyco, map ITyVar vs)]) + (data :: datas)) + (Pretty.chunks (ps @| semicolon [p])) + end + | print_stmt export (ML_Class (class, (v, (classrels, classparams)))) = + let + fun print_field s p = concat [str s, str ":", p]; + fun print_proj s p = semicolon + (map str ["val", s, "=", "#" ^ s, ":"] @| p); + fun print_super_class_decl (classrel as (_, super_class)) = + print_val_decl print_dicttypscheme + (Class_Relation classrel, ([(v, [class])], (super_class, ITyVar v))); + fun print_super_class_field (classrel as (_, super_class)) = + print_field (deresolve_classrel classrel) (print_dicttyp (super_class, ITyVar v)); + fun print_super_class_proj (classrel as (_, super_class)) = + print_proj (deresolve_classrel classrel) + (print_dicttypscheme ([(v, [class])], (super_class, ITyVar v))); + fun print_classparam_decl (classparam, ty) = + print_val_decl print_typscheme + (Constant classparam, ([(v, [class])], ty)); + fun 
print_classparam_field (classparam, ty) = + print_field (deresolve_const classparam) (print_typ NOBR ty); + fun print_classparam_proj (classparam, ty) = + print_proj (deresolve_const classparam) + (print_typscheme ([(v, [class])], ty)); + in pair + (concat [str "type", print_dicttyp (class, ITyVar v)] + :: (if Code_Namespace.is_public export + then map print_super_class_decl classrels + @ map print_classparam_decl classparams + else [])) + (Pretty.chunks ( + concat [ + str "type", + print_dicttyp (class, ITyVar v), + str "=", + enum "," "{" "};" ( + map print_super_class_field classrels + @ map print_classparam_field classparams + ) + ] + :: map print_super_class_proj classrels + @ map print_classparam_proj classparams + )) + end; + in print_stmt end; + +fun print_sml_module name decls body = + Pretty.chunks2 ( + Pretty.chunks [ + str ("structure " ^ name ^ " : sig"), + (indent 2 o Pretty.chunks) decls, + str "end = struct" + ] + :: body + @| str ("end; (*struct " ^ name ^ "*)") + ); + +val literals_sml = Literals { + literal_char = prefix "#" o quote o ML_Syntax.print_char, + literal_string = print_string_any_ml, + literal_numeral = fn k => "(" ^ string_of_int k ^ " : IntInf.int)", + literal_list = enum "," "[" "]", + infix_cons = (7, "::") +}; + + +(** OCaml serializer **) + +fun print_ocaml_stmt tyco_syntax const_syntax reserved is_constr deresolve = + let + val deresolve_const = deresolve o Constant; + val deresolve_class = deresolve o Type_Class; + val deresolve_classrel = deresolve o Class_Relation; + val deresolve_inst = deresolve o Class_Instance; + fun print_tyco_expr (sym, []) = (str o deresolve) sym + | print_tyco_expr (sym, [ty]) = + concat [print_typ BR ty, (str o deresolve) sym] + | print_tyco_expr (sym, tys) = + concat [enum "," "(" ")" (map (print_typ BR) tys), (str o deresolve) sym] + and print_typ fxy (tyco `%% tys) = (case tyco_syntax tyco + of NONE => print_tyco_expr (Type_Constructor tyco, tys) + | SOME (_, print) => print print_typ fxy tys) + | print_typ fxy (ITyVar v) = str ("'" ^ v); + fun print_dicttyp (class, ty) = print_tyco_expr (Type_Class class, [ty]); + fun print_typscheme_prefix (vs, p) = enum " ->" "" "" + (map_filter (fn (v, sort) => + (print_product (fn class => print_dicttyp (class, ITyVar v)) sort)) vs @| p); + fun print_typscheme (vs, ty) = print_typscheme_prefix (vs, print_typ NOBR ty); + fun print_dicttypscheme (vs, class_ty) = print_typscheme_prefix (vs, print_dicttyp class_ty); + val print_classrels = + fold_rev (fn classrel => fn p => Pretty.block [p, str ".", (str o deresolve_classrel) classrel]) + fun print_dict is_pseudo_fun fxy (Dict (classrels, x)) = + print_plain_dict is_pseudo_fun fxy x + |> print_classrels classrels + and print_plain_dict is_pseudo_fun fxy (Dict_Const (inst, dss)) = + brackify BR ((str o deresolve_inst) inst :: + (if is_pseudo_fun (Class_Instance inst) then [str "()"] + else map_filter (print_dicts is_pseudo_fun BR) dss)) + | print_plain_dict is_pseudo_fun fxy (Dict_Var (v, (i, k))) = + str (if k = 1 then "_" ^ Name.enforce_case true v + else "_" ^ Name.enforce_case true v ^ string_of_int (i+1)) + and print_dicts is_pseudo_fun = tuplify (print_dict is_pseudo_fun); + val print_dict_args = map_filter (fn (v, sort) => print_dicts (K false) BR + (map_index (fn (i, _) => Dict ([], Dict_Var (v, (i, length sort)))) sort)); + fun print_term is_pseudo_fun some_thm vars fxy (IConst const) = + print_app is_pseudo_fun some_thm vars fxy (const, []) + | print_term is_pseudo_fun some_thm vars fxy (IVar NONE) = + str "_" + | print_term 
is_pseudo_fun some_thm vars fxy (IVar (SOME v)) = + str (lookup_var vars v) + | print_term is_pseudo_fun some_thm vars fxy (t as t1 `$ t2) = + (case Code_Thingol.unfold_const_app t + of SOME app => print_app is_pseudo_fun some_thm vars fxy app + | NONE => brackify fxy [print_term is_pseudo_fun some_thm vars NOBR t1, + print_term is_pseudo_fun some_thm vars BR t2]) + | print_term is_pseudo_fun some_thm vars fxy (t as _ `|=> _) = + let + val (binds, t') = Code_Thingol.unfold_pat_abs t; + val (ps, vars') = fold_map (print_bind is_pseudo_fun some_thm BR o fst) binds vars; + in brackets (str "fun" :: ps @ str "->" @@ print_term is_pseudo_fun some_thm vars' NOBR t') end + | print_term is_pseudo_fun some_thm vars fxy (ICase case_expr) = + (case Code_Thingol.unfold_const_app (#primitive case_expr) + of SOME (app as ({ sym = Constant const, ... }, _)) => + if is_none (const_syntax const) + then print_case is_pseudo_fun some_thm vars fxy case_expr + else print_app is_pseudo_fun some_thm vars fxy app + | NONE => print_case is_pseudo_fun some_thm vars fxy case_expr) + and print_app_expr is_pseudo_fun some_thm vars (app as ({ sym, dicts = dss, dom = dom, ... }, ts)) = + if is_constr sym then + let val k = length dom in + if length ts = k + then (str o deresolve) sym + :: the_list (tuplify (print_term is_pseudo_fun some_thm vars) BR ts) + else [print_term is_pseudo_fun some_thm vars BR (Code_Thingol.eta_expand k app)] + end + else if is_pseudo_fun sym + then (str o deresolve) sym @@ str "()" + else (str o deresolve) sym :: map_filter (print_dicts is_pseudo_fun BR) dss + @ map (print_term is_pseudo_fun some_thm vars BR) ts + and print_app is_pseudo_fun some_thm vars = gen_print_app (print_app_expr is_pseudo_fun) + (print_term is_pseudo_fun) const_syntax some_thm vars + and print_bind is_pseudo_fun = gen_print_bind (print_term is_pseudo_fun) + and print_case is_pseudo_fun some_thm vars fxy { clauses = [], ... } = + (concat o map str) ["failwith", "\"empty case\""] + | print_case is_pseudo_fun some_thm vars fxy (case_expr as { clauses = [_], ... }) = + let + val (binds, body) = Code_Thingol.unfold_let (ICase case_expr); + fun print_let ((pat, _), t) vars = + vars + |> print_bind is_pseudo_fun some_thm NOBR pat + |>> (fn p => concat + [str "let", p, str "=", print_term is_pseudo_fun some_thm vars NOBR t, str "in"]) + val (ps, vars') = fold_map print_let binds vars; + in + brackets [Pretty.chunks ps, print_term is_pseudo_fun some_thm vars' NOBR body] + end + | print_case is_pseudo_fun some_thm vars fxy { term = t, typ = ty, clauses = clause :: clauses, ... 
} = + let + fun print_select delim (pat, body) = + let + val (p, vars') = print_bind is_pseudo_fun some_thm NOBR pat vars; + in concat [str delim, p, str "->", print_term is_pseudo_fun some_thm vars' NOBR body] end; + in + brackets ( + str "match" + :: print_term is_pseudo_fun some_thm vars NOBR t + :: print_select "with" clause + :: map (print_select "|") clauses + ) + end; + fun print_val_decl print_typscheme (sym, typscheme) = concat + [str "val", str (deresolve sym), str ":", print_typscheme typscheme]; + fun print_datatype_decl definer (tyco, (vs, cos)) = + let + fun print_co ((co, _), []) = str (deresolve_const co) + | print_co ((co, _), tys) = concat [str (deresolve_const co), str "of", + enum " *" "" "" (map (print_typ (INFX (2, X))) tys)]; + in + concat ( + str definer + :: print_tyco_expr (Type_Constructor tyco, map ITyVar vs) + :: str "=" + :: separate (str "|") (map print_co cos) + ) + end; + fun print_def is_pseudo_fun needs_typ definer + (ML_Function (const, (vs_ty as (vs, ty), eqs))) = + let + fun print_eqn ((ts, t), (some_thm, _)) = + let + val vars = reserved + |> intro_base_names_for (is_none o const_syntax) + deresolve (t :: ts) + |> intro_vars ((fold o Code_Thingol.fold_varnames) + (insert (op =)) ts []); + in concat [ + (Pretty.block o commas) + (map (print_term is_pseudo_fun some_thm vars NOBR) ts), + str "->", + print_term is_pseudo_fun some_thm vars NOBR t + ] end; + fun print_eqns is_pseudo [((ts, t), (some_thm, _))] = + let + val vars = reserved + |> intro_base_names_for (is_none o const_syntax) + deresolve (t :: ts) + |> intro_vars ((fold o Code_Thingol.fold_varnames) + (insert (op =)) ts []); + in + concat ( + (if is_pseudo then [str "()"] + else map (print_term is_pseudo_fun some_thm vars BR) ts) + @ str "=" + @@ print_term is_pseudo_fun some_thm vars NOBR t + ) + end + | print_eqns _ ((eq as (([_], _), _)) :: eqs) = + Pretty.block ( + str "=" + :: Pretty.brk 1 + :: str "function" + :: Pretty.brk 1 + :: print_eqn eq + :: maps (append [Pretty.fbrk, str "|", Pretty.brk 1] + o single o print_eqn) eqs + ) + | print_eqns _ (eqs as eq :: eqs') = + let + val vars = reserved + |> intro_base_names_for (is_none o const_syntax) + deresolve (map (snd o fst) eqs) + val dummy_parms = (map str o aux_params vars o map (fst o fst)) eqs; + in + Pretty.block ( + Pretty.breaks dummy_parms + @ Pretty.brk 1 + :: str "=" + :: Pretty.brk 1 + :: str "match" + :: Pretty.brk 1 + :: (Pretty.block o commas) dummy_parms + :: Pretty.brk 1 + :: str "with" + :: Pretty.brk 1 + :: print_eqn eq + :: maps (append [Pretty.fbrk, str "|", Pretty.brk 1] + o single o print_eqn) eqs' + ) + end; + val prolog = if needs_typ then + concat [str definer, (str o deresolve_const) const, str ":", print_typ NOBR ty] + else (concat o map str) [definer, deresolve_const const]; + in pair + (print_val_decl print_typscheme (Constant const, vs_ty)) + (concat ( + prolog + :: print_dict_args vs + @| print_eqns (is_pseudo_fun (Constant const)) eqs + )) + end + | print_def is_pseudo_fun _ definer + (ML_Instance (inst as (tyco, class), { vs, superinsts, inst_params, ... 
})) = + let + fun print_super_instance (super_class, x) = + concat [ + (str o deresolve_classrel) (class, super_class), + str "=", + print_dict is_pseudo_fun NOBR (Dict ([], Dict_Const ((tyco, super_class), x))) + ]; + fun print_classparam_instance ((classparam, (const, _)), (thm, _)) = + concat [ + (str o deresolve_const) classparam, + str "=", + print_app (K false) (SOME thm) reserved NOBR (const, []) + ]; + in pair + (print_val_decl print_dicttypscheme + (Class_Instance inst, (vs, (class, tyco `%% map (ITyVar o fst) vs)))) + (concat ( + str definer + :: (str o deresolve_inst) inst + :: (if is_pseudo_fun (Class_Instance inst) then [str "()"] + else print_dict_args vs) + @ str "=" + @@ brackets [ + enum_default "()" ";" "{" "}" (map print_super_instance superinsts + @ map print_classparam_instance inst_params), + str ":", + print_dicttyp (class, tyco `%% map (ITyVar o fst) vs) + ] + )) + end; + fun print_stmt _ (ML_Exc (const, (vs_ty, n))) = pair + [print_val_decl print_typscheme (Constant const, vs_ty)] + ((doublesemicolon o map str) ( + "let" + :: deresolve_const const + :: replicate n "_" + @ "=" + :: "failwith" + @@ ML_Syntax.print_string const + )) + | print_stmt _ (ML_Val binding) = + let + val (sig_p, p) = print_def (K false) true "let" binding + in pair + [sig_p] + (doublesemicolon [p]) + end + | print_stmt _ (ML_Funs ((export, binding) :: exports_bindings, pseudo_funs)) = + let + val print_def' = print_def (member (op =) pseudo_funs) false; + fun print_pseudo_fun sym = concat [ + str "let", + (str o deresolve) sym, + str "=", + (str o deresolve) sym, + str "();;" + ]; + val (sig_ps, (ps, p)) = (apsnd split_last o split_list) + (print_def' "let rec" binding :: map (print_def' "and" o snd) exports_bindings); + val pseudo_ps = map print_pseudo_fun pseudo_funs; + in pair + (map_filter (fn (export, p) => if Code_Namespace.not_private export then SOME p else NONE) + ((export :: map fst exports_bindings) ~~ sig_ps)) + (Pretty.chunks (ps @ doublesemicolon [p] :: pseudo_ps)) + end + | print_stmt _ (ML_Datas [(tyco, (vs, []))]) = + let + val ty_p = print_tyco_expr (Type_Constructor tyco, map ITyVar vs); + in + pair + [concat [str "type", ty_p]] + (doublesemicolon [str "type", ty_p, str "=", str "EMPTY__"]) + end + | print_stmt export (ML_Datas (data :: datas)) = + let + val decl_ps = print_datatype_decl "type" data + :: map (print_datatype_decl "and") datas; + val (ps, p) = split_last decl_ps; + in pair + (if Code_Namespace.is_public export + then decl_ps + else map (fn (tyco, (vs, _)) => + concat [str "type", print_tyco_expr (Type_Constructor tyco, map ITyVar vs)]) + (data :: datas)) + (Pretty.chunks (ps @| doublesemicolon [p])) + end + | print_stmt export (ML_Class (class, (v, (classrels, classparams)))) = + let + fun print_field s p = concat [str s, str ":", p]; + fun print_super_class_field (classrel as (_, super_class)) = + print_field (deresolve_classrel classrel) (print_dicttyp (super_class, ITyVar v)); + fun print_classparam_decl (classparam, ty) = + print_val_decl print_typscheme + (Constant classparam, ([(v, [class])], ty)); + fun print_classparam_field (classparam, ty) = + print_field (deresolve_const classparam) (print_typ NOBR ty); + val w = "_" ^ Name.enforce_case true v; + fun print_classparam_proj (classparam, _) = + (concat o map str) ["let", deresolve_const classparam, w, "=", + w ^ "." 
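+        (* class parameters are projected via OCaml record field access; with illustrative
+           names this prints e.g. "let foo _A = _A.foo;;" *)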
^ deresolve_const classparam ^ ";;"]; + val type_decl_p = concat [ + str "type", + print_dicttyp (class, ITyVar v), + str "=", + enum_default "unit" ";" "{" "}" ( + map print_super_class_field classrels + @ map print_classparam_field classparams + ) + ]; + in pair + (if Code_Namespace.is_public export + then type_decl_p :: map print_classparam_decl classparams + else [concat [str "type", print_dicttyp (class, ITyVar v)]]) + (Pretty.chunks ( + doublesemicolon [type_decl_p] + :: map print_classparam_proj classparams + )) + end; + in print_stmt end; + +fun print_ocaml_module name decls body = + Pretty.chunks2 ( + Pretty.chunks [ + str ("module " ^ name ^ " : sig"), + (indent 2 o Pretty.chunks) decls, + str "end = struct" + ] + :: body + @| str ("end;; (*struct " ^ name ^ "*)") + ); + +val literals_ocaml = let + fun chr i = + let + val xs = string_of_int i; + val ys = replicate_string (3 - length (raw_explode xs)) "0"; + in "\\" ^ ys ^ xs end; + fun char_ocaml c = + let + val i = ord c; + val s = if i < 32 orelse i = 34 orelse i = 39 orelse i = 92 orelse i > 126 + then chr i else c + in s end; + fun numeral_ocaml k = if k < 0 + then "(Big_int.minus_big_int " ^ numeral_ocaml (~ k) ^ ")" + else if k <= 1073741823 + then "(Big_int.big_int_of_int " ^ string_of_int k ^ ")" + else "(Big_int.big_int_of_string " ^ quote (string_of_int k) ^ ")" +in Literals { + literal_char = Library.enclose "'" "'" o char_ocaml, + literal_string = quote o translate_string char_ocaml, + literal_numeral = numeral_ocaml, + literal_list = enum ";" "[" "]", + infix_cons = (6, "::") +} end; + + + +(** SML/OCaml generic part **) + +fun ml_program_of_program ctxt module_name reserved identifiers = + let + fun namify_const upper base (nsp_const, nsp_type) = + let + val (base', nsp_const') = Name.variant (Name.enforce_case upper base) nsp_const + in (base', (nsp_const', nsp_type)) end; + fun namify_type base (nsp_const, nsp_type) = + let + val (base', nsp_type') = Name.variant (Name.enforce_case false base) nsp_type + in (base', (nsp_const, nsp_type')) end; + fun namify_stmt (Code_Thingol.Fun _) = namify_const false + | namify_stmt (Code_Thingol.Datatype _) = namify_type + | namify_stmt (Code_Thingol.Datatypecons _) = namify_const true + | namify_stmt (Code_Thingol.Class _) = namify_type + | namify_stmt (Code_Thingol.Classrel _) = namify_const false + | namify_stmt (Code_Thingol.Classparam _) = namify_const false + | namify_stmt (Code_Thingol.Classinst _) = namify_const false; + fun ml_binding_of_stmt (sym as Constant const, (export, Code_Thingol.Fun ((tysm as (vs, ty), raw_eqs), _))) = + let + val eqs = filter (snd o snd) raw_eqs; + val (eqs', some_sym) = if null (filter_out (null o snd) vs) then case eqs + of [(([], t), some_thm)] => if (not o null o fst o Code_Thingol.unfold_fun) ty + then ([(([IVar (SOME "x")], t `$ IVar (SOME "x")), some_thm)], NONE) + else (eqs, SOME (sym, member (op =) (Code_Thingol.add_constsyms t []) sym)) + | _ => (eqs, NONE) + else (eqs, NONE) + in ((export, ML_Function (const, (tysm, eqs'))), some_sym) end + | ml_binding_of_stmt (sym as Class_Instance inst, (export, Code_Thingol.Classinst (stmt as { vs, ... 
}))) = + ((export, ML_Instance (inst, stmt)), + if forall (null o snd) vs then SOME (sym, false) else NONE) + | ml_binding_of_stmt (sym, _) = + error ("Binding block containing illegal statement: " ^ + Code_Symbol.quote ctxt sym) + fun modify_fun (sym, (export, stmt)) = + let + val ((export', binding), some_value_sym) = ml_binding_of_stmt (sym, (export, stmt)); + val ml_stmt = case binding + of ML_Function (const, ((vs, ty), [])) => + ML_Exc (const, ((vs, ty), + (length o filter_out (null o snd)) vs + (length o fst o Code_Thingol.unfold_fun) ty)) + | _ => case some_value_sym + of NONE => ML_Funs ([(export', binding)], []) + | SOME (sym, true) => ML_Funs ([(export, binding)], [sym]) + | SOME (sym, false) => ML_Val binding + in SOME (export, ml_stmt) end; + fun modify_funs stmts = single (SOME + (Code_Namespace.Opaque, ML_Funs (map_split ml_binding_of_stmt stmts |> (apsnd o map_filter o Option.map) fst))) + fun modify_datatypes stmts = + map_filter + (fn (Type_Constructor tyco, (export, Code_Thingol.Datatype stmt)) => SOME (export, (tyco, stmt)) | _ => NONE) stmts + |> split_list + |> apfst Code_Namespace.join_exports + |> apsnd ML_Datas + |> SOME + |> single; + fun modify_class stmts = + the_single (map_filter + (fn (Type_Class class, (export, Code_Thingol.Class stmt)) => SOME (export, (class, stmt)) | _ => NONE) stmts) + |> apsnd ML_Class + |> SOME + |> single; + fun modify_stmts ([stmt as (_, (_, stmt' as Code_Thingol.Fun _))]) = + if Code_Thingol.is_case stmt' then [] else [modify_fun stmt] + | modify_stmts ((stmts as (_, (_, Code_Thingol.Fun _)) :: _)) = + modify_funs (filter_out (Code_Thingol.is_case o snd o snd) stmts) + | modify_stmts ((stmts as (_, (_, Code_Thingol.Datatypecons _)) :: _)) = + modify_datatypes stmts + | modify_stmts ((stmts as (_, (_, Code_Thingol.Datatype _)) :: _)) = + modify_datatypes stmts + | modify_stmts ((stmts as (_, (_, Code_Thingol.Class _)) :: _)) = + modify_class stmts + | modify_stmts ((stmts as (_, (_, Code_Thingol.Classrel _)) :: _)) = + modify_class stmts + | modify_stmts ((stmts as (_, (_, Code_Thingol.Classparam _)) :: _)) = + modify_class stmts + | modify_stmts ([stmt as (_, (_, Code_Thingol.Classinst _))]) = + [modify_fun stmt] + | modify_stmts ((stmts as (_, (_, Code_Thingol.Classinst _)) :: _)) = + modify_funs stmts + | modify_stmts stmts = error ("Illegal mutual dependencies: " ^ + (Library.commas o map (Code_Symbol.quote ctxt o fst)) stmts); + in + Code_Namespace.hierarchical_program ctxt { + module_name = module_name, reserved = reserved, identifiers = identifiers, + empty_nsp = (reserved, reserved), namify_module = pair, namify_stmt = namify_stmt, + cyclic_modules = false, class_transitive = true, + class_relation_public = true, empty_data = (), + memorize_data = K I, modify_stmts = modify_stmts } + end; + +fun serialize_ml print_ml_module print_ml_stmt ctxt + { module_name, reserved_syms, identifiers, includes, + class_syntax, tyco_syntax, const_syntax } exports program = + let + + (* build program *) + val { deresolver, hierarchical_program = ml_program } = + ml_program_of_program ctxt module_name (Name.make_context reserved_syms) + identifiers exports program; + + (* print statements *) + fun print_stmt prefix_fragments (_, (export, stmt)) = print_ml_stmt + tyco_syntax const_syntax (make_vars reserved_syms) + (Code_Thingol.is_constr program) (deresolver prefix_fragments) export stmt + |> apfst (fn decl => if Code_Namespace.not_private export then SOME decl else NONE); + + (* print modules *) + fun print_module _ base _ xs = + let + val 
(raw_decls, body) = split_list xs; + val decls = maps these raw_decls + in (NONE, print_ml_module base decls body) end; + + (* serialization *) + val p = Pretty.chunks2 (map snd includes + @ map snd (Code_Namespace.print_hierarchical { + print_module = print_module, print_stmt = print_stmt, + lift_markup = apsnd } ml_program)); + fun write width NONE = writeln o format [] width + | write width (SOME p) = File.write p o format [] width; + fun prepare syms width p = ([("", format syms width p)], try (deresolver [])); + in + Code_Target.serialization write prepare p + end; + +val serializer_sml : Code_Target.serializer = + Code_Target.parse_args (Scan.succeed ()) #> K (serialize_ml print_sml_module print_sml_stmt); + +val serializer_ocaml : Code_Target.serializer = + Code_Target.parse_args (Scan.succeed ()) #> K (serialize_ml print_ocaml_module print_ocaml_stmt); + + +(** Isar setup **) + +fun fun_syntax print_typ fxy [ty1, ty2] = + brackify_infix (1, R) fxy ( + print_typ (INFX (1, X)) ty1, + str "->", + print_typ (INFX (1, R)) ty2 + ); + +(* +val _ = Theory.setup + (Code_Target.add_language + (target_SML, { serializer = serializer_sml, literals = literals_sml, + check = { env_var = "ISABELLE_PROCESS", + make_destination = fn p => Path.append p (Path.explode "ROOT.ML"), + make_command = fn _ => + "\"$ISABELLE_PROCESS\" -r -q -e 'datatype ref = datatype Unsynchronized.ref; use \"ROOT.ML\" handle _ => exit 1' Pure" } }) + #> Code_Target.add_language + (target_OCaml, { serializer = serializer_ocaml, literals = literals_ocaml, + check = { env_var = "ISABELLE_OCAML", + make_destination = fn p => Path.append p (Path.explode "ROOT.ocaml"), + make_command = fn _ => "\"$ISABELLE_OCAML\" -w pu nums.cma ROOT.ocaml" } }) + #> Code_Target.set_printings (Type_Constructor ("fun", + [(target_SML, SOME (2, fun_syntax)), (target_OCaml, SOME (2, fun_syntax))])) + #> fold (Code_Target.add_reserved target_SML) ML_Syntax.reserved_names + #> fold (Code_Target.add_reserved target_SML) + ["ref" (*rebinding is illegal*), "o" (*dictionary projections use it already*), + "Fail", "div", "mod" (*standard infixes*), "IntInf"] + #> fold (Code_Target.add_reserved target_OCaml) [ + "and", "as", "assert", "begin", "class", + "constraint", "do", "done", "downto", "else", "end", "exception", + "external", "false", "for", "fun", "function", "functor", "if", + "in", "include", "inherit", "initializer", "lazy", "let", "match", "method", + "module", "mutable", "new", "object", "of", "open", "or", "private", "rec", + "sig", "struct", "then", "to", "true", "try", "type", "val", + "virtual", "when", "while", "with" + ] + #> fold (Code_Target.add_reserved target_OCaml) ["failwith", "mod", "Big_int"]); +*) +end; (*struct*) diff --git a/src/test-gen/src/main/codegen_gdb/Code_gdb_script.thy b/src/test-gen/src/main/codegen_gdb/Code_gdb_script.thy new file mode 100644 index 0000000..115354a --- /dev/null +++ b/src/test-gen/src/main/codegen_gdb/Code_gdb_script.thy @@ -0,0 +1,322 @@ +theory Code_gdb_script +imports Main "../TestLib" +(*keywords "gen_gdb_script" :: "qed_global"*) +begin + +datatype gdb_comand = + break string gdb_comand + | commands gdb_comand + | silent gdb_comand + | continue gdb_comand + | thread gdb_comand + | "end" gdb_comand + | sharp string + |start + +datatype gdb_option = + logging gdb_option + |on + |off + |pagination gdb_option + |"file" string + |print gdb_option + + + +subsection {*writing on file using Isabelle/ML*} +ML{* + val file_path_try = 
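+  (* the relative path below is resolved against the theory's master directory via
+     Resources.master_directory; file_check merely records whether the file exists *)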
"../../add-ons/OS-IFP-test/OS_kernel_model/IPC/example_gdb_impl/c/yakoub.gdb" + |> Path.explode + |> Path.append (Resources.master_directory @{theory }); + val file_check = file_path_try |> File.exists; + (*val file_write = File.write file_path_office "#yakoub";*) + +*} + +(*Generation of a set of gdb files*) + +ML{* + fun writeFiles _ _ [] = [] + | writeFiles filePath fileExtension (gdb_script :: gdb_script_list) = + ([filePath] @ [(gdb_script :: gdb_script_list) |> length |> Int.toString] @ + [fileExtension] |> String.concat |> Path.explode |> File.write_list) gdb_script:: + writeFiles filePath fileExtension gdb_script_list; + *} +(*master parth*) +ML{* (*Thy_Load.master_directory*) + Resources.master_directory @{theory}; + *} +ML {*Resources.master_directory @{theory}; + fun masterPath_add theory Path = Path + |> Path.explode + |> Path.append (Resources.master_directory theory) + |> Path.implode; + *} + +subsection {*Printing a list of terms in column using Pretty*} +ML{* + fun pretty_terms' context terms = terms |> (Syntax.pretty_term context + |> List.map) + |> Pretty.chunks; + + Pretty.writeln (pretty_terms' @{context} [@{term "2::int"}, @{term "2::int"}]); + *} + +subsection {*Going from a list of terms to ASCII string*} +ML {*(*fun render_thm ctxt thm = + Print_Mode.setmp ["xsymbols"] + (fn _ => Display.pretty_thm ctxt thm + |> Pretty.str_of + |> YXML.parse_body + |> XML.content_of) (); + render_thm @{context} @{thm "conjI"};*) + fun render_term ctxt term = + Print_Mode.setmp ["xsymbols"] + (fn _ => Syntax.pretty_term ctxt term + |> Pretty.string_of + |> YXML.parse_body + |> XML.content_of) (); + + render_term @{context} @{term "1::int"}; + + fun render_term_list ctxt term = + Print_Mode.setmp ["xsymbols"] + (fn _ => pretty_terms' ctxt term + |> Pretty.string_of + |> YXML.parse_body + |> XML.content_of) (); + render_term_list @{context} [@{term "1::int"}, @{term "1::int"}]; +*} + +subsection {*GDB terms script to control scheduler*} + +ML {*val gdb_header = + @{term "''#setting gdb options''"} $ @{term "''{''"} $ + @{term "set"} $ @{term "logging (file ''Example_sequential.log'')"} $ @{term "''{''"} $ + @{term "set"} $ @{term "logging on"} $ @{term "''{''"} $ + @{term "set"} $ @{term "pagination off"} $ @{term "''{''"} $ + @{term "set ''target-async''"} $ @{term " on"} $ @{term "''{''"} $ + @{term "set ''non-stop''"} $ @{term " on"} $ @{term "''{''"} $ + @{term "set ''print thread-events off''"} $ @{term "''{''"} $ @{term "''{''"} + ; + + fun gdb_break_point_entry fun_nam_term thread_id_term = + @{term "''#setting thread entry''"} $ @{term "''{''"} $ + @{term "break"} $ fun_nam_term $ @{term "''{''"} $ + @{term "commands"} $ @{term "''{''"} $ + @{term "silent"} $ @{term "''{''"} $ + @{term "thread"} $ thread_id_term $ @{term "''{''"} $ + @{term "continue"} $ @{term "''{''"} $ + @{term "end"} $ @{term "''{''"} $ @{term "''{''"}; + + + fun gdb_break_point_exist line_number_term thread_id_term = + @{term "''#setting thread exit''"} $ @{term "''{''"} $ + @{term "break"} $ line_number_term $ @{term "''{''"} $ + @{term "commands"} $ @{term "''{''"} $ + @{term "silent"} $ @{term "''{''"} $ + @{term "thread"} $ thread_id_term $ @{term "''{''"} $ + @{term "continue"} $ @{term "''{''"} $ + @{term "end"} $ @{term "''{''"} $ @{term "''{''"}; + + fun gdb_break_main_entry fun_nam_term = + @{term "''#setting main thread entry''"} $ @{term "''{''"} $ + @{term "break"} $ fun_nam_term $ @{term "''{''"} $ + @{term "commands"} $ @{term "''{''"} $ + @{term "silent"} $ @{term "''{''"} $ + @{term "set"} 
$ @{term "''scheduler-locking''"} $ @{term " on"} $ @{term "''{''"} $ + @{term "continue"} $ @{term "''{''"} $ + @{term "end"} $ @{term "''{''"} $ @{term "''{''"}; + + fun gdb_break_main_exit line_number_term thread_id_term = + @{term "''#wait for thread creation''"} $ @{term "''{''"} $ + @{term "break"} $ line_number_term $ @{term "''{''"} $ + @{term "commands"} $ @{term "''{''"} $ + @{term "silent"} $ @{term "''{''"} $ + @{term "thread"} $ thread_id_term $ @{term "''{''"} $ + @{term "continue"} $ @{term "''{''"} $ + @{term "end"} $ @{term "''{''"} $ @{term "''{''"}; + + val gdb_start_term = @{term "start"} $ @{term "''{''"}; + + val gdb_endFile = @{term "''#endFile''"} + +*} + +ML {* gdb_header*} + +subsection {*removing quotes and parentheses from ASCII string*} +ML {* fun remove_char nil = [] + | remove_char (x::xs) = (if ((x = #"(" orelse x = #")") orelse x = #"'") + then remove_char xs + else x::remove_char xs); + *} + +subsection {*Jump to the next line*} + +ML {* fun next_line nil = [] + | next_line (x::xs) = (if x = #"{" + then next_line (#"\n"::xs) + else x::next_line xs); + *} + +subsection {*Going from a simple list to a list of terms*} + +ML {*render_term_list @{context} [@{term " ''{''"}]*} + +subsection {*Terms constructors and scheme destructors*} + +ML{* + fun thm_to_term thm = thm + |> Thm.concl_of |> HOLogic.dest_Trueprop; + fun thms_to_terms thms = thms + |> (thm_to_term |> map); + + fun dest_valid_SE_term terms = terms |> ((fn term => case term of + ((Const(@{const_name "valid_SE"},_) $ _) + $(Const(@{const_name "bind_SE"},_) $ T $ _)) => T + | _ => term) + |> map); + + fun dest_mbind_term terms = terms |> ((fn term => case term of + Const (@{const_name "mbind"}, _) + $ LIST $ _ => LIST + |_ => term ) + |> map); + + fun dest_mbind_term' terms = terms |> ((fn term => case term of + Const (@{const_name "mbind'"}, _) + $ LIST $ _ => LIST + |_ => term ) + |> map); + + fun dest_List_term terms = terms |> ((fn term => HOLogic.dest_list term) |> map); + + *} + +subsection {*From a test thm to terms of input sequences*} + +ML {*fun thm_to_inputSeqTerms test_facts = + test_facts + |> thms_to_terms |> dest_valid_SE_term + |> dest_mbind_term |> dest_List_term; + + fun thm_to_inputSeqTerms' test_facts = + test_facts + |> thms_to_terms |> dest_valid_SE_term + |> dest_mbind_term' |> dest_List_term; + *} +subsection {*from input seuquences to strings*} + +ML {* fun inputSeq_to_gdbStrings actTerm_to_gdbTerm inputSeqTerms = + inputSeqTerms + |> ((fn terms => [gdb_header] + @(terms |> (actTerm_to_gdbTerm |> map)) + @[gdb_start_term] + |> (render_term @{context} |> map)) + |> map); + + fun + breakpoint_setup (term::terms) = + ((term::terms) |> length) :: (terms |> breakpoint_setup) ; + + *} + +ML {*open List*} +ML {*open HOLogic;*} +subsection {*from sequeces of strings to a gdb script*} + +ML {* fun gdbStrings_to_gdbScripts gdbStrings = + gdbStrings + |> ((fn strings => strings + |> (String.implode o next_line o + remove_char o String.explode |> map)) + |> map); + *} + + +subsection{*concat terms*} +ML {* +fun add_entry_exist_terms [] [] = [] + | add_entry_exist_terms terms [] = terms + | add_entry_exist_terms [] terms = terms + | add_entry_exist_terms (term :: terms) (term'::terms') = + term $ term':: add_entry_exist_terms terms terms'; + + fun add_entry_exist_termsS [] [] = [] + | add_entry_exist_termsS termsS [] = termsS + | add_entry_exist_termsS [] termsS = termsS + | add_entry_exist_termsS (terms :: termsS) (terms'::termsS') = + add_entry_exist_terms terms 
terms'::add_entry_exist_termsS termsS termsS'; + + fun add_entry_exist_termsS' [] [] = [] + | add_entry_exist_termsS' termsS [] = termsS + | add_entry_exist_termsS' [] termsS = termsS + | add_entry_exist_termsS' (terms :: termsS) (terms'::termsS') = + (terms @ terms')::add_entry_exist_termsS' termsS termsS'; + +*} + +subsection {*from thms to gdb scripts*} + +ML {* +fun thms_to_gdbScripts inputSeq_to_gdbEn inputSeq_to_gdbEx inputSeq_to_gdbMain infos thms = + thms + |> thm_to_inputSeqTerms + |> ((fn terms => inputSeq_to_gdbMain infos terms) |> map) + |> add_entry_exist_termsS' + (thms |> thm_to_inputSeqTerms |> ((fn terms => inputSeq_to_gdbEx infos terms)|> map)) + |> add_entry_exist_termsS + (thms |> thm_to_inputSeqTerms |> ((fn terms => inputSeq_to_gdbEn infos terms)|> map)) + |> inputSeq_to_gdbStrings (fn term => term) + |> gdbStrings_to_gdbScripts; + +fun thms_to_gdbScripts' inputSeq_to_gdbEn inputSeq_to_gdbEx inputSeq_to_gdbMain infos thms = + thms + |> thm_to_inputSeqTerms' + |> ((fn terms => inputSeq_to_gdbMain infos terms) |> map) + |> add_entry_exist_termsS' + (thms |> thm_to_inputSeqTerms' |> ((fn terms => inputSeq_to_gdbEx infos terms)|> map)) + |> add_entry_exist_termsS + (thms |> thm_to_inputSeqTerms' |> ((fn terms => inputSeq_to_gdbEn infos terms)|> map)) + |> inputSeq_to_gdbStrings (fn term => term) + |> gdbStrings_to_gdbScripts; + +*} + + + +subsection {*isa markup*} + +ML {* + + fun gen_gdb_scripts + inputSeq_to_gdbEn inputSeq_to_gdbEx inputSeq_to_gdbMain infos theory path thms = + thms + |> thms_to_gdbScripts inputSeq_to_gdbEn inputSeq_to_gdbEx inputSeq_to_gdbMain infos + |> writeFiles (path |> masterPath_add theory) ".gdb"; + + + (*For mbind'*) + fun gen_gdb_scripts' + inputSeq_to_gdbEn inputSeq_to_gdbEx inputSeq_to_gdbMain infos theory path thms = + thms + |> thms_to_gdbScripts' inputSeq_to_gdbEn inputSeq_to_gdbEx inputSeq_to_gdbMain infos + |> writeFiles (path |> masterPath_add theory) ".gdb"; + + + (* val _ = Outer_Syntax.command + @{command_spec "gen_gdb_script"} + "store test state (theorem)" + ;*) + + (*For mbind*) + + (*val gen_gdb_script = @{thms mykeos_simple.test_data} + |> thm_to_inputSeqTerms + |> inputSeq_to_gdbStrings actTerm_to_gdbTerm + |> gdbStrings_to_gdbScripts*) + +*} + +end diff --git a/src/test-gen/src/main/config.sml b/src/test-gen/src/main/config.sml new file mode 100644 index 0000000..9e7fa19 --- /dev/null +++ b/src/test-gen/src/main/config.sml @@ -0,0 +1,93 @@ +(***************************************************************************** + * W A R N I N G + * + * Information in this file will be updated by the packaging process, + * respectively the owner of the build process. + * Please, DO NOT EDIT THIS FILE MANUALLY. + ******************************************************************************) + +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * config.sml --- main configuration file for HOL-TestGen + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2007 ETH Zurich, Switzerland + * 2013 Achim D. Brucker, Germany + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) + + +(** Major version number. + * + * Modify when incompatible changes are made to published interfaces. + *) +val testgen_ver_major = 1 + +(* Minor version number. + * + * Modify when new functionality is added or new interfaces are defined, + * but all changes are backward compatible. + *) +val testgen_ver_minor = 9 + +(** Patch number. + * + * Modify for every released patch. + *) +val testgen_ver_micro= 1 + +(** Version tag: a string describing the version. + * + * This tag remains " (dev build: )" in the repository so that we can + * always see from "version" that the software has been built from the + * repository rather than a "blessed" distribution. + * + * When rolling a tarball, we automatically replace this text with + * " (build: " for final releases; in prereleases, it becomes + * " (Alpha: )", " (Beta )", etc., as appropriate. + *) +val testgen_ver_tag = " (development build)" + +(** Supported Isabelle version. + * + * If build with a different version, a warning during the build + * process is displayed. + *) + +val isabelle_version = "Isabelle2016-1: December 2016" + +(** URL of the HOL-TestGen Homepage. + * + *) +val testgen_url = "http://www.brucker.ch/projects/hol-testgen/" diff --git a/src/test-gen/src/main/debug/profiling_begin.thy b/src/test-gen/src/main/debug/profiling_begin.thy new file mode 100644 index 0000000..b002a26 --- /dev/null +++ b/src/test-gen/src/main/debug/profiling_begin.thy @@ -0,0 +1,49 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * profiling_begin.thy --- workaround for profiling + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2007, ETH Zurich, Switzerland + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) + +theory + profiling_begin +imports + Testing +begin + +declare [[testgen_profiling]] + +end diff --git a/src/test-gen/src/main/debug/profiling_end.thy b/src/test-gen/src/main/debug/profiling_end.thy new file mode 100644 index 0000000..a48e5b3 --- /dev/null +++ b/src/test-gen/src/main/debug/profiling_end.thy @@ -0,0 +1,51 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * profiling_end.thy --- workaround for profiling + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2007, ETH Zurich, Switzerland + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ ******************************************************************************) + +theory + profiling_end +imports + max_test List_test List_Verified_test Triangle_test AVL_test RBT_test Sequence_test +begin + +write_clocks "document/time_stats.tex" + +declare [[testgen_profiling = false]] + +end \ No newline at end of file diff --git a/src/test-gen/src/main/isar_setup.ML b/src/test-gen/src/main/isar_setup.ML new file mode 100644 index 0000000..4cef168 --- /dev/null +++ b/src/test-gen/src/main/isar_setup.ML @@ -0,0 +1,178 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * isar_setup.sml --- Isar setup for HOL-TestGen + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2007 ETH Zurich, Switzerland + * 2008-2013 Achim D. Brucker, Germany + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
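+ *
+ * Overview: this file sets up the Isar commands store_test_thm,
+ * gen_test_data and test_spec on top of the TestGen and TestEnv
+ * ML interfaces.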
+ ******************************************************************************) +(* $Id: isar_setup.ML 9653 2013-04-17 23:17:08Z abderrahmane.feliachi $ *) + +(* at the moment, calling store_test_thm without the optional argument works only + in an ProofGeneral session because otherweise the call of Toplevel.pretty_state + results in an empty list \ +*) + +fun store_test_thm name = Toplevel.end_proof (fn _ => fn state => + let + val ctxt = Proof.context_of state; + val _ = if Config.get ctxt TestGen.profiling then + (Clocks.start_clock "unnamed_test_thm"; + Clocks.rename_clock "unnamed_test_thm" name; + Clocks.stop_clock name) + else + () + + val _ = Proof.assert_backward state; + val {context = goal_ctxt, goal, ...} = Proof.goal state + fun up_thy thy = + let + fun add_to_dm name thm thy = + let + val thy = TestEnv.map_data_global (TestEnv.add_test_case (name,thm)) thy + val t = LogThy.get_tc_delta () + val num_of_tc = List.length + (List.filter (fn tc => TestGen.is_test tc) (prems_of thm)) + val _ = writeln (String.concat ["Test theorem (gen_test_cases) '",name,"': ", + Int.toString num_of_tc, + " test cases in ",Time.toString t, " seconds"]) + + val _ = LogThy.append (String.concat [Context.theory_name thy, ", " + ,name, ", " + ,"test case, " + ,Int.toString num_of_tc,", " + , Time.toString t,"\n"]) + in + thy + end + in + thy |> Sign.add_path (space_implode "_" [name]) + |> (Global_Theory.add_thms [((@{binding test_thm}, goal), [])]) + |> snd + |> Sign.parent_path + |> add_to_dm (space_implode "_" [name]) goal + |> Clocks.flush_clocks + end + + in goal_ctxt |> Local_Theory.background_theory up_thy end); + +(*val _ = + Outer_Syntax.command ("store_test_thm", (Keyword.tag_proof Keyword.qed)) "store test state (theorem)" + (Parse.name >> store_test_thm);*) + +val _ = + Outer_Syntax.command @{command_spec "store_test_thm"} "store test state (theorem)" + (Parse.name >> store_test_thm); + + + + + + +(**********************) +fun gen_test_dataT name thy = + let + fun gen_test_data name = + let + val _ = LogThy.start_td_timer () + val _ = if Config.get_global thy TestGen.profiling then + (Clocks.init_clocks thy; + Clocks.start_clock name; + Clocks.start_clock "gen_test_data") + else + () + + val thy = Context.theory_map (TestGen.gen_test_data name) thy + val t = LogThy.get_td_delta () + val thy = if Config.get_global thy TestGen.profiling then + (Clocks.stop_clock "gen_test_data"; + Clocks.stop_clock name; + Clocks.flush_clocks thy) + else + thy + + val data = TestGen.get_test_data thy name + val hyps = TestGen.get_test_hyps thy name + val pos = TestGen.get_pos thy name + val thy = Sign.add_path (space_implode "_" [name]) thy; + val thy = snd(Global_Theory.add_thmss [((@{binding test_hyps},hyps),[])] (thy)); + val thy = snd(Global_Theory.add_thmss [((@{binding test_data},data),[])] (thy)) + val thy = snd(Global_Theory.add_thmss [((@{binding pos},pos),[])] (thy)) + val _ = writeln (String.concat ["Test theorem (gen_test_data) '",name,"': ", + Int.toString (List.length data), + " test cases in ",Time.toString t, " seconds"]) + val _ = if not (null pos) then + writeln ("Warning: There were unsolved POs.") + else + () + + val _ = LogThy.append (String.concat [Context.theory_name thy, ", " + ,name, ", " + ,"test data, " + ,Int.toString (List.length data),", " + ,Time.toString t,"\n"]) + + val thy = Sign.parent_path thy; + in + thy + end + val thy = gen_test_data name + in + thy + end + +(*val _ = + Outer_Syntax.command ("gen_test_data", Keyword.thy_script) "generate test data" + (Parse.name >> 
(Toplevel.theory o gen_test_dataT));*) + +val _ = + Outer_Syntax.command @{command_spec "gen_test_data"} "generate test data" + (Parse.name >> (Toplevel.theory o gen_test_dataT)); + +(**********************) + +val _ = + (* Outer_Syntax.local_theory_to_proof ("test_spec", Keyword.thy_schematic_goal) "define test specification"*) + Outer_Syntax.local_theory_to_proof @{command_spec "test_spec"} "define test specification" + (Scan.optional (Parse_Spec.opt_thm_name ":" --| + Scan.ahead (Parse_Spec.includes >> K "" || + Parse_Spec.locale_keyword || Parse_Spec.statement_keyword)) Attrib.empty_binding -- + Scan.optional Parse_Spec.includes [] -- + Parse_Spec.general_statement >> (fn ((a, includes), (elems, concl)) => fn lthy => + let val _ = if Config.get lthy TestGen.profiling then Clocks.init_clocks (Proof_Context.theory_of lthy) else () + in + Specification.schematic_theorem_cmd "test_spec" NONE (K I) a includes elems concl false lthy + end)); + diff --git a/src/test-gen/src/main/log.thy b/src/test-gen/src/main/log.thy new file mode 100644 index 0000000..dab6bd7 --- /dev/null +++ b/src/test-gen/src/main/log.thy @@ -0,0 +1,132 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * log_thy.thy --- Simple Logging Framework for HOL-TestGen + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2009 ETH Zurich, Switzerland + * 2009-2013 Achim D. Brucker, Germany + * 2010-2013 Université Paris-Sud, France + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id: log.thy 11032 2015-01-04 10:02:45Z wolff $ *) + +theory + log +imports + version +begin + +text{* Attempts/Elements for a reform: replace the ref's by a proper Isabelle state- +management. 
by bu*} +ML {* val tc_timer = Attrib.setup_config_int @{binding tc_timer} (K 0) *} +ML_val {* @{assert} (Config.get @{context} tc_timer = 0) *} +ML{* Config.put tc_timer 4 @{context} *} +ML{* + +type timer_config = {tc_timer : Time.time, + spec_time : Time.time, + td_time : Time.time, + log_file : string} + +(* val tc_timer_raw = Config.declare_option "tc_timer"; +ROOT.ML:val quick_and_dirty = Config.bool quick_and_dirty_raw; +goal.ML: if Config.get ctxt quick_and_dirty then *) +*} + +ML{* open Config ; Int 3; *} +(* Conclusion - bu: must be done with a global functor instatiation on + timer_config. I temporarily leave the ref's here in order not + to break to many interfaces ... bu +*) + +text{* Back to the original ...*} + +ML {* +structure LogThy = +struct + + +val tc_timer = Unsynchronized.ref (Timer.startRealTimer ()) +val spec_time = Unsynchronized.ref (Timer.checkRealTimer (!tc_timer)) + +val td_time = Unsynchronized.ref (Timer.checkRealTimer (!tc_timer)) + +val log_file = Unsynchronized.ref ""; + +fun start () = (spec_time := Timer.checkRealTimer (!tc_timer)) + +fun get_tc_delta () = Time.-(Timer.checkRealTimer (!tc_timer),!spec_time) +fun get_td_delta () = Time.-(Timer.checkRealTimer (!tc_timer),!td_time) + +fun start_td_timer () = (td_time := Timer.checkRealTimer (!tc_timer)) + + +fun set_log_file ctxt n = let + val _ = if Config.get ctxt quick_and_dirty + then () + else ((log_file := n);()) + val today = (Date.toString(Date.fromTimeUniv (Time.now())))^" (UTC)"; + val hostname = the_default "hostname not set" (OS.Process.getEnv "HOSTNAME"); +in + if (!log_file) = "" + then () + else + File.write (Path.explode (!log_file)) + ( "# This file was generated automatically \n" + ^"# by HOL-TestGen "^testgen_version^"\n" + ^"# on "^today^"\n" + ^"# Host: "^hostname^"\n" + ^"# \n" + ^"# theory, test case name, type, num. of tests cases/data, time in seconds\n") +end + +fun append s = if (!log_file) = "" then () else File.append (Path.explode (!log_file)) s + + + +fun reset_log_file ctxt = set_log_file ctxt "" + +fun log_thy ctxt thy = +let + val _ = set_log_file ctxt (thy^".csv") + val _ = use_thy thy + val _ = reset_log_file ctxt +in () end; + +end + +val log_thy = LogThy.log_thy + +*} +end diff --git a/src/test-gen/src/main/new_smt_patch/SMT_patch.thy b/src/test-gen/src/main/new_smt_patch/SMT_patch.thy new file mode 100644 index 0000000..55dba53 --- /dev/null +++ b/src/test-gen/src/main/new_smt_patch/SMT_patch.thy @@ -0,0 +1,435 @@ +(* Title: HOL/SMT.thy + Author: Sascha Boehme, TU Muenchen +*) + +section \Bindings to Satisfiability Modulo Theories (SMT) solvers based on SMT-LIB 2\ + +theory SMT_patch +imports HOL.Divides +keywords "smt_status_patch" :: diag +begin + +subsection \A skolemization tactic and proof method\ + +lemma choices: + "\Q. \x. \y ya. Q x y ya \ \f fa. \x. Q x (f x) (fa x)" + "\Q. \x. \y ya yb. Q x y ya yb \ \f fa fb. \x. Q x (f x) (fa x) (fb x)" + "\Q. \x. \y ya yb yc. Q x y ya yb yc \ \f fa fb fc. \x. Q x (f x) (fa x) (fb x) (fc x)" + "\Q. \x. \y ya yb yc yd. Q x y ya yb yc yd \ + \f fa fb fc fd. \x. Q x (f x) (fa x) (fb x) (fc x) (fd x)" + "\Q. \x. \y ya yb yc yd ye. Q x y ya yb yc yd ye \ + \f fa fb fc fd fe. \x. Q x (f x) (fa x) (fb x) (fc x) (fd x) (fe x)" + "\Q. \x. \y ya yb yc yd ye yf. Q x y ya yb yc yd ye yf \ + \f fa fb fc fd fe ff. \x. Q x (f x) (fa x) (fb x) (fc x) (fd x) (fe x) (ff x)" + "\Q. \x. \y ya yb yc yd ye yf yg. Q x y ya yb yc yd ye yf yg \ + \f fa fb fc fd fe ff fg. \x. 
Q x (f x) (fa x) (fb x) (fc x) (fd x) (fe x) (ff x) (fg x)" + by metis+ + +lemma bchoices: + "\Q. \x \ S. \y ya. Q x y ya \ \f fa. \x \ S. Q x (f x) (fa x)" + "\Q. \x \ S. \y ya yb. Q x y ya yb \ \f fa fb. \x \ S. Q x (f x) (fa x) (fb x)" + "\Q. \x \ S. \y ya yb yc. Q x y ya yb yc \ \f fa fb fc. \x \ S. Q x (f x) (fa x) (fb x) (fc x)" + "\Q. \x \ S. \y ya yb yc yd. Q x y ya yb yc yd \ + \f fa fb fc fd. \x \ S. Q x (f x) (fa x) (fb x) (fc x) (fd x)" + "\Q. \x \ S. \y ya yb yc yd ye. Q x y ya yb yc yd ye \ + \f fa fb fc fd fe. \x \ S. Q x (f x) (fa x) (fb x) (fc x) (fd x) (fe x)" + "\Q. \x \ S. \y ya yb yc yd ye yf. Q x y ya yb yc yd ye yf \ + \f fa fb fc fd fe ff. \x \ S. Q x (f x) (fa x) (fb x) (fc x) (fd x) (fe x) (ff x)" + "\Q. \x \ S. \y ya yb yc yd ye yf yg. Q x y ya yb yc yd ye yf yg \ + \f fa fb fc fd fe ff fg. \x \ S. Q x (f x) (fa x) (fb x) (fc x) (fd x) (fe x) (ff x) (fg x)" + by metis+ + +ML \ +fun moura_tac ctxt = + Atomize_Elim.atomize_elim_tac ctxt THEN' + SELECT_GOAL (Clasimp.auto_tac (ctxt addSIs @{thms choice choices bchoice bchoices}) THEN + ALLGOALS (Metis_Tactic.metis_tac (take 1 ATP_Proof_Reconstruct.partial_type_encs) + ATP_Proof_Reconstruct.default_metis_lam_trans ctxt [] ORELSE' + blast_tac ctxt)) +\ + +method_setup moura = \ + Scan.succeed (SIMPLE_METHOD' o moura_tac) +\ "solve skolemization goals, especially those arising from Z3 proofs" + +hide_fact (open) choices bchoices + + +subsection \Triggers for quantifier instantiation\ + +text \ +Some SMT solvers support patterns as a quantifier instantiation +heuristics. Patterns may either be positive terms (tagged by "pat") +triggering quantifier instantiations -- when the solver finds a +term matching a positive pattern, it instantiates the corresponding +quantifier accordingly -- or negative terms (tagged by "nopat") +inhibiting quantifier instantiations. A list of patterns +of the same kind is called a multipattern, and all patterns in a +multipattern are considered conjunctively for quantifier instantiation. +A list of multipatterns is called a trigger, and their multipatterns +act disjunctively during quantifier instantiation. Each multipattern +should mention at least all quantified variables of the preceding +quantifier block. +\ + +typedecl 'a symb_list + +consts + Symb_Nil :: "'a symb_list" + Symb_Cons :: "'a \ 'a symb_list \ 'a symb_list" + +typedecl pattern + +consts + pat :: "'a \ pattern" + nopat :: "'a \ pattern" + +definition trigger :: "pattern symb_list symb_list \ bool \ bool" where + "trigger _ P = P" + + +subsection \Higher-order encoding\ + +text \ +Application is made explicit for constants occurring with varying +numbers of arguments. This is achieved by the introduction of the +following constant. +\ + +definition fun_app :: "'a \ 'a" where "fun_app f = f" + +text \ +Some solvers support a theory of arrays which can be used to encode +higher-order functions. The following set of lemmas specifies the +properties of such (extensional) arrays. 
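+As an informal reading of the rules collected below: fun_upd_apply,
+which states (f(x := y)) z = (if z = x then y else f z), corresponds to
+the select-over-store axiom of such an array theory, and ext provides
+extensionality.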
+\ + +lemmas array_rules = ext fun_upd_apply fun_upd_same fun_upd_other fun_upd_upd fun_app_def + + +subsection \Normalization\ + +lemma case_bool_if[abs_def]: "case_bool x y P = (if P then x else y)" + by simp + +lemmas Ex1_def_raw = Ex1_def[abs_def] +lemmas Ball_def_raw = Ball_def[abs_def] +lemmas Bex_def_raw = Bex_def[abs_def] +lemmas abs_if_raw = abs_if[abs_def] +lemmas min_def_raw = min_def[abs_def] +lemmas max_def_raw = max_def[abs_def] + + +subsection \Integer division and modulo for Z3\ + +text \ +The following Z3-inspired definitions are overspecified for the case where \l = 0\. This +Schönheitsfehler is corrected in the \div_as_z3div\ and \mod_as_z3mod\ theorems. +\ + +definition z3div :: "int \ int \ int" where + "z3div k l = (if l \ 0 then k div l else - (k div - l))" + +definition z3mod :: "int \ int \ int" where + "z3mod k l = k mod (if l \ 0 then l else - l)" + +lemma div_as_z3div: + "\k l. k div l = (if l = 0 then 0 else if l > 0 then z3div k l else z3div (- k) (- l))" + by (simp add: z3div_def) + +lemma mod_as_z3mod: + "\k l. k mod l = (if l = 0 then k else if l > 0 then z3mod k l else - z3mod (- k) (- l))" + by (simp add: z3mod_def) + + +subsection \Setup\ + +ML_file "~~/src/HOL/Tools/SMT/smt_util.ML" +ML_file "~~/src/HOL/Tools/SMT/smt_failure.ML" +ML_file "smt_config_patch.ML" +ML_file "~~/src/HOL/Tools/SMT/smt_builtin.ML" +ML_file "~~/src/HOL/Tools/SMT/smt_datatypes.ML" +ML_file "smt_normalize_patch.ML" +ML_file "~~/src/HOL/Tools/SMT/smt_translate.ML" +ML_file "smtlib_patch.ML" +ML_file "smtlib_interface_patch.ML" +ML_file "~~/src/HOL/Tools/SMT/smtlib_proof.ML" +ML_file "~~/src/HOL/Tools/SMT/smtlib_isar.ML" +ML_file "~~/src/HOL/Tools/SMT/z3_proof.ML" +ML_file "~~/src/HOL/Tools/SMT/z3_isar.ML" +ML_file "smt_solver_patch.ML" +(* We currently do not use CVC4 nor veriT so we remove them from the patch *) +(* ML_file "~~/src/HOL/Tools/SMT/cvc4_interface.ML" +ML_file "~~/src/HOL/Tools/SMT/cvc4_proof_parse.ML" +ML_file "~~/src/HOL/Tools/SMT/verit_proof.ML" +ML_file "~~/src/HOL/Tools/SMT/verit_isar.ML" +ML_file "~~/src/HOL/Tools/SMT/verit_proof_parse.ML" *) +ML_file "~~/src/HOL/Tools/SMT/conj_disj_perm.ML" +ML_file "~~/src/HOL/Tools/SMT/z3_interface.ML" +(*ML_file "../SMT/z3_replay_util.ML"*) +ML_file "~~/src/HOL/Tools/SMT/z3_replay_rules.ML" +ML_file "~~/src/HOL/Tools/SMT/z3_replay_methods.ML" +ML_file "z3_replay_patch.ML" +ML_file "z3_model.ML" +ML_file "smt_systems_patch.ML" + +method_setup smt = \ + Scan.optional Attrib.thms [] >> + (fn thms => fn ctxt => + METHOD (fn facts => HEADGOAL (SMT_patch_Solver.smt_tac ctxt (thms @ facts)))) +\ "apply an SMT solver to the current goal" + + +subsection \Configuration\ + +text \ +The current configuration can be printed by the command +\smt_status_patch\, which shows the values of most options. +\ + + +subsection \General configuration options\ + +text \ +The option \smt_solver\ can be used to change the target SMT +solver. The possible values can be obtained from the \smt_status_patch\ +command. +\ + +declare [[smt_solver = z3]] + +text \ +Since SMT solvers are potentially nonterminating, there is a timeout +(given in seconds) to restrict their runtime. +\ + +declare [[smt_timeout = 20]] + +text \ +SMT solvers apply randomized heuristics. In case a problem is not +solvable by an SMT solver, changing the following option might help. +\ + +declare [[smt_random_seed = 1]] + +text \ +In general, the binding to SMT solvers runs as an oracle, i.e, the SMT +solvers are fully trusted without additional checks. 
The following +option can cause the SMT solver to run in proof-producing mode, giving +a checkable certificate. This is currently only implemented for Z3. +\ + +declare [[smt_oracle = false]] + +text \ +Each SMT solver provides several commandline options to tweak its +behaviour. They can be passed to the solver by setting the following +options. +\ + +declare [[cvc3_options = ""]] +declare [[cvc4_options = "--full-saturate-quant --inst-when=full-last-call --inst-no-entail --term-db-mode=relevant"]] +declare [[verit_options = ""]] +declare [[z3_options = ""]] + +text \ +The SMT method provides an inference mechanism to detect simple triggers +in quantified formulas, which might increase the number of problems +solvable by SMT solvers (note: triggers guide quantifier instantiations +in the SMT solver). To turn it on, set the following option. +\ + +declare [[smt_infer_triggers = false]] + +text \ +Enable the following option to use built-in support for datatypes, +codatatypes, and records in CVC4. Currently, this is implemented only +in oracle mode. +\ + +declare [[cvc4_extensions = false]] + +text \ +Enable the following option to use built-in support for div/mod, datatypes, +and records in Z3. Currently, this is implemented only in oracle mode. +\ + +declare [[z3_extensions = false]] + + +subsection \Certificates\ + +text \ +By setting the option \smt_certificates\ to the name of a file, +all following applications of an SMT solver a cached in that file. +Any further application of the same SMT solver (using the very same +configuration) re-uses the cached certificate instead of invoking the +solver. An empty string disables caching certificates. + +The filename should be given as an explicit path. It is good +practice to use the name of the current theory (with ending +\.certs\ instead of \.thy\) as the certificates file. +Certificate files should be used at most once in a certain theory context, +to avoid race conditions with other concurrent accesses. +\ + +declare [[smt_certificates = ""]] + +text \ +The option \smt_read_only_certificates\ controls whether only +stored certificates are should be used or invocation of an SMT solver +is allowed. When set to \true\, no SMT solver will ever be +invoked and only the existing certificates found in the configured +cache are used; when set to \false\ and there is no cached +certificate for some proposition, then the configured SMT solver is +invoked. +\ + +declare [[smt_read_only_certificates = false]] + + +subsection \Tracing\ + +text \ +The SMT method, when applied, traces important information. To +make it entirely silent, set the following option to \false\. +\ + +declare [[smt_verbose = true]] + +text \ +For tracing the generated problem file given to the SMT solver as +well as the returned result of the solver, the option +\smt_trace\ should be set to \true\. +\ + +declare [[smt_trace = false]] + + +subsection \Schematic rules for Z3 proof reconstruction\ + +text \ +Several prof rules of Z3 are not very well documented. There are two +lemma groups which can turn failing Z3 proof reconstruction attempts +into succeeding ones: the facts in \z3_rule\ are tried prior to +any implemented reconstruction procedure for all uncertain Z3 proof +rules; the facts in \z3_simp\ are only fed to invocations of +the simplifier when reconstructing theory-specific proof steps. 
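+In practice this means that a failing reconstruction step can often be
+repaired by declaring the missing fact, e.g. declare some_lemma [z3_rule]
+for a (hypothetical) lemma some_lemma, instead of modifying the
+reconstruction code itself.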
+\ + +lemmas [z3_rule] = + refl eq_commute conj_commute disj_commute simp_thms nnf_simps + ring_distribs field_simps times_divide_eq_right times_divide_eq_left + if_True if_False not_not + NO_MATCH_def + +lemma [z3_rule]: + "(P \ Q) = (\ (\ P \ \ Q))" + "(P \ Q) = (\ (\ Q \ \ P))" + "(\ P \ Q) = (\ (P \ \ Q))" + "(\ P \ Q) = (\ (\ Q \ P))" + "(P \ \ Q) = (\ (\ P \ Q))" + "(P \ \ Q) = (\ (Q \ \ P))" + "(\ P \ \ Q) = (\ (P \ Q))" + "(\ P \ \ Q) = (\ (Q \ P))" + by auto + +lemma [z3_rule]: + "(P \ Q) = (Q \ \ P)" + "(\ P \ Q) = (P \ Q)" + "(\ P \ Q) = (Q \ P)" + "(True \ P) = P" + "(P \ True) = True" + "(False \ P) = True" + "(P \ P) = True" + "(\ (A \ \ B)) \ (A \ B)" + by auto + +lemma [z3_rule]: + "((P = Q) \ R) = (R | (Q = (\ P)))" + by auto + +lemma [z3_rule]: + "(\ True) = False" + "(\ False) = True" + "(x = x) = True" + "(P = True) = P" + "(True = P) = P" + "(P = False) = (\ P)" + "(False = P) = (\ P)" + "((\ P) = P) = False" + "(P = (\ P)) = False" + "((\ P) = (\ Q)) = (P = Q)" + "\ (P = (\ Q)) = (P = Q)" + "\ ((\ P) = Q) = (P = Q)" + "(P \ Q) = (Q = (\ P))" + "(P = Q) = ((\ P \ Q) \ (P \ \ Q))" + "(P \ Q) = ((\ P \ \ Q) \ (P \ Q))" + by auto + +lemma [z3_rule]: + "(if P then P else \ P) = True" + "(if \ P then \ P else P) = True" + "(if P then True else False) = P" + "(if P then False else True) = (\ P)" + "(if P then Q else True) = ((\ P) \ Q)" + "(if P then Q else True) = (Q \ (\ P))" + "(if P then Q else \ Q) = (P = Q)" + "(if P then Q else \ Q) = (Q = P)" + "(if P then \ Q else Q) = (P = (\ Q))" + "(if P then \ Q else Q) = ((\ Q) = P)" + "(if \ P then x else y) = (if P then y else x)" + "(if P then (if Q then x else y) else x) = (if P \ (\ Q) then y else x)" + "(if P then (if Q then x else y) else x) = (if (\ Q) \ P then y else x)" + "(if P then (if Q then x else y) else y) = (if P \ Q then x else y)" + "(if P then (if Q then x else y) else y) = (if Q \ P then x else y)" + "(if P then x else if P then y else z) = (if P then x else z)" + "(if P then x else if Q then x else y) = (if P \ Q then x else y)" + "(if P then x else if Q then x else y) = (if Q \ P then x else y)" + "(if P then x = y else x = z) = (x = (if P then y else z))" + "(if P then x = y else y = z) = (y = (if P then x else z))" + "(if P then x = y else z = y) = (y = (if P then x else z))" + by auto + +lemma [z3_rule]: + "0 + (x::int) = x" + "x + 0 = x" + "x + x = 2 * x" + "0 * x = 0" + "1 * x = x" + "x + y = y + x" + by (auto simp add: mult_2) + +lemma [z3_rule]: (* for def-axiom *) + "P = Q \ P \ Q" + "P = Q \ \ P \ \ Q" + "(\ P) = Q \ \ P \ Q" + "(\ P) = Q \ P \ \ Q" + "P = (\ Q) \ \ P \ Q" + "P = (\ Q) \ P \ \ Q" + "P \ Q \ P \ \ Q" + "P \ Q \ \ P \ Q" + "P \ (\ Q) \ P \ Q" + "(\ P) \ Q \ P \ Q" + "P \ Q \ P \ (\ Q)" + "P \ Q \ (\ P) \ Q" + "P \ \ Q \ P \ Q" + "\ P \ Q \ P \ Q" + "P \ y = (if P then x else y)" + "P \ (if P then x else y) = y" + "\ P \ x = (if P then x else y)" + "\ P \ (if P then x else y) = x" + "P \ R \ \ (if P then Q else R)" + "\ P \ Q \ \ (if P then Q else R)" + "\ (if P then Q else R) \ \ P \ Q" + "\ (if P then Q else R) \ P \ R" + "(if P then Q else R) \ \ P \ \ Q" + "(if P then Q else R) \ P \ \ R" + "(if P then \ Q else R) \ \ P \ Q" + "(if P then Q else \ R) \ P \ R" + by auto + +hide_type (open) symb_list pattern +hide_const (open) Symb_Nil Symb_Cons trigger pat nopat fun_app z3div z3mod + +end diff --git a/src/test-gen/src/main/new_smt_patch/smt_config_patch.ML b/src/test-gen/src/main/new_smt_patch/smt_config_patch.ML new file mode 100644 index 0000000..f994e2f --- /dev/null +++ 
b/src/test-gen/src/main/new_smt_patch/smt_config_patch.ML @@ -0,0 +1,265 @@ +(* Title: HOL/Tools/SMT/smt_config.ML + Author: Sascha Boehme, TU Muenchen + +Configuration options and diagnostic tools for SMT. +*) + +signature SMT_CONFIG = +sig + (*solver*) + type solver_info = { + name: string, + class: Proof.context -> SMT_Util.class, + avail: unit -> bool, + options: Proof.context -> string list } + val add_solver: solver_info -> Context.generic -> Context.generic + val set_solver_options: string * string -> Context.generic -> Context.generic + val is_available: Proof.context -> string -> bool + val available_solvers_of: Proof.context -> string list + val select_solver: string -> Context.generic -> Context.generic + val solver_of: Proof.context -> string + val solver_class_of: Proof.context -> SMT_Util.class + val solver_options_of: Proof.context -> string list + + (*options*) + val oracle: bool Config.T + val timeout: real Config.T + val reconstruction_step_timeout: real Config.T + val random_seed: int Config.T + val read_only_certificates: bool Config.T + val verbose: bool Config.T + val trace: bool Config.T + val statistics: bool Config.T + val monomorph_limit: int Config.T + val monomorph_instances: int Config.T + val infer_triggers: bool Config.T + val debug_files: string Config.T + val sat_solver: string Config.T + + (*tools*) + val with_time_limit: Proof.context -> real Config.T -> ('a -> 'b) -> 'a -> 'b + val with_timeout: Proof.context -> ('a -> 'b) -> 'a -> 'b + + (*diagnostics*) + val trace_msg: Proof.context -> ('a -> string) -> 'a -> unit + val verbose_msg: Proof.context -> ('a -> string) -> 'a -> unit + val statistics_msg: Proof.context -> ('a -> string) -> 'a -> unit + + (*certificates*) + val select_certificates: string -> Context.generic -> Context.generic + val certificates_of: Proof.context -> Cache_IO.cache option + + (*setup*) + val print_setup: Proof.context -> unit +end; + +structure SMT_Config: SMT_CONFIG = +struct + +(* solver *) + +type solver_info = { + name: string, + class: Proof.context -> SMT_Util.class, + avail: unit -> bool, + options: Proof.context -> string list} + +type data = { + solvers: (solver_info * string list) Symtab.table, + solver: string option, + certs: Cache_IO.cache option} + +fun mk_data solvers solver certs: data = {solvers=solvers, solver=solver, certs=certs} + +val empty_data = mk_data Symtab.empty NONE NONE + +fun solvers_of ({solvers, ...}: data) = solvers +fun solver_of ({solver, ...}: data) = solver +fun certs_of ({certs, ...}: data) = certs + +fun map_solvers f ({solvers, solver, certs}: data) = mk_data (f solvers) solver certs +fun map_solver f ({solvers, solver, certs}: data) = mk_data solvers (f solver) certs +fun put_certs c ({solvers, solver, ...}: data) = mk_data solvers solver c + +fun merge_data ({solvers=ss1,solver=s1,certs=c1}: data, {solvers=ss2,solver=s2,certs=c2}: data) = + mk_data (Symtab.merge (K true) (ss1, ss2)) (merge_options (s1, s2)) (merge_options (c1, c2)) + +structure Data = Generic_Data +( + type T = data + val empty = empty_data + val extend = I + val merge = merge_data +) + +fun set_solver_options (name, options) = + let val opts = String.tokens (Symbol.is_ascii_blank o str) options + in Data.map (map_solvers (Symtab.map_entry name (apsnd (K opts)))) end + +fun add_solver (info as {name, ...} : solver_info) context = + if Symtab.defined (solvers_of (Data.get context)) name then + error ("Solver already registered: " ^ quote name) + else + context + |> Data.map (map_solvers (Symtab.update (name, (info, 
[])))) + |> Context.map_theory (Attrib.setup (Binding.name (name ^ "_options")) + (Scan.lift (@{keyword "="} |-- Args.name) >> + (Thm.declaration_attribute o K o set_solver_options o pair name)) + ("additional command line options for SMT solver " ^ quote name)) + +fun all_solvers_of ctxt = Symtab.keys (solvers_of (Data.get (Context.Proof ctxt))) + +fun solver_name_of ctxt = solver_of (Data.get (Context.Proof ctxt)) + +fun is_available ctxt name = + (case Symtab.lookup (solvers_of (Data.get (Context.Proof ctxt))) name of + SOME ({avail, ...}, _) => avail () + | NONE => false) + +fun available_solvers_of ctxt = + filter (is_available ctxt) (all_solvers_of ctxt) + +fun warn_solver (Context.Proof ctxt) name = + if Context_Position.is_visible ctxt then + warning ("The SMT solver " ^ quote name ^ " is not installed") + else () + | warn_solver _ _ = () + +fun select_solver name context = + let + val ctxt = Context.proof_of context + val upd = Data.map (map_solver (K (SOME name))) + in + if not (member (op =) (all_solvers_of ctxt) name) then + error ("Trying to select unknown solver: " ^ quote name) + else if not (is_available ctxt name) then + (warn_solver context name; upd context) + else upd context + end + +fun no_solver_err () = error "No SMT solver selected" + +fun solver_of ctxt = + (case solver_name_of ctxt of + SOME name => name + | NONE => no_solver_err ()) + +fun solver_info_of default select ctxt = + (case solver_name_of ctxt of + NONE => default () + | SOME name => select (Symtab.lookup (solvers_of (Data.get (Context.Proof ctxt))) name)) + +fun solver_class_of ctxt = + let fun class_of ({class, ...}: solver_info, _) = class ctxt + in solver_info_of no_solver_err (class_of o the) ctxt end + +fun solver_options_of ctxt = + let + fun all_options NONE = [] + | all_options (SOME ({options, ...} : solver_info, opts)) = + opts @ options ctxt + in solver_info_of (K []) all_options ctxt end + +val setup_solver = + Attrib.setup @{binding smt_solver} + (Scan.lift (@{keyword "="} |-- Args.name) >> + (Thm.declaration_attribute o K o select_solver)) + "SMT solver configuration" + + +(* options *) + +val oracle = Attrib.setup_config_bool @{binding smt_oracle} (K true) +val timeout = Attrib.setup_config_real @{binding smt_timeout} (K 30.0) +val reconstruction_step_timeout = Attrib.setup_config_real @{binding smt_reconstruction_step_timeout} (K 10.0) +val random_seed = Attrib.setup_config_int @{binding smt_random_seed} (K 1) +val read_only_certificates = Attrib.setup_config_bool @{binding smt_read_only_certificates} (K false) +val verbose = Attrib.setup_config_bool @{binding smt_verbose} (K true) +val trace = Attrib.setup_config_bool @{binding smt_trace} (K false) +val statistics = Attrib.setup_config_bool @{binding smt_statistics} (K false) +val monomorph_limit = Attrib.setup_config_int @{binding smt_monomorph_limit} (K 10) +val monomorph_instances = Attrib.setup_config_int @{binding smt_monomorph_instances} (K 500) +val infer_triggers = Attrib.setup_config_bool @{binding smt_infer_triggers} (K false) +val debug_files = Attrib.setup_config_string @{binding smt_debug_files} (K "") +val sat_solver = Attrib.setup_config_string @{binding smt_sat_solver} (K "cdclite") + + +(* diagnostics *) + +fun cond_trace flag f x = if flag then tracing ("SMT: " ^ f x) else () + +fun verbose_msg ctxt = cond_trace (Config.get ctxt verbose) +fun trace_msg ctxt = cond_trace (Config.get ctxt trace) +fun statistics_msg ctxt = cond_trace (Config.get ctxt statistics) + + +(* tools *) + +fun with_time_limit ctxt 
timeout_config f x = + Timeout.apply (seconds (Config.get ctxt timeout_config)) f x + handle Timeout.TIMEOUT t => raise SMT_Failure.SMT SMT_Failure.Time_Out + +fun with_timeout ctxt = with_time_limit ctxt timeout + + +(* certificates *) + +val certificates_of = certs_of o Data.get o Context.Proof + +val get_certificates_path = Option.map (Cache_IO.cache_path_of) o certificates_of + +fun select_certificates name context = context |> Data.map (put_certs ( + if name = "" then NONE + else + Path.explode name + |> Path.append (Resources.master_directory (Context.theory_of context)) + |> SOME o Cache_IO.unsynchronized_init)) + +val setup_certificates = + Attrib.setup @{binding smt_certificates} + (Scan.lift (@{keyword "="} |-- Args.name) >> + (Thm.declaration_attribute o K o select_certificates)) + "SMT certificates configuration" + + +(* setup *) + +val _ = Theory.setup ( + setup_solver #> + setup_certificates) + +fun print_setup ctxt = + let + fun string_of_bool b = if b then "true" else "false" + + val names = available_solvers_of ctxt + val ns = if null names then ["(none)"] else sort_strings names + val n = the_default "(none)" (solver_name_of ctxt) + val opts = solver_options_of ctxt + + val t = string_of_real (Config.get ctxt timeout) + + val certs_filename = + (case get_certificates_path ctxt of + SOME path => Path.print path + | NONE => "(disabled)") + in + Pretty.writeln (Pretty.big_list "SMT setup:" [ + Pretty.str ("Current SMT solver: " ^ n), + Pretty.str ("Current SMT solver options: " ^ space_implode " " opts), + Pretty.str_list "Available SMT solvers: " "" ns, + Pretty.str ("Current timeout: " ^ t ^ " seconds"), + Pretty.str ("With proofs: " ^ + string_of_bool (not (Config.get ctxt oracle))), + Pretty.str ("Certificates cache: " ^ certs_filename), + Pretty.str ("Fixed certificates: " ^ + string_of_bool (Config.get ctxt read_only_certificates))]) + end + +val _ = + Outer_Syntax.command @{command_keyword smt_status_patch} + "show the available SMT solvers, the currently selected SMT solver, \ + \and the values of SMT configuration options" + (Scan.succeed (Toplevel.keep (print_setup o Toplevel.context_of))) + +end; diff --git a/src/test-gen/src/main/new_smt_patch/smt_normalize_patch.ML b/src/test-gen/src/main/new_smt_patch/smt_normalize_patch.ML new file mode 100644 index 0000000..d0e27fc --- /dev/null +++ b/src/test-gen/src/main/new_smt_patch/smt_normalize_patch.ML @@ -0,0 +1,556 @@ +(* Title: HOL/Tools/SMT/smt_normalize.ML + Author: Sascha Boehme, TU Muenchen + +Normalization steps on theorems required by SMT solvers. 
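+
+Rough outline of the steps implemented below: elimination rules are
+instantiated, definitions and meta-level connectives are atomized,
+special quantifiers (Ex1, Ball, Bex), bool case expressions and
+abs/min/max are unfolded, triggers are checked or inferred, nat
+arithmetic is embedded into int, numerals are normalized, and finally
+the facts are monomorphized and solver-specific extra normalizations
+are applied.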
+*) + +signature SMT_NORMALIZE = +sig + val drop_fact_warning: Proof.context -> thm -> unit + val atomize_conv: Proof.context -> conv + + val special_quant_table: (string * thm) list + val case_bool_entry: string * thm + val abs_min_max_table: (string * thm) list + + type extra_norm = Proof.context -> thm list * thm list -> thm list * thm list + val add_extra_norm: SMT_Util.class * extra_norm -> Context.generic -> Context.generic + val normalize: Proof.context -> thm list -> (int * thm) list +end; + +structure SMT_Normalize: SMT_NORMALIZE = +struct + +fun drop_fact_warning ctxt = + SMT_Config.verbose_msg ctxt (prefix "Warning: dropping assumption: " o + Thm.string_of_thm ctxt) + + +(* general theorem normalizations *) + +(** instantiate elimination rules **) + +local + val (cpfalse, cfalse) = `SMT_Util.mk_cprop (Thm.cterm_of @{context} @{const False}) + + fun inst f ct thm = + let val cv = f (Drule.strip_imp_concl (Thm.cprop_of thm)) + in Thm.instantiate ([], [(dest_Var (Thm.term_of cv), ct)]) thm end +in + +fun instantiate_elim thm = + (case Thm.concl_of thm of + @{const Trueprop} $ Var (_, @{typ bool}) => inst Thm.dest_arg cfalse thm + | Var _ => inst I cpfalse thm + | _ => thm) + +end + + +(** normalize definitions **) + +fun norm_def thm = + (case Thm.prop_of thm of + @{const Trueprop} $ (Const (@{const_name HOL.eq}, _) $ _ $ Abs _) => + norm_def (thm RS @{thm fun_cong}) + | Const (@{const_name Pure.eq}, _) $ _ $ Abs _ => norm_def (thm RS @{thm meta_eq_to_obj_eq}) + | _ => thm) + + +(** atomization **) + +fun atomize_conv ctxt ct = + (case Thm.term_of ct of + @{const Pure.imp} $ _ $ _ => + Conv.binop_conv (atomize_conv ctxt) then_conv Conv.rewr_conv @{thm atomize_imp} + | Const (@{const_name Pure.eq}, _) $ _ $ _ => + Conv.binop_conv (atomize_conv ctxt) then_conv Conv.rewr_conv @{thm atomize_eq} + | Const (@{const_name Pure.all}, _) $ Abs _ => + Conv.binder_conv (atomize_conv o snd) ctxt then_conv Conv.rewr_conv @{thm atomize_all} + | _ => Conv.all_conv) ct + handle CTERM _ => Conv.all_conv ct + +val setup_atomize = + fold SMT_Builtin.add_builtin_fun_ext'' [@{const_name Pure.imp}, @{const_name Pure.eq}, + @{const_name Pure.all}, @{const_name Trueprop}] + + +(** unfold special quantifiers **) + +val special_quant_table = [ + (@{const_name Ex1}, @{thm Ex1_def_raw}), + (@{const_name Ball}, @{thm Ball_def_raw}), + (@{const_name Bex}, @{thm Bex_def_raw})] + +local + fun special_quant (Const (n, _)) = AList.lookup (op =) special_quant_table n + | special_quant _ = NONE + + fun special_quant_conv _ ct = + (case special_quant (Thm.term_of ct) of + SOME thm => Conv.rewr_conv thm + | NONE => Conv.all_conv) ct +in + +fun unfold_special_quants_conv ctxt = + SMT_Util.if_exists_conv (is_some o special_quant) (Conv.top_conv special_quant_conv ctxt) + +val setup_unfolded_quants = fold (SMT_Builtin.add_builtin_fun_ext'' o fst) special_quant_table + +end + + +(** trigger inference **) + +local + (*** check trigger syntax ***) + + fun dest_trigger (Const (@{const_name pat}, _) $ _) = SOME true + | dest_trigger (Const (@{const_name nopat}, _) $ _) = SOME false + | dest_trigger _ = NONE + + fun eq_list [] = false + | eq_list (b :: bs) = forall (equal b) bs + + fun proper_trigger t = + t + |> these o try SMT_Util.dest_symb_list + |> map (map_filter dest_trigger o these o try SMT_Util.dest_symb_list) + |> (fn [] => false | bss => forall eq_list bss) + + fun proper_quant inside f t = + (case t of + Const (@{const_name All}, _) $ Abs (_, _, u) => proper_quant true f u + | Const (@{const_name Ex}, _) $ Abs (_, 
_, u) => proper_quant true f u + | @{const trigger} $ p $ u => + (if inside then f p else false) andalso proper_quant false f u + | Abs (_, _, u) => proper_quant false f u + | u1 $ u2 => proper_quant false f u1 andalso proper_quant false f u2 + | _ => true) + + fun check_trigger_error ctxt t = + error ("SMT triggers must only occur under quantifier and multipatterns " ^ + "must have the same kind: " ^ Syntax.string_of_term ctxt t) + + fun check_trigger_conv ctxt ct = + if proper_quant false proper_trigger (SMT_Util.term_of ct) then Conv.all_conv ct + else check_trigger_error ctxt (Thm.term_of ct) + + + (*** infer simple triggers ***) + + fun dest_cond_eq ct = + (case Thm.term_of ct of + Const (@{const_name HOL.eq}, _) $ _ $ _ => Thm.dest_binop ct + | @{const HOL.implies} $ _ $ _ => dest_cond_eq (Thm.dest_arg ct) + | _ => raise CTERM ("no equation", [ct])) + + fun get_constrs thy (Type (n, _)) = these (BNF_LFP_Compat.get_constrs thy n) + | get_constrs _ _ = [] + + fun is_constr thy (n, T) = + let fun match (m, U) = m = n andalso Sign.typ_instance thy (T, U) + in can (the o find_first match o get_constrs thy o Term.body_type) T end + + fun is_constr_pat thy t = + (case Term.strip_comb t of + (Free _, []) => true + | (Const c, ts) => is_constr thy c andalso forall (is_constr_pat thy) ts + | _ => false) + + fun is_simp_lhs ctxt t = + (case Term.strip_comb t of + (Const c, ts as _ :: _) => + not (SMT_Builtin.is_builtin_fun_ext ctxt c ts) andalso + forall (is_constr_pat (Proof_Context.theory_of ctxt)) ts + | _ => false) + + fun has_all_vars vs t = + subset (op aconv) (vs, map Free (Term.add_frees t [])) + + fun minimal_pats vs ct = + if has_all_vars vs (Thm.term_of ct) then + (case Thm.term_of ct of + _ $ _ => + (case apply2 (minimal_pats vs) (Thm.dest_comb ct) of + ([], []) => [[ct]] + | (ctss, ctss') => union (eq_set (op aconvc)) ctss ctss') + | _ => []) + else [] + + fun proper_mpat _ _ _ [] = false + | proper_mpat thy gen u cts = + let + val tps = (op ~~) (`gen (map Thm.term_of cts)) + fun some_match u = tps |> exists (fn (t', t) => + Pattern.matches thy (t', u) andalso not (t aconv u)) + in not (Term.exists_subterm some_match u) end + + val pat = SMT_Util.mk_const_pat @{theory} @{const_name pat} SMT_Util.destT1 + fun mk_pat ct = Thm.apply (SMT_Util.instT' ct pat) ct + + fun mk_clist T = + apply2 (Thm.cterm_of @{context}) (SMT_Util.symb_cons_const T, SMT_Util.symb_nil_const T) + fun mk_list (ccons, cnil) f cts = fold_rev (Thm.mk_binop ccons o f) cts cnil + val mk_pat_list = mk_list (mk_clist @{typ pattern}) + val mk_mpat_list = mk_list (mk_clist @{typ "pattern symb_list"}) + fun mk_trigger ctss = mk_mpat_list (mk_pat_list mk_pat) ctss + + val trigger_eq = mk_meta_eq @{lemma "p = trigger t p" by (simp add: trigger_def)} + + fun insert_trigger_conv [] ct = Conv.all_conv ct + | insert_trigger_conv ctss ct = + let + val (ctr, cp) = Thm.dest_binop (Thm.rhs_of trigger_eq) ||> rpair ct + val inst = map (apfst (dest_Var o Thm.term_of)) [cp, (ctr, mk_trigger ctss)] + in Thm.instantiate ([], inst) trigger_eq end + + fun infer_trigger_eq_conv outer_ctxt (ctxt, cvs) ct = + let + val (lhs, rhs) = dest_cond_eq ct + + val vs = map Thm.term_of cvs + val thy = Proof_Context.theory_of ctxt + + fun get_mpats ct = + if is_simp_lhs ctxt (Thm.term_of ct) then minimal_pats vs ct + else [] + val gen = Variable.export_terms ctxt outer_ctxt + val filter_mpats = filter (proper_mpat thy gen (Thm.term_of rhs)) + + in insert_trigger_conv (filter_mpats (get_mpats lhs)) ct end + + fun has_trigger (@{const trigger} $ _ $ _) 
= true + | has_trigger _ = false + + fun try_trigger_conv cv ct = + if SMT_Util.under_quant has_trigger (SMT_Util.term_of ct) then Conv.all_conv ct + else Conv.try_conv cv ct + + fun infer_trigger_conv ctxt = + if Config.get ctxt SMT_Config.infer_triggers then + try_trigger_conv (SMT_Util.under_quant_conv (infer_trigger_eq_conv ctxt) ctxt) + else Conv.all_conv +in + +fun trigger_conv ctxt = + SMT_Util.prop_conv (check_trigger_conv ctxt then_conv infer_trigger_conv ctxt) + +val setup_trigger = + fold SMT_Builtin.add_builtin_fun_ext'' + [@{const_name pat}, @{const_name nopat}, @{const_name trigger}] + +end + + +(** combined general normalizations **) + +fun gen_normalize1_conv ctxt = + atomize_conv ctxt then_conv + unfold_special_quants_conv ctxt then_conv + Thm.beta_conversion true then_conv + trigger_conv ctxt + +fun gen_normalize1 ctxt = + instantiate_elim #> + norm_def #> + Conv.fconv_rule (Thm.beta_conversion true then_conv Thm.eta_conversion) #> + Drule.forall_intr_vars #> + Conv.fconv_rule (gen_normalize1_conv ctxt) #> + (* Z3 4.3.1 silently normalizes "P --> Q --> R" to "P & Q --> R" *) + Raw_Simplifier.rewrite_rule ctxt @{thms HOL.imp_conjL[symmetric, THEN eq_reflection]} + +fun gen_norm1_safe ctxt (i, thm) = + (case try (gen_normalize1 ctxt) thm of + SOME thm' => SOME (i, thm') + | NONE => (drop_fact_warning ctxt thm; NONE)) + +fun gen_normalize ctxt iwthms = map_filter (gen_norm1_safe ctxt) iwthms + + +(* unfolding of definitions and theory-specific rewritings *) + +fun expand_head_conv cv ct = + (case Thm.term_of ct of + _ $ _ => + Conv.fun_conv (expand_head_conv cv) then_conv + Conv.try_conv (Thm.beta_conversion false) + | _ => cv) ct + + +(** rewrite bool case expressions as if expressions **) + +val case_bool_entry = (@{const_name "bool.case_bool"}, @{thm case_bool_if}) + +local + fun is_case_bool (Const (@{const_name "bool.case_bool"}, _)) = true + | is_case_bool _ = false + + fun unfold_conv _ = + SMT_Util.if_true_conv (is_case_bool o Term.head_of) + (expand_head_conv (Conv.rewr_conv @{thm case_bool_if})) +in + +fun rewrite_case_bool_conv ctxt = + SMT_Util.if_exists_conv is_case_bool (Conv.top_conv unfold_conv ctxt) + +val setup_case_bool = SMT_Builtin.add_builtin_fun_ext'' @{const_name "bool.case_bool"} + +end + + +(** unfold abs, min and max **) + +val abs_min_max_table = [ + (@{const_name min}, @{thm min_def_raw}), + (@{const_name max}, @{thm max_def_raw}), + (@{const_name abs}, @{thm abs_if_raw})] + +local + fun abs_min_max ctxt (Const (n, Type (@{type_name fun}, [T, _]))) = + (case AList.lookup (op =) abs_min_max_table n of + NONE => NONE + | SOME thm => if SMT_Builtin.is_builtin_typ_ext ctxt T then SOME thm else NONE) + | abs_min_max _ _ = NONE + + fun unfold_amm_conv ctxt ct = + (case abs_min_max ctxt (Term.head_of (Thm.term_of ct)) of + SOME thm => expand_head_conv (Conv.rewr_conv thm) + | NONE => Conv.all_conv) ct +in + +fun unfold_abs_min_max_conv ctxt = + SMT_Util.if_exists_conv (is_some o abs_min_max ctxt) (Conv.top_conv unfold_amm_conv ctxt) + +val setup_abs_min_max = fold (SMT_Builtin.add_builtin_fun_ext'' o fst) abs_min_max_table + +end + + +(** embedding of standard natural number operations into integer operations **) + +local + val nat_embedding = @{lemma + "ALL n. nat (int n) = n" + "ALL i. i >= 0 --> int (nat i) = i" + "ALL i. 
i < 0 --> int (nat i) = 0" + by simp_all} + + val simple_nat_ops = [ + @{const less (nat)}, @{const less_eq (nat)}, + @{const Suc}, @{const plus (nat)}, @{const minus (nat)}] + + val mult_nat_ops = + [@{const times (nat)}, @{const divide (nat)}, @{const modulo (nat)}] + + val nat_ops = simple_nat_ops @ mult_nat_ops + + val nat_consts = nat_ops @ [@{const numeral (nat)}, + @{const zero_class.zero (nat)}, @{const one_class.one (nat)}] + + val nat_int_coercions = [@{const of_nat (int)}, @{const nat}] + + val builtin_nat_ops = nat_int_coercions @ simple_nat_ops + + val is_nat_const = member (op aconv) nat_consts + + fun is_nat_const' @{const of_nat (int)} = true + | is_nat_const' t = is_nat_const t + + val expands = map mk_meta_eq @{lemma + "0 = nat 0" + "1 = nat 1" + "(numeral :: num => nat) = (%i. nat (numeral i))" + "op < = (%a b. int a < int b)" + "op <= = (%a b. int a <= int b)" + "Suc = (%a. nat (int a + 1))" + "op + = (%a b. nat (int a + int b))" + "op - = (%a b. nat (int a - int b))" + "op * = (%a b. nat (int a * int b))" + "op div = (%a b. nat (int a div int b))" + "op mod = (%a b. nat (int a mod int b))" + by (fastforce simp add: nat_mult_distrib nat_div_distrib nat_mod_distrib)+} + + val ints = map mk_meta_eq @{lemma + "int 0 = 0" + "int 1 = 1" + "int (Suc n) = int n + 1" + "int (n + m) = int n + int m" + "int (n - m) = int (nat (int n - int m))" + "int (n * m) = int n * int m" + "int (n div m) = int n div int m" + "int (n mod m) = int n mod int m" + by (auto simp add: zdiv_int zmod_int)} + + val int_if = mk_meta_eq @{lemma + "int (if P then n else m) = (if P then int n else int m)" + by simp} + + fun mk_number_eq ctxt i lhs = + let + val eq = SMT_Util.mk_cequals lhs (Numeral.mk_cnumber @{ctyp int} i) + val tac = + Simplifier.simp_tac (put_simpset HOL_ss ctxt addsimps [@{thm of_nat_numeral [where 'a=int]}]) 1 + in Goal.norm_result ctxt (Goal.prove_internal ctxt [] eq (K tac)) end + + fun ite_conv cv1 cv2 = + Conv.combination_conv (Conv.combination_conv (Conv.arg_conv cv1) cv2) cv2 + + fun int_conv ctxt ct = + (case Thm.term_of ct of + @{const of_nat (int)} $ (n as (@{const numeral (nat)} $ _)) => + Conv.rewr_conv (mk_number_eq ctxt (snd (HOLogic.dest_number n)) ct) + | @{const of_nat (int)} $ _ => + (Conv.rewrs_conv ints then_conv Conv.sub_conv ints_conv ctxt) else_conv + (Conv.rewr_conv int_if then_conv + ite_conv (nat_conv ctxt) (int_conv ctxt)) else_conv + Conv.sub_conv (Conv.top_sweep_conv nat_conv) ctxt + | _ => Conv.no_conv) ct + + and ints_conv ctxt = Conv.top_sweep_conv int_conv ctxt + + and expand_conv ctxt = + SMT_Util.if_conv (is_nat_const o Term.head_of) + (expand_head_conv (Conv.rewrs_conv expands) then_conv ints_conv ctxt) + (int_conv ctxt) + + and nat_conv ctxt = SMT_Util.if_exists_conv is_nat_const' + (Conv.top_sweep_conv expand_conv ctxt) + + val uses_nat_int = Term.exists_subterm (member (op aconv) nat_int_coercions) +in + +val nat_as_int_conv = nat_conv + +fun add_nat_embedding thms = + if exists (uses_nat_int o Thm.prop_of) thms then (thms, nat_embedding) + else (thms, []) + +val setup_nat_as_int = + SMT_Builtin.add_builtin_typ_ext (@{typ nat}, K true) #> + fold (SMT_Builtin.add_builtin_fun_ext' o Term.dest_Const) builtin_nat_ops + +end + + + +(** normalize numerals **) + +local + (* + rewrite Numeral1 into 1 + rewrite - 0 into 0 + *) + + fun is_irregular_number (Const (@{const_name numeral}, _) $ Const (@{const_name num.One}, _)) = + true + | is_irregular_number (Const (@{const_name uminus}, _) $ Const (@{const_name Groups.zero}, _)) = + true + | 
is_irregular_number _ = false + + fun is_strange_number ctxt t = is_irregular_number t andalso SMT_Builtin.is_builtin_num ctxt t + + val proper_num_ss = + simpset_of (put_simpset HOL_ss @{context} addsimps @{thms Num.numeral_One minus_zero}) + + fun norm_num_conv ctxt = + SMT_Util.if_conv (is_strange_number ctxt) (Simplifier.rewrite (put_simpset proper_num_ss ctxt)) + Conv.no_conv +in + +fun normalize_numerals_conv ctxt = + SMT_Util.if_exists_conv (is_strange_number ctxt) (Conv.top_sweep_conv norm_num_conv ctxt) + +end + + +(** combined unfoldings and rewritings **) + +fun unfold_conv ctxt = + rewrite_case_bool_conv ctxt then_conv + unfold_abs_min_max_conv ctxt then_conv + nat_as_int_conv ctxt then_conv + Thm.beta_conversion true + +fun burrow_ids f ithms = + let + val (is, thms) = split_list ithms + val (thms', extra_thms) = f thms + in (is ~~ thms') @ map (pair ~1) extra_thms end + +fun unfold_polymorph ctxt = map (apsnd (Conv.fconv_rule (unfold_conv ctxt))) +fun unfold_monomorph ctxt ithms = + ithms + |> map (apsnd (Conv.fconv_rule (normalize_numerals_conv ctxt))) + |> burrow_ids add_nat_embedding + + +(* overall normalization *) + +type extra_norm = Proof.context -> thm list * thm list -> thm list * thm list + +structure Extra_Norms = Generic_Data +( + type T = extra_norm SMT_Util.dict + val empty = [] + val extend = I + fun merge data = SMT_Util.dict_merge fst data +) + +fun add_extra_norm (cs, norm) = Extra_Norms.map (SMT_Util.dict_update (cs, norm)) + +fun apply_extra_norms ctxt ithms = + let + val cs = SMT_Config.solver_class_of ctxt + val es = SMT_Util.dict_lookup (Extra_Norms.get (Context.Proof ctxt)) cs + in burrow_ids (fold (fn e => e ctxt) es o rpair []) ithms end + +local + val ignored = member (op =) [@{const_name All}, @{const_name Ex}, + @{const_name Let}, @{const_name If}, @{const_name HOL.eq}] + + val schematic_consts_of = + let + fun collect (@{const trigger} $ p $ t) = collect_trigger p #> collect t + | collect (t $ u) = collect t #> collect u + | collect (Abs (_, _, t)) = collect t + | collect (t as Const (n, _)) = + if not (ignored n) then Monomorph.add_schematic_consts_of t else I + | collect _ = I + and collect_trigger t = + let val dest = these o try SMT_Util.dest_symb_list + in fold (fold collect_pat o dest) (dest t) end + and collect_pat (Const (@{const_name pat}, _) $ t) = collect t + | collect_pat (Const (@{const_name nopat}, _) $ t) = collect t + | collect_pat _ = I + in (fn t => collect t Symtab.empty) end +in + +fun monomorph ctxt xthms = + let val (xs, thms) = split_list xthms + in + map (pair 1) thms + |> Monomorph.monomorph schematic_consts_of ctxt + |> maps (uncurry (map o pair)) o map2 pair xs o map (map snd) + end + +end + +fun normalize ctxt wthms = + wthms + |> map_index I + |> gen_normalize ctxt + |> unfold_polymorph ctxt + |> monomorph ctxt + |> unfold_monomorph ctxt + |> apply_extra_norms ctxt + +val _ = Theory.setup (Context.theory_map ( + setup_atomize #> + setup_unfolded_quants #> + setup_trigger #> + setup_case_bool #> + setup_abs_min_max #> + setup_nat_as_int)) + +end; diff --git a/src/test-gen/src/main/new_smt_patch/smt_solver_patch.ML b/src/test-gen/src/main/new_smt_patch/smt_solver_patch.ML new file mode 100644 index 0000000..e0e4811 --- /dev/null +++ b/src/test-gen/src/main/new_smt_patch/smt_solver_patch.ML @@ -0,0 +1,346 @@ +(* Title: HOL/Tools/SMT/smt_solver.ML + Author: Sascha Boehme, TU Muenchen + +SMT solvers registry and SMT tactic. 
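+
+Note on this patched copy: compared with Isabelle's standard SMT_Solver
+module, the signature below additionally exposes a parsed_model record, the
+exception SMT_Model and the tactic smt_get_model_tac, and the structure is
+installed under the name SMT_patch_Solver.  The model-extraction tactic
+reports its result by raising SMT_Model rather than by returning a theorem,
+so a caller is expected to force the tactic and catch that exception.  A
+minimal sketch, not part of the patch itself -- the function name and the
+plumbing are illustrative only:
+
+  fun get_model ctxt facts i st =
+    (Seq.pull (SMT_patch_Solver.smt_get_model_tac ctxt facts i st); NONE)
+    handle SMT_patch_Solver.SMT_Model m => SOME m
+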
+*) + +signature SMT_SOLVER = +sig + (*configuration*) + datatype outcome = Unsat | Sat | Unknown + + type parsed_proof = + {outcome: SMT_Failure.failure option, + fact_ids: (int * ((string * ATP_Problem_Generate.stature) * thm)) list option, + atp_proof: unit -> (term, string) ATP_Proof.atp_step list} + + type parsed_model = + {const_defs: (term * term) list} + + type solver_config = + {name: string, + class: Proof.context -> SMT_Util.class, + avail: unit -> bool, + command: unit -> string list, + options: Proof.context -> string list, + smt_options: (string * string) list, + default_max_relevant: int, + outcome: string -> string list -> outcome * string list, + parse_proof: (Proof.context -> SMT_Translate.replay_data -> + ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> + parsed_proof) option, + replay: (Proof.context -> SMT_Translate.replay_data -> string list -> thm) option, + parse_model: (SMT_Translate.replay_data -> string list -> parsed_model) option} + + (*registry*) + val add_solver: solver_config -> theory -> theory + val default_max_relevant: Proof.context -> string -> int + + (*filter*) + val smt_filter: Proof.context -> thm -> ((string * ATP_Problem_Generate.stature) * thm) list -> + int -> Time.time -> parsed_proof + + (*tactic*) + val smt_tac: Proof.context -> thm list -> int -> tactic + val smt_tac': Proof.context -> thm list -> int -> tactic + + exception SMT_Model of parsed_model + val smt_get_model_tac: Proof.context -> thm list -> int -> tactic +end; + +structure SMT_patch_Solver: SMT_SOLVER = +struct + +(* interface to external solvers *) + +local + +val shell_quote = enclose "'" "'"; +val shell_path = shell_quote o File.standard_path; + +fun make_command command options problem_path proof_path = + "(exec 2>&1;" :: map shell_quote (command () @ options) @ + [shell_path problem_path, ")", ">", shell_path proof_path] + |> space_implode " " + +fun with_trace ctxt msg f x = + let val _ = SMT_Config.trace_msg ctxt (fn () => msg) () + in f x end + +fun run ctxt name mk_cmd input = + (case SMT_Config.certificates_of ctxt of + NONE => + if not (SMT_Config.is_available ctxt name) then + error ("The SMT solver " ^ quote name ^ " is not installed") + else if Config.get ctxt SMT_Config.debug_files = "" then + with_trace ctxt ("Invoking SMT solver " ^ quote name ^ " ...") (Cache_IO.run mk_cmd) input + else + let + val base_path = Path.explode (Config.get ctxt SMT_Config.debug_files) + val in_path = Path.ext "smt_in" base_path + val out_path = Path.ext "smt_out" base_path + in Cache_IO.raw_run mk_cmd input in_path out_path end + | SOME certs => + (case Cache_IO.lookup certs input of + (NONE, key) => + if Config.get ctxt SMT_Config.read_only_certificates then + error ("Bad certificate cache: missing certificate") + else + Cache_IO.run_and_cache certs key mk_cmd input + | (SOME output, _) => + with_trace ctxt ("Using cached certificate from " ^ + shell_path (Cache_IO.cache_path_of certs) ^ " ...") I output)) + +(* Z3 returns 1 if "get-model" or "get-model" fails *) +val normal_return_codes = [0, 1] + +fun run_solver ctxt name mk_cmd input = + let + fun pretty tag lines = Pretty.string_of (Pretty.big_list tag (map Pretty.str lines)) + + val _ = SMT_Config.trace_msg ctxt (pretty "Problem:" o split_lines) input + + val {redirected_output = res, output = err, return_code} = + SMT_Config.with_timeout ctxt (run ctxt name mk_cmd) input + val _ = SMT_Config.trace_msg ctxt (pretty "Solver:") err + + val output = fst (take_suffix (equal "") res) + val 
_ = SMT_Config.trace_msg ctxt (pretty "Result:") output + + val _ = member (op =) normal_return_codes return_code orelse + raise SMT_Failure.SMT (SMT_Failure.Abnormal_Termination return_code) + in output end + +fun trace_assms ctxt = + SMT_Config.trace_msg ctxt (Pretty.string_of o + Pretty.big_list "Assertions:" o map (Thm.pretty_thm ctxt o snd)) + +fun trace_replay_data ({context = ctxt, typs, terms, ...} : SMT_Translate.replay_data) = + let + fun pretty_eq n p = Pretty.block [Pretty.str n, Pretty.str " = ", p] + fun p_typ (n, T) = pretty_eq n (Syntax.pretty_typ ctxt T) + fun p_term (n, t) = pretty_eq n (Syntax.pretty_term ctxt t) + in + SMT_Config.trace_msg ctxt (fn () => + Pretty.string_of (Pretty.big_list "Names:" [ + Pretty.big_list "sorts:" (map p_typ (Symtab.dest typs)), + Pretty.big_list "functions:" (map p_term (Symtab.dest terms))])) () + end + +in + +fun invoke name command smt_options ithms ctxt = + let + val options = SMT_Config.solver_options_of ctxt + val comments = [space_implode " " options] + + val (str, replay_data as {context = ctxt', ...}) = + ithms + |> tap (trace_assms ctxt) + |> SMT_Translate.translate ctxt smt_options comments + ||> tap trace_replay_data + in (run_solver ctxt' name (make_command command options) str, replay_data) end + +end + + +(* configuration *) + +datatype outcome = Unsat | Sat | Unknown + +type parsed_proof = + {outcome: SMT_Failure.failure option, + fact_ids: (int * ((string * ATP_Problem_Generate.stature) * thm)) list option, + atp_proof: unit -> (term, string) ATP_Proof.atp_step list} + +type parsed_model = + {const_defs: (term * term) list} + +type solver_config = + {name: string, + class: Proof.context -> SMT_Util.class, + avail: unit -> bool, + command: unit -> string list, + options: Proof.context -> string list, + smt_options: (string * string) list, + default_max_relevant: int, + outcome: string -> string list -> outcome * string list, + parse_proof: (Proof.context -> SMT_Translate.replay_data -> + ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> + parsed_proof) option, + replay: (Proof.context -> SMT_Translate.replay_data -> string list -> thm) option, + parse_model: (SMT_Translate.replay_data -> string list -> parsed_model) option} + + +(* check well-sortedness *) + +val has_topsort = Term.exists_type (Term.exists_subtype (fn + TFree (_, []) => true + | TVar (_, []) => true + | _ => false)) + +(* top sorts cause problems with atomization *) +fun check_topsort ctxt thm = + if has_topsort (Thm.prop_of thm) then (SMT_Normalize.drop_fact_warning ctxt thm; TrueI) else thm + + +(* registry *) + +type solver_info = { + command: unit -> string list, + smt_options: (string * string) list, + default_max_relevant: int, + parse_proof: Proof.context -> SMT_Translate.replay_data -> + ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> + parsed_proof, + replay: Proof.context -> SMT_Translate.replay_data -> string list -> thm, + parse_model: SMT_Translate.replay_data -> string list -> parsed_model} + +structure Solvers = Generic_Data +( + type T = solver_info Symtab.table + val empty = Symtab.empty + val extend = I + fun merge data = Symtab.merge (K true) data +) + +exception SMT_Model of parsed_model + +local + fun parse_proof outcome parse_proof0 outer_ctxt replay_data xfacts prems concl output = + (case outcome output of + (Unsat, lines) => + (case parse_proof0 of + SOME pp => pp outer_ctxt replay_data xfacts prems concl lines + | NONE => {outcome = NONE, fact_ids 
= NONE, atp_proof = K []}) + | (result, _) => raise SMT_Failure.SMT (SMT_Failure.Counterexample (result = Sat))) + + fun replay outcome replay0 oracle outer_ctxt + (replay_data as {context = ctxt, ...} : SMT_Translate.replay_data) output = + (case outcome output of + (Unsat, lines) => + if not (Config.get ctxt SMT_Config.oracle) andalso is_some replay0 + then the replay0 outer_ctxt replay_data lines + else oracle () + | (result, _) => raise SMT_Failure.SMT (SMT_Failure.Counterexample (result = Sat))) + + (* TODO: get back models *) + fun parse_model outcome parse_model0 + (replay_data as {context = ctxt, ...} : SMT_Translate.replay_data) output = + (case outcome output of + (Unsat, _) => {const_defs = []} + | (result, ls) => + if ((result = Sat) orelse (result = Unknown)) then + (case parse_model0 of SOME f => f replay_data ls | _ => {const_defs = []}) + else + {const_defs = []}) + + val cfalse = Thm.cterm_of @{context} @{prop False} +in + +fun add_solver ({name, class, avail, command, options, smt_options, default_max_relevant, outcome, + parse_proof = parse_proof0, replay = replay0, parse_model = parse_model0} : solver_config) = + let + fun solver oracle = { + command = command, + smt_options = smt_options, + default_max_relevant = default_max_relevant, + parse_proof = parse_proof (outcome name) parse_proof0, + replay = replay (outcome name) replay0 oracle, + parse_model = parse_model (outcome name) parse_model0} + + val info = {name = name, class = class, avail = avail, options = options} + in + Thm.add_oracle (Binding.name name, K cfalse) #-> (fn (_, oracle) => + Context.theory_map (Solvers.map (Symtab.update_new (name, solver oracle)))) #> + Context.theory_map (SMT_Config.add_solver info) + end + +end + +fun get_info ctxt name = the (Symtab.lookup (Solvers.get (Context.Proof ctxt)) name) + +fun name_and_info_of ctxt = + let val name = SMT_Config.solver_of ctxt + in (name, get_info ctxt name) end + +val default_max_relevant = #default_max_relevant oo get_info + +fun apply_solver_and_replay ctxt thms0 = + let + val thms = map (check_topsort ctxt) thms0 + val (name, {command, smt_options, replay, ...}) = name_and_info_of ctxt + val (output, replay_data) = + invoke name command smt_options (SMT_Normalize.normalize ctxt thms) ctxt + in replay ctxt replay_data output end + +fun apply_solver_and_get_model ctxt thms0 = + let + val thms = map (check_topsort ctxt) thms0 + val (name, {command, smt_options, parse_model, ...}) = name_and_info_of ctxt + val (output, replay_data) = + invoke name command smt_options (SMT_Normalize.normalize ctxt thms) ctxt + (* This is just a current artifact in order to use parse_model inside a tactic. We may clean this up next. 
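+     In particular, this function never returns a theorem: it always raises
+     SMT_Model, whose payload is the const_defs record delivered by the
+     solver-specific parse_model hook; for an Unsat outcome that record is
+     simply empty, cf. parse_model above.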
*) + in raise SMT_Model (parse_model replay_data output) end + + +(* filter *) + +fun smt_filter ctxt0 goal xfacts i time_limit = + let + val ctxt = ctxt0 |> Config.put SMT_Config.timeout (Time.toReal time_limit) + + val ({context = ctxt, prems, concl, ...}, _) = Subgoal.focus ctxt i NONE goal + fun negate ct = Thm.dest_comb ct ||> Thm.apply @{cterm Not} |-> Thm.apply + val cprop = + (case try negate (Thm.rhs_of (SMT_Normalize.atomize_conv ctxt concl)) of + SOME ct => ct + | NONE => raise SMT_Failure.SMT (SMT_Failure.Other_Failure "cannot atomize goal")) + + val conjecture = Thm.assume cprop + val facts = map snd xfacts + val thms = conjecture :: prems @ facts + val thms' = map (check_topsort ctxt) thms + + val (name, {command, smt_options, parse_proof, ...}) = name_and_info_of ctxt + val (output, replay_data) = + invoke name command smt_options (SMT_Normalize.normalize ctxt thms') ctxt + in + parse_proof ctxt replay_data xfacts (map Thm.prop_of prems) (Thm.term_of concl) output + end + handle SMT_Failure.SMT fail => {outcome = SOME fail, fact_ids = NONE, atp_proof = K []} + + +(* SMT tactic *) + +local + fun str_of ctxt fail = + "Solver " ^ SMT_Config.solver_of ctxt ^ ": " ^ SMT_Failure.string_of_failure fail + + fun safe_solve ctxt facts = SOME (apply_solver_and_replay ctxt facts) + handle + SMT_Failure.SMT (fail as SMT_Failure.Counterexample _) => + (SMT_Config.verbose_msg ctxt (str_of ctxt) fail; NONE) + | SMT_Failure.SMT (fail as SMT_Failure.Time_Out) => + error ("SMT: Solver " ^ quote (SMT_Config.solver_of ctxt) ^ ": " ^ + SMT_Failure.string_of_failure fail ^ " (setting the " ^ + "configuration option " ^ quote (Config.name_of SMT_Config.timeout) ^ " might help)") + | SMT_Failure.SMT fail => error (str_of ctxt fail) + + fun resolve ctxt (SOME thm) = resolve_tac ctxt [thm] 1 + | resolve _ NONE = no_tac + + fun tac prove ctxt rules = + CONVERSION (SMT_Normalize.atomize_conv ctxt) + THEN' resolve_tac ctxt @{thms ccontr} + THEN' SUBPROOF (fn {context = ctxt', prems, ...} => + resolve ctxt' (prove ctxt' (rules @ prems))) ctxt +in + +val smt_tac = tac safe_solve +val smt_tac' = tac (SOME oo apply_solver_and_replay) +val smt_get_model_tac = tac (SOME oo apply_solver_and_get_model) + +end + +end; diff --git a/src/test-gen/src/main/new_smt_patch/smt_systems_patch.ML b/src/test-gen/src/main/new_smt_patch/smt_systems_patch.ML new file mode 100644 index 0000000..22f2ca4 --- /dev/null +++ b/src/test-gen/src/main/new_smt_patch/smt_systems_patch.ML @@ -0,0 +1,159 @@ +(* Title: HOL/Tools/SMT/smt_systems.ML + Author: Sascha Boehme, TU Muenchen + +Setup SMT solvers. 
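+
+Note on this patched copy: the solver configurations below (cvc3, cvc4, veriT
+and z3) are registered through SMT_patch_Solver.add_solver.  Each solver
+binary is located via an environment variable <NAME>_SOLVER, see make_avail
+and make_command; for instance, Z3_SOLVER must point to the z3 executable
+before z3 is reported as available.  Only the z3 configuration supplies a
+parse_model hook (Z3_Model.parse_model) together with the ":model" option,
+hence model extraction is effectively limited to Z3 in this setup.
+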
+*) + +signature SMT_SYSTEMS = +sig + val cvc4_extensions: bool Config.T + val z3_extensions: bool Config.T +end; + +structure SMT_Systems: SMT_SYSTEMS = +struct + +(* helper functions *) + +fun make_avail name () = getenv (name ^ "_SOLVER") <> "" + +fun make_command name () = [getenv (name ^ "_SOLVER")] + +fun outcome_of unsat sat unknown solver_name line = + if String.isPrefix unsat line then SMT_patch_Solver.Unsat + else if String.isPrefix sat line then SMT_patch_Solver.Sat + else if String.isPrefix unknown line then SMT_patch_Solver.Unknown + else raise SMT_Failure.SMT (SMT_Failure.Other_Failure ("Solver " ^ quote solver_name ^ + " failed -- enable tracing using the " ^ quote (Config.name_of SMT_Config.trace) ^ + " option for details")) + +fun is_blank_or_error_line "" = true + | is_blank_or_error_line s = String.isPrefix "(error " s + +fun on_first_line test_outcome solver_name lines = + let + val split_first = (fn [] => ("", []) | l :: ls => (l, ls)) + val (l, ls) = split_first (snd (take_prefix is_blank_or_error_line lines)) + in (test_outcome solver_name l, ls) end + +fun on_first_non_unsupported_line test_outcome solver_name lines = + on_first_line test_outcome solver_name (filter (curry (op <>) "unsupported") lines) + +(* CVC3 *) + +local + fun cvc3_options ctxt = [ + "-seed", string_of_int (Config.get ctxt SMT_Config.random_seed), + "-lang", "smt2", + "-timeout", string_of_int (Real.ceil (Config.get ctxt SMT_Config.timeout))] +in + +val cvc3: SMT_patch_Solver.solver_config = { + name = "cvc3", + class = K SMTLIB_Interface.smtlibC, + avail = make_avail "CVC3", + command = make_command "CVC3", + options = cvc3_options, + smt_options = [], + default_max_relevant = 400 (* FUDGE *), + outcome = on_first_line (outcome_of "unsat" "sat" "unknown"), + parse_proof = NONE, + replay = NONE, + parse_model = NONE} + +end + +(* CVC4 *) + +val cvc4_extensions = Attrib.setup_config_bool @{binding cvc4_extensions} (K false) + +local + fun cvc4_options ctxt = [ + "--random-seed=" ^ string_of_int (Config.get ctxt SMT_Config.random_seed), + "--lang=smt2", + "--continued-execution", + "--tlimit", string_of_int (Real.ceil (1000.0 * Config.get ctxt SMT_Config.timeout))] + + fun select_class ctxt = + (* We currently do not support CVC4 *) + (* if Config.get ctxt cvc4_extensions then CVC4_Interface.smtlib_cvc4C + else *) SMTLIB_Interface.smtlibC +in + +val cvc4: SMT_patch_Solver.solver_config = { + name = "cvc4", + class = select_class, + avail = make_avail "CVC4", + command = make_command "CVC4", + options = cvc4_options, + smt_options = [(":produce-unsat-cores", "true")], + default_max_relevant = 400 (* FUDGE *), + outcome = on_first_line (outcome_of "unsat" "sat" "unknown"), + parse_proof = NONE, (* SOME (K CVC4_Proof_Parse.parse_proof), *) (* We currently do not support CVC4 *) + replay = NONE, + parse_model = NONE} + +end + +(* veriT *) + +val veriT: SMT_patch_Solver.solver_config = { + name = "verit", + class = K SMTLIB_Interface.smtlibC, + avail = make_avail "VERIT", + command = make_command "VERIT", + options = (fn ctxt => [ + "--proof-version=1", + "--proof-prune", + "--proof-merge", + "--disable-print-success", + "--disable-banner", + "--max-time=" ^ string_of_int (Real.ceil (Config.get ctxt SMT_Config.timeout))]), + smt_options = [(":produce-proofs", "true")], + default_max_relevant = 200 (* FUDGE *), + outcome = on_first_non_unsupported_line (outcome_of "unsat" "sat" + "(error \"status is not unsat.\")"), + parse_proof = NONE, (* SOME (K VeriT_Proof_Parse.parse_proof), *) (* Do not support 
veriT *) + replay = NONE, + parse_model = NONE} + +(* Z3 *) + +val z3_extensions = Attrib.setup_config_bool @{binding z3_extensions} (K false) + +local + fun z3_options ctxt = + ["smt.random_seed=" ^ string_of_int (Config.get ctxt SMT_Config.random_seed), + "smt.refine_inj_axioms=false", + "-T:" ^ string_of_int (Real.ceil (Config.get ctxt SMT_Config.timeout)), + "-smt2"] + + fun select_class ctxt = + if Config.get ctxt z3_extensions then Z3_Interface.smtlib_z3C else SMTLIB_Interface.smtlibC +in + +val z3: SMT_patch_Solver.solver_config = { + name = "z3", + class = select_class, + avail = make_avail "Z3", + command = make_command "Z3", + options = z3_options, + smt_options = [(":model", "true")], (* Produce model (instead of proof) *) + default_max_relevant = 350 (* FUDGE *), + outcome = on_first_line (outcome_of "unsat" "sat" "unknown"), + parse_proof = SOME Z3_Replay.parse_proof, + replay = SOME Z3_Replay.replay, + parse_model = SOME Z3_Model.parse_model } + +end + + +(* overall setup *) + +val _ = Theory.setup ( + SMT_patch_Solver.add_solver cvc3 #> + SMT_patch_Solver.add_solver cvc4 #> + SMT_patch_Solver.add_solver veriT #> + SMT_patch_Solver.add_solver z3) + +end; diff --git a/src/test-gen/src/main/new_smt_patch/smtlib_interface_patch.ML b/src/test-gen/src/main/new_smt_patch/smtlib_interface_patch.ML new file mode 100644 index 0000000..9507929 --- /dev/null +++ b/src/test-gen/src/main/new_smt_patch/smtlib_interface_patch.ML @@ -0,0 +1,172 @@ +(* Title: HOL/Tools/SMT/smtlib_interface.ML + Author: Sascha Boehme, TU Muenchen + Author: Jasmin Blanchette, TU Muenchen + +Interface to SMT solvers based on the SMT-LIB 2 format. +*) + +signature SMTLIB_INTERFACE = +sig + val smtlibC: SMT_Util.class + val add_logic: int * (term list -> string option) -> Context.generic -> Context.generic + val translate_config: Proof.context -> SMT_Translate.config + val assert_name_of_index: int -> string + val assert_index_of_name: string -> int + val assert_prefix : string +end; + +structure SMTLIB_Interface: SMTLIB_INTERFACE = +struct + +val smtlibC = ["smtlib"] + + +(* builtins *) + +local + fun int_num _ i = SOME (string_of_int i) + + fun is_linear [t] = SMT_Util.is_number t + | is_linear [t, u] = SMT_Util.is_number t orelse SMT_Util.is_number u + | is_linear _ = false + + fun times _ _ ts = + let val mk = Term.list_comb o pair @{const times (int)} + in if is_linear ts then SOME ("*", 2, ts, mk) else NONE end +in + +val setup_builtins = + fold (SMT_Builtin.add_builtin_typ smtlibC) [ + (@{typ bool}, K (SOME "Bool"), K (K NONE)), + (@{typ int}, K (SOME "Int"), int_num)] #> + fold (SMT_Builtin.add_builtin_fun' smtlibC) [ + (@{const True}, "true"), + (@{const False}, "false"), + (@{const Not}, "not"), + (@{const HOL.conj}, "and"), + (@{const HOL.disj}, "or"), + (@{const HOL.implies}, "=>"), + (@{const HOL.eq ('a)}, "="), + (@{const If ('a)}, "ite"), + (@{const less (int)}, "<"), + (@{const less_eq (int)}, "<="), + (@{const uminus (int)}, "-"), + (@{const plus (int)}, "+"), + (@{const minus (int)}, "-")] #> + SMT_Builtin.add_builtin_fun smtlibC + (Term.dest_Const @{const times (int)}, times) + +end + + +(* serialization *) + +(** logic **) + +fun fst_int_ord ((i1, _), (i2, _)) = int_ord (i1, i2) + +structure Logics = Generic_Data +( + type T = (int * (term list -> string option)) list + val empty = [] + val extend = I + fun merge data = Ord_List.merge fst_int_ord data +) + +fun add_logic pf = Logics.map (Ord_List.insert fst_int_ord pf) + +fun choose_logic ctxt ts = + let + fun choose [] = "AUFLIA" + | choose 
((_, f) :: fs) = (case f ts of SOME s => s | NONE => choose fs) + in + (case choose (Logics.get (Context.Proof ctxt)) of + "" => "" (* for default Z3 logic, a subset of everything *) + | logic => "(set-logic " ^ logic ^ ")\n") + end + + +(** serialization **) + +fun var i = "?v" ^ string_of_int i + +fun tree_of_sterm l (SMT_Translate.SVar i) = SMTLIB.Sym (var (l - i - 1)) + | tree_of_sterm _ (SMT_Translate.SApp (n, [])) = SMTLIB.Sym n + | tree_of_sterm l (SMT_Translate.SApp (n, ts)) = + SMTLIB.S (SMTLIB.Sym n :: map (tree_of_sterm l) ts) + | tree_of_sterm _ (SMT_Translate.SLet _) = + raise Fail "SMT-LIB: unsupported let expression" + | tree_of_sterm l (SMT_Translate.SQua (q, ss, pats, t)) = + let + val l' = l + length ss + + fun quant_name SMT_Translate.SForall = "forall" + | quant_name SMT_Translate.SExists = "exists" + + fun gen_trees_of_pat keyword ps = + [SMTLIB.Key keyword, SMTLIB.S (map (tree_of_sterm l') ps)] + fun trees_of_pat (SMT_Translate.SPat ps) = gen_trees_of_pat "pattern" ps + | trees_of_pat (SMT_Translate.SNoPat ps) = gen_trees_of_pat "no-pattern" ps + fun tree_of_pats [] t = t + | tree_of_pats pats t = SMTLIB.S (SMTLIB.Sym "!" :: t :: maps trees_of_pat pats) + + val vs = map_index (fn (i, ty) => + SMTLIB.S [SMTLIB.Sym (var (l + i)), SMTLIB.Sym ty]) ss + + val body = t + |> tree_of_sterm l' + |> tree_of_pats pats + in + SMTLIB.S [SMTLIB.Sym (quant_name q), SMTLIB.S vs, body] + end + + +fun sctrarg (sel, typ) = "(" ^ sel ^ " " ^ typ ^ ")" +fun sctr (name, args) = enclose "(" ")" (space_implode " " (name :: map sctrarg args)) +fun sdatatype (name, ctrs) = enclose "(" ")" (space_implode " " (name :: map sctr ctrs)) + +fun string_of_fun (f, (ss, s)) = f ^ " (" ^ space_implode " " ss ^ ") " ^ s + +fun named_sterm s t = SMTLIB.S [SMTLIB.Sym "!", t, SMTLIB.Key "named", SMTLIB.Sym s] + +val assert_prefix = "a" + +fun assert_name_of_index i = assert_prefix ^ string_of_int i +fun assert_index_of_name s = the_default ~1 (Int.fromString (unprefix assert_prefix s)) + +fun sdtyp (fp, dtyps) = + Buffer.add (enclose ("(declare-" ^ BNF_FP_Util.co_prefix fp ^ "datatypes () (") "))\n" + (space_implode "\n " (map sdatatype dtyps))) + +fun serialize smt_options comments {logic, sorts, dtyps, funcs} ts = + let + val unsat_core = member (op =) smt_options (":produce-unsat-cores", "true") + val model = member (op =) smt_options (":model", "true") + in + Buffer.empty + |> fold (Buffer.add o enclose "; " "\n") comments + |> fold (fn (k, v) => Buffer.add ("(set-option " ^ k ^ " " ^ v ^ ")\n")) smt_options + |> Buffer.add logic + |> fold (Buffer.add o enclose "(declare-sort " " 0)\n") (sort fast_string_ord sorts) + |> fold sdtyp (AList.coalesce (op =) dtyps) + |> fold (Buffer.add o enclose "(declare-fun " ")\n" o string_of_fun) + (sort (fast_string_ord o apply2 fst) funcs) + |> fold (fn (i, t) => Buffer.add (enclose "(assert " ")\n" + (SMTLIB.str_of (named_sterm (assert_name_of_index i) (tree_of_sterm 0 t))))) (map_index I ts) + |> Buffer.add "(push)\n(check-sat)\n" (* Workaround to get models, see *) + |> Buffer.add (if unsat_core then "(get-unsat-core)\n" else if model then "(get-model)\n" else "(get-proof)\n") + |> Buffer.content + end + +(* interface *) + +fun translate_config ctxt = { + logic = choose_logic ctxt, + fp_kinds = [], + serialize = serialize} + +val _ = Theory.setup (Context.theory_map + (setup_builtins #> + SMT_Translate.add_config (smtlibC, translate_config))) + +end; diff --git a/src/test-gen/src/main/new_smt_patch/smtlib_patch.ML 
b/src/test-gen/src/main/new_smt_patch/smtlib_patch.ML new file mode 100644 index 0000000..edc0c05 --- /dev/null +++ b/src/test-gen/src/main/new_smt_patch/smtlib_patch.ML @@ -0,0 +1,202 @@ +(* Title: HOL/Tools/SMT/smtlib.ML + Author: Sascha Boehme, TU Muenchen + +Parsing and generating SMT-LIB 2. +*) + +signature SMTLIB = +sig + exception PARSE of int * string + datatype tree = + Num of int | + Dec of int * int | + Str of string | + Sym of string | + Key of string | + S of tree list + val parse: string list -> tree + val pretty_tree: tree -> Pretty.T + val str_of: tree -> string +end; + +structure SMTLIB: SMTLIB = +struct + +(* data structures *) + +exception PARSE of int * string + +datatype tree = + Num of int | + Dec of int * int | + Str of string | + Sym of string | + Key of string | + S of tree list + +datatype unfinished = None | String of string | Symbol of string + + +(* utilities *) + +fun read_raw pred l cs = + (case take_prefix pred cs of + ([], []) => raise PARSE (l, "empty token") + | ([], c :: _) => raise PARSE (l, "unexpected character " ^ quote c) + | x => x) + + +(* numerals and decimals *) + +fun int_of cs = fst (read_int cs) + +fun read_num l cs = + (case read_raw Symbol.is_ascii_digit l cs of + (cs1, "." :: cs') => + let val (cs2, cs'') = read_raw Symbol.is_ascii_digit l cs' + in (Dec (int_of cs1, int_of cs2), cs'') end + | (cs1, cs2) => (Num (int_of cs1), cs2)) + + +(* binary numbers *) + +fun is_bin c = (c = "0" orelse c = "1") + +fun read_bin l cs = read_raw is_bin l cs |>> Num o fst o read_radix_int 2 + + +(* hex numbers *) + +val is_hex = member (op =) (raw_explode "0123456789abcdefABCDEF") + +fun within c1 c2 c = (ord c1 <= ord c andalso ord c <= ord c2) + +fun unhex i [] = i + | unhex i (c :: cs) = + if within "0" "9" c then unhex (i * 16 + (ord c - ord "0")) cs + else if within "a" "f" c then unhex (i * 16 + (ord c - ord "a" + 10)) cs + else if within "A" "F" c then unhex (i * 16 + (ord c - ord "A" + 10)) cs + else raise Fail ("bad hex character " ^ quote c) + +fun read_hex l cs = read_raw is_hex l cs |>> Num o unhex 0 + + +(* symbols *) + +val symbol_chars = raw_explode "~!@$%^&*_+=<>.?/-" + +fun is_sym c = + Symbol.is_ascii_letter c orelse + Symbol.is_ascii_digit c orelse + member (op =) symbol_chars c + +fun read_sym f l cs = read_raw is_sym l cs |>> f o implode + + +(* quoted tokens *) + +fun read_quoted stop (escape, replacement) cs = + let + fun read _ [] = NONE + | read rs (cs as (c :: cs')) = + if is_prefix (op =) stop cs then + SOME (implode (rev rs), drop (length stop) cs) + else if not (null escape) andalso is_prefix (op =) escape cs then + read (replacement :: rs) (drop (length escape) cs) + else read (c :: rs) cs' + in read [] cs end + +fun read_string cs = read_quoted ["\\", "\""] (["\\", "\\"], "\\") cs +fun read_symbol cs = read_quoted ["|"] ([], "") cs + + +(* core parser *) + +fun read _ [] rest tss = (rest, tss) + | read l ("(" :: cs) None tss = read l cs None ([] :: tss) + | read l (")" :: cs) None (ts1 :: ts2 :: tss) = + read l cs None ((S (rev ts1) :: ts2) :: tss) + | read l ("#" :: "x" :: cs) None (ts :: tss) = + token read_hex l cs ts tss + | read l ("#" :: "b" :: cs) None (ts :: tss) = + token read_bin l cs ts tss + | read l (":" :: cs) None (ts :: tss) = + token (read_sym Key) l cs ts tss + | read l ("\"" :: cs) None (ts :: tss) = + quoted read_string String Str l "" cs ts tss + | read l ("|" :: cs) None (ts :: tss) = + quoted read_symbol Symbol Sym l "" cs ts tss + | read l ((c as "!") :: cs) None (ts :: tss) = + token (fn _ => pair 
(Sym c)) l cs ts tss + | read l (c :: cs) None (ts :: tss) = + if Symbol.is_ascii_blank c then read l cs None (ts :: tss) + else if Symbol.is_digit c then token read_num l (c :: cs) ts tss + else token (read_sym Sym) l (c :: cs) ts tss + | read l cs (String s) (ts :: tss) = + quoted read_string String Str l s cs ts tss + | read l cs (Symbol s) (ts :: tss) = + quoted read_symbol Symbol Sym l s cs ts tss + | read l _ _ [] = raise PARSE (l, "bad parser state") + +and token f l cs ts tss = + let val (t, cs') = f l cs + in read l cs' None ((t :: ts) :: tss) end + +and quoted r f g l s cs ts tss = + (case r cs of + NONE => (f (s ^ implode cs), ts :: tss) + | SOME (s', cs') => read l cs' None ((g (s ^ s') :: ts) :: tss)) + + + +(* overall parser *) + +fun read_line l line = read l (raw_explode line) + +fun first_line line = + let + fun first i = + if size line <= i then "\n" else + let val c = nth_string line i in + if c = " " then first (i+1) else c + end + in + first 0 + end + +fun add_line line (l, (None, tss)) = + if size line = 0 orelse first_line line = ";" then (l + 1, (None, tss)) + else (l + 1, read_line l line None tss) + | add_line line (l, (unfinished, tss)) = + (l + 1, read_line l line unfinished tss) + +fun finish (_, (None, [[t]])) = t + | finish (l, _) = raise PARSE (l, "bad nesting") + +fun parse lines = finish (fold add_line lines (1, (None, [[]]))) + + +(* pretty printer *) + +fun pretty_tree (Num i) = Pretty.str (string_of_int i) + | pretty_tree (Dec (i, j)) = + Pretty.str (string_of_int i ^ "." ^ string_of_int j) + | pretty_tree (Str s) = + raw_explode s + |> maps (fn "\"" => ["\\", "\""] | "\\" => ["\\", "\\"] | c => [c]) + |> implode + |> enclose "\"" "\"" + |> Pretty.str + | pretty_tree (Sym s) = + if String.isPrefix "(" s (* for bit vector functions *) orelse + forall is_sym (raw_explode s) then + Pretty.str s + else + Pretty.str ("|" ^ s ^ "|") + | pretty_tree (Key s) = Pretty.str (":" ^ s) + | pretty_tree (S trees) = + Pretty.enclose "(" ")" (Pretty.separate "" (map pretty_tree trees)) + +val str_of = Pretty.unformatted_string_of o pretty_tree + +end; diff --git a/src/test-gen/src/main/new_smt_patch/z3_model.ML b/src/test-gen/src/main/new_smt_patch/z3_model.ML new file mode 100644 index 0000000..5c2b0f8 --- /dev/null +++ b/src/test-gen/src/main/new_smt_patch/z3_model.ML @@ -0,0 +1,113 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * z3_model.ML --- a parser for models generated by Z3. + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2010 ETH Zurich, Switzerland + * 2008-2013 Achim D. Brucker, Germany + * 2009-2013 Universite Paris-Sud, France + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id:$ *) + +signature Z3_MODEL = +sig + val parse_model : SMT_Translate.replay_data -> string list -> SMT_patch_Solver.parsed_model +end + + + +structure Z3_Model : Z3_MODEL = +struct + +open SMTLIB_Proof + + +(* Extracting definitions of a variable or a function symbol *) + +(* Non-functional variables: the codomain is empty *) +fun get_def ctxt smt_ctxt (SMTLIB.S [SMTLIB.Sym "define-fun", n, SMTLIB.S [], _, v]) = + let + (* Parsing the variable *) + val (x, smt_ctxt2) = term_of n smt_ctxt + (* For debugging *) + (* val _ = tracing ("Variable: "^(Pretty.string_of (Syntax.pretty_term ctxt x))) *) + (* Parsing its value *) + val (t, smt_ctxt3) = term_of v smt_ctxt2 + (* For debugging *) + (* val _ = tracing ("Value: "^(Pretty.string_of (Syntax.pretty_term ctxt t))) *) + in + SOME (x, t, smt_ctxt3) + end + + (* UNDER PROGRESS *) + (* Functional variables: the codomain is nonempty *) + | get_def ctxt smt_ctxt (SMTLIB.S [SMTLIB.Sym "define-fun", n, SMTLIB.S (absname::args), _, v]) = NONE (* UNDER PROGRESS *) + + (* Other definitions: dismissed for the moment *) + | get_def _ _ v = NONE + + +(* Extracting all the definitions *) + +fun get_defs ctxt smt_ctxt smtlib_defs = + snd ( + List.foldl (fn (def, (smt_ctxt', acc)) => + case get_def ctxt smt_ctxt' def of + NONE => (smt_ctxt', acc) + | SOME (a, b, smt_ctxt'') => (smt_ctxt'', (a,b)::acc)) + (smt_ctxt, []) smtlib_defs + ) + + +(* Top-level function to interpret Z3 models *) + +fun parse_model_main ctxt typs funs lines = + let + (* val _ = tracing "Lines:\n" + val _ = List.app (fn l => tracing (l^"")) lines *) + val res = + case SMTLIB.parse lines of + SMTLIB.S ((SMTLIB.Sym "model") :: vs) => get_defs ctxt (empty_context ctxt typs funs) vs + | ts => raise SMTLIB_PARSE ("bad Z3 model declaration", ts) + in + {const_defs = res} + end + +fun parse_model + ({context = ctxt, typs, terms, ...} : SMT_Translate.replay_data) + output = + parse_model_main ctxt typs terms output + +end diff --git a/src/test-gen/src/main/new_smt_patch/z3_replay_patch.ML b/src/test-gen/src/main/new_smt_patch/z3_replay_patch.ML new file mode 100644 index 0000000..f8d49d8 --- /dev/null +++ b/src/test-gen/src/main/new_smt_patch/z3_replay_patch.ML @@ -0,0 +1,262 @@ +(* Title: HOL/Tools/SMT/z3_replay.ML + Author: Sascha Boehme, TU Muenchen + Author: Jasmin Blanchette, TU Muenchen + +Z3 proof parsing and replay. 
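+
+Note on this patched copy: apart from producing SMT_patch_Solver.parsed_proof
+instead of the stock SMT_Solver record, the module provides the usual two
+entry points: parse_proof maps Z3's proof output to fact identifiers plus an
+ATP-style proof, whereas replay re-checks every Z3 proof step within Isabelle
+and, if the statistics option is enabled, reports per-rule reconstruction
+times.
+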
+*) + +signature Z3_REPLAY = +sig + val parse_proof: Proof.context -> SMT_Translate.replay_data -> + ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> + SMT_patch_Solver.parsed_proof + val replay: Proof.context -> SMT_Translate.replay_data -> string list -> thm +end; + +structure Z3_Replay: Z3_REPLAY = +struct + +fun params_of t = Term.strip_qnt_vars @{const_name Pure.all} t + +fun varify ctxt thm = + let + val maxidx = Thm.maxidx_of thm + 1 + val vs = params_of (Thm.prop_of thm) + val vars = map_index (fn (i, (n, T)) => Var ((n, i + maxidx), T)) vs + in Drule.forall_elim_list (map (Thm.cterm_of ctxt) vars) thm end + +fun add_paramTs names t = + fold2 (fn n => fn (_, T) => AList.update (op =) (n, T)) names (params_of t) + +fun new_fixes ctxt nTs = + let + val (ns, ctxt') = Variable.variant_fixes (replicate (length nTs) "") ctxt + fun mk (n, T) n' = (n, Thm.cterm_of ctxt' (Free (n', T))) + in (ctxt', Symtab.make (map2 mk nTs ns)) end + +fun forall_elim_term ct (Const (@{const_name Pure.all}, _) $ (a as Abs _)) = + Term.betapply (a, Thm.term_of ct) + | forall_elim_term _ qt = raise TERM ("forall_elim'", [qt]) + +fun apply_fixes elim env = fold (elim o the o Symtab.lookup env) + +val apply_fixes_prem = uncurry o apply_fixes Thm.forall_elim +val apply_fixes_concl = apply_fixes forall_elim_term + +fun export_fixes env names = Drule.forall_intr_list (map (the o Symtab.lookup env) names) + +fun under_fixes f ctxt (prems, nthms) names concl = + let + val thms1 = map (varify ctxt) prems + val (ctxt', env) = + add_paramTs names concl [] + |> fold (uncurry add_paramTs o apsnd Thm.prop_of) nthms + |> new_fixes ctxt + val thms2 = map (apply_fixes_prem env) nthms + val t = apply_fixes_concl env names concl + in export_fixes env names (f ctxt' (thms1 @ thms2) t) end + +fun replay_thm ctxt assumed nthms (Z3_Proof.Z3_Step {id, rule, concl, fixes, is_fix_step, ...}) = + if Z3_Proof.is_assumption rule then + (case Inttab.lookup assumed id of + SOME (_, thm) => thm + | NONE => Thm.assume (Thm.cterm_of ctxt concl)) + else + under_fixes (Z3_Replay_Methods.method_for rule) ctxt + (if is_fix_step then (map snd nthms, []) else ([], nthms)) fixes concl + +fun replay_step ctxt assumed (step as Z3_Proof.Z3_Step {id, rule, prems, fixes, ...}) state = + let + val (proofs, stats) = state + val nthms = map (the o Inttab.lookup proofs) prems + val replay = Timing.timing (replay_thm ctxt assumed nthms) + val ({elapsed, ...}, thm) = + SMT_Config.with_time_limit ctxt SMT_Config.reconstruction_step_timeout replay step + handle Timeout.TIMEOUT t => raise SMT_Failure.SMT SMT_Failure.Time_Out + val stats' = Symtab.cons_list (Z3_Proof.string_of_rule rule, Time.toMilliseconds elapsed) stats + in (Inttab.update (id, (fixes, thm)) proofs, stats') end + +local + val remove_trigger = mk_meta_eq @{thm trigger_def} + val remove_fun_app = mk_meta_eq @{thm fun_app_def} + + fun rewrite_conv _ [] = Conv.all_conv + | rewrite_conv ctxt eqs = Simplifier.full_rewrite (empty_simpset ctxt addsimps eqs) + + val rewrite_true_rule = @{lemma "True == ~ False" by simp} + val prep_rules = [@{thm Let_def}, remove_trigger, remove_fun_app, rewrite_true_rule] + + fun rewrite _ [] = I + | rewrite ctxt eqs = Conv.fconv_rule (rewrite_conv ctxt eqs) + + fun lookup_assm assms_net ct = + Z3_Replay_Util.net_instances assms_net ct + |> map (fn ithm as (_, thm) => (ithm, Thm.cprop_of thm aconvc ct)) +in + +fun add_asserted outer_ctxt rewrite_rules assms steps ctxt = + let + val eqs = map (rewrite ctxt [rewrite_true_rule]) 
rewrite_rules + val eqs' = union Thm.eq_thm eqs prep_rules + + val assms_net = + assms + |> map (apsnd (rewrite ctxt eqs')) + |> map (apsnd (Conv.fconv_rule Thm.eta_conversion)) + |> Z3_Replay_Util.thm_net_of snd + + fun revert_conv ctxt = rewrite_conv ctxt eqs' then_conv Thm.eta_conversion + + fun assume thm ctxt = + let + val ct = Thm.cprem_of thm 1 + val (thm', ctxt') = yield_singleton Assumption.add_assumes ct ctxt + in (thm' RS thm, ctxt') end + + fun add1 id fixes thm1 ((i, th), exact) ((iidths, thms), (ctxt, ptab)) = + let + val (thm, ctxt') = if exact then (Thm.implies_elim thm1 th, ctxt) else assume thm1 ctxt + val thms' = if exact then thms else th :: thms + in (((i, (id, th)) :: iidths, thms'), (ctxt', Inttab.update (id, (fixes, thm)) ptab)) end + + fun add (Z3_Proof.Z3_Step {id, rule, concl, fixes, ...}) + (cx as ((iidths, thms), (ctxt, ptab))) = + if Z3_Proof.is_assumption rule andalso rule <> Z3_Proof.Hypothesis then + let + val ct = Thm.cterm_of ctxt concl + val thm1 = Thm.trivial ct |> Conv.fconv_rule (Conv.arg1_conv (revert_conv outer_ctxt)) + val thm2 = singleton (Variable.export ctxt outer_ctxt) thm1 + in + (case lookup_assm assms_net (Thm.cprem_of thm2 1) of + [] => + let val (thm, ctxt') = assume thm1 ctxt + in ((iidths, thms), (ctxt', Inttab.update (id, (fixes, thm)) ptab)) end + | ithms => fold (add1 id fixes thm1) ithms cx) + end + else + cx + in fold add steps (([], []), (ctxt, Inttab.empty)) end + +end + +(* |- (EX x. P x) = P c |- ~ (ALL x. P x) = ~ P c *) +local + val sk_rules = @{lemma + "c = (SOME x. P x) ==> (EX x. P x) = P c" + "c = (SOME x. ~ P x) ==> (~ (ALL x. P x)) = (~ P c)" + by (metis someI_ex)+} +in + +fun discharge_sk_tac ctxt i st = + (resolve_tac ctxt @{thms trans} i + THEN resolve_tac ctxt sk_rules i + THEN (resolve_tac ctxt @{thms refl} ORELSE' discharge_sk_tac ctxt) (i+1) + THEN resolve_tac ctxt @{thms refl} i) st + +end + +val true_thm = @{lemma "~False" by simp} +fun make_discharge_rules rules = rules @ [@{thm allI}, @{thm refl}, @{thm reflexive}, true_thm] + +val intro_def_rules = @{lemma + "(~ P | P) & (P | ~ P)" + "(P | ~ P) & (~ P | P)" + by fast+} + +fun discharge_assms_tac ctxt rules = + REPEAT + (HEADGOAL (resolve_tac ctxt (intro_def_rules @ rules) ORELSE' + SOLVED' (discharge_sk_tac ctxt))) + +fun discharge_assms ctxt rules thm = + (if Thm.nprems_of thm = 0 then + thm + else + (case Seq.pull (discharge_assms_tac ctxt rules thm) of + SOME (thm', _) => thm' + | NONE => raise THM ("failed to discharge premise", 1, [thm]))) + |> Goal.norm_result ctxt + +fun discharge rules outer_ctxt inner_ctxt = + singleton (Proof_Context.export inner_ctxt outer_ctxt) + #> discharge_assms outer_ctxt (make_discharge_rules rules) + +fun parse_proof outer_ctxt + ({context = ctxt, typs, terms, ll_defs, rewrite_rules, assms} : SMT_Translate.replay_data) + xfacts prems concl output = + let + val (steps, ctxt2) = Z3_Proof.parse typs terms output ctxt + val ((iidths, _), _) = add_asserted outer_ctxt rewrite_rules assms steps ctxt2 + + fun id_of_index i = the_default ~1 (Option.map fst (AList.lookup (op =) iidths i)) + + val conjecture_i = 0 + val prems_i = 1 + val facts_i = prems_i + length prems + + val conjecture_id = id_of_index conjecture_i + val prem_ids = map id_of_index (prems_i upto facts_i - 1) + val fact_ids' = + map_filter (fn (i, (id, _)) => try (apsnd (nth xfacts)) (id, i - facts_i)) iidths + val helper_ids' = map_filter (try (fn (~1, idth) => idth)) iidths + + val fact_helper_ts = + map (fn (_, th) => (ATP_Util.short_thm_name ctxt th, Thm.prop_of 
th)) helper_ids' @ + map (fn (_, ((s, _), th)) => (s, Thm.prop_of th)) fact_ids' + val fact_helper_ids' = + map (apsnd (ATP_Util.short_thm_name ctxt)) helper_ids' @ map (apsnd (fst o fst)) fact_ids' + in + {outcome = NONE, fact_ids = SOME fact_ids', + atp_proof = fn () => Z3_Isar.atp_proof_of_z3_proof ctxt ll_defs rewrite_rules prems concl + fact_helper_ts prem_ids conjecture_id fact_helper_ids' steps} + end + +fun intermediate_statistics ctxt start total = + SMT_Config.statistics_msg ctxt (fn current => + "Reconstructed " ^ string_of_int current ^ " of " ^ string_of_int total ^ " steps in " ^ + string_of_int (Time.toMilliseconds (#elapsed (Timing.result start))) ^ " ms") + +fun pretty_statistics total stats = + let + fun mean_of is = + let + val len = length is + val mid = len div 2 + in if len mod 2 = 0 then (nth is (mid - 1) + nth is mid) div 2 else nth is mid end + fun pretty_item name p = Pretty.item (Pretty.separate ":" [Pretty.str name, p]) + fun pretty (name, milliseconds) = pretty_item name (Pretty.block (Pretty.separate "," [ + Pretty.str (string_of_int (length milliseconds) ^ " occurrences") , + Pretty.str (string_of_int (mean_of milliseconds) ^ " ms mean time"), + Pretty.str (string_of_int (fold Integer.max milliseconds 0) ^ " ms maximum time"), + Pretty.str (string_of_int (fold Integer.add milliseconds 0) ^ " ms total time")])) + in + Pretty.big_list "Z3 proof reconstruction statistics:" ( + pretty_item "total time" (Pretty.str (string_of_int total ^ " ms")) :: + map pretty (Symtab.dest stats)) + end + +fun replay outer_ctxt + ({context = ctxt, typs, terms, rewrite_rules, assms, ...} : SMT_Translate.replay_data) output = + let + val (steps, ctxt2) = Z3_Proof.parse typs terms output ctxt + val ((_, rules), (ctxt3, assumed)) = add_asserted outer_ctxt rewrite_rules assms steps ctxt2 + val ctxt4 = + ctxt3 + |> put_simpset (Z3_Replay_Util.make_simpset ctxt3 []) + |> Config.put SAT.solver (Config.get ctxt3 SMT_Config.sat_solver) + val len = length steps + val start = Timing.start () + val print_runtime_statistics = intermediate_statistics ctxt4 start len + fun blockwise f (i, x) y = + (if i > 0 andalso i mod 100 = 0 then print_runtime_statistics i else (); f x y) + val (proofs, stats) = + fold_index (blockwise (replay_step ctxt4 assumed)) steps (assumed, Symtab.empty) + val _ = print_runtime_statistics len + val total = Time.toMilliseconds (#elapsed (Timing.result start)) + val (_, Z3_Proof.Z3_Step {id, ...}) = split_last steps + val _ = SMT_Config.statistics_msg ctxt4 (Pretty.string_of o pretty_statistics total) stats + in + Inttab.lookup proofs id |> the |> snd |> discharge rules outer_ctxt ctxt4 + end + +end; diff --git a/src/test-gen/src/main/smt_patch/Old_SMT_patch.thy b/src/test-gen/src/main/smt_patch/Old_SMT_patch.thy new file mode 100644 index 0000000..71c5061 --- /dev/null +++ b/src/test-gen/src/main/smt_patch/Old_SMT_patch.thy @@ -0,0 +1,431 @@ +(* Title: HOL/Library/Old_SMT.thy + Author: Sascha Boehme, TU Muenchen +*) + +section \Old Version of Bindings to Satisfiability Modulo Theories (SMT) solvers\ + +theory Old_SMT_patch +imports HOL.Real (* "~~/src/HOL/Word/Word" *) +keywords "old_smt_status" :: diag +begin + +ML_file "../Old_SMT/old_smt_utils.ML" +ML_file "old_smt_failure_patch.ML" +ML_file "old_smt_config_patch.ML" + + +subsection \Triggers for quantifier instantiation\ + +text \ +Some SMT solvers support patterns as a quantifier instantiation +heuristics. 
Patterns may either be positive terms (tagged by "pat") +triggering quantifier instantiations -- when the solver finds a +term matching a positive pattern, it instantiates the corresponding +quantifier accordingly -- or negative terms (tagged by "nopat") +inhibiting quantifier instantiations. A list of patterns +of the same kind is called a multipattern, and all patterns in a +multipattern are considered conjunctively for quantifier instantiation. +A list of multipatterns is called a trigger, and their multipatterns +act disjunctively during quantifier instantiation. Each multipattern +should mention at least all quantified variables of the preceding +quantifier block. +\ + +typedecl pattern + +consts + pat :: "'a \ pattern" + nopat :: "'a \ pattern" + +definition trigger :: "pattern list list \ bool \ bool" where "trigger _ P = P" + + +subsection \Quantifier weights\ + +text \ +Weight annotations to quantifiers influence the priority of quantifier +instantiations. They should be handled with care for solvers, which support +them, because incorrect choices of weights might render a problem unsolvable. +\ + +definition weight :: "int \ bool \ bool" where "weight _ P = P" + +text \ +Weights must be non-negative. The value \0\ is equivalent to providing +no weight at all. + +Weights should only be used at quantifiers and only inside triggers (if the +quantifier has triggers). Valid usages of weights are as follows: + +\begin{itemize} +\item +@{term "\x. trigger [[pat (P x)]] (weight 2 (P x))"} +\item +@{term "\x. weight 3 (P x)"} +\end{itemize} +\ + + +subsection \Higher-order encoding\ + +text \ +Application is made explicit for constants occurring with varying +numbers of arguments. This is achieved by the introduction of the +following constant. +\ + +definition fun_app where "fun_app f = f" + +text \ +Some solvers support a theory of arrays which can be used to encode +higher-order functions. The following set of lemmas specifies the +properties of such (extensional) arrays. +\ + +lemmas array_rules = ext fun_upd_apply fun_upd_same fun_upd_other + fun_upd_upd fun_app_def + + +subsection \First-order logic\ + +text \ +Some SMT solvers only accept problems in first-order logic, i.e., +where formulas and terms are syntactically separated. When +translating higher-order into first-order problems, all +uninterpreted constants (those not built-in in the target solver) +are treated as function symbols in the first-order sense. Their +occurrences as head symbols in atoms (i.e., as predicate symbols) are +turned into terms by logically equating such atoms with @{term True}. +For technical reasons, @{term True} and @{term False} occurring inside +terms are replaced by the following constants. 
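+For illustration: an atom @{term "p x"} built from an uninterpreted predicate
+@{term p} is equated with @{term True} as described above, and any such truth
+value that ends up inside a term is then represented by term_true
+(respectively term_false).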
+\ + +definition term_true where "term_true = True" +definition term_false where "term_false = False" + + +subsection \Integer division and modulo for Z3\ + +definition z3div :: "int \ int \ int" where + "z3div k l = (if 0 \ l then k div l else -(k div (-l)))" + +definition z3mod :: "int \ int \ int" where + "z3mod k l = (if 0 \ l then k mod l else k mod (-l))" + + +subsection \Setup\ + +ML_file "../Old_SMT/old_smt_builtin.ML" +ML_file "../Old_SMT/old_smt_datatypes.ML" +ML_file "old_smt_normalize_patch.ML" +ML_file "old_smt_translate_patch.ML" +(* This is the file that is actually patched.*) +ML_file "old_smt_solver_patch.ML" +ML_file "../Old_SMT/old_smtlib_interface.ML" +ML_file "old_z3_interface_patch.ML" +ML_file "old_z3_proof_parser_patch.ML" +ML_file "../Old_SMT/old_z3_proof_tools.ML" +ML_file "../Old_SMT/old_z3_proof_literals.ML" +ML_file "../Old_SMT/old_z3_proof_methods.ML" +named_theorems old_z3_simp "simplification rules for Z3 proof reconstruction" +ML_file "old_z3_proof_reconstruction_patch.ML" +ML_file "old_z3_model_patch.ML" +ML_file "old_smt_setup_solvers_patch.ML" + +setup \ + Old_SMT_Config.setup #> + Old_SMT_Normalize.setup #> + Old_SMTLIB_Interface.setup #> + Old_Z3_Interface.setup #> + Old_SMT_Setup_Solvers.setup +\ + +method_setup old_smt = \ + Scan.optional Attrib.thms [] >> + (fn thms => fn ctxt => + METHOD (fn facts => HEADGOAL (Old_SMT_patch_Solver.smt_tac ctxt (thms @ facts)))) +\ "apply an SMT solver to the current goal" + + +subsection \Configuration\ + +text \ +The current configuration can be printed by the command +\old_smt_status\, which shows the values of most options. +\ + + + +subsection \General configuration options\ + +text \ +The option \old_smt_solver\ can be used to change the target SMT +solver. The possible values can be obtained from the \old_smt_status\ +command. + +Due to licensing restrictions, Yices and Z3 are not installed/enabled +by default. Z3 is free for non-commercial applications and can be enabled +by setting the \OLD_Z3_NON_COMMERCIAL\ environment variable to +\yes\. +\ + +declare [[ old_smt_solver = z3 ]] + +text \ +Since SMT solvers are potentially non-terminating, there is a timeout +(given in seconds) to restrict their runtime. A value greater than +120 (seconds) is in most cases not advisable. +\ + +declare [[ old_smt_timeout = 20 ]] + +text \ +SMT solvers apply randomized heuristics. In case a problem is not +solvable by an SMT solver, changing the following option might help. +\ + +declare [[ old_smt_random_seed = 1 ]] + +text \ +In general, the binding to SMT solvers runs as an oracle, i.e, the SMT +solvers are fully trusted without additional checks. The following +option can cause the SMT solver to run in proof-producing mode, giving +a checkable certificate. This is currently only implemented for Z3. +\ + +declare [[ old_smt_oracle = false ]] + +text \ +Each SMT solver provides several commandline options to tweak its +behaviour. They can be passed to the solver by setting the following +options. +\ + +declare [[ old_cvc3_options = "" ]] +declare [[ old_yices_options = "" ]] +declare [[ old_z3_options = "" ]] + +text \ +Enable the following option to use built-in support for datatypes and +records. Currently, this is only implemented for Z3 running in oracle +mode. 
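+In other words, this option only has an effect in combination with Z3 and
+only while the solver is allowed to run as an oracle, cf. the
+\old_smt_oracle\ option above.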
+\ + +declare [[ old_smt_datatypes = false ]] + +text \ +The SMT method provides an inference mechanism to detect simple triggers +in quantified formulas, which might increase the number of problems +solvable by SMT solvers (note: triggers guide quantifier instantiations +in the SMT solver). To turn it on, set the following option. +\ + +declare [[ old_smt_infer_triggers = false ]] + +text \ +The SMT method monomorphizes the given facts, that is, it tries to +instantiate all schematic type variables with fixed types occurring +in the problem. This is a (possibly nonterminating) fixed-point +construction whose cycles are limited by the following option. +\ + +declare [[ monomorph_max_rounds = 5 ]] + +text \ +In addition, the number of generated monomorphic instances is limited +by the following option. +\ + +declare [[ monomorph_max_new_instances = 500 ]] + + + +subsection \Certificates\ + +text \ +By setting the option \old_smt_certificates\ to the name of a file, +all following applications of an SMT solver a cached in that file. +Any further application of the same SMT solver (using the very same +configuration) re-uses the cached certificate instead of invoking the +solver. An empty string disables caching certificates. + +The filename should be given as an explicit path. It is good +practice to use the name of the current theory (with ending +\.certs\ instead of \.thy\) as the certificates file. +Certificate files should be used at most once in a certain theory context, +to avoid race conditions with other concurrent accesses. +\ + +declare [[ old_smt_certificates = "" ]] + +text \ +The option \old_smt_read_only_certificates\ controls whether only +stored certificates are should be used or invocation of an SMT solver +is allowed. When set to \true\, no SMT solver will ever be +invoked and only the existing certificates found in the configured +cache are used; when set to \false\ and there is no cached +certificate for some proposition, then the configured SMT solver is +invoked. +\ + +declare [[ old_smt_read_only_certificates = false ]] + + + +subsection \Tracing\ + +text \ +The SMT method, when applied, traces important information. To +make it entirely silent, set the following option to \false\. +\ + +declare [[ old_smt_verbose = true ]] + +text \ +For tracing the generated problem file given to the SMT solver as +well as the returned result of the solver, the option +\old_smt_trace\ should be set to \true\. +\ + +declare [[ old_smt_trace = false ]] + +text \ +From the set of assumptions given to the SMT solver, those assumptions +used in the proof are traced when the following option is set to +@{term true}. This only works for Z3 when it runs in non-oracle mode +(see options \old_smt_solver\ and \old_smt_oracle\ above). +\ + +declare [[ old_smt_trace_used_facts = false ]] + + + +subsection \Schematic rules for Z3 proof reconstruction\ + +text \ +Several prof rules of Z3 are not very well documented. There are two +lemma groups which can turn failing Z3 proof reconstruction attempts +into succeeding ones: the facts in \z3_rule\ are tried prior to +any implemented reconstruction procedure for all uncertain Z3 proof +rules; the facts in \z3_simp\ are only fed to invocations of +the simplifier when reconstructing theory-specific proof steps. 
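+Both collections can be extended from user theories; for instance (the fact
+name is purely hypothetical here), \declare my_extra_rule [old_z3_rule]\
+registers one more candidate for the reconstruction of uncertain proof steps.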
+\ + +lemmas [old_z3_rule] = + refl eq_commute conj_commute disj_commute simp_thms nnf_simps + ring_distribs field_simps times_divide_eq_right times_divide_eq_left + if_True if_False not_not + +lemma [old_z3_rule]: + "(P \ Q) = (\(\P \ \Q))" + "(P \ Q) = (\(\Q \ \P))" + "(\P \ Q) = (\(P \ \Q))" + "(\P \ Q) = (\(\Q \ P))" + "(P \ \Q) = (\(\P \ Q))" + "(P \ \Q) = (\(Q \ \P))" + "(\P \ \Q) = (\(P \ Q))" + "(\P \ \Q) = (\(Q \ P))" + by auto + +lemma [old_z3_rule]: + "(P \ Q) = (Q \ \P)" + "(\P \ Q) = (P \ Q)" + "(\P \ Q) = (Q \ P)" + "(True \ P) = P" + "(P \ True) = True" + "(False \ P) = True" + "(P \ P) = True" + by auto + +lemma [old_z3_rule]: + "((P = Q) \ R) = (R | (Q = (\P)))" + by auto + +lemma [old_z3_rule]: + "(\True) = False" + "(\False) = True" + "(x = x) = True" + "(P = True) = P" + "(True = P) = P" + "(P = False) = (\P)" + "(False = P) = (\P)" + "((\P) = P) = False" + "(P = (\P)) = False" + "((\P) = (\Q)) = (P = Q)" + "\(P = (\Q)) = (P = Q)" + "\((\P) = Q) = (P = Q)" + "(P \ Q) = (Q = (\P))" + "(P = Q) = ((\P \ Q) \ (P \ \Q))" + "(P \ Q) = ((\P \ \Q) \ (P \ Q))" + by auto + +lemma [old_z3_rule]: + "(if P then P else \P) = True" + "(if \P then \P else P) = True" + "(if P then True else False) = P" + "(if P then False else True) = (\P)" + "(if P then Q else True) = ((\P) \ Q)" + "(if P then Q else True) = (Q \ (\P))" + "(if P then Q else \Q) = (P = Q)" + "(if P then Q else \Q) = (Q = P)" + "(if P then \Q else Q) = (P = (\Q))" + "(if P then \Q else Q) = ((\Q) = P)" + "(if \P then x else y) = (if P then y else x)" + "(if P then (if Q then x else y) else x) = (if P \ (\Q) then y else x)" + "(if P then (if Q then x else y) else x) = (if (\Q) \ P then y else x)" + "(if P then (if Q then x else y) else y) = (if P \ Q then x else y)" + "(if P then (if Q then x else y) else y) = (if Q \ P then x else y)" + "(if P then x else if P then y else z) = (if P then x else z)" + "(if P then x else if Q then x else y) = (if P \ Q then x else y)" + "(if P then x else if Q then x else y) = (if Q \ P then x else y)" + "(if P then x = y else x = z) = (x = (if P then y else z))" + "(if P then x = y else y = z) = (y = (if P then x else z))" + "(if P then x = y else z = y) = (y = (if P then x else z))" + by auto + +lemma [old_z3_rule]: + "0 + (x::int) = x" + "x + 0 = x" + "x + x = 2 * x" + "0 * x = 0" + "1 * x = x" + "x + y = y + x" + by auto + +lemma [old_z3_rule]: (* for def-axiom *) + "P = Q \ P \ Q" + "P = Q \ \P \ \Q" + "(\P) = Q \ \P \ Q" + "(\P) = Q \ P \ \Q" + "P = (\Q) \ \P \ Q" + "P = (\Q) \ P \ \Q" + "P \ Q \ P \ \Q" + "P \ Q \ \P \ Q" + "P \ (\Q) \ P \ Q" + "(\P) \ Q \ P \ Q" + "P \ Q \ P \ (\Q)" + "P \ Q \ (\P) \ Q" + "P \ \Q \ P \ Q" + "\P \ Q \ P \ Q" + "P \ y = (if P then x else y)" + "P \ (if P then x else y) = y" + "\P \ x = (if P then x else y)" + "\P \ (if P then x else y) = x" + "P \ R \ \(if P then Q else R)" + "\P \ Q \ \(if P then Q else R)" + "\(if P then Q else R) \ \P \ Q" + "\(if P then Q else R) \ P \ R" + "(if P then Q else R) \ \P \ \Q" + "(if P then Q else R) \ P \ \R" + "(if P then \Q else R) \ \P \ Q" + "(if P then Q else \R) \ P \ R" + by auto + +ML_file "../Old_SMT/old_smt_real.ML" +(* ML_file "~~/src/HOL/Library/Old_SMT/old_smt_word.ML" *) + +hide_type (open) pattern +hide_const fun_app term_true term_false z3div z3mod +hide_const (open) trigger pat nopat weight + +end diff --git a/src/test-gen/src/main/smt_patch/old_smt_config_patch.ML b/src/test-gen/src/main/smt_patch/old_smt_config_patch.ML new file mode 100644 index 0000000..d498f9b --- /dev/null +++ 
b/src/test-gen/src/main/smt_patch/old_smt_config_patch.ML @@ -0,0 +1,254 @@ +(* Title: HOL/Library/Old_SMT/old_smt_config.ML + Author: Sascha Boehme, TU Muenchen + +Configuration options and diagnostic tools for SMT. +*) + +signature OLD_SMT_CONFIG = +sig + (*solver*) + type solver_info = { + name: string, + class: Proof.context -> Old_SMT_Utils.class, + avail: unit -> bool, + options: Proof.context -> string list } + val add_solver: solver_info -> Context.generic -> Context.generic + val set_solver_options: string * string -> Context.generic -> Context.generic + val is_available: Proof.context -> string -> bool + val available_solvers_of: Proof.context -> string list + val select_solver: string -> Context.generic -> Context.generic + val solver_of: Proof.context -> string + val solver_class_of: Proof.context -> Old_SMT_Utils.class + val solver_options_of: Proof.context -> string list + + (*options*) + val oracle: bool Config.T + val datatypes: bool Config.T + val timeout: real Config.T + val random_seed: int Config.T + val read_only_certificates: bool Config.T + val verbose: bool Config.T + val trace: bool Config.T + val trace_used_facts: bool Config.T + val monomorph_limit: int Config.T + val monomorph_instances: int Config.T + val infer_triggers: bool Config.T + val filter_only_facts: bool Config.T + val debug_files: string Config.T + + (*tools*) + val with_timeout: Proof.context -> ('a -> 'b) -> 'a -> 'b + + (*diagnostics*) + val trace_msg: Proof.context -> ('a -> string) -> 'a -> unit + val verbose_msg: Proof.context -> ('a -> string) -> 'a -> unit + + (*certificates*) + val select_certificates: string -> Context.generic -> Context.generic + val certificates_of: Proof.context -> Cache_IO.cache option + + (*setup*) + val setup: theory -> theory + val print_setup: Proof.context -> unit +end + +structure Old_SMT_Config: OLD_SMT_CONFIG = +struct + +(* solver *) + +type solver_info = { + name: string, + class: Proof.context -> Old_SMT_Utils.class, + avail: unit -> bool, + options: Proof.context -> string list } + +(* FIXME just one data slot (record) per program unit *) +structure Solvers = Generic_Data +( + type T = (solver_info * string list) Symtab.table * string option + val empty = (Symtab.empty, NONE) + val extend = I + fun merge ((ss1, s1), (ss2, s2)) = + (Symtab.merge (K true) (ss1, ss2), merge_options (s1, s2)) +) + +fun set_solver_options (name, options) = + let val opts = String.tokens (Symbol.is_ascii_blank o str) options + in Solvers.map (apfst (Symtab.map_entry name (apsnd (K opts)))) end + +fun add_solver (info as {name, ...} : solver_info) context = + if Symtab.defined (fst (Solvers.get context)) name then + error ("Solver already registered: " ^ quote name) + else + context + |> Solvers.map (apfst (Symtab.update (name, (info, [])))) + |> Context.map_theory (Attrib.setup (Binding.name ("old_" ^ name ^ "_options")) + (Scan.lift (@{keyword "="} |-- Args.name) >> + (Thm.declaration_attribute o K o set_solver_options o pair name)) + ("additional command line options for SMT solver " ^ quote name)) + +fun all_solvers_of ctxt = Symtab.keys (fst (Solvers.get (Context.Proof ctxt))) + +fun solver_name_of ctxt = snd (Solvers.get (Context.Proof ctxt)) + +fun is_available ctxt name = + (case Symtab.lookup (fst (Solvers.get (Context.Proof ctxt))) name of + SOME ({avail, ...}, _) => avail () + | NONE => false) + +fun available_solvers_of ctxt = + filter (is_available ctxt) (all_solvers_of ctxt) + +fun warn_solver (Context.Proof ctxt) name = + if Context_Position.is_visible ctxt then + 
warning ("The SMT solver " ^ quote name ^ " is not installed.") + else () + | warn_solver _ _ = (); + +fun select_solver name context = + let + val ctxt = Context.proof_of context + val upd = Solvers.map (apsnd (K (SOME name))) + in + if not (member (op =) (all_solvers_of ctxt) name) then + error ("Trying to select unknown solver: " ^ quote name) + else if not (is_available ctxt name) then + (warn_solver context name; upd context) + else upd context + end + +fun no_solver_err () = error "No SMT solver selected" + +fun solver_of ctxt = + (case solver_name_of ctxt of + SOME name => name + | NONE => no_solver_err ()) + +fun solver_info_of default select ctxt = + (case Solvers.get (Context.Proof ctxt) of + (solvers, SOME name) => select (Symtab.lookup solvers name) + | (_, NONE) => default ()) + +fun solver_class_of ctxt = + let fun class_of ({class, ...}: solver_info, _) = class ctxt + in solver_info_of no_solver_err (class_of o the) ctxt end + +fun solver_options_of ctxt = + let + fun all_options NONE = [] + | all_options (SOME ({options, ...} : solver_info, opts)) = + opts @ options ctxt + in solver_info_of (K []) all_options ctxt end + +val setup_solver = + Attrib.setup @{binding old_smt_solver} + (Scan.lift (@{keyword "="} |-- Args.name) >> + (Thm.declaration_attribute o K o select_solver)) + "SMT solver configuration" + + +(* options *) + +val oracle = Attrib.setup_config_bool @{binding old_smt_oracle} (K true) +val datatypes = Attrib.setup_config_bool @{binding old_smt_datatypes} (K false) +val timeout = Attrib.setup_config_real @{binding old_smt_timeout} (K 30.0) +val random_seed = Attrib.setup_config_int @{binding old_smt_random_seed} (K 1) +val read_only_certificates = Attrib.setup_config_bool @{binding old_smt_read_only_certificates} (K false) +val verbose = Attrib.setup_config_bool @{binding old_smt_verbose} (K true) +val trace = Attrib.setup_config_bool @{binding old_smt_trace} (K false) +val trace_used_facts = Attrib.setup_config_bool @{binding old_smt_trace_used_facts} (K false) +val monomorph_limit = Attrib.setup_config_int @{binding old_smt_monomorph_limit} (K 10) +val monomorph_instances = Attrib.setup_config_int @{binding old_smt_monomorph_instances} (K 500) +val infer_triggers = Attrib.setup_config_bool @{binding old_smt_infer_triggers} (K false) +val filter_only_facts = Attrib.setup_config_bool @{binding old_smt_filter_only_facts} (K false) +val debug_files = Attrib.setup_config_string @{binding old_smt_debug_files} (K "") + + +(* diagnostics *) + +fun cond_trace flag f x = if flag then tracing ("SMT: " ^ f x) else () + +fun verbose_msg ctxt = cond_trace (Config.get ctxt verbose) + +fun trace_msg ctxt = cond_trace (Config.get ctxt trace) + + +(* tools *) + +fun with_timeout ctxt f x = + Timeout.apply (seconds (Config.get ctxt timeout)) f x + handle Timeout.TIMEOUT t => raise Old_SMT_patch_Failure.SMT Old_SMT_patch_Failure.Time_Out + + +(* certificates *) + +(* FIXME just one data slot (record) per program unit *) +structure Certificates = Generic_Data +( + type T = Cache_IO.cache option + val empty = NONE + val extend = I + fun merge (s, _) = s (* FIXME merge options!? 
*) +) + +val get_certificates_path = + Option.map (Cache_IO.cache_path_of) o Certificates.get o Context.Proof + +fun select_certificates name context = context |> Certificates.put ( + if name = "" then NONE + else + Path.explode name + |> Path.append (Resources.master_directory (Context.theory_of context)) + |> SOME o Cache_IO.unsynchronized_init) + +val certificates_of = Certificates.get o Context.Proof + +val setup_certificates = + Attrib.setup @{binding old_smt_certificates} + (Scan.lift (@{keyword "="} |-- Args.name) >> + (Thm.declaration_attribute o K o select_certificates)) + "SMT certificates configuration" + + +(* setup *) + +val setup = + setup_solver #> + setup_certificates + +fun print_setup ctxt = + let + fun string_of_bool b = if b then "true" else "false" + + val names = available_solvers_of ctxt + val ns = if null names then ["(none)"] else sort_strings names + val n = the_default "(none)" (solver_name_of ctxt) + val opts = solver_options_of ctxt + + val t = string_of_real (Config.get ctxt timeout) + + val certs_filename = + (case get_certificates_path ctxt of + SOME path => Path.print path + | NONE => "(disabled)") + in + Pretty.writeln (Pretty.big_list "SMT setup:" [ + Pretty.str ("Current SMT solver: " ^ n), + Pretty.str ("Current SMT solver options: " ^ space_implode " " opts), + Pretty.str_list "Available SMT solvers: " "" ns, + Pretty.str ("Current timeout: " ^ t ^ " seconds"), + Pretty.str ("With proofs: " ^ + string_of_bool (not (Config.get ctxt oracle))), + Pretty.str ("Certificates cache: " ^ certs_filename), + Pretty.str ("Fixed certificates: " ^ + string_of_bool (Config.get ctxt read_only_certificates))]) + end + +val _ = + Outer_Syntax.command @{command_keyword old_smt_status} + "show the available SMT solvers, the currently selected SMT solver, \ + \and the values of SMT configuration options" + (Scan.succeed (Toplevel.keep (print_setup o Toplevel.context_of))) + +end diff --git a/src/test-gen/src/main/smt_patch/old_smt_failure_patch.ML b/src/test-gen/src/main/smt_patch/old_smt_failure_patch.ML new file mode 100644 index 0000000..2910062 --- /dev/null +++ b/src/test-gen/src/main/smt_patch/old_smt_failure_patch.ML @@ -0,0 +1,61 @@ +(* Title: HOL/Library/Old_SMT/old_smt_failure.ML + Author: Sascha Boehme, TU Muenchen + +Failures and exception of SMT. 
+*) + +signature OLD_SMT_FAILURE = +sig + type counterexample = { + is_real_cex: bool, + free_constraints: term list, + const_defs: term list} + datatype failure = + Counterexample of counterexample | + Time_Out | + Out_Of_Memory | + Abnormal_Termination of int | + Other_Failure of string + val pretty_counterexample: Proof.context -> counterexample -> Pretty.T + val string_of_failure: Proof.context -> failure -> string + exception SMT of failure +end + +structure Old_SMT_patch_Failure: OLD_SMT_FAILURE = +struct + +type counterexample = { + is_real_cex: bool, + free_constraints: term list, + const_defs: term list} + +datatype failure = + Counterexample of counterexample | + Time_Out | + Out_Of_Memory | + Abnormal_Termination of int | + Other_Failure of string + +fun pretty_counterexample ctxt {is_real_cex, free_constraints, const_defs} = + let + val msg = + if is_real_cex then "Counterexample found (possibly spurious)" + else "Potential counterexample found" + in + if null free_constraints andalso null const_defs then Pretty.str msg + else + Pretty.big_list (msg ^ ":") + (map (Syntax.pretty_term ctxt) (free_constraints @ const_defs)) + end + +fun string_of_failure ctxt (Counterexample cex) = + Pretty.string_of (pretty_counterexample ctxt cex) + | string_of_failure _ Time_Out = "Timed out" + | string_of_failure _ Out_Of_Memory = "Ran out of memory" + | string_of_failure _ (Abnormal_Termination err) = + "Solver terminated abnormally with error code " ^ string_of_int err + | string_of_failure _ (Other_Failure msg) = msg + +exception SMT of failure + +end diff --git a/src/test-gen/src/main/smt_patch/old_smt_normalize_patch.ML b/src/test-gen/src/main/smt_patch/old_smt_normalize_patch.ML new file mode 100644 index 0000000..fbc3b50 --- /dev/null +++ b/src/test-gen/src/main/smt_patch/old_smt_normalize_patch.ML @@ -0,0 +1,652 @@ +(* Title: HOL/Library/Old_SMT/old_smt_normalize.ML + Author: Sascha Boehme, TU Muenchen + +Normalization steps on theorems required by SMT solvers. 
+*) + +signature OLD_SMT_NORMALIZE = +sig + val drop_fact_warning: Proof.context -> thm -> unit + val atomize_conv: Proof.context -> conv + type extra_norm = Proof.context -> thm list * thm list -> thm list * thm list + val add_extra_norm: Old_SMT_Utils.class * extra_norm -> Context.generic -> + Context.generic + val normalize: (int * (int option * thm)) list -> Proof.context -> + (int * thm) list * Proof.context + val setup: theory -> theory +end + +structure Old_SMT_Normalize: OLD_SMT_NORMALIZE = +struct + +fun drop_fact_warning ctxt = + Old_SMT_Config.verbose_msg ctxt (prefix "Warning: dropping assumption: " o + Thm.string_of_thm ctxt) + + +(* general theorem normalizations *) + +(** instantiate elimination rules **) + +local + val (cpfalse, cfalse) = + `Old_SMT_Utils.mk_cprop (Thm.cterm_of @{context} @{const False}) + + fun inst f ct thm = + let val cv = f (Drule.strip_imp_concl (Thm.cprop_of thm)) + in Thm.instantiate ([], [(dest_Var (Thm.term_of cv), ct)]) thm end +in + +fun instantiate_elim thm = + (case Thm.concl_of thm of + @{const Trueprop} $ Var (_, @{typ bool}) => inst Thm.dest_arg cfalse thm + | Var _ => inst I cpfalse thm + | _ => thm) + +end + + +(** normalize definitions **) + +fun norm_def thm = + (case Thm.prop_of thm of + @{const Trueprop} $ (Const (@{const_name HOL.eq}, _) $ _ $ Abs _) => + norm_def (thm RS @{thm fun_cong}) + | Const (@{const_name Pure.eq}, _) $ _ $ Abs _ => + norm_def (thm RS @{thm meta_eq_to_obj_eq}) + | _ => thm) + + +(** atomization **) + +fun atomize_conv ctxt ct = + (case Thm.term_of ct of + @{const Pure.imp} $ _ $ _ => + Conv.binop_conv (atomize_conv ctxt) then_conv + Conv.rewr_conv @{thm atomize_imp} + | Const (@{const_name Pure.eq}, _) $ _ $ _ => + Conv.binop_conv (atomize_conv ctxt) then_conv + Conv.rewr_conv @{thm atomize_eq} + | Const (@{const_name Pure.all}, _) $ Abs _ => + Conv.binder_conv (atomize_conv o snd) ctxt then_conv + Conv.rewr_conv @{thm atomize_all} + | _ => Conv.all_conv) ct + +val setup_atomize = + fold Old_SMT_Builtin.add_builtin_fun_ext'' [@{const_name Pure.imp}, + @{const_name Pure.eq}, @{const_name Pure.all}, @{const_name Trueprop}] + + +(** unfold special quantifiers **) + +local + val ex1_def = mk_meta_eq @{lemma + "Ex1 = (%P. EX x. P x & (ALL y. P y --> y = x))" + by (rule ext) (simp only: Ex1_def)} + + val ball_def = mk_meta_eq @{lemma "Ball = (%A P. ALL x. x : A --> P x)" + by (rule ext)+ (rule Ball_def)} + + val bex_def = mk_meta_eq @{lemma "Bex = (%A P. EX x. 
x : A & P x)" + by (rule ext)+ (rule Bex_def)} + + val special_quants = [(@{const_name Ex1}, ex1_def), + (@{const_name Ball}, ball_def), (@{const_name Bex}, bex_def)] + + fun special_quant (Const (n, _)) = AList.lookup (op =) special_quants n + | special_quant _ = NONE + + fun special_quant_conv _ ct = + (case special_quant (Thm.term_of ct) of + SOME thm => Conv.rewr_conv thm + | NONE => Conv.all_conv) ct +in + +fun unfold_special_quants_conv ctxt = + Old_SMT_Utils.if_exists_conv (is_some o special_quant) + (Conv.top_conv special_quant_conv ctxt) + +val setup_unfolded_quants = + fold (Old_SMT_Builtin.add_builtin_fun_ext'' o fst) special_quants + +end + + +(** trigger inference **) + +local + (*** check trigger syntax ***) + + fun dest_trigger (Const (@{const_name pat}, _) $ _) = SOME true + | dest_trigger (Const (@{const_name nopat}, _) $ _) = SOME false + | dest_trigger _ = NONE + + fun eq_list [] = false + | eq_list (b :: bs) = forall (equal b) bs + + fun proper_trigger t = + t + |> these o try HOLogic.dest_list + |> map (map_filter dest_trigger o these o try HOLogic.dest_list) + |> (fn [] => false | bss => forall eq_list bss) + + fun proper_quant inside f t = + (case t of + Const (@{const_name All}, _) $ Abs (_, _, u) => proper_quant true f u + | Const (@{const_name Ex}, _) $ Abs (_, _, u) => proper_quant true f u + | @{const trigger} $ p $ u => + (if inside then f p else false) andalso proper_quant false f u + | Abs (_, _, u) => proper_quant false f u + | u1 $ u2 => proper_quant false f u1 andalso proper_quant false f u2 + | _ => true) + + fun check_trigger_error ctxt t = + error ("SMT triggers must only occur under quantifier and multipatterns " ^ + "must have the same kind: " ^ Syntax.string_of_term ctxt t) + + fun check_trigger_conv ctxt ct = + if proper_quant false proper_trigger (Old_SMT_Utils.term_of ct) then + Conv.all_conv ct + else check_trigger_error ctxt (Thm.term_of ct) + + + (*** infer simple triggers ***) + + fun dest_cond_eq ct = + (case Thm.term_of ct of + Const (@{const_name HOL.eq}, _) $ _ $ _ => Thm.dest_binop ct + | @{const HOL.implies} $ _ $ _ => dest_cond_eq (Thm.dest_arg ct) + | _ => raise CTERM ("no equation", [ct])) + + fun get_constrs thy (Type (n, _)) = these (Old_Datatype_Data.get_constrs thy n) + | get_constrs _ _ = [] + + fun is_constr thy (n, T) = + let fun match (m, U) = m = n andalso Sign.typ_instance thy (T, U) + in can (the o find_first match o get_constrs thy o Term.body_type) T end + + fun is_constr_pat thy t = + (case Term.strip_comb t of + (Free _, []) => true + | (Const c, ts) => is_constr thy c andalso forall (is_constr_pat thy) ts + | _ => false) + + fun is_simp_lhs ctxt t = + (case Term.strip_comb t of + (Const c, ts as _ :: _) => + not (Old_SMT_Builtin.is_builtin_fun_ext ctxt c ts) andalso + forall (is_constr_pat (Proof_Context.theory_of ctxt)) ts + | _ => false) + + fun has_all_vars vs t = + subset (op aconv) (vs, map Free (Term.add_frees t [])) + + fun minimal_pats vs ct = + if has_all_vars vs (Thm.term_of ct) then + (case Thm.term_of ct of + _ $ _ => + (case apply2 (minimal_pats vs) (Thm.dest_comb ct) of + ([], []) => [[ct]] + | (ctss, ctss') => union (eq_set (op aconvc)) ctss ctss') + | _ => []) + else [] + + fun proper_mpat _ _ _ [] = false + | proper_mpat thy gen u cts = + let + val tps = (op ~~) (`gen (map Thm.term_of cts)) + fun some_match u = tps |> exists (fn (t', t) => + Pattern.matches thy (t', u) andalso not (t aconv u)) + in not (Term.exists_subterm some_match u) end + + val pat = + Old_SMT_Utils.mk_const_pat @{theory} 
@{const_name Old_SMT_patch.pat} Old_SMT_Utils.destT1 + fun mk_pat ct = Thm.apply (Old_SMT_Utils.instT' ct pat) ct + + fun mk_clist T = apply2 (Thm.cterm_of @{context}) (HOLogic.cons_const T, HOLogic.nil_const T) + fun mk_list (ccons, cnil) f cts = fold_rev (Thm.mk_binop ccons o f) cts cnil + val mk_pat_list = mk_list (mk_clist @{typ Old_SMT_patch.pattern}) + val mk_mpat_list = mk_list (mk_clist @{typ "Old_SMT_patch.pattern list"}) + fun mk_trigger ctss = mk_mpat_list (mk_pat_list mk_pat) ctss + + val trigger_eq = + mk_meta_eq @{lemma "p = Old_SMT_patch.trigger t p" by (simp add: trigger_def)} + + fun insert_trigger_conv [] ct = Conv.all_conv ct + | insert_trigger_conv ctss ct = + let val (ctr, cp) = Thm.dest_binop (Thm.rhs_of trigger_eq) ||> rpair ct + in Thm.instantiate ([], map (apfst (dest_Var o Thm.term_of)) [cp, (ctr, mk_trigger ctss)]) trigger_eq end + + fun infer_trigger_eq_conv outer_ctxt (ctxt, cvs) ct = + let + val (lhs, rhs) = dest_cond_eq ct + + val vs = map Thm.term_of cvs + val thy = Proof_Context.theory_of ctxt + + fun get_mpats ct = + if is_simp_lhs ctxt (Thm.term_of ct) then minimal_pats vs ct + else [] + val gen = Variable.export_terms ctxt outer_ctxt + val filter_mpats = filter (proper_mpat thy gen (Thm.term_of rhs)) + + in insert_trigger_conv (filter_mpats (get_mpats lhs)) ct end + + fun has_trigger (@{const Old_SMT_patch.trigger} $ _ $ _) = true + | has_trigger _ = false + + fun try_trigger_conv cv ct = + if Old_SMT_Utils.under_quant has_trigger (Old_SMT_Utils.term_of ct) then + Conv.all_conv ct + else Conv.try_conv cv ct + + fun infer_trigger_conv ctxt = + if Config.get ctxt Old_SMT_Config.infer_triggers then + try_trigger_conv + (Old_SMT_Utils.under_quant_conv (infer_trigger_eq_conv ctxt) ctxt) + else Conv.all_conv +in + +fun trigger_conv ctxt = + Old_SMT_Utils.prop_conv + (check_trigger_conv ctxt then_conv infer_trigger_conv ctxt) + +val setup_trigger = + fold Old_SMT_Builtin.add_builtin_fun_ext'' + [@{const_name Old_SMT_patch.pat}, @{const_name Old_SMT_patch.nopat}, @{const_name Old_SMT_patch.trigger}] + +end + + +(** adding quantifier weights **) + +local + (*** check weight syntax ***) + + val has_no_weight = + not o Term.exists_subterm (fn @{const Old_SMT_patch.weight} => true | _ => false) + + fun is_weight (@{const Old_SMT_patch.weight} $ w $ t) = + (case try HOLogic.dest_number w of + SOME (_, i) => i >= 0 andalso has_no_weight t + | _ => false) + | is_weight t = has_no_weight t + + fun proper_trigger (@{const Old_SMT_patch.trigger} $ _ $ t) = is_weight t + | proper_trigger t = is_weight t + + fun check_weight_error ctxt t = + error ("SMT weight must be a non-negative number and must only occur " ^ + "under the top-most quantifier and an optional trigger: " ^ + Syntax.string_of_term ctxt t) + + fun check_weight_conv ctxt ct = + if Old_SMT_Utils.under_quant proper_trigger (Old_SMT_Utils.term_of ct) then + Conv.all_conv ct + else check_weight_error ctxt (Thm.term_of ct) + + + (*** insertion of weights ***) + + fun under_trigger_conv cv ct = + (case Thm.term_of ct of + @{const Old_SMT_patch.trigger} $ _ $ _ => Conv.arg_conv cv + | _ => cv) ct + + val weight_eq = + mk_meta_eq @{lemma "p = Old_SMT_patch.weight i p" by (simp add: weight_def)} + fun mk_weight_eq w = + let val cv = Thm.dest_arg1 (Thm.rhs_of weight_eq) + in + Thm.instantiate ([], [(dest_Var (Thm.term_of cv), Numeral.mk_cnumber @{ctyp int} w)]) + weight_eq + end + + fun add_weight_conv NONE _ = Conv.all_conv + | add_weight_conv (SOME weight) ctxt = + let val cv = Conv.rewr_conv (mk_weight_eq weight) + 
in Old_SMT_Utils.under_quant_conv (K (under_trigger_conv cv)) ctxt end +in + +fun weight_conv weight ctxt = + Old_SMT_Utils.prop_conv + (check_weight_conv ctxt then_conv add_weight_conv weight ctxt) + +val setup_weight = Old_SMT_Builtin.add_builtin_fun_ext'' @{const_name Old_SMT_patch.weight} + +end + + +(** combined general normalizations **) + +fun gen_normalize1_conv ctxt weight = + atomize_conv ctxt then_conv + unfold_special_quants_conv ctxt then_conv + Thm.beta_conversion true then_conv + trigger_conv ctxt then_conv + weight_conv weight ctxt + +fun gen_normalize1 ctxt weight thm = + thm + |> instantiate_elim + |> norm_def + |> Conv.fconv_rule (Thm.beta_conversion true then_conv Thm.eta_conversion) + |> Drule.forall_intr_vars + |> Conv.fconv_rule (gen_normalize1_conv ctxt weight) + +fun gen_norm1_safe ctxt (i, (weight, thm)) = + (case try (gen_normalize1 ctxt weight) thm of + SOME thm' => SOME (i, thm') + | NONE => (drop_fact_warning ctxt thm; NONE)) + +fun gen_normalize ctxt iwthms = map_filter (gen_norm1_safe ctxt) iwthms + + + +(* unfolding of definitions and theory-specific rewritings *) + +fun expand_head_conv cv ct = + (case Thm.term_of ct of + _ $ _ => + Conv.fun_conv (expand_head_conv cv) then_conv + Conv.try_conv (Thm.beta_conversion false) + | _ => cv) ct + + +(** rewrite bool case expressions as if expressions **) + +local + fun is_case_bool (Const (@{const_name "bool.case_bool"}, _)) = true + | is_case_bool _ = false + + val thm = mk_meta_eq @{lemma + "case_bool = (%x y P. if P then x else y)" by (rule ext)+ simp} + + fun unfold_conv _ = + Old_SMT_Utils.if_true_conv (is_case_bool o Term.head_of) + (expand_head_conv (Conv.rewr_conv thm)) +in + +fun rewrite_case_bool_conv ctxt = + Old_SMT_Utils.if_exists_conv is_case_bool (Conv.top_conv unfold_conv ctxt) + +val setup_case_bool = + Old_SMT_Builtin.add_builtin_fun_ext'' @{const_name "bool.case_bool"} + +end + + +(** unfold abs, min and max **) + +local + val abs_def = mk_meta_eq @{lemma + "abs = (%a::'a::abs_if. if a < 0 then - a else a)" + by (rule ext) (rule abs_if)} + + val min_def = mk_meta_eq @{lemma "min = (%a b. if a <= b then a else b)" + by (rule ext)+ (rule min_def)} + + val max_def = mk_meta_eq @{lemma "max = (%a b. if a <= b then b else a)" + by (rule ext)+ (rule max_def)} + + val defs = [(@{const_name min}, min_def), (@{const_name max}, max_def), + (@{const_name abs}, abs_def)] + + fun is_builtinT ctxt T = + Old_SMT_Builtin.is_builtin_typ_ext ctxt (Term.domain_type T) + + fun abs_min_max ctxt (Const (n, T)) = + (case AList.lookup (op =) defs n of + NONE => NONE + | SOME thm => if is_builtinT ctxt T then SOME thm else NONE) + | abs_min_max _ _ = NONE + + fun unfold_amm_conv ctxt ct = + (case abs_min_max ctxt (Term.head_of (Thm.term_of ct)) of + SOME thm => expand_head_conv (Conv.rewr_conv thm) + | NONE => Conv.all_conv) ct +in + +fun unfold_abs_min_max_conv ctxt = + Old_SMT_Utils.if_exists_conv (is_some o abs_min_max ctxt) + (Conv.top_conv unfold_amm_conv ctxt) + +val setup_abs_min_max = fold (Old_SMT_Builtin.add_builtin_fun_ext'' o fst) defs + +end + + +(** embedding of standard natural number operations into integer operations **) + +local + val nat_embedding = @{lemma + "ALL n. nat (int n) = n" + "ALL i. i >= 0 --> int (nat i) = i" + "ALL i. 
i < 0 --> int (nat i) = 0" + by simp_all} + + val simple_nat_ops = [ + @{const less (nat)}, @{const less_eq (nat)}, + @{const Suc}, @{const plus (nat)}, @{const minus (nat)}] + + val mult_nat_ops = + [@{const times (nat)}, @{const divide (nat)}, @{const modulo (nat)}] + + val nat_ops = simple_nat_ops @ mult_nat_ops + + val nat_consts = nat_ops @ [@{const numeral (nat)}, + @{const zero_class.zero (nat)}, @{const one_class.one (nat)}] + + val nat_int_coercions = [@{const of_nat (int)}, @{const nat}] + + val builtin_nat_ops = nat_int_coercions @ simple_nat_ops + + val is_nat_const = member (op aconv) nat_consts + + fun is_nat_const' @{const of_nat (int)} = true + | is_nat_const' t = is_nat_const t + + val expands = map mk_meta_eq @{lemma + "0 = nat 0" + "1 = nat 1" + "(numeral :: num => nat) = (%i. nat (numeral i))" + "op < = (%a b. int a < int b)" + "op <= = (%a b. int a <= int b)" + "Suc = (%a. nat (int a + 1))" + "op + = (%a b. nat (int a + int b))" + "op - = (%a b. nat (int a - int b))" + "op * = (%a b. nat (int a * int b))" + "op div = (%a b. nat (int a div int b))" + "op mod = (%a b. nat (int a mod int b))" + by (fastforce simp add: nat_mult_distrib nat_div_distrib nat_mod_distrib)+} + + val ints = map mk_meta_eq @{lemma + "int 0 = 0" + "int 1 = 1" + "int (Suc n) = int n + 1" + "int (n + m) = int n + int m" + "int (n - m) = int (nat (int n - int m))" + "int (n * m) = int n * int m" + "int (n div m) = int n div int m" + "int (n mod m) = int n mod int m" + by (auto simp add: zdiv_int zmod_int)} + + val int_if = mk_meta_eq @{lemma + "int (if P then n else m) = (if P then int n else int m)" + by simp} + + fun mk_number_eq ctxt i lhs = + let + val eq = Old_SMT_Utils.mk_cequals lhs (Numeral.mk_cnumber @{ctyp int} i) + val tac = + Simplifier.simp_tac (put_simpset HOL_ss ctxt addsimps [@{thm of_nat_numeral [where 'a=int]}]) 1 + in Goal.norm_result ctxt (Goal.prove_internal ctxt [] eq (K tac)) end + + fun ite_conv cv1 cv2 = + Conv.combination_conv (Conv.combination_conv (Conv.arg_conv cv1) cv2) cv2 + + fun int_conv ctxt ct = + (case Thm.term_of ct of + @{const of_nat (int)} $ (n as (@{const numeral (nat)} $ _)) => + Conv.rewr_conv (mk_number_eq ctxt (snd (HOLogic.dest_number n)) ct) + | @{const of_nat (int)} $ _ => + (Conv.rewrs_conv ints then_conv Conv.sub_conv ints_conv ctxt) else_conv + (Conv.rewr_conv int_if then_conv + ite_conv (nat_conv ctxt) (int_conv ctxt)) else_conv + Conv.sub_conv (Conv.top_sweep_conv nat_conv) ctxt + | _ => Conv.no_conv) ct + + and ints_conv ctxt = Conv.top_sweep_conv int_conv ctxt + + and expand_conv ctxt = + Old_SMT_Utils.if_conv (is_nat_const o Term.head_of) + (expand_head_conv (Conv.rewrs_conv expands) then_conv ints_conv ctxt) + (int_conv ctxt) + + and nat_conv ctxt = Old_SMT_Utils.if_exists_conv is_nat_const' + (Conv.top_sweep_conv expand_conv ctxt) + + val uses_nat_int = Term.exists_subterm (member (op aconv) nat_int_coercions) +in + +val nat_as_int_conv = nat_conv + +fun add_nat_embedding thms = + if exists (uses_nat_int o Thm.prop_of) thms then (thms, nat_embedding) + else (thms, []) + +val setup_nat_as_int = + Old_SMT_Builtin.add_builtin_typ_ext (@{typ nat}, K true) #> + fold (Old_SMT_Builtin.add_builtin_fun_ext' o Term.dest_Const) builtin_nat_ops + +end + + +(** normalize numerals **) + +local + (* + rewrite Numeral1 into 1 + rewrite - 0 into 0 + *) + + fun is_irregular_number (Const (@{const_name numeral}, _) $ Const (@{const_name num.One}, _)) = + true + | is_irregular_number (Const (@{const_name uminus}, _) $ Const (@{const_name Groups.zero}, _)) = + 
true + | is_irregular_number _ = + false; + + fun is_strange_number ctxt t = is_irregular_number t andalso Old_SMT_Builtin.is_builtin_num ctxt t; + + val proper_num_ss = + simpset_of (put_simpset HOL_ss @{context} + addsimps @{thms Num.numeral_One minus_zero}) + + fun norm_num_conv ctxt = + Old_SMT_Utils.if_conv (is_strange_number ctxt) + (Simplifier.rewrite (put_simpset proper_num_ss ctxt)) Conv.no_conv +in + +fun normalize_numerals_conv ctxt = + Old_SMT_Utils.if_exists_conv (is_strange_number ctxt) + (Conv.top_sweep_conv norm_num_conv ctxt) + +end + + +(** combined unfoldings and rewritings **) + +fun unfold_conv ctxt = + rewrite_case_bool_conv ctxt then_conv + unfold_abs_min_max_conv ctxt then_conv + nat_as_int_conv ctxt then_conv + Thm.beta_conversion true + +fun unfold1 ctxt = map (apsnd (Conv.fconv_rule (unfold_conv ctxt))) + +fun burrow_ids f ithms = + let + val (is, thms) = split_list ithms + val (thms', extra_thms) = f thms + in (is ~~ thms') @ map (pair ~1) extra_thms end + +fun unfold2 ctxt ithms = + ithms + |> map (apsnd (Conv.fconv_rule (normalize_numerals_conv ctxt))) + |> burrow_ids add_nat_embedding + + + +(* overall normalization *) + +type extra_norm = Proof.context -> thm list * thm list -> thm list * thm list + +structure Extra_Norms = Generic_Data +( + type T = extra_norm Old_SMT_Utils.dict + val empty = [] + val extend = I + fun merge data = Old_SMT_Utils.dict_merge fst data +) + +fun add_extra_norm (cs, norm) = + Extra_Norms.map (Old_SMT_Utils.dict_update (cs, norm)) + +fun apply_extra_norms ctxt ithms = + let + val cs = Old_SMT_Config.solver_class_of ctxt + val es = Old_SMT_Utils.dict_lookup (Extra_Norms.get (Context.Proof ctxt)) cs + in burrow_ids (fold (fn e => e ctxt) es o rpair []) ithms end + +local + val ignored = member (op =) [@{const_name All}, @{const_name Ex}, + @{const_name Let}, @{const_name If}, @{const_name HOL.eq}] + + val schematic_consts_of = + let + fun collect (@{const Old_SMT_patch.trigger} $ p $ t) = + collect_trigger p #> collect t + | collect (t $ u) = collect t #> collect u + | collect (Abs (_, _, t)) = collect t + | collect (t as Const (n, _)) = + if not (ignored n) then Monomorph.add_schematic_consts_of t else I + | collect _ = I + and collect_trigger t = + let val dest = these o try HOLogic.dest_list + in fold (fold collect_pat o dest) (dest t) end + and collect_pat (Const (@{const_name Old_SMT_patch.pat}, _) $ t) = collect t + | collect_pat (Const (@{const_name Old_SMT_patch.nopat}, _) $ t) = collect t + | collect_pat _ = I + in (fn t => collect t Symtab.empty) end +in + +fun monomorph ctxt xthms = + let val (xs, thms) = split_list xthms + in + map (pair 1) thms + |> Monomorph.monomorph schematic_consts_of ctxt + |> maps (uncurry (map o pair)) o map2 pair xs o map (map snd) + end + +end + +fun normalize iwthms ctxt = + iwthms + |> gen_normalize ctxt + |> unfold1 ctxt + |> monomorph ctxt + |> unfold2 ctxt + |> apply_extra_norms ctxt + |> rpair ctxt + +val setup = Context.theory_map ( + setup_atomize #> + setup_unfolded_quants #> + setup_trigger #> + setup_weight #> + setup_case_bool #> + setup_abs_min_max #> + setup_nat_as_int) + +end diff --git a/src/test-gen/src/main/smt_patch/old_smt_setup_solvers_patch.ML b/src/test-gen/src/main/smt_patch/old_smt_setup_solvers_patch.ML new file mode 100644 index 0000000..30617b3 --- /dev/null +++ b/src/test-gen/src/main/smt_patch/old_smt_setup_solvers_patch.ML @@ -0,0 +1,189 @@ +(* Title: HOL/Library/Old_SMT/old_smt_setup_solvers.ML + Author: Sascha Boehme, TU Muenchen + +Setup SMT solvers. 
+*) + +signature OLD_SMT_SETUP_SOLVERS = +sig + datatype z3_non_commercial = + Z3_Non_Commercial_Unknown | + Z3_Non_Commercial_Accepted | + Z3_Non_Commercial_Declined + val z3_non_commercial: unit -> z3_non_commercial + val z3_with_extensions: bool Config.T + val setup: theory -> theory +end + +structure Old_SMT_Setup_Solvers: OLD_SMT_SETUP_SOLVERS = +struct + +(* helper functions *) + +fun make_avail name () = getenv ("OLD_" ^ name ^ "_SOLVER") <> "" +fun make_command name () = [getenv ("OLD_" ^ name ^ "_SOLVER")] + +fun outcome_of unsat sat unknown solver_name line = + if String.isPrefix unsat line then Old_SMT_patch_Solver.Unsat + else if String.isPrefix sat line then Old_SMT_patch_Solver.Sat + else if String.isPrefix unknown line then Old_SMT_patch_Solver.Unknown + else raise Old_SMT_patch_Failure.SMT (Old_SMT_patch_Failure.Other_Failure ("Solver " ^ + quote solver_name ^ " failed. Enable SMT tracing by setting the " ^ + "configuration option " ^ quote (Config.name_of Old_SMT_Config.trace) ^ " and " ^ + "see the trace for details.")) + +fun on_first_line test_outcome solver_name lines = + let + val empty_line = (fn "" => true | _ => false) + val split_first = (fn [] => ("", []) | l :: ls => (l, ls)) + val (l, ls) = split_first (snd (take_prefix empty_line lines)) + in (test_outcome solver_name l, ls) end + + +(* CVC3 *) + +local + fun cvc3_options ctxt = [ + "-seed", string_of_int (Config.get ctxt Old_SMT_Config.random_seed), + "-lang", "smtlib", "-output-lang", "presentation", + "-timeout", string_of_int (Real.ceil (Config.get ctxt Old_SMT_Config.timeout))] +in + +val cvc3: Old_SMT_patch_Solver.solver_config = { + name = "cvc3", + class = K Old_SMTLIB_Interface.smtlibC, + avail = make_avail "CVC3", + command = make_command "CVC3", + options = cvc3_options, + default_max_relevant = 400 (* FUDGE *), + supports_filter = false, + outcome = + on_first_line (outcome_of "Unsatisfiable." "Satisfiable." "Unknown."), + cex_parser = NONE, + reconstruct = NONE } + +end + + +(* Yices *) + +val yices: Old_SMT_patch_Solver.solver_config = { + name = "yices", + class = K Old_SMTLIB_Interface.smtlibC, + avail = make_avail "YICES", + command = make_command "YICES", + options = (fn ctxt => [ + "--rand-seed=" ^ string_of_int (Config.get ctxt Old_SMT_Config.random_seed), + "--timeout=" ^ + string_of_int (Real.ceil (Config.get ctxt Old_SMT_Config.timeout)), + "--smtlib"]), + default_max_relevant = 350 (* FUDGE *), + supports_filter = false, + outcome = on_first_line (outcome_of "unsat" "sat" "unknown"), + cex_parser = NONE, + reconstruct = NONE } + + +(* Z3 *) + +datatype z3_non_commercial = + Z3_Non_Commercial_Unknown | + Z3_Non_Commercial_Accepted | + Z3_Non_Commercial_Declined + + +local + val accepted = member (op =) ["yes", "Yes", "YES"] + val declined = member (op =) ["no", "No", "NO"] +in + +fun z3_non_commercial () = + let + val flag2 = getenv "OLD_Z3_NON_COMMERCIAL" + in + if accepted flag2 then Z3_Non_Commercial_Accepted + else if declined flag2 then Z3_Non_Commercial_Declined + else Z3_Non_Commercial_Unknown + end + +fun if_z3_non_commercial f = + (case z3_non_commercial () of + Z3_Non_Commercial_Accepted => f () + | Z3_Non_Commercial_Declined => + error (Pretty.string_of (Pretty.para + "The SMT solver Z3 may only be used for non-commercial applications.")) + | Z3_Non_Commercial_Unknown => + error + (Pretty.string_of + (Pretty.para + ("The SMT solver Z3 is not activated. To activate it, set the Isabelle \ + \system option \"z3_non_commercial\" to \"yes\" (e.g. 
via \ + \the Isabelle/jEdit menu Plugin Options / Isabelle / General).")) ^ + "\n\nSee also " ^ Url.print (Url.explode "http://z3.codeplex.com/license"))) + +end + + +val z3_with_extensions = + Attrib.setup_config_bool @{binding old_z3_with_extensions} (K false) + +local + fun z3_make_command name () = if_z3_non_commercial (make_command name) + + fun z3_options ctxt = + ["-rs:" ^ string_of_int (Config.get ctxt Old_SMT_Config.random_seed), + "MODEL=true", + "SOFT_TIMEOUT=" ^ + string_of_int (Real.ceil (1000.0 * Config.get ctxt Old_SMT_Config.timeout)), + "-smt"] + |> not (Config.get ctxt Old_SMT_Config.oracle) ? + append ["DISPLAY_PROOF=true", "PROOF_MODE=2"] + + fun z3_on_first_or_last_line solver_name lines = + let + fun junk l = + if String.isPrefix "WARNING: Out of allocated virtual memory" l + then raise Old_SMT_patch_Failure.SMT Old_SMT_patch_Failure.Out_Of_Memory + else + String.isPrefix "WARNING" l orelse + String.isPrefix "ERROR" l orelse + forall Symbol.is_ascii_blank (Symbol.explode l) + val lines = filter_out junk lines + fun outcome split = + the_default ("", []) (try split lines) + |>> outcome_of "unsat" "sat" "unknown" solver_name + in + (* Starting with version 4.0, Z3 puts the outcome on the first line of the + output rather than on the last line. *) + outcome (fn lines => (hd lines, tl lines)) + handle Old_SMT_patch_Failure.SMT _ => outcome (swap o split_last) + end + + fun select_class ctxt = + if Config.get ctxt z3_with_extensions then Old_Z3_Interface.smtlib_z3C + else Old_SMTLIB_Interface.smtlibC +in + +val z3: Old_SMT_patch_Solver.solver_config = { + name = "z3", + class = select_class, + avail = make_avail "Z3", + command = z3_make_command "Z3", + options = z3_options, + default_max_relevant = 350 (* FUDGE *), + supports_filter = true, + outcome = z3_on_first_or_last_line, + cex_parser = SOME Old_Z3_Model.parse_counterex, + reconstruct = SOME Old_Z3_Proof_Reconstruction.reconstruct } + +end + + +(* overall setup *) + +val setup = + Old_SMT_patch_Solver.add_solver cvc3 #> + Old_SMT_patch_Solver.add_solver yices #> + Old_SMT_patch_Solver.add_solver z3 + +end diff --git a/src/test-gen/src/main/smt_patch/old_smt_solver_patch.ML b/src/test-gen/src/main/smt_patch/old_smt_solver_patch.ML new file mode 100644 index 0000000..944f67e --- /dev/null +++ b/src/test-gen/src/main/smt_patch/old_smt_solver_patch.ML @@ -0,0 +1,378 @@ +(* Title: HOL/Library/Old_SMT/old_smt_solver.ML + Author: Sascha Boehme, TU Muenchen + +SMT solvers registry and SMT tactic. 
+*) + +signature OLD_SMT_SOLVER = +sig + (*configuration*) + datatype outcome = Unsat | Sat | Unknown + type solver_config = { + name: string, + class: Proof.context -> Old_SMT_Utils.class, + avail: unit -> bool, + command: unit -> string list, + options: Proof.context -> string list, + default_max_relevant: int, + supports_filter: bool, + outcome: string -> string list -> outcome * string list, + cex_parser: (Proof.context -> Old_SMT_Translate.recon -> string list -> + term list * term list) option, + reconstruct: (Proof.context -> Old_SMT_Translate.recon -> string list -> + int list * thm) option } + + (*registry*) + val add_solver: solver_config -> theory -> theory + val solver_name_of: Proof.context -> string + val available_solvers_of: Proof.context -> string list + val apply_solver: Proof.context -> (int * (int option * thm)) list -> + int list * thm + val default_max_relevant: Proof.context -> string -> int + + (*filter*) + type 'a smt_filter_data = + ('a * thm) list * ((int * thm) list * Proof.context) + val smt_filter_preprocess: Proof.context -> thm list -> thm -> + ('a * (int option * thm)) list -> int -> 'a smt_filter_data + val smt_filter_apply: Time.time -> 'a smt_filter_data -> + {outcome: Old_SMT_patch_Failure.failure option, used_facts: ('a * thm) list} + + (*tactic*) + val smt_tac: Proof.context -> thm list -> int -> tactic + val smt_tac': Proof.context -> thm list -> int -> tactic +end + +structure Old_SMT_patch_Solver: OLD_SMT_SOLVER = +struct + + +(* interface to external solvers *) + +local + +val shell_quote = enclose "'" "'"; +val shell_path = shell_quote o File.standard_path; + +fun make_cmd command options problem_path proof_path = space_implode " " ( + "(exec 2>&1;" :: map shell_quote (command () @ options) @ + [shell_path problem_path, ")", ">", shell_path proof_path]) + +fun trace_and ctxt msg f x = + let val _ = Old_SMT_Config.trace_msg ctxt (fn () => msg) () + in f x end + +fun run ctxt name mk_cmd input = + (case Old_SMT_Config.certificates_of ctxt of + NONE => + if not (Old_SMT_Config.is_available ctxt name) then + error ("The SMT solver " ^ quote name ^ " is not installed.") + else if Config.get ctxt Old_SMT_Config.debug_files = "" then + trace_and ctxt ("Invoking SMT solver " ^ quote name ^ " ...") + (Cache_IO.run mk_cmd) input + else + let + val base_path = Path.explode (Config.get ctxt Old_SMT_Config.debug_files) + val in_path = Path.ext "smt_in" base_path + val out_path = Path.ext "smt_out" base_path + in Cache_IO.raw_run mk_cmd input in_path out_path end + | SOME certs => + (case Cache_IO.lookup certs input of + (NONE, key) => + if Config.get ctxt Old_SMT_Config.read_only_certificates then + error ("Bad certificate cache: missing certificate") + else + Cache_IO.run_and_cache certs key mk_cmd input + | (SOME output, _) => + trace_and ctxt ("Using cached certificate from " ^ + shell_path (Cache_IO.cache_path_of certs) ^ " ...") + I output)) + +fun run_solver ctxt name mk_cmd input = + let + fun pretty tag ls = Pretty.string_of (Pretty.big_list tag + (map Pretty.str ls)) + + val _ = Old_SMT_Config.trace_msg ctxt (pretty "Problem:" o split_lines) input + + val {redirected_output=res, output=err, return_code} = + Old_SMT_Config.with_timeout ctxt (run ctxt name mk_cmd) input + val _ = Old_SMT_Config.trace_msg ctxt (pretty "Solver:") err + + val ls = fst (take_suffix (equal "") res) + val _ = Old_SMT_Config.trace_msg ctxt (pretty "Result:") ls + + (* Patch compared to Isabelle2016 Old_SMT *) + + (* val _ = return_code <> 0 andalso + raise Old_SMT_Failure.SMT 
(Old_SMT_Failure.Abnormal_Termination return_code) *) + in ls end + +fun trace_assms ctxt = + Old_SMT_Config.trace_msg ctxt (Pretty.string_of o + Pretty.big_list "Assertions:" o map (Thm.pretty_thm ctxt o snd)) + +fun trace_recon_data ({context=ctxt, typs, terms, ...} : Old_SMT_Translate.recon) = + let + fun pretty_eq n p = Pretty.block [Pretty.str n, Pretty.str " = ", p] + fun p_typ (n, T) = pretty_eq n (Syntax.pretty_typ ctxt T) + fun p_term (n, t) = pretty_eq n (Syntax.pretty_term ctxt t) + in + Old_SMT_Config.trace_msg ctxt (fn () => + Pretty.string_of (Pretty.big_list "Names:" [ + Pretty.big_list "sorts:" (map p_typ (Symtab.dest typs)), + Pretty.big_list "functions:" (map p_term (Symtab.dest terms))])) () + end + +in + +fun invoke name command ithms ctxt = + let + val options = Old_SMT_Config.solver_options_of ctxt + val comments = ("solver: " ^ name) :: + ("timeout: " ^ string_of_real (Config.get ctxt Old_SMT_Config.timeout)) :: + ("random seed: " ^ + string_of_int (Config.get ctxt Old_SMT_Config.random_seed)) :: + "arguments:" :: options + + val (str, recon as {context=ctxt', ...}) = + ithms + |> tap (trace_assms ctxt) + |> Old_SMT_Translate.translate ctxt comments + ||> tap trace_recon_data + in (run_solver ctxt' name (make_cmd command options) str, recon) end + +end + + +(* configuration *) + +datatype outcome = Unsat | Sat | Unknown + +type solver_config = { + name: string, + class: Proof.context -> Old_SMT_Utils.class, + avail: unit -> bool, + command: unit -> string list, + options: Proof.context -> string list, + default_max_relevant: int, + supports_filter: bool, + outcome: string -> string list -> outcome * string list, + cex_parser: (Proof.context -> Old_SMT_Translate.recon -> string list -> + term list * term list) option, + reconstruct: (Proof.context -> Old_SMT_Translate.recon -> string list -> + int list * thm) option } + + +(* registry *) + +type solver_info = { + command: unit -> string list, + default_max_relevant: int, + supports_filter: bool, + reconstruct: Proof.context -> string list * Old_SMT_Translate.recon -> + int list * thm } + +structure Solvers = Generic_Data +( + type T = solver_info Symtab.table + val empty = Symtab.empty + val extend = I + fun merge data = Symtab.merge (K true) data +) + +local + fun finish outcome cex_parser reconstruct ocl outer_ctxt + (output, (recon as {context=ctxt, ...} : Old_SMT_Translate.recon)) = + (case outcome output of + (Unsat, ls) => + if not (Config.get ctxt Old_SMT_Config.oracle) andalso is_some reconstruct + then the reconstruct outer_ctxt recon ls + else ([], ocl ()) + | (result, ls) => + let + val (ts, us) = + (case cex_parser of SOME f => f ctxt recon ls | _ => ([], [])) + in + raise Old_SMT_patch_Failure.SMT (Old_SMT_patch_Failure.Counterexample { + is_real_cex = (result = Sat), + free_constraints = ts, + const_defs = us}) + end) + + val cfalse = Thm.cterm_of @{context} (@{const Trueprop} $ @{const False}) +in + +fun add_solver cfg = + let + val {name, class, avail, command, options, default_max_relevant, + supports_filter, outcome, cex_parser, reconstruct} = cfg + + fun core_oracle () = cfalse + + fun solver ocl = { + command = command, + default_max_relevant = default_max_relevant, + supports_filter = supports_filter, + reconstruct = finish (outcome name) cex_parser reconstruct ocl } + + val info = {name=name, class=class, avail=avail, options=options} + in + Thm.add_oracle (Binding.name name, core_oracle) #-> (fn (_, ocl) => + Context.theory_map (Solvers.map (Symtab.update_new (name, solver ocl)))) #> + 
Context.theory_map (Old_SMT_Config.add_solver info) + end + +end + +fun get_info ctxt name = + the (Symtab.lookup (Solvers.get (Context.Proof ctxt)) name) + +val solver_name_of = Old_SMT_Config.solver_of + +val available_solvers_of = Old_SMT_Config.available_solvers_of + +fun name_and_info_of ctxt = + let val name = solver_name_of ctxt + in (name, get_info ctxt name) end + +fun gen_preprocess ctxt iwthms = Old_SMT_Normalize.normalize iwthms ctxt + +fun gen_apply (ithms, ctxt) = + let val (name, {command, reconstruct, ...}) = name_and_info_of ctxt + in + (ithms, ctxt) + |-> invoke name command + |> reconstruct ctxt + |>> distinct (op =) + end + +fun apply_solver ctxt = gen_apply o gen_preprocess ctxt + +val default_max_relevant = #default_max_relevant oo get_info + +val supports_filter = #supports_filter o snd o name_and_info_of + + +(* check well-sortedness *) + +val has_topsort = Term.exists_type (Term.exists_subtype (fn + TFree (_, []) => true + | TVar (_, []) => true + | _ => false)) + +(* without this test, we would run into problems when atomizing the rules: *) +fun check_topsort ctxt thm = + if has_topsort (Thm.prop_of thm) then + (Old_SMT_Normalize.drop_fact_warning ctxt thm; TrueI) + else + thm + +fun check_topsorts ctxt iwthms = map (apsnd (apsnd (check_topsort ctxt))) iwthms + + +(* filter *) + +val cnot = Thm.cterm_of @{context} @{const Not} + +fun mk_result outcome xrules = { outcome = outcome, used_facts = xrules } + +type 'a smt_filter_data = ('a * thm) list * ((int * thm) list * Proof.context) + +fun smt_filter_preprocess ctxt facts goal xwthms i = + let + val ctxt = + ctxt + |> Config.put Old_SMT_Config.oracle false + |> Config.put Old_SMT_Config.filter_only_facts true + + val ({context=ctxt', prems, concl, ...}, _) = Subgoal.focus ctxt i NONE goal + fun negate ct = Thm.dest_comb ct ||> Thm.apply cnot |-> Thm.apply + val cprop = + (case try negate (Thm.rhs_of (Old_SMT_Normalize.atomize_conv ctxt' concl)) of + SOME ct => ct + | NONE => raise Old_SMT_patch_Failure.SMT (Old_SMT_patch_Failure.Other_Failure ( + "goal is not a HOL term"))) + in + map snd xwthms + |> map_index I + |> append (map (pair ~1 o pair NONE) (Thm.assume cprop :: prems @ facts)) + |> check_topsorts ctxt' + |> gen_preprocess ctxt' + |> pair (map (apsnd snd) xwthms) + end + +fun smt_filter_apply time_limit (xthms, (ithms, ctxt)) = + let + val ctxt' = + ctxt + |> Config.put Old_SMT_Config.timeout (Time.toReal time_limit) + + fun filter_thms false = K xthms + | filter_thms true = map_filter (try (nth xthms)) o fst + in + (ithms, ctxt') + |> gen_apply + |> filter_thms (supports_filter ctxt') + |> mk_result NONE + end + handle Old_SMT_patch_Failure.SMT fail => mk_result (SOME fail) [] + + +(* SMT tactic *) + +local + fun trace_assumptions ctxt iwthms idxs = + let + val wthms = + idxs + |> filter (fn i => i >= 0) + |> map_filter (AList.lookup (op =) iwthms) + in + if Config.get ctxt Old_SMT_Config.trace_used_facts andalso length wthms > 0 + then + tracing (Pretty.string_of (Pretty.big_list "SMT used facts:" + (map (Thm.pretty_thm ctxt o snd) wthms))) + else () + end + + fun solve ctxt iwthms = + iwthms + |> check_topsorts ctxt + |> apply_solver ctxt + |>> trace_assumptions ctxt iwthms + |> snd + + fun str_of ctxt fail = + Old_SMT_patch_Failure.string_of_failure ctxt fail + |> prefix ("Solver " ^ Old_SMT_Config.solver_of ctxt ^ ": ") + + fun safe_solve ctxt iwthms = SOME (solve ctxt iwthms) + handle + Old_SMT_patch_Failure.SMT (fail as Old_SMT_patch_Failure.Counterexample _) => + (Old_SMT_Config.verbose_msg ctxt 
(str_of ctxt) fail; NONE) + | Old_SMT_patch_Failure.SMT (fail as Old_SMT_patch_Failure.Time_Out) => + error ("SMT: Solver " ^ quote (Old_SMT_Config.solver_of ctxt) ^ ": " ^ + Old_SMT_patch_Failure.string_of_failure ctxt fail ^ " (setting the " ^ + "configuration option " ^ quote (Config.name_of Old_SMT_Config.timeout) ^ " might help)") + | Old_SMT_patch_Failure.SMT fail => error (str_of ctxt fail) + + fun tag_rules thms = map_index (apsnd (pair NONE)) thms + fun tag_prems thms = map (pair ~1 o pair NONE) thms + + fun resolve ctxt (SOME thm) = resolve_tac ctxt [thm] 1 + | resolve _ NONE = no_tac + + fun tac prove ctxt rules = + CONVERSION (Old_SMT_Normalize.atomize_conv ctxt) + THEN' resolve_tac ctxt @{thms ccontr} + THEN' SUBPROOF (fn {context = ctxt', prems, ...} => + resolve ctxt' (prove ctxt' (tag_rules rules @ tag_prems prems))) ctxt +in + +val smt_tac = tac safe_solve +val smt_tac' = tac (SOME oo solve) + +end + +end diff --git a/src/test-gen/src/main/smt_patch/old_smt_translate_patch.ML b/src/test-gen/src/main/smt_patch/old_smt_translate_patch.ML new file mode 100644 index 0000000..1eb4b4c --- /dev/null +++ b/src/test-gen/src/main/smt_patch/old_smt_translate_patch.ML @@ -0,0 +1,589 @@ +(* Title: HOL/Library/Old_SMT/old_smt_translate.ML + Author: Sascha Boehme, TU Muenchen + +Translate theorems into an SMT intermediate format and serialize them. +*) + +signature OLD_SMT_TRANSLATE = +sig + (*intermediate term structure*) + datatype squant = SForall | SExists + datatype 'a spattern = SPat of 'a list | SNoPat of 'a list + datatype sterm = + SVar of int | + SApp of string * sterm list | + SLet of string * sterm * sterm | + SQua of squant * string list * sterm spattern list * int option * sterm + + (*translation configuration*) + type prefixes = {sort_prefix: string, func_prefix: string} + type sign = { + header: string list, + sorts: string list, + dtyps: (string * (string * (string * string) list) list) list list, + funcs: (string * (string list * string)) list } + type config = { + prefixes: prefixes, + header: term list -> string list, + is_fol: bool, + has_datatypes: bool, + serialize: string list -> sign -> sterm list -> string } + type recon = { + context: Proof.context, + typs: typ Symtab.table, + terms: term Symtab.table, + rewrite_rules: thm list, + assms: (int * thm) list } + + (*translation*) + val add_config: Old_SMT_Utils.class * (Proof.context -> config) -> + Context.generic -> Context.generic + val translate: Proof.context -> string list -> (int * thm) list -> + string * recon +end + +structure Old_SMT_Translate: OLD_SMT_TRANSLATE = +struct + + +(* intermediate term structure *) + +datatype squant = SForall | SExists + +datatype 'a spattern = SPat of 'a list | SNoPat of 'a list + +datatype sterm = + SVar of int | + SApp of string * sterm list | + SLet of string * sterm * sterm | + SQua of squant * string list * sterm spattern list * int option * sterm + + + +(* translation configuration *) + +type prefixes = {sort_prefix: string, func_prefix: string} + +type sign = { + header: string list, + sorts: string list, + dtyps: (string * (string * (string * string) list) list) list list, + funcs: (string * (string list * string)) list } + +type config = { + prefixes: prefixes, + header: term list -> string list, + is_fol: bool, + has_datatypes: bool, + serialize: string list -> sign -> sterm list -> string } + +type recon = { + context: Proof.context, + typs: typ Symtab.table, + terms: term Symtab.table, + rewrite_rules: thm list, + assms: (int * thm) list } + + + +(* translation 
context *) + +fun make_tr_context {sort_prefix, func_prefix} = + (sort_prefix, 1, Typtab.empty, func_prefix, 1, Termtab.empty) + +fun string_of_index pre i = pre ^ string_of_int i + +fun add_typ T proper (cx as (sp, Tidx, typs, fp, idx, terms)) = + (case Typtab.lookup typs T of + SOME (n, _) => (n, cx) + | NONE => + let + val n = string_of_index sp Tidx + val typs' = Typtab.update (T, (n, proper)) typs + in (n, (sp, Tidx+1, typs', fp, idx, terms)) end) + +fun add_fun t sort (cx as (sp, Tidx, typs, fp, idx, terms)) = + (case Termtab.lookup terms t of + SOME (n, _) => (n, cx) + | NONE => + let + val n = string_of_index fp idx + val terms' = Termtab.update (t, (n, sort)) terms + in (n, (sp, Tidx, typs, fp, idx+1, terms')) end) + +fun sign_of header dtyps (_, _, typs, _, _, terms) = { + header = header, + sorts = Typtab.fold (fn (_, (n, true)) => cons n | _ => I) typs [], + dtyps = dtyps, + funcs = Termtab.fold (fn (_, (n, SOME ss)) => cons (n,ss) | _ => I) terms []} + +fun recon_of ctxt rules thms ithms (_, _, typs, _, _, terms) = + let + fun add_typ (T, (n, _)) = Symtab.update (n, T) + val typs' = Typtab.fold add_typ typs Symtab.empty + + fun add_fun (t, (n, _)) = Symtab.update (n, t) + val terms' = Termtab.fold add_fun terms Symtab.empty + + val assms = map (pair ~1) thms @ ithms + in + {context=ctxt, typs=typs', terms=terms', rewrite_rules=rules, assms=assms} + end + + + +(* preprocessing *) + +(** datatype declarations **) + +fun collect_datatypes_and_records (tr_context, ctxt) ts = + let + val (declss, ctxt') = + fold (Term.fold_types Old_SMT_Datatypes.add_decls) ts ([], ctxt) + + fun is_decl_typ T = exists (exists (equal T o fst)) declss + + fun add_typ' T proper = + (case Old_SMT_Builtin.dest_builtin_typ ctxt' T of + SOME n => pair n + | NONE => add_typ T proper) + + fun tr_select sel = + let val T = Term.range_type (Term.fastype_of sel) + in add_fun sel NONE ##>> add_typ' T (not (is_decl_typ T)) end + fun tr_constr (constr, selects) = + add_fun constr NONE ##>> fold_map tr_select selects + fun tr_typ (T, cases) = add_typ' T false ##>> fold_map tr_constr cases + val (declss', tr_context') = fold_map (fold_map tr_typ) declss tr_context + + fun add (constr, selects) = + Termtab.update (constr, length selects) #> + fold (Termtab.update o rpair 1) selects + val funcs = fold (fold (fold add o snd)) declss Termtab.empty + in ((funcs, declss', tr_context', ctxt'), ts) end + (* FIXME: also return necessary datatype and record theorems *) + + +(** eta-expand quantifiers, let expressions and built-ins *) + +local + fun eta f T t = Abs (Name.uu, T, f (Term.incr_boundvars 1 t $ Bound 0)) + + fun exp f T = eta f (Term.domain_type (Term.domain_type T)) + + fun exp2 T q = + let val U = Term.domain_type T + in Abs (Name.uu, U, q $ eta I (Term.domain_type U) (Bound 0)) end + + fun exp2' T l = + let val (U1, U2) = Term.dest_funT T ||> Term.domain_type + in Abs (Name.uu, U1, eta I U2 (l $ Bound 0)) end + + fun expf k i T t = + let val Ts = drop i (fst (Old_SMT_Utils.dest_funT k T)) + in + Term.incr_boundvars (length Ts) t + |> fold_rev (fn i => fn u => u $ Bound i) (0 upto length Ts - 1) + |> fold_rev (fn T => fn u => Abs (Name.uu, T, u)) Ts + end +in + +fun eta_expand ctxt is_fol funcs = + let + fun exp_func t T ts = + (case Termtab.lookup funcs t of + SOME k => + Term.list_comb (t, ts) + |> k <> length ts ? 
expf k (length ts) T + | NONE => Term.list_comb (t, ts)) + + fun expand ((q as Const (@{const_name All}, _)) $ Abs a) = q $ abs_expand a + | expand ((q as Const (@{const_name All}, T)) $ t) = q $ exp expand T t + | expand (q as Const (@{const_name All}, T)) = exp2 T q + | expand ((q as Const (@{const_name Ex}, _)) $ Abs a) = q $ abs_expand a + | expand ((q as Const (@{const_name Ex}, T)) $ t) = q $ exp expand T t + | expand (q as Const (@{const_name Ex}, T)) = exp2 T q + | expand ((l as Const (@{const_name Let}, _)) $ t $ Abs a) = + if is_fol then expand (Term.betapply (Abs a, t)) + else l $ expand t $ abs_expand a + | expand ((l as Const (@{const_name Let}, T)) $ t $ u) = + if is_fol then expand (u $ t) + else l $ expand t $ exp expand (Term.range_type T) u + | expand ((l as Const (@{const_name Let}, T)) $ t) = + if is_fol then + let val U = Term.domain_type (Term.range_type T) + in Abs (Name.uu, U, Bound 0 $ Term.incr_boundvars 1 t) end + else exp2 T (l $ expand t) + | expand (l as Const (@{const_name Let}, T)) = + if is_fol then + let val U = Term.domain_type (Term.range_type T) + in + Abs (Name.uu, Term.domain_type T, Abs (Name.uu, U, + Bound 0 $ Bound 1)) + end + else exp2' T l + | expand t = + (case Term.strip_comb t of + (u as Const (c as (_, T)), ts) => + (case Old_SMT_Builtin.dest_builtin ctxt c ts of + SOME (_, k, us, mk) => + if k = length us then mk (map expand us) + else if k < length us then + chop k (map expand us) |>> mk |> Term.list_comb + else expf k (length ts) T (mk (map expand us)) + | NONE => exp_func u T (map expand ts)) + | (u as Free (_, T), ts) => exp_func u T (map expand ts) + | (Abs a, ts) => Term.list_comb (abs_expand a, map expand ts) + | (u, ts) => Term.list_comb (u, map expand ts)) + + and abs_expand (n, T, t) = Abs (n, T, expand t) + + in map expand end + +end + + +(** introduce explicit applications **) + +local + (* + Make application explicit for functions with varying number of arguments. 
+ *) + + fun add t i = apfst (Termtab.map_default (t, i) (Integer.min i)) + fun add_type T = apsnd (Typtab.update (T, ())) + + fun min_arities t = + (case Term.strip_comb t of + (u as Const _, ts) => add u (length ts) #> fold min_arities ts + | (u as Free _, ts) => add u (length ts) #> fold min_arities ts + | (Abs (_, T, u), ts) => add_type T #> min_arities u #> fold min_arities ts + | (_, ts) => fold min_arities ts) + + fun minimize types t i = + let + fun find_min j [] _ = j + | find_min j (U :: Us) T = + if Typtab.defined types T then j + else find_min (j + 1) Us (U --> T) + + val (Ts, T) = Term.strip_type (Term.type_of t) + in find_min 0 (take i (rev Ts)) T end + + fun app u (t, T) = + (Const (@{const_name Old_SMT_patch.fun_app}, T --> T) $ t $ u, Term.range_type T) + + fun apply i t T ts = + let + val (ts1, ts2) = chop i ts + val (_, U) = Old_SMT_Utils.dest_funT i T + in fst (fold app ts2 (Term.list_comb (t, ts1), U)) end +in + +fun intro_explicit_application ctxt funcs ts = + let + val (arities, types) = fold min_arities ts (Termtab.empty, Typtab.empty) + val arities' = Termtab.map (minimize types) arities + + fun app_func t T ts = + if is_some (Termtab.lookup funcs t) then Term.list_comb (t, ts) + else apply (the (Termtab.lookup arities' t)) t T ts + + fun in_list T f t = HOLogic.mk_list T (map f (HOLogic.dest_list t)) + + fun traverse Ts t = + (case Term.strip_comb t of + (q as Const (@{const_name All}, _), [Abs (x, T, u)]) => + q $ Abs (x, T, in_trigger (T :: Ts) u) + | (q as Const (@{const_name Ex}, _), [Abs (x, T, u)]) => + q $ Abs (x, T, in_trigger (T :: Ts) u) + | (q as Const (@{const_name Let}, _), [u1, u2 as Abs _]) => + q $ traverse Ts u1 $ traverse Ts u2 + | (u as Const (c as (_, T)), ts) => + (case Old_SMT_Builtin.dest_builtin ctxt c ts of + SOME (_, k, us, mk) => + let + val (ts1, ts2) = chop k (map (traverse Ts) us) + val U = Term.strip_type T |>> snd o chop k |> (op --->) + in apply 0 (mk ts1) U ts2 end + | NONE => app_func u T (map (traverse Ts) ts)) + | (u as Free (_, T), ts) => app_func u T (map (traverse Ts) ts) + | (u as Bound i, ts) => apply 0 u (nth Ts i) (map (traverse Ts) ts) + | (Abs (n, T, u), ts) => traverses Ts (Abs (n, T, traverse (T::Ts) u)) ts + | (u, ts) => traverses Ts u ts) + and in_trigger Ts ((c as @{const Old_SMT_patch.trigger}) $ p $ t) = + c $ in_pats Ts p $ in_weight Ts t + | in_trigger Ts t = in_weight Ts t + and in_pats Ts ps = + in_list @{typ "Old_SMT_patch.pattern list"} + (in_list @{typ Old_SMT_patch.pattern} (in_pat Ts)) ps + and in_pat Ts ((p as Const (@{const_name Old_SMT_patch.pat}, _)) $ t) = + p $ traverse Ts t + | in_pat Ts ((p as Const (@{const_name Old_SMT_patch.nopat}, _)) $ t) = + p $ traverse Ts t + | in_pat _ t = raise TERM ("bad pattern", [t]) + and in_weight Ts ((c as @{const Old_SMT_patch.weight}) $ w $ t) = + c $ w $ traverse Ts t + | in_weight Ts t = traverse Ts t + and traverses Ts t ts = Term.list_comb (t, map (traverse Ts) ts) + in map (traverse []) ts end + +val fun_app_eq = mk_meta_eq @{thm Old_SMT_patch.fun_app_def} + +end + + +(** map HOL formulas to FOL formulas (i.e., separate formulas froms terms) **) + +local + val term_bool = @{lemma "Old_SMT_patch.term_true ~= Old_SMT_patch.term_false" + by (simp add: Old_SMT_patch.term_true_def Old_SMT_patch.term_false_def)} + + val is_quant = member (op =) [@{const_name All}, @{const_name Ex}] + + val fol_rules = [ + Let_def, + mk_meta_eq @{thm Old_SMT_patch.term_true_def}, + mk_meta_eq @{thm Old_SMT_patch.term_false_def}, + @{lemma "P = True == P" by (rule eq_reflection) 
simp}, + @{lemma "if P then True else False == P" by (rule eq_reflection) simp}] + + fun as_term t = @{const HOL.eq (bool)} $ t $ @{const Old_SMT_patch.term_true} + + exception BAD_PATTERN of unit + + fun wrap_in_if pat t = + if pat then + raise BAD_PATTERN () + else + @{const If (bool)} $ t $ @{const Old_SMT_patch.term_true} $ @{const Old_SMT_patch.term_false} + + fun is_builtin_conn_or_pred ctxt c ts = + is_some (Old_SMT_Builtin.dest_builtin_conn ctxt c ts) orelse + is_some (Old_SMT_Builtin.dest_builtin_pred ctxt c ts) + + fun builtin b ctxt c ts = + (case (Const c, ts) of + (@{const HOL.eq (bool)}, [t, u]) => + if t = @{const Old_SMT_patch.term_true} orelse u = @{const Old_SMT_patch.term_true} then + Old_SMT_Builtin.dest_builtin_eq ctxt t u + else b ctxt c ts + | _ => b ctxt c ts) +in + +fun folify ctxt = + let + fun in_list T f t = HOLogic.mk_list T (map_filter f (HOLogic.dest_list t)) + + fun in_term pat t = + (case Term.strip_comb t of + (@{const True}, []) => @{const Old_SMT_patch.term_true} + | (@{const False}, []) => @{const Old_SMT_patch.term_false} + | (u as Const (@{const_name If}, _), [t1, t2, t3]) => + if pat then raise BAD_PATTERN () + else u $ in_form t1 $ in_term pat t2 $ in_term pat t3 + | (Const (c as (n, _)), ts) => + if is_builtin_conn_or_pred ctxt c ts then wrap_in_if pat (in_form t) + else if is_quant n then wrap_in_if pat (in_form t) + else Term.list_comb (Const c, map (in_term pat) ts) + | (Free c, ts) => Term.list_comb (Free c, map (in_term pat) ts) + | _ => t) + + and in_weight ((c as @{const Old_SMT_patch.weight}) $ w $ t) = c $ w $ in_form t + | in_weight t = in_form t + + and in_pat ((p as Const (@{const_name Old_SMT_patch.pat}, _)) $ t) = + p $ in_term true t + | in_pat ((p as Const (@{const_name Old_SMT_patch.nopat}, _)) $ t) = + p $ in_term true t + | in_pat t = raise TERM ("bad pattern", [t]) + + and in_pats ps = + in_list @{typ "Old_SMT_patch.pattern list"} + (SOME o in_list @{typ Old_SMT_patch.pattern} (try in_pat)) ps + + and in_trigger ((c as @{const Old_SMT_patch.trigger}) $ p $ t) = + c $ in_pats p $ in_weight t + | in_trigger t = in_weight t + + and in_form t = + (case Term.strip_comb t of + (q as Const (qn, _), [Abs (n, T, u)]) => + if is_quant qn then q $ Abs (n, T, in_trigger u) + else as_term (in_term false t) + | (Const c, ts) => + (case Old_SMT_Builtin.dest_builtin_conn ctxt c ts of + SOME (_, _, us, mk) => mk (map in_form us) + | NONE => + (case Old_SMT_Builtin.dest_builtin_pred ctxt c ts of + SOME (_, _, us, mk) => mk (map (in_term false) us) + | NONE => as_term (in_term false t))) + | _ => as_term (in_term false t)) + in + map in_form #> + cons (Old_SMT_Utils.prop_of term_bool) #> + pair (fol_rules, [term_bool], builtin) + end + +end + + +(* translation into intermediate format *) + +(** utility functions **) + +val quantifier = (fn + @{const_name All} => SOME SForall + | @{const_name Ex} => SOME SExists + | _ => NONE) + +fun group_quant qname Ts (t as Const (q, _) $ Abs (_, T, u)) = + if q = qname then group_quant qname (T :: Ts) u else (Ts, t) + | group_quant _ Ts t = (Ts, t) + +fun dest_weight (@{const Old_SMT_patch.weight} $ w $ t) = + (SOME (snd (HOLogic.dest_number w)), t) + | dest_weight t = (NONE, t) + +fun dest_pat (Const (@{const_name Old_SMT_patch.pat}, _) $ t) = (t, true) + | dest_pat (Const (@{const_name Old_SMT_patch.nopat}, _) $ t) = (t, false) + | dest_pat t = raise TERM ("bad pattern", [t]) + +fun dest_pats [] = I + | dest_pats ts = + (case map dest_pat ts |> split_list ||> distinct (op =) of + (ps, [true]) => cons (SPat 
ps) + | (ps, [false]) => cons (SNoPat ps) + | _ => raise TERM ("bad multi-pattern", ts)) + +fun dest_trigger (@{const Old_SMT_patch.trigger} $ tl $ t) = + (rev (fold (dest_pats o HOLogic.dest_list) (HOLogic.dest_list tl) []), t) + | dest_trigger t = ([], t) + +fun dest_quant qn T t = quantifier qn |> Option.map (fn q => + let + val (Ts, u) = group_quant qn [T] t + val (ps, p) = dest_trigger u + val (w, b) = dest_weight p + in (q, rev Ts, ps, w, b) end) + +fun fold_map_pat f (SPat ts) = fold_map f ts #>> SPat + | fold_map_pat f (SNoPat ts) = fold_map f ts #>> SNoPat + + +(** translation from Isabelle terms into SMT intermediate terms **) + +fun intermediate header dtyps builtin ctxt ts trx = + let + fun transT (T as TFree _) = add_typ T true + | transT (T as TVar _) = (fn _ => raise TYPE ("bad SMT type", [T], [])) + | transT (T as Type _) = + (case Old_SMT_Builtin.dest_builtin_typ ctxt T of + SOME n => pair n + | NONE => add_typ T true) + + fun app n ts = SApp (n, ts) + + fun trans t = + (case Term.strip_comb t of + (Const (qn, _), [Abs (_, T, t1)]) => + (case dest_quant qn T t1 of + SOME (q, Ts, ps, w, b) => + fold_map transT Ts ##>> fold_map (fold_map_pat trans) ps ##>> + trans b #>> (fn ((Ts', ps'), b') => SQua (q, Ts', ps', w, b')) + | NONE => raise TERM ("unsupported quantifier", [t])) + | (Const (@{const_name Let}, _), [t1, Abs (_, T, t2)]) => + transT T ##>> trans t1 ##>> trans t2 #>> + (fn ((U, u1), u2) => SLet (U, u1, u2)) + | (u as Const (c as (_, T)), ts) => + (case builtin ctxt c ts of + SOME (n, _, us, _) => fold_map trans us #>> app n + | NONE => transs u T ts) + | (u as Free (_, T), ts) => transs u T ts + | (Bound i, []) => pair (SVar i) + | _ => raise TERM ("bad SMT term", [t])) + + and transs t T ts = + let val (Us, U) = Old_SMT_Utils.dest_funT (length ts) T + in + fold_map transT Us ##>> transT U #-> (fn Up => + add_fun t (SOME Up) ##>> fold_map trans ts #>> SApp) + end + + val (us, trx') = fold_map trans ts trx + in ((sign_of (header ts) dtyps trx', us), trx') end + + + +(* translation *) + +structure Configs = Generic_Data +( + type T = (Proof.context -> config) Old_SMT_Utils.dict + val empty = [] + val extend = I + fun merge data = Old_SMT_Utils.dict_merge fst data +) + +fun add_config (cs, cfg) = Configs.map (Old_SMT_Utils.dict_update (cs, cfg)) + +fun get_config ctxt = + let val cs = Old_SMT_Config.solver_class_of ctxt + in + (case Old_SMT_Utils.dict_get (Configs.get (Context.Proof ctxt)) cs of + SOME cfg => cfg ctxt + | NONE => error ("SMT: no translation configuration found " ^ + "for solver class " ^ quote (Old_SMT_Utils.string_of_class cs))) + end + +fun translate ctxt comments ithms = + let + val {prefixes, is_fol, header, has_datatypes, serialize} = get_config ctxt + + val with_datatypes = + has_datatypes andalso Config.get ctxt Old_SMT_Config.datatypes + + fun no_dtyps (tr_context, ctxt) ts = + ((Termtab.empty, [], tr_context, ctxt), ts) + + val ts1 = map (Envir.beta_eta_contract o Old_SMT_Utils.prop_of o snd) ithms + + val ((funcs, dtyps, tr_context, ctxt1), ts2) = + ((make_tr_context prefixes, ctxt), ts1) + |-> (if with_datatypes then collect_datatypes_and_records else no_dtyps) + + fun is_binder (Const (@{const_name Let}, _) $ _) = true + | is_binder t = Lambda_Lifting.is_quantifier t + + fun mk_trigger ((q as Const (@{const_name All}, _)) $ Abs (n, T, t)) = + q $ Abs (n, T, mk_trigger t) + | mk_trigger (eq as (Const (@{const_name HOL.eq}, T) $ lhs $ _)) = + Term.domain_type T --> @{typ Old_SMT_patch.pattern} + |> (fn T => Const (@{const_name 
Old_SMT_patch.pat}, T) $ lhs) + |> HOLogic.mk_list @{typ Old_SMT_patch.pattern} o single + |> HOLogic.mk_list @{typ "Old_SMT_patch.pattern list"} o single + |> (fn t => @{const Old_SMT_patch.trigger} $ t $ eq) + | mk_trigger t = t + + val (ctxt2, ts3) = + ts2 + |> eta_expand ctxt1 is_fol funcs + |> rpair ctxt1 + |-> Lambda_Lifting.lift_lambdas NONE is_binder + |-> (fn (ts', defs) => fn ctxt' => + map mk_trigger defs @ ts' + |> intro_explicit_application ctxt' funcs + |> pair ctxt') + + val ((rewrite_rules, extra_thms, builtin), ts4) = + (if is_fol then folify ctxt2 else pair ([], [], I)) ts3 + + val rewrite_rules' = fun_app_eq :: rewrite_rules + in + (ts4, tr_context) + |-> intermediate header dtyps (builtin Old_SMT_Builtin.dest_builtin) ctxt2 + |>> uncurry (serialize comments) + ||> recon_of ctxt2 rewrite_rules' extra_thms ithms + end + +end diff --git a/src/test-gen/src/main/smt_patch/old_z3_interface_patch.ML b/src/test-gen/src/main/smt_patch/old_z3_interface_patch.ML new file mode 100644 index 0000000..31b0837 --- /dev/null +++ b/src/test-gen/src/main/smt_patch/old_z3_interface_patch.ML @@ -0,0 +1,239 @@ +(* Title: HOL/Library/Old_SMT/old_z3_interface.ML + Author: Sascha Boehme, TU Muenchen + +Interface to Z3 based on a relaxed version of SMT-LIB. +*) + +signature OLD_Z3_INTERFACE = +sig + val smtlib_z3C: Old_SMT_Utils.class + val setup: theory -> theory + + datatype sym = Sym of string * sym list + type mk_builtins = { + mk_builtin_typ: sym -> typ option, + mk_builtin_num: theory -> int -> typ -> cterm option, + mk_builtin_fun: theory -> sym -> cterm list -> cterm option } + val add_mk_builtins: mk_builtins -> Context.generic -> Context.generic + val mk_builtin_typ: Proof.context -> sym -> typ option + val mk_builtin_num: Proof.context -> int -> typ -> cterm option + val mk_builtin_fun: Proof.context -> sym -> cterm list -> cterm option + + val is_builtin_theory_term: Proof.context -> term -> bool +end + +structure Old_Z3_Interface: OLD_Z3_INTERFACE = +struct + +val smtlib_z3C = Old_SMTLIB_Interface.smtlibC @ ["z3"] + + + +(* interface *) + +local + fun translate_config ctxt = + let + val {prefixes, header, is_fol, serialize, ...} = + Old_SMTLIB_Interface.translate_config ctxt + in + {prefixes=prefixes, header=header, is_fol=is_fol, serialize=serialize, + has_datatypes=true} + end + + fun is_div_mod @{const divide (int)} = true + | is_div_mod @{const modulo (int)} = true + | is_div_mod _ = false + + val div_by_z3div = @{lemma + "ALL k l. k div l = ( + if k = 0 | l = 0 then 0 + else if (0 < k & 0 < l) | (k < 0 & 0 < l) then z3div k l + else z3div (-k) (-l))" + by (simp add: Old_SMT_patch.z3div_def)} + + val mod_by_z3mod = @{lemma + "ALL k l. 
k mod l = ( + if l = 0 then k + else if k = 0 then 0 + else if (0 < k & 0 < l) | (k < 0 & 0 < l) then z3mod k l + else - z3mod (-k) (-l))" + by (simp add: z3mod_def)} + + val have_int_div_mod = + exists (Term.exists_subterm is_div_mod o Thm.prop_of) + + fun add_div_mod _ (thms, extra_thms) = + if have_int_div_mod thms orelse have_int_div_mod extra_thms then + (thms, div_by_z3div :: mod_by_z3mod :: extra_thms) + else (thms, extra_thms) + + val setup_builtins = + Old_SMT_Builtin.add_builtin_fun' smtlib_z3C (@{const times (int)}, "*") #> + Old_SMT_Builtin.add_builtin_fun' smtlib_z3C (@{const z3div}, "div") #> + Old_SMT_Builtin.add_builtin_fun' smtlib_z3C (@{const z3mod}, "mod") +in + +val setup = Context.theory_map ( + setup_builtins #> + Old_SMT_Normalize.add_extra_norm (smtlib_z3C, add_div_mod) #> + Old_SMT_Translate.add_config (smtlib_z3C, translate_config)) + +end + + + +(* constructors *) + +datatype sym = Sym of string * sym list + + +(** additional constructors **) + +type mk_builtins = { + mk_builtin_typ: sym -> typ option, + mk_builtin_num: theory -> int -> typ -> cterm option, + mk_builtin_fun: theory -> sym -> cterm list -> cterm option } + +fun chained _ [] = NONE + | chained f (b :: bs) = (case f b of SOME y => SOME y | NONE => chained f bs) + +fun chained_mk_builtin_typ bs sym = + chained (fn {mk_builtin_typ=mk, ...} : mk_builtins => mk sym) bs + +fun chained_mk_builtin_num ctxt bs i T = + let val thy = Proof_Context.theory_of ctxt + in chained (fn {mk_builtin_num=mk, ...} : mk_builtins => mk thy i T) bs end + +fun chained_mk_builtin_fun ctxt bs s cts = + let val thy = Proof_Context.theory_of ctxt + in chained (fn {mk_builtin_fun=mk, ...} : mk_builtins => mk thy s cts) bs end + +fun fst_int_ord ((i1, _), (i2, _)) = int_ord (i1, i2) + +structure Mk_Builtins = Generic_Data +( + type T = (int * mk_builtins) list + val empty = [] + val extend = I + fun merge data = Ord_List.merge fst_int_ord data +) + +fun add_mk_builtins mk = + Mk_Builtins.map (Ord_List.insert fst_int_ord (serial (), mk)) + +fun get_mk_builtins ctxt = map snd (Mk_Builtins.get (Context.Proof ctxt)) + + +(** basic and additional constructors **) + +fun mk_builtin_typ _ (Sym ("Bool", _)) = SOME @{typ bool} + | mk_builtin_typ _ (Sym ("Int", _)) = SOME @{typ int} + | mk_builtin_typ _ (Sym ("bool", _)) = SOME @{typ bool} (*FIXME: legacy*) + | mk_builtin_typ _ (Sym ("int", _)) = SOME @{typ int} (*FIXME: legacy*) + | mk_builtin_typ ctxt sym = chained_mk_builtin_typ (get_mk_builtins ctxt) sym + +fun mk_builtin_num _ i @{typ int} = SOME (Numeral.mk_cnumber @{ctyp int} i) + | mk_builtin_num ctxt i T = + chained_mk_builtin_num ctxt (get_mk_builtins ctxt) i T + +val mk_true = Thm.cterm_of @{context} (@{const Not} $ @{const False}) +val mk_false = Thm.cterm_of @{context} @{const False} +val mk_not = Thm.apply (Thm.cterm_of @{context} @{const Not}) +val mk_implies = Thm.mk_binop (Thm.cterm_of @{context} @{const HOL.implies}) +val mk_iff = Thm.mk_binop (Thm.cterm_of @{context} @{const HOL.eq (bool)}) +val conj = Thm.cterm_of @{context} @{const HOL.conj} +val disj = Thm.cterm_of @{context} @{const HOL.disj} + +fun mk_nary _ cu [] = cu + | mk_nary ct _ cts = uncurry (fold_rev (Thm.mk_binop ct)) (split_last cts) + +val eq = Old_SMT_Utils.mk_const_pat @{theory} @{const_name HOL.eq} Old_SMT_Utils.destT1 +fun mk_eq ct cu = Thm.mk_binop (Old_SMT_Utils.instT' ct eq) ct cu + +val if_term = + Old_SMT_Utils.mk_const_pat @{theory} @{const_name If} + (Old_SMT_Utils.destT1 o Old_SMT_Utils.destT2) +fun mk_if cc ct cu = + Thm.mk_binop (Thm.apply 
(Old_SMT_Utils.instT' ct if_term) cc) ct cu + +val nil_term = + Old_SMT_Utils.mk_const_pat @{theory} @{const_name Nil} Old_SMT_Utils.destT1 +val cons_term = + Old_SMT_Utils.mk_const_pat @{theory} @{const_name Cons} Old_SMT_Utils.destT1 +fun mk_list cT cts = + fold_rev (Thm.mk_binop (Old_SMT_Utils.instT cT cons_term)) cts + (Old_SMT_Utils.instT cT nil_term) + +val distinct = Old_SMT_Utils.mk_const_pat @{theory} @{const_name distinct} + (Old_SMT_Utils.destT1 o Old_SMT_Utils.destT1) +fun mk_distinct [] = mk_true + | mk_distinct (cts as (ct :: _)) = + Thm.apply (Old_SMT_Utils.instT' ct distinct) + (mk_list (Thm.ctyp_of_cterm ct) cts) + +val access = + Old_SMT_Utils.mk_const_pat @{theory} @{const_name fun_app} Old_SMT_Utils.destT1 +fun mk_access array = Thm.apply (Old_SMT_Utils.instT' array access) array + +val update = Old_SMT_Utils.mk_const_pat @{theory} @{const_name fun_upd} + (Thm.dest_ctyp o Old_SMT_Utils.destT1) +fun mk_update array index value = + let val cTs = Thm.dest_ctyp (Thm.ctyp_of_cterm array) + in + Thm.apply (Thm.mk_binop (Old_SMT_Utils.instTs cTs update) array index) value + end + +val mk_uminus = Thm.apply (Thm.cterm_of @{context} @{const uminus (int)}) +val add = Thm.cterm_of @{context} @{const plus (int)} +val int0 = Numeral.mk_cnumber @{ctyp int} 0 +val mk_sub = Thm.mk_binop (Thm.cterm_of @{context} @{const minus (int)}) +val mk_mul = Thm.mk_binop (Thm.cterm_of @{context} @{const times (int)}) +val mk_div = Thm.mk_binop (Thm.cterm_of @{context} @{const z3div}) +val mk_mod = Thm.mk_binop (Thm.cterm_of @{context} @{const z3mod}) +val mk_lt = Thm.mk_binop (Thm.cterm_of @{context} @{const less (int)}) +val mk_le = Thm.mk_binop (Thm.cterm_of @{context} @{const less_eq (int)}) + +fun mk_builtin_fun ctxt sym cts = + (case (sym, cts) of + (Sym ("true", _), []) => SOME mk_true + | (Sym ("false", _), []) => SOME mk_false + | (Sym ("not", _), [ct]) => SOME (mk_not ct) + | (Sym ("and", _), _) => SOME (mk_nary conj mk_true cts) + | (Sym ("or", _), _) => SOME (mk_nary disj mk_false cts) + | (Sym ("implies", _), [ct, cu]) => SOME (mk_implies ct cu) + | (Sym ("iff", _), [ct, cu]) => SOME (mk_iff ct cu) + | (Sym ("~", _), [ct, cu]) => SOME (mk_iff ct cu) + | (Sym ("xor", _), [ct, cu]) => SOME (mk_not (mk_iff ct cu)) + | (Sym ("if", _), [ct1, ct2, ct3]) => SOME (mk_if ct1 ct2 ct3) + | (Sym ("ite", _), [ct1, ct2, ct3]) => SOME (mk_if ct1 ct2 ct3) (* FIXME: remove *) + | (Sym ("=", _), [ct, cu]) => SOME (mk_eq ct cu) + | (Sym ("distinct", _), _) => SOME (mk_distinct cts) + | (Sym ("select", _), [ca, ck]) => SOME (Thm.apply (mk_access ca) ck) + | (Sym ("store", _), [ca, ck, cv]) => SOME (mk_update ca ck cv) + | _ => + (case (sym, try (Thm.typ_of_cterm o hd) cts, cts) of + (Sym ("+", _), SOME @{typ int}, _) => SOME (mk_nary add int0 cts) + | (Sym ("-", _), SOME @{typ int}, [ct]) => SOME (mk_uminus ct) + | (Sym ("-", _), SOME @{typ int}, [ct, cu]) => SOME (mk_sub ct cu) + | (Sym ("*", _), SOME @{typ int}, [ct, cu]) => SOME (mk_mul ct cu) + | (Sym ("div", _), SOME @{typ int}, [ct, cu]) => SOME (mk_div ct cu) + | (Sym ("mod", _), SOME @{typ int}, [ct, cu]) => SOME (mk_mod ct cu) + | (Sym ("<", _), SOME @{typ int}, [ct, cu]) => SOME (mk_lt ct cu) + | (Sym ("<=", _), SOME @{typ int}, [ct, cu]) => SOME (mk_le ct cu) + | (Sym (">", _), SOME @{typ int}, [ct, cu]) => SOME (mk_lt cu ct) + | (Sym (">=", _), SOME @{typ int}, [ct, cu]) => SOME (mk_le cu ct) + | _ => chained_mk_builtin_fun ctxt (get_mk_builtins ctxt) sym cts)) + + + +(* abstraction *) + +fun is_builtin_theory_term ctxt t = + if 
Old_SMT_Builtin.is_builtin_num ctxt t then true + else + (case Term.strip_comb t of + (Const c, ts) => Old_SMT_Builtin.is_builtin_fun ctxt c ts + | _ => false) + +end diff --git a/src/test-gen/src/main/smt_patch/old_z3_model_patch.ML b/src/test-gen/src/main/smt_patch/old_z3_model_patch.ML new file mode 100644 index 0000000..8ea560c --- /dev/null +++ b/src/test-gen/src/main/smt_patch/old_z3_model_patch.ML @@ -0,0 +1,337 @@ +(* Title: HOL/Library/Old_SMT/old_z3_model.ML + Author: Sascha Boehme and Philipp Meyer, TU Muenchen + +Parser for counterexamples generated by Z3. +*) + +signature OLD_Z3_MODEL = +sig + val parse_counterex: Proof.context -> Old_SMT_Translate.recon -> string list -> + term list * term list +end + +structure Old_Z3_Model: OLD_Z3_MODEL = +struct + + +(* counterexample expressions *) + +datatype expr = True | False | Number of int * int option | Value of int | + Array of array | App of string * expr list +and array = Fresh of expr | Store of (array * expr) * expr + + +(* parsing *) + +val space = Scan.many Symbol.is_ascii_blank +fun spaced p = p --| space +fun in_parens p = spaced (Scan.$$ "(") |-- p --| spaced (Scan.$$ ")") +fun in_braces p = spaced (Scan.$$ "{") |-- p --| spaced (Scan.$$ "}") + +val digit = (fn + "0" => SOME 0 | "1" => SOME 1 | "2" => SOME 2 | "3" => SOME 3 | + "4" => SOME 4 | "5" => SOME 5 | "6" => SOME 6 | "7" => SOME 7 | + "8" => SOME 8 | "9" => SOME 9 | _ => NONE) + +val nat_num = spaced (Scan.repeat1 (Scan.some digit) >> + (fn ds => fold (fn d => fn i => i * 10 + d) ds 0)) +val int_num = spaced (Scan.optional ($$ "-" >> K (fn i => ~i)) I :|-- + (fn sign => nat_num >> sign)) + +val is_char = Symbol.is_ascii_letter orf Symbol.is_ascii_digit orf + member (op =) (raw_explode "_+*-/%~=<>$&|?!.@^#") +val name = spaced (Scan.many1 is_char >> implode) + +fun $$$ s = spaced (Scan.this_string s) + +fun array_expr st = st |> in_parens ( + $$$ "const" |-- expr >> Fresh || + $$$ "store" |-- array_expr -- expr -- expr >> Store) + +and expr st = st |> ( + $$$ "true" >> K True || + $$$ "false" >> K False || + int_num -- Scan.option ($$$ "/" |-- int_num) >> Number || + $$$ "val!" 
|-- nat_num >> Value || + name >> (App o rpair []) || + array_expr >> Array || + in_parens (name -- Scan.repeat1 expr) >> App) + +fun args st = ($$$ "->" >> K [] || expr ::: args) st +val args_case = args -- expr +val else_case = $$$ "else" -- $$$ "->" |-- expr >> pair ([] : expr list) + +val func = + let fun cases st = (else_case >> single || args_case ::: cases) st + in in_braces cases end + +val cex = space |-- + Scan.repeat (name --| $$$ "->" -- (func || expr >> (single o pair []))) + +fun resolve terms ((n, k), cases) = + (case Symtab.lookup terms n of + NONE => NONE + | SOME t => SOME ((t, k), cases)) + +fun annotate _ (_, []) = NONE + | annotate terms (n, [([], c)]) = resolve terms ((n, 0), (c, [])) + | annotate _ (_, [_]) = NONE + | annotate terms (n, cases as (args, _) :: _) = + let val (cases', (_, else_case)) = split_last cases + in resolve terms ((n, length args), (else_case, cases')) end + +fun read_cex terms ls = + maps (cons "\n" o raw_explode) ls + |> try (fst o Scan.finite Symbol.stopper cex) + |> the_default [] + |> map_filter (annotate terms) + + +(* translation into terms *) + +fun max_value vs = + let + fun max_val_expr (Value i) = Integer.max i + | max_val_expr (App (_, es)) = fold max_val_expr es + | max_val_expr (Array a) = max_val_array a + | max_val_expr _ = I + + and max_val_array (Fresh e) = max_val_expr e + | max_val_array (Store ((a, e1), e2)) = + max_val_array a #> max_val_expr e1 #> max_val_expr e2 + + fun max_val (_, (ec, cs)) = + max_val_expr ec #> fold (fn (es, e) => fold max_val_expr (e :: es)) cs + + in fold max_val vs ~1 end + +fun with_context terms f vs = fst (fold_map f vs (terms, max_value vs + 1)) + +fun get_term n T es (cx as (terms, next_val)) = + (case Symtab.lookup terms n of + SOME t => ((t, es), cx) + | NONE => + let val t = Var (("skolem", next_val), T) + in ((t, []), (Symtab.update (n, t) terms, next_val + 1)) end) + +fun trans_expr _ True = pair @{const True} + | trans_expr _ False = pair @{const False} + | trans_expr T (Number (i, NONE)) = pair (HOLogic.mk_number T i) + | trans_expr T (Number (i, SOME j)) = + pair (Const (@{const_name divide}, [T, T] ---> T) $ + HOLogic.mk_number T i $ HOLogic.mk_number T j) + | trans_expr T (Value i) = pair (Var (("value", i), T)) + | trans_expr T (Array a) = trans_array T a + | trans_expr T (App (n, es)) = get_term n T es #-> (fn (t, es') => + let val Ts = fst (Old_SMT_Utils.dest_funT (length es') (Term.fastype_of t)) + in + fold_map (uncurry trans_expr) (Ts ~~ es') #>> Term.list_comb o pair t + end) + +and trans_array T a = + let val (dT, rT) = Term.dest_funT T + in + (case a of + Fresh e => trans_expr rT e #>> (fn t => Abs ("x", dT, t)) + | Store ((a', e1), e2) => + trans_array T a' ##>> trans_expr dT e1 ##>> trans_expr rT e2 #>> + (fn ((m, k), v) => + Const (@{const_name fun_upd}, [T, dT, rT] ---> T) $ m $ k $ v)) + end + +fun trans_pattern T ([], e) = trans_expr T e #>> pair [] + | trans_pattern T (arg :: args, e) = + trans_expr (Term.domain_type T) arg ##>> + trans_pattern (Term.range_type T) (args, e) #>> + (fn (arg', (args', e')) => (arg' :: args', e')) + +fun mk_fun_upd T U = Const (@{const_name fun_upd}, [T --> U, T, U, T] ---> U) + +fun mk_update ([], u) _ = u + | mk_update ([t], u) f = + uncurry mk_fun_upd (Term.dest_funT (Term.fastype_of f)) $ f $ t $ u + | mk_update (t :: ts, u) f = + let + val (dT, rT) = Term.dest_funT (Term.fastype_of f) + val (dT', rT') = Term.dest_funT rT + in + mk_fun_upd dT rT $ f $ t $ + mk_update (ts, u) (absdummy dT' (Const ("_", rT'))) + end + +fun mk_lambda Ts 
(t, pats) = + fold_rev absdummy Ts t |> fold mk_update pats + +fun translate ((t, k), (e, cs)) = + let + val T = Term.fastype_of t + val (Us, U) = Old_SMT_Utils.dest_funT k (Term.fastype_of t) + + fun mk_full_def u' pats = + pats + |> filter_out (fn (_, u) => u aconv u') + |> HOLogic.mk_eq o pair t o mk_lambda Us o pair u' + + fun mk_eq (us, u) = HOLogic.mk_eq (Term.list_comb (t, us), u) + fun mk_eqs u' [] = [HOLogic.mk_eq (t, u')] + | mk_eqs _ pats = map mk_eq pats + in + trans_expr U e ##>> + (if k = 0 then pair [] else fold_map (trans_pattern T) cs) #>> + (fn (u', pats) => (mk_eqs u' pats, mk_full_def u' pats)) + end + + +(* normalization *) + +fun partition_eqs f = + let + fun part t (xs, ts) = + (case try HOLogic.dest_eq t of + SOME (l, r) => (case f l r of SOME x => (x::xs, ts) | _ => (xs, t::ts)) + | NONE => (xs, t :: ts)) + in (fn ts => fold part ts ([], [])) end + +fun first_eq pred = + let + fun part _ [] = NONE + | part us (t :: ts) = + (case try (pred o HOLogic.dest_eq) t of + SOME (SOME lr) => SOME (lr, fold cons us ts) + | _ => part (t :: us) ts) + in (fn ts => part [] ts) end + +fun replace_vars tab = + let + fun repl v = the_default v (AList.lookup (op aconv) tab v) + fun replace (v as Var _) = repl v + | replace (v as Free _) = repl v + | replace t = t + in map (Term.map_aterms replace) end + +fun remove_int_nat_coercions (eqs, defs) = + let + fun mk_nat_num t i = + (case try HOLogic.dest_number i of + SOME (_, n) => SOME (t, HOLogic.mk_number @{typ nat} n) + | NONE => NONE) + fun nat_of (@{const of_nat (int)} $ (t as Var _)) i = mk_nat_num t i + | nat_of (@{const nat} $ i) (t as Var _) = mk_nat_num t i + | nat_of _ _ = NONE + val (nats, eqs') = partition_eqs nat_of eqs + + fun is_coercion t = + (case try HOLogic.dest_eq t of + SOME (@{const of_nat (int)}, _) => true + | SOME (@{const nat}, _) => true + | _ => false) + in apply2 (replace_vars nats) (eqs', filter_out is_coercion defs) end + +fun unfold_funapp (eqs, defs) = + let + fun unfold_app (Const (@{const_name Old_SMT_patch.fun_app}, _) $ f $ t) = f $ t + | unfold_app t = t + fun unfold_eq ((eq as Const (@{const_name HOL.eq}, _)) $ t $ u) = + eq $ unfold_app t $ u + | unfold_eq t = t + + fun is_fun_app t = + (case try HOLogic.dest_eq t of + SOME (Const (@{const_name Old_SMT_patch.fun_app}, _), _) => true + | _ => false) + + in (map unfold_eq eqs, filter_out is_fun_app defs) end + +val unfold_eqs = + let + val is_ground = not o Term.exists_subterm Term.is_Var + fun is_non_rec (v, t) = not (Term.exists_subterm (equal v) t) + + fun rewr_var (l as Var _, r) = if is_ground r then SOME (l, r) else NONE + | rewr_var (r, l as Var _) = if is_ground r then SOME (l, r) else NONE + | rewr_var _ = NONE + + fun rewr_free' e = if is_non_rec e then SOME e else NONE + fun rewr_free (e as (Free _, _)) = rewr_free' e + | rewr_free (e as (_, Free _)) = rewr_free' (swap e) + | rewr_free _ = NONE + + fun is_trivial (Const (@{const_name HOL.eq}, _) $ t $ u) = t aconv u + | is_trivial _ = false + + fun replace r = replace_vars [r] #> filter_out is_trivial + + fun unfold_vars (es, ds) = + (case first_eq rewr_var es of + SOME (lr, es') => unfold_vars (apply2 (replace lr) (es', ds)) + | NONE => (es, ds)) + + fun unfold_frees ues (es, ds) = + (case first_eq rewr_free es of + SOME (lr, es') => + apply2 (replace lr) (es', ds) + |> unfold_frees (HOLogic.mk_eq lr :: replace lr ues) + | NONE => (ues @ es, ds)) + + in unfold_vars #> unfold_frees [] end + +fun swap_free ((eq as Const (@{const_name HOL.eq}, _)) $ t $ (u as Free _)) = + eq $ u $ t + | 
swap_free t = t + +fun frees_for_vars ctxt (eqs, defs) = + let + fun fresh_free i T (cx as (frees, ctxt)) = + (case Inttab.lookup frees i of + SOME t => (t, cx) + | NONE => + let + val (n, ctxt') = yield_singleton Variable.variant_fixes "" ctxt + val t = Free (n, T) + in (t, (Inttab.update (i, t) frees, ctxt')) end) + + fun repl_var (Var ((_, i), T)) = fresh_free i T + | repl_var (t $ u) = repl_var t ##>> repl_var u #>> op $ + | repl_var (Abs (n, T, t)) = repl_var t #>> (fn t' => Abs (n, T, t')) + | repl_var t = pair t + in + (Inttab.empty, ctxt) + |> fold_map repl_var eqs + ||>> fold_map repl_var defs + |> fst + end + + +(* overall procedure *) + +val is_free_constraint = Term.exists_subterm (fn Free _ => true | _ => false) + +fun is_free_def (Const (@{const_name HOL.eq}, _) $ Free _ $ _) = true + | is_free_def _ = false + +fun defined tp = + try (apply2 (fst o HOLogic.dest_eq)) tp + |> the_default false o Option.map (op aconv) + +fun add_free_defs free_cs defs = + let val (free_defs, defs') = List.partition is_free_def defs + in (free_cs @ filter_out (member defined free_cs) free_defs, defs') end + +fun is_const_def (Const (@{const_name HOL.eq}, _) $ Const _ $ _) = true + | is_const_def _ = false + +fun parse_counterex ctxt ({terms, ...} : Old_SMT_Translate.recon) ls = + read_cex terms ls + |> with_context terms translate + |> apfst flat o split_list + |> remove_int_nat_coercions + |> unfold_funapp + |> unfold_eqs + |>> map swap_free + |>> filter is_free_constraint + |-> add_free_defs + |> frees_for_vars ctxt + ||> filter is_const_def + +end + diff --git a/src/test-gen/src/main/smt_patch/old_z3_proof_parser_patch.ML b/src/test-gen/src/main/smt_patch/old_z3_proof_parser_patch.ML new file mode 100644 index 0000000..0cf3aa5 --- /dev/null +++ b/src/test-gen/src/main/smt_patch/old_z3_proof_parser_patch.ML @@ -0,0 +1,446 @@ +(* Title: HOL/Library/Old_SMT/old_z3_proof_parser.ML + Author: Sascha Boehme, TU Muenchen + +Parser for Z3 proofs. 
+*) + +signature OLD_Z3_PROOF_PARSER = +sig + (*proof rules*) + datatype rule = True_Axiom | Asserted | Goal | Modus_Ponens | Reflexivity | + Symmetry | Transitivity | Transitivity_Star | Monotonicity | Quant_Intro | + Distributivity | And_Elim | Not_Or_Elim | Rewrite | Rewrite_Star | + Pull_Quant | Pull_Quant_Star | Push_Quant | Elim_Unused_Vars | + Dest_Eq_Res | Quant_Inst | Hypothesis | Lemma | Unit_Resolution | + Iff_True | Iff_False | Commutativity | Def_Axiom | Intro_Def | Apply_Def | + Iff_Oeq | Nnf_Pos | Nnf_Neg | Nnf_Star | Cnf_Star | Skolemize | + Modus_Ponens_Oeq | Th_Lemma of string list + val string_of_rule: rule -> string + + (*proof parser*) + datatype proof_step = Proof_Step of { + rule: rule, + args: cterm list, + prems: int list, + prop: cterm } + val parse: Proof.context -> typ Symtab.table -> term Symtab.table -> + string list -> + (int * cterm) list * (int * proof_step) list * string list * Proof.context +end + +structure Old_Z3_Proof_Parser: OLD_Z3_PROOF_PARSER = +struct + + +(* proof rules *) + +datatype rule = True_Axiom | Asserted | Goal | Modus_Ponens | Reflexivity | + Symmetry | Transitivity | Transitivity_Star | Monotonicity | Quant_Intro | + Distributivity | And_Elim | Not_Or_Elim | Rewrite | Rewrite_Star | + Pull_Quant | Pull_Quant_Star | Push_Quant | Elim_Unused_Vars | Dest_Eq_Res | + Quant_Inst | Hypothesis | Lemma | Unit_Resolution | Iff_True | Iff_False | + Commutativity | Def_Axiom | Intro_Def | Apply_Def | Iff_Oeq | Nnf_Pos | + Nnf_Neg | Nnf_Star | Cnf_Star | Skolemize | Modus_Ponens_Oeq | + Th_Lemma of string list + +val rule_names = Symtab.make [ + ("true-axiom", True_Axiom), + ("asserted", Asserted), + ("goal", Goal), + ("mp", Modus_Ponens), + ("refl", Reflexivity), + ("symm", Symmetry), + ("trans", Transitivity), + ("trans*", Transitivity_Star), + ("monotonicity", Monotonicity), + ("quant-intro", Quant_Intro), + ("distributivity", Distributivity), + ("and-elim", And_Elim), + ("not-or-elim", Not_Or_Elim), + ("rewrite", Rewrite), + ("rewrite*", Rewrite_Star), + ("pull-quant", Pull_Quant), + ("pull-quant*", Pull_Quant_Star), + ("push-quant", Push_Quant), + ("elim-unused", Elim_Unused_Vars), + ("der", Dest_Eq_Res), + ("quant-inst", Quant_Inst), + ("hypothesis", Hypothesis), + ("lemma", Lemma), + ("unit-resolution", Unit_Resolution), + ("iff-true", Iff_True), + ("iff-false", Iff_False), + ("commutativity", Commutativity), + ("def-axiom", Def_Axiom), + ("intro-def", Intro_Def), + ("apply-def", Apply_Def), + ("iff~", Iff_Oeq), + ("nnf-pos", Nnf_Pos), + ("nnf-neg", Nnf_Neg), + ("nnf*", Nnf_Star), + ("cnf*", Cnf_Star), + ("sk", Skolemize), + ("mp~", Modus_Ponens_Oeq), + ("th-lemma", Th_Lemma [])] + +fun string_of_rule (Th_Lemma args) = space_implode " " ("th-lemma" :: args) + | string_of_rule r = + let fun eq_rule (s, r') = if r = r' then SOME s else NONE + in the (Symtab.get_first eq_rule rule_names) end + + + +(* certified terms and variables *) + +val (var_prefix, decl_prefix) = ("v", "sk") +(* + "decl_prefix" is for skolem constants (represented by free variables), + "var_prefix" is for pseudo-schematic variables (schematic with respect + to the Z3 proof, but represented by free variables). + + Both prefixes must be distinct to avoid name interferences. + More precisely, the naming of pseudo-schematic variables must be + context-independent modulo the current proof context to be able to + use fast inference kernel rules during proof reconstruction. 
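+ For illustration: a Skolem constant named n is represented by a fresh free variable derived from decl_prefix ^ n (see add_decl below), while the pseudo-schematic variables of a proof node are mapped to fresh free variables built from var_prefix (see mk_inst below).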
+*) + +fun mk_inst ctxt vars = + let + val max = fold (Integer.max o fst) vars 0 + val ns = fst (Variable.variant_fixes (replicate (max + 1) var_prefix) ctxt) + fun mk (i, v) = + (dest_Var (Thm.term_of v), Thm.cterm_of ctxt (Free (nth ns i, Thm.typ_of_cterm v))) + in map mk vars end + +fun close ctxt (ct, vars) = + let + val inst = mk_inst ctxt vars + val names = fold (Term.add_free_names o Thm.term_of o snd) inst [] + in (Thm.instantiate_cterm ([], inst) ct, names) end + + +fun mk_bound ctxt (i, T) = + let val ct = Thm.cterm_of ctxt (Var ((Name.uu, 0), T)) + in (ct, [(i, ct)]) end + +local + fun mk_quant1 ctxt q T (ct, vars) = + let + val cv = + (case AList.lookup (op =) vars 0 of + SOME cv => cv + | _ => Thm.cterm_of ctxt (Var ((Name.uu, Thm.maxidx_of_cterm ct + 1), T))) + fun dec (i, v) = if i = 0 then NONE else SOME (i-1, v) + val vars' = map_filter dec vars + in (Thm.apply (Old_SMT_Utils.instT' cv q) (Thm.lambda cv ct), vars') end + + fun quant name = + Old_SMT_Utils.mk_const_pat @{theory} name (Old_SMT_Utils.destT1 o Old_SMT_Utils.destT1) + val forall = quant @{const_name All} + val exists = quant @{const_name Ex} +in + +fun mk_quant is_forall ctxt = + fold_rev (mk_quant1 ctxt (if is_forall then forall else exists)) + +end + +local + fun prep (ct, vars) (maxidx, all_vars) = + let + val maxidx' = maxidx + Thm.maxidx_of_cterm ct + 1 + + fun part (i, cv) = + (case AList.lookup (op =) all_vars i of + SOME cu => apfst (if cu aconvc cv then I else cons (cv, cu)) + | NONE => + let val cv' = Thm.incr_indexes_cterm maxidx cv + in apfst (cons (cv, cv')) #> apsnd (cons (i, cv')) end) + + val (inst, vars') = + if null vars then ([], vars) + else fold part vars ([], []) + + in + (Thm.instantiate_cterm ([], map (apfst (dest_Var o Thm.term_of)) inst) ct, + (maxidx', vars' @ all_vars)) + end +in +fun mk_fun f ts = + let val (cts, (_, vars)) = fold_map prep ts (0, []) + in f cts |> Option.map (rpair vars) end +end + + + +(* proof parser *) + +datatype proof_step = Proof_Step of { + rule: rule, + args: cterm list, + prems: int list, + prop: cterm } + + +(** parser context **) + +val not_false = Thm.cterm_of @{context} (@{const Not} $ @{const False}) + +fun make_context ctxt typs terms = + let + val ctxt' = + ctxt + |> Symtab.fold (Variable.declare_typ o snd) typs + |> Symtab.fold (Variable.declare_term o snd) terms + + fun cert @{const True} = not_false + | cert t = Thm.cterm_of ctxt' t + + in (typs, Symtab.map (K cert) terms, Inttab.empty, [], [], ctxt') end + +fun fresh_name n (typs, terms, exprs, steps, vars, ctxt) = + let val (n', ctxt') = yield_singleton Variable.variant_fixes n ctxt + in (n', (typs, terms, exprs, steps, vars, ctxt')) end + +fun context_of (_, _, _, _, _, ctxt) = ctxt + +fun add_decl (n, T) (cx as (_, terms, _, _, _, _)) = + (case Symtab.lookup terms n of + SOME _ => cx + | NONE => cx |> fresh_name (decl_prefix ^ n) + |> (fn (m, (typs, terms, exprs, steps, vars, ctxt)) => + let + val upd = Symtab.update (n, Thm.cterm_of ctxt (Free (m, T))) + in (typs, upd terms, exprs, steps, vars, ctxt) end)) + +fun mk_typ (typs, _, _, _, _, ctxt) (s as Old_Z3_Interface.Sym (n, _)) = + (case Old_Z3_Interface.mk_builtin_typ ctxt s of + SOME T => SOME T + | NONE => Symtab.lookup typs n) + +fun mk_num (_, _, _, _, _, ctxt) (i, T) = + mk_fun (K (Old_Z3_Interface.mk_builtin_num ctxt i T)) [] + +fun mk_app (_, terms, _, _, _, ctxt) (s as Old_Z3_Interface.Sym (n, _), es) = + mk_fun (fn cts => + (case Old_Z3_Interface.mk_builtin_fun ctxt s cts of + SOME ct => SOME ct + | NONE => + Symtab.lookup terms n 
|> Option.map (Drule.list_comb o rpair cts))) es + +fun add_expr k t (typs, terms, exprs, steps, vars, ctxt) = + (typs, terms, Inttab.update (k, t) exprs, steps, vars, ctxt) + +fun lookup_expr (_, _, exprs, _, _, _) = Inttab.lookup exprs + +fun add_proof_step k ((r, args), prop) cx = + let + val (typs, terms, exprs, steps, vars, ctxt) = cx + val (ct, vs) = close ctxt prop + fun part (SOME e, _) (cts, ps) = (close ctxt e :: cts, ps) + | part (NONE, i) (cts, ps) = (cts, i :: ps) + val (args', prems) = fold (part o `(lookup_expr cx)) args ([], []) + val (cts, vss) = split_list args' + val step = Proof_Step {rule=r, args=rev cts, prems=rev prems, + prop = Old_SMT_Utils.mk_cprop ct} + val vars' = fold (union (op =)) (vs :: vss) vars + in (typs, terms, exprs, (k, step) :: steps, vars', ctxt) end + +fun finish (_, _, _, steps, vars, ctxt) = + let + fun coll (p as (k, Proof_Step {prems, rule, prop, ...})) (ars, ps, ids) = + (case rule of + Asserted => ((k, prop) :: ars, ps, ids) + | Goal => ((k, prop) :: ars, ps, ids) + | _ => + if Inttab.defined ids k then + (ars, p :: ps, fold (Inttab.update o rpair ()) prems ids) + else (ars, ps, ids)) + + val (ars, steps', _) = fold coll steps ([], [], Inttab.make [(~1, ())]) + in (ars, steps', vars, ctxt) end + + +(** core parser **) + +fun parse_exn line_no msg = raise Old_SMT_patch_Failure.SMT (Old_SMT_patch_Failure.Other_Failure + ("Z3 proof parser (line " ^ string_of_int line_no ^ "): " ^ msg)) + +fun scan_exn msg ((line_no, _), _) = parse_exn line_no msg + +fun with_info f cx = + (case f ((NONE, 1), cx) of + ((SOME _, _), cx') => cx' + | ((_, line_no), _) => parse_exn line_no "bad proof") + +fun parse_line _ _ (st as ((SOME _, _), _)) = st + | parse_line scan line ((_, line_no), cx) = + let val st = ((line_no, cx), raw_explode line) + in + (case Scan.catch (Scan.finite' Symbol.stopper (Scan.option scan)) st of + (SOME r, ((_, cx'), _)) => ((r, line_no+1), cx') + | (NONE, _) => parse_exn line_no ("bad proof line: " ^ quote line)) + end + +fun with_context f x ((line_no, cx), st) = + let val (y, cx') = f x cx + in (y, ((line_no, cx'), st)) end + + +fun lookup_context f x (st as ((_, cx), _)) = (f cx x, st) + + +(** parser combinators and parsers for basic entities **) + +fun $$ s = Scan.lift (Scan.$$ s) +fun this s = Scan.lift (Scan.this_string s) +val is_blank = Symbol.is_ascii_blank +fun blank st = Scan.lift (Scan.many1 is_blank) st +fun sep scan = blank |-- scan +fun seps scan = Scan.repeat (sep scan) +fun seps1 scan = Scan.repeat1 (sep scan) +fun seps_by scan_sep scan = scan ::: Scan.repeat (scan_sep |-- scan) + +val lpar = "(" and rpar = ")" +val lbra = "[" and rbra = "]" +fun par scan = $$ lpar |-- scan --| $$ rpar +fun bra scan = $$ lbra |-- scan --| $$ rbra + +val digit = (fn + "0" => SOME 0 | "1" => SOME 1 | "2" => SOME 2 | "3" => SOME 3 | + "4" => SOME 4 | "5" => SOME 5 | "6" => SOME 6 | "7" => SOME 7 | + "8" => SOME 8 | "9" => SOME 9 | _ => NONE) + +fun digits st = (Scan.lift (Scan.many1 Symbol.is_ascii_digit) >> implode) st + +fun nat_num st = (Scan.lift (Scan.repeat1 (Scan.some digit)) >> (fn ds => + fold (fn d => fn i => i * 10 + d) ds 0)) st + +fun int_num st = (Scan.optional ($$ "-" >> K (fn i => ~i)) I :|-- + (fn sign => nat_num >> sign)) st + +val is_char = Symbol.is_ascii_letter orf Symbol.is_ascii_digit orf + member (op =) (raw_explode "_+*-/%~=<>$&|?!.@^#") + +fun name st = (Scan.lift (Scan.many1 is_char) >> implode) st + +fun sym st = (name -- + Scan.optional (bra (seps_by ($$ ":") sym)) [] >> Old_Z3_Interface.Sym) st + +fun id st = 
($$ "#" |-- nat_num) st + + +(** parsers for various parts of Z3 proofs **) + +fun sort st = Scan.first [ + this "array" |-- bra (sort --| $$ ":" -- sort) >> (op -->), + par (this "->" |-- seps1 sort) >> ((op --->) o split_last), + sym :|-- (fn s as Old_Z3_Interface.Sym (n, _) => lookup_context mk_typ s :|-- (fn + SOME T => Scan.succeed T + | NONE => scan_exn ("unknown sort: " ^ quote n)))] st + +fun bound st = (par (this ":var" |-- sep nat_num -- sep sort) :|-- + lookup_context (mk_bound o context_of)) st + +fun numb (n as (i, _)) = lookup_context mk_num n :|-- (fn + SOME n' => Scan.succeed n' + | NONE => scan_exn ("unknown number: " ^ quote (string_of_int i))) + +fun appl (app as (Old_Z3_Interface.Sym (n, _), _)) = + lookup_context mk_app app :|-- (fn + SOME app' => Scan.succeed app' + | NONE => scan_exn ("unknown function symbol: " ^ quote n)) + +fun bv_size st = (digits >> (fn sz => + Old_Z3_Interface.Sym ("bv", [Old_Z3_Interface.Sym (sz, [])]))) st + +fun bv_number_sort st = (bv_size :|-- lookup_context mk_typ :|-- (fn + SOME cT => Scan.succeed cT + | NONE => scan_exn ("unknown sort: " ^ quote "bv"))) st + +fun bv_number st = + (this "bv" |-- bra (nat_num --| $$ ":" -- bv_number_sort) :|-- numb) st + +fun frac_number st = ( + int_num --| $$ "/" -- int_num --| this "::" -- sort :|-- (fn ((i, j), T) => + numb (i, T) -- numb (j, T) :|-- (fn (n, m) => + appl (Old_Z3_Interface.Sym ("/", []), [n, m])))) st + +fun plain_number st = (int_num --| this "::" -- sort :|-- numb) st + +fun number st = Scan.first [bv_number, frac_number, plain_number] st + +fun constant st = ((sym >> rpair []) :|-- appl) st + +fun expr_id st = (id :|-- (fn i => lookup_context lookup_expr i :|-- (fn + SOME e => Scan.succeed e + | NONE => scan_exn ("unknown term id: " ^ quote (string_of_int i))))) st + +fun arg st = Scan.first [expr_id, number, constant] st + +fun application st = par ((sym -- Scan.repeat1 (sep arg)) :|-- appl) st + +fun variables st = par (this "vars" |-- seps1 (par (name |-- sep sort))) st + +fun pats st = seps (par ((this ":pat" || this ":nopat") |-- seps1 id)) st + +val ctrue = Thm.cterm_of @{context} @{const True} + +fun pattern st = par (this "pattern" |-- Scan.repeat1 (sep arg) >> + (the o mk_fun (K (SOME ctrue)))) st + +fun quant_kind st = st |> ( + this "forall" >> K (mk_quant true o context_of) || + this "exists" >> K (mk_quant false o context_of)) + +fun quantifier st = + (par (quant_kind -- sep variables --| pats -- sep arg) :|-- + lookup_context (fn cx => fn ((mk_q, Ts), body) => mk_q cx Ts body)) st + +fun expr k = + Scan.first [bound, quantifier, pattern, application, number, constant] :|-- + with_context (pair NONE oo add_expr k) + +val rule_arg = id + (* if this is modified, then 'th_lemma_arg' needs reviewing *) + +fun th_lemma_arg st = Scan.unless (sep rule_arg >> K "" || $$ rbra) (sep name) st + +fun rule_name st = ((name >> `(Symtab.lookup rule_names)) :|-- (fn + (SOME (Th_Lemma _), _) => Scan.repeat th_lemma_arg >> Th_Lemma + | (SOME r, _) => Scan.succeed r + | (NONE, n) => scan_exn ("unknown proof rule: " ^ quote n))) st + +fun rule f k = + bra (rule_name -- seps id) --| $$ ":" -- sep arg #-> + with_context (pair (f k) oo add_proof_step k) + +fun decl st = (this "decl" |-- sep name --| sep (this "::") -- sep sort :|-- + with_context (pair NONE oo add_decl)) st + +fun def st = (id --| sep (this ":=")) st + +fun node st = st |> ( + decl || + def :|-- (fn k => sep (expr k) || sep (rule (K NONE) k)) || + rule SOME ~1) + + +(** overall parser **) + +(* + Currently, terms are parsed 
bottom-up (i.e., along with parsing the proof + text line by line), but proofs are reconstructed top-down (i.e. by an + in-order top-down traversal of the proof tree/graph). The latter approach + was taken because some proof texts comprise irrelevant proof steps which + will thus not be reconstructed. This approach might also be beneficial + for constructing terms, but it would also increase the complexity of the + (otherwise rather modular) code. +*) + +fun parse ctxt typs terms proof_text = + make_context ctxt typs terms + |> with_info (fold (parse_line node) proof_text) + |> finish + +end diff --git a/src/test-gen/src/main/smt_patch/old_z3_proof_reconstruction_patch.ML b/src/test-gen/src/main/smt_patch/old_z3_proof_reconstruction_patch.ML new file mode 100644 index 0000000..4f9a81f --- /dev/null +++ b/src/test-gen/src/main/smt_patch/old_z3_proof_reconstruction_patch.ML @@ -0,0 +1,891 @@ +(* Title: HOL/Library/Old_SMT/old_z3_proof_reconstruction.ML + Author: Sascha Boehme, TU Muenchen + +Proof reconstruction for proofs found by Z3. +*) + +signature OLD_Z3_PROOF_RECONSTRUCTION = +sig + val add_z3_rule: thm -> Context.generic -> Context.generic + val reconstruct: Proof.context -> Old_SMT_Translate.recon -> string list -> int list * thm +end + +structure Old_Z3_Proof_Reconstruction: OLD_Z3_PROOF_RECONSTRUCTION = +struct + + +fun z3_exn msg = raise Old_SMT_patch_Failure.SMT (Old_SMT_patch_Failure.Other_Failure + ("Z3 proof reconstruction: " ^ msg)) + + + +(* net of schematic rules *) + +local + val description = "declaration of Z3 proof rules" + + val eq = Thm.eq_thm + + structure Old_Z3_Rules = Generic_Data + ( + type T = thm Net.net + val empty = Net.empty + val extend = I + val merge = Net.merge eq + ) + + fun prep context = + `Thm.prop_of o rewrite_rule (Context.proof_of context) [Old_Z3_Proof_Literals.rewrite_true] + + fun ins thm context = + context |> Old_Z3_Rules.map (fn net => Net.insert_term eq (prep context thm) net handle Net.INSERT => net) + fun rem thm context = + context |> Old_Z3_Rules.map (fn net => Net.delete_term eq (prep context thm) net handle Net.DELETE => net) + + val add = Thm.declaration_attribute ins + val del = Thm.declaration_attribute rem +in + +val add_z3_rule = ins + +fun by_schematic_rule ctxt ct = + the (Old_Z3_Proof_Tools.net_instance (Old_Z3_Rules.get (Context.Proof ctxt)) ct) + +val _ = Theory.setup + (Attrib.setup @{binding old_z3_rule} (Attrib.add_del add del) description #> + Global_Theory.add_thms_dynamic (@{binding old_z3_rule}, Net.content o Old_Z3_Rules.get)) + +end + + + +(* proof tools *) + +fun named ctxt name prover ct = + let val _ = Old_SMT_Config.trace_msg ctxt I ("Z3: trying " ^ name ^ " ...") + in prover ct end + +fun NAMED ctxt name tac i st = + let val _ = Old_SMT_Config.trace_msg ctxt I ("Z3: trying " ^ name ^ " ...") + in tac i st end + +fun pretty_goal ctxt thms t = + [Pretty.block [Pretty.str "proposition: ", Syntax.pretty_term ctxt t]] + |> not (null thms) ? 
cons (Pretty.big_list "assumptions:" + (map (Thm.pretty_thm ctxt) thms)) + +fun try_apply ctxt thms = + let + fun try_apply_err ct = Pretty.string_of (Pretty.chunks [ + Pretty.big_list ("Z3 found a proof," ^ + " but proof reconstruction failed at the following subgoal:") + (pretty_goal ctxt thms (Thm.term_of ct)), + Pretty.str ("Declaring a rule as [old_z3_rule] might solve this problem.")]) + + fun apply [] ct = error (try_apply_err ct) + | apply (prover :: provers) ct = + (case try prover ct of + SOME thm => (Old_SMT_Config.trace_msg ctxt I "Z3: succeeded"; thm) + | NONE => apply provers ct) + + fun schematic_label full = "schematic rules" |> full ? suffix " (full)" + fun schematic ctxt full ct = + ct + |> full ? fold_rev (curry Drule.mk_implies o Thm.cprop_of) thms + |> named ctxt (schematic_label full) (by_schematic_rule ctxt) + |> fold Thm.elim_implies thms + + in apply o cons (schematic ctxt false) o cons (schematic ctxt true) end + +local + val rewr_if = + @{lemma "(if P then Q1 else Q2) = ((P --> Q1) & (~P --> Q2))" by simp} +in + +fun HOL_fast_tac ctxt = + Classical.fast_tac (put_claset HOL_cs ctxt) + +fun simp_fast_tac ctxt = + Simplifier.simp_tac (put_simpset HOL_ss ctxt addsimps [rewr_if]) + THEN_ALL_NEW HOL_fast_tac ctxt + +end + + + +(* theorems and proofs *) + +(** theorem incarnations **) + +datatype theorem = + Thm of thm | (* theorem without special features *) + MetaEq of thm | (* meta equality "t == s" *) + Literals of thm * Old_Z3_Proof_Literals.littab + (* "P1 & ... & Pn" and table of all literals P1, ..., Pn *) + +fun thm_of (Thm thm) = thm + | thm_of (MetaEq thm) = thm COMP @{thm meta_eq_to_obj_eq} + | thm_of (Literals (thm, _)) = thm + +fun meta_eq_of (MetaEq thm) = thm + | meta_eq_of p = mk_meta_eq (thm_of p) + +fun literals_of (Literals (_, lits)) = lits + | literals_of p = Old_Z3_Proof_Literals.make_littab [thm_of p] + + + +(** core proof rules **) + +(* assumption *) + +local + val remove_trigger = mk_meta_eq @{thm trigger_def} + val remove_weight = mk_meta_eq @{thm weight_def} + val remove_fun_app = mk_meta_eq @{thm fun_app_def} + + fun rewrite_conv _ [] = Conv.all_conv + | rewrite_conv ctxt eqs = Simplifier.full_rewrite (empty_simpset ctxt addsimps eqs) + + val prep_rules = [@{thm Let_def}, remove_trigger, remove_weight, + remove_fun_app, Old_Z3_Proof_Literals.rewrite_true] + + fun rewrite _ [] = I + | rewrite ctxt eqs = Conv.fconv_rule (rewrite_conv ctxt eqs) + + fun lookup_assm assms_net ct = + Old_Z3_Proof_Tools.net_instances assms_net ct + |> map (fn ithm as (_, thm) => (ithm, Thm.cprop_of thm aconvc ct)) +in + +fun add_asserted outer_ctxt rewrite_rules assms asserted ctxt = + let + val eqs = map (rewrite ctxt [Old_Z3_Proof_Literals.rewrite_true]) rewrite_rules + val eqs' = union Thm.eq_thm eqs prep_rules + + val assms_net = + assms + |> map (apsnd (rewrite ctxt eqs')) + |> map (apsnd (Conv.fconv_rule Thm.eta_conversion)) + |> Old_Z3_Proof_Tools.thm_net_of snd + + fun revert_conv ctxt = rewrite_conv ctxt eqs' then_conv Thm.eta_conversion + + fun assume thm ctxt = + let + val ct = Thm.cprem_of thm 1 + val (thm', ctxt') = yield_singleton Assumption.add_assumes ct ctxt + in (Thm.implies_elim thm thm', ctxt') end + + fun add1 idx thm1 ((i, th), exact) ((is, thms), (ctxt, ptab)) = + let + val (thm, ctxt') = + if exact then (Thm.implies_elim thm1 th, ctxt) + else assume thm1 ctxt + val thms' = if exact then thms else th :: thms + in + ((insert (op =) i is, thms'), + (ctxt', Inttab.update (idx, Thm thm) ptab)) + end + + fun add (idx, ct) (cx as ((is, thms), 
(ctxt, ptab))) = + let + val thm1 = + Thm.trivial ct + |> Conv.fconv_rule (Conv.arg1_conv (revert_conv outer_ctxt)) + val thm2 = singleton (Variable.export ctxt outer_ctxt) thm1 + in + (case lookup_assm assms_net (Thm.cprem_of thm2 1) of + [] => + let val (thm, ctxt') = assume thm1 ctxt + in ((is, thms), (ctxt', Inttab.update (idx, Thm thm) ptab)) end + | ithms => fold (add1 idx thm1) ithms cx) + end + in fold add asserted (([], []), (ctxt, Inttab.empty)) end + +end + + +(* P = Q ==> P ==> Q or P --> Q ==> P ==> Q *) +local + val precomp = Old_Z3_Proof_Tools.precompose2 + val comp = Old_Z3_Proof_Tools.compose + + val meta_iffD1 = @{lemma "P == Q ==> P ==> (Q::bool)" by simp} + val meta_iffD1_c = precomp Thm.dest_binop meta_iffD1 + + val iffD1_c = precomp (Thm.dest_binop o Thm.dest_arg) @{thm iffD1} + val mp_c = precomp (Thm.dest_binop o Thm.dest_arg) @{thm mp} +in +fun mp (MetaEq thm) p = Thm (Thm.implies_elim (comp meta_iffD1_c thm) p) + | mp p_q p = + let + val pq = thm_of p_q + val thm = comp iffD1_c pq handle THM _ => comp mp_c pq + in Thm (Thm.implies_elim thm p) end +end + + +(* and_elim: P1 & ... & Pn ==> Pi *) +(* not_or_elim: ~(P1 | ... | Pn) ==> ~Pi *) +local + fun is_sublit conj t = Old_Z3_Proof_Literals.exists_lit conj (fn u => u aconv t) + + fun derive conj t lits idx ptab = + let + val lit = the (Old_Z3_Proof_Literals.get_first_lit (is_sublit conj t) lits) + val ls = Old_Z3_Proof_Literals.explode conj false false [t] lit + val lits' = fold Old_Z3_Proof_Literals.insert_lit ls + (Old_Z3_Proof_Literals.delete_lit lit lits) + + fun upd thm = Literals (thm_of thm, lits') + val ptab' = Inttab.map_entry idx upd ptab + in (the (Old_Z3_Proof_Literals.lookup_lit lits' t), ptab') end + + fun lit_elim conj (p, idx) ct ptab = + let val lits = literals_of p + in + (case Old_Z3_Proof_Literals.lookup_lit lits (Old_SMT_Utils.term_of ct) of + SOME lit => (Thm lit, ptab) + | NONE => apfst Thm (derive conj (Old_SMT_Utils.term_of ct) lits idx ptab)) + end +in +val and_elim = lit_elim true +val not_or_elim = lit_elim false +end + + +(* P1, ..., Pn |- False ==> |- ~P1 | ... 
| ~Pn *) +local + fun step lit thm = + Thm.implies_elim (Thm.implies_intr (Thm.cprop_of lit) thm) lit + val explode_disj = Old_Z3_Proof_Literals.explode false false false + fun intro hyps thm th = fold step (explode_disj hyps th) thm + + fun dest_ccontr ct = [Thm.dest_arg (Thm.dest_arg (Thm.dest_arg1 ct))] + val ccontr = Old_Z3_Proof_Tools.precompose dest_ccontr @{thm ccontr} +in +fun lemma thm ct = + let + val cu = Old_Z3_Proof_Literals.negate (Thm.dest_arg ct) + val hyps = map_filter (try HOLogic.dest_Trueprop) (Thm.hyps_of thm) + val th = Old_Z3_Proof_Tools.under_assumption (intro hyps thm) cu + in Thm (Old_Z3_Proof_Tools.compose ccontr th) end +end + + +(* \/{P1, ..., Pn, Q1, ..., Qn}, ~P1, ..., ~Pn ==> \/{Q1, ..., Qn} *) +local + val explode_disj = Old_Z3_Proof_Literals.explode false true false + val join_disj = Old_Z3_Proof_Literals.join false + fun unit thm thms th = + let + val t = @{const Not} $ Old_SMT_Utils.prop_of thm + val ts = map Old_SMT_Utils.prop_of thms + in + join_disj (Old_Z3_Proof_Literals.make_littab (thms @ explode_disj ts th)) t + end + + fun dest_arg2 ct = Thm.dest_arg (Thm.dest_arg ct) + fun dest ct = apply2 dest_arg2 (Thm.dest_binop ct) + val contrapos = + Old_Z3_Proof_Tools.precompose2 dest @{lemma "(~P ==> ~Q) ==> Q ==> P" by fast} +in +fun unit_resolution thm thms ct = + Old_Z3_Proof_Literals.negate (Thm.dest_arg ct) + |> Old_Z3_Proof_Tools.under_assumption (unit thm thms) + |> Thm o Old_Z3_Proof_Tools.discharge thm o Old_Z3_Proof_Tools.compose contrapos +end + + +(* P ==> P == True or P ==> P == False *) +local + val iff1 = @{lemma "P ==> P == (~ False)" by simp} + val iff2 = @{lemma "~P ==> P == False" by simp} +in +fun iff_true thm = MetaEq (thm COMP iff1) +fun iff_false thm = MetaEq (thm COMP iff2) +end + + +(* distributivity of | over & *) +fun distributivity ctxt = Thm o try_apply ctxt [] [ + named ctxt "fast" (Old_Z3_Proof_Tools.by_tac ctxt (HOL_fast_tac ctxt))] + (* FIXME: not very well tested *) + + +(* Tseitin-like axioms *) +local + val disjI1 = @{lemma "(P ==> Q) ==> ~P | Q" by fast} + val disjI2 = @{lemma "(~P ==> Q) ==> P | Q" by fast} + val disjI3 = @{lemma "(~Q ==> P) ==> P | Q" by fast} + val disjI4 = @{lemma "(Q ==> P) ==> P | ~Q" by fast} + + fun prove' conj1 conj2 ct2 thm = + let + val littab = + Old_Z3_Proof_Literals.explode conj1 true (conj1 <> conj2) [] thm + |> cons Old_Z3_Proof_Literals.true_thm + |> Old_Z3_Proof_Literals.make_littab + in Old_Z3_Proof_Literals.join conj2 littab (Thm.term_of ct2) end + + fun prove rule (ct1, conj1) (ct2, conj2) = + Old_Z3_Proof_Tools.under_assumption (prove' conj1 conj2 ct2) ct1 COMP rule + + fun prove_def_axiom ct = + let val (ct1, ct2) = Thm.dest_binop (Thm.dest_arg ct) + in + (case Thm.term_of ct1 of + @{const Not} $ (@{const HOL.conj} $ _ $ _) => + prove disjI1 (Thm.dest_arg ct1, true) (ct2, true) + | @{const HOL.conj} $ _ $ _ => + prove disjI3 (Old_Z3_Proof_Literals.negate ct2, false) (ct1, true) + | @{const Not} $ (@{const HOL.disj} $ _ $ _) => + prove disjI3 (Old_Z3_Proof_Literals.negate ct2, false) (ct1, false) + | @{const HOL.disj} $ _ $ _ => + prove disjI2 (Old_Z3_Proof_Literals.negate ct1, false) (ct2, true) + | Const (@{const_name distinct}, _) $ _ => + let + fun dis_conv cv = Conv.arg_conv (Conv.arg1_conv cv) + val unfold_dis_conv = dis_conv Old_Z3_Proof_Tools.unfold_distinct_conv + fun prv cu = + let val (cu1, cu2) = Thm.dest_binop (Thm.dest_arg cu) + in prove disjI4 (Thm.dest_arg cu2, true) (cu1, true) end + in Old_Z3_Proof_Tools.with_conv unfold_dis_conv prv ct end + | @{const Not} $ 
(Const (@{const_name distinct}, _) $ _) => + let + fun dis_conv cv = Conv.arg_conv (Conv.arg1_conv (Conv.arg_conv cv)) + val unfold_dis_conv = dis_conv Old_Z3_Proof_Tools.unfold_distinct_conv + fun prv cu = + let val (cu1, cu2) = Thm.dest_binop (Thm.dest_arg cu) + in prove disjI1 (Thm.dest_arg cu1, true) (cu2, true) end + in Old_Z3_Proof_Tools.with_conv unfold_dis_conv prv ct end + | _ => raise CTERM ("prove_def_axiom", [ct])) + end +in +fun def_axiom ctxt = Thm o try_apply ctxt [] [ + named ctxt "conj/disj/distinct" prove_def_axiom, + Old_Z3_Proof_Tools.by_abstraction 0 (true, false) ctxt [] (fn ctxt' => + named ctxt' "simp+fast" (Old_Z3_Proof_Tools.by_tac ctxt (simp_fast_tac ctxt')))] +end + + +(* local definitions *) +local + val intro_rules = [ + @{lemma "n == P ==> (~n | P) & (n | ~P)" by simp}, + @{lemma "n == (if P then s else t) ==> (~P | n = s) & (P | n = t)" + by simp}, + @{lemma "n == P ==> n = P" by (rule meta_eq_to_obj_eq)} ] + + val apply_rules = [ + @{lemma "(~n | P) & (n | ~P) ==> P == n" by (atomize(full)) fast}, + @{lemma "(~P | n = s) & (P | n = t) ==> (if P then s else t) == n" + by (atomize(full)) fastforce} ] + + val inst_rule = Old_Z3_Proof_Tools.match_instantiate Thm.dest_arg + + fun apply_rule ct = + (case get_first (try (inst_rule ct)) intro_rules of + SOME thm => thm + | NONE => raise CTERM ("intro_def", [ct])) +in +fun intro_def ct = Old_Z3_Proof_Tools.make_hyp_def (apply_rule ct) #>> Thm + +fun apply_def thm = + get_first (try (fn rule => MetaEq (thm COMP rule))) apply_rules + |> the_default (Thm thm) +end + + +(* negation normal form *) +local + val quant_rules1 = ([ + @{lemma "(!!x. P x == Q) ==> ALL x. P x == Q" by simp}, + @{lemma "(!!x. P x == Q) ==> EX x. P x == Q" by simp}], [ + @{lemma "(!!x. P x == Q x) ==> ALL x. P x == ALL x. Q x" by simp}, + @{lemma "(!!x. P x == Q x) ==> EX x. P x == EX x. Q x" by simp}]) + + val quant_rules2 = ([ + @{lemma "(!!x. ~P x == Q) ==> ~(ALL x. P x) == Q" by simp}, + @{lemma "(!!x. ~P x == Q) ==> ~(EX x. P x) == Q" by simp}], [ + @{lemma "(!!x. ~P x == Q x) ==> ~(ALL x. P x) == EX x. Q x" by simp}, + @{lemma "(!!x. ~P x == Q x) ==> ~(EX x. P x) == ALL x. 
Q x" by simp}]) + + fun nnf_quant_tac ctxt thm (qs as (qs1, qs2)) i st = ( + resolve_tac ctxt [thm] ORELSE' + (match_tac ctxt qs1 THEN' nnf_quant_tac ctxt thm qs) ORELSE' + (match_tac ctxt qs2 THEN' nnf_quant_tac ctxt thm qs)) i st + + fun nnf_quant_tac_varified ctxt vars eq = + nnf_quant_tac ctxt (Old_Z3_Proof_Tools.varify vars eq) + + fun nnf_quant ctxt vars qs p ct = + Old_Z3_Proof_Tools.as_meta_eq ct + |> Old_Z3_Proof_Tools.by_tac ctxt (nnf_quant_tac_varified ctxt vars (meta_eq_of p) qs) + + fun prove_nnf ctxt = try_apply ctxt [] [ + named ctxt "conj/disj" Old_Z3_Proof_Literals.prove_conj_disj_eq, + Old_Z3_Proof_Tools.by_abstraction 0 (true, false) ctxt [] (fn ctxt' => + named ctxt' "simp+fast" (Old_Z3_Proof_Tools.by_tac ctxt' (simp_fast_tac ctxt')))] +in +fun nnf ctxt vars ps ct = + (case Old_SMT_Utils.term_of ct of + _ $ (l as Const _ $ Abs _) $ (r as Const _ $ Abs _) => + if l aconv r + then MetaEq (Thm.reflexive (Thm.dest_arg (Thm.dest_arg ct))) + else MetaEq (nnf_quant ctxt vars quant_rules1 (hd ps) ct) + | _ $ (@{const Not} $ (Const _ $ Abs _)) $ (Const _ $ Abs _) => + MetaEq (nnf_quant ctxt vars quant_rules2 (hd ps) ct) + | _ => + let + val nnf_rewr_conv = Conv.arg_conv (Conv.arg_conv + (Old_Z3_Proof_Tools.unfold_eqs ctxt + (map (Thm.symmetric o meta_eq_of) ps))) + in Thm (Old_Z3_Proof_Tools.with_conv nnf_rewr_conv (prove_nnf ctxt) ct) end) +end + + + +(** equality proof rules **) + +(* |- t = t *) +fun refl ct = MetaEq (Thm.reflexive (Thm.dest_arg (Thm.dest_arg ct))) + + +(* s = t ==> t = s *) +local + val symm_rule = @{lemma "s = t ==> t == s" by simp} +in +fun symm (MetaEq thm) = MetaEq (Thm.symmetric thm) + | symm p = MetaEq (thm_of p COMP symm_rule) +end + + +(* s = t ==> t = u ==> s = u *) +local + val trans1 = @{lemma "s == t ==> t = u ==> s == u" by simp} + val trans2 = @{lemma "s = t ==> t == u ==> s == u" by simp} + val trans3 = @{lemma "s = t ==> t = u ==> s == u" by simp} +in +fun trans (MetaEq thm1) (MetaEq thm2) = MetaEq (Thm.transitive thm1 thm2) + | trans (MetaEq thm) q = MetaEq (thm_of q COMP (thm COMP trans1)) + | trans p (MetaEq thm) = MetaEq (thm COMP (thm_of p COMP trans2)) + | trans p q = MetaEq (thm_of q COMP (thm_of p COMP trans3)) +end + + +(* t1 = s1 ==> ... ==> tn = sn ==> f t1 ... tn = f s1 .. 
sn + (reflexive antecendents are droppped) *) +local + exception MONO + + fun prove_refl (ct, _) = Thm.reflexive ct + fun prove_comb f g cp = + let val ((ct1, ct2), (cu1, cu2)) = apply2 Thm.dest_comb cp + in Thm.combination (f (ct1, cu1)) (g (ct2, cu2)) end + fun prove_arg f = prove_comb prove_refl f + + fun prove f cp = prove_comb (prove f) f cp handle CTERM _ => prove_refl cp + + fun prove_nary is_comb f = + let + fun prove (cp as (ct, _)) = f cp handle MONO => + if is_comb (Thm.term_of ct) + then prove_comb (prove_arg prove) prove cp + else prove_refl cp + in prove end + + fun prove_list f n cp = + if n = 0 then prove_refl cp + else prove_comb (prove_arg f) (prove_list f (n-1)) cp + + fun with_length f (cp as (cl, _)) = + f (length (HOLogic.dest_list (Thm.term_of cl))) cp + + fun prove_distinct f = prove_arg (with_length (prove_list f)) + + fun prove_eq exn lookup cp = + (case lookup (Logic.mk_equals (apply2 Thm.term_of cp)) of + SOME eq => eq + | NONE => if exn then raise MONO else prove_refl cp) + + val prove_exn = prove_eq true + and prove_safe = prove_eq false + + fun mono f (cp as (cl, _)) = + (case Term.head_of (Thm.term_of cl) of + @{const HOL.conj} => prove_nary Old_Z3_Proof_Literals.is_conj (prove_exn f) + | @{const HOL.disj} => prove_nary Old_Z3_Proof_Literals.is_disj (prove_exn f) + | Const (@{const_name distinct}, _) => prove_distinct (prove_safe f) + | _ => prove (prove_safe f)) cp +in +fun monotonicity eqs ct = + let + fun and_symmetric (t, thm) = [(t, thm), (t, Thm.symmetric thm)] + val teqs = maps (and_symmetric o `Thm.prop_of o meta_eq_of) eqs + val lookup = AList.lookup (op aconv) teqs + val cp = Thm.dest_binop (Thm.dest_arg ct) + in MetaEq (prove_exn lookup cp handle MONO => mono lookup cp) end +end + + +(* |- f a b = f b a (where f is equality) *) +local + val rule = @{lemma "a = b == b = a" by (atomize(full)) (rule eq_commute)} +in +fun commutativity ct = + MetaEq (Old_Z3_Proof_Tools.match_instantiate I + (Old_Z3_Proof_Tools.as_meta_eq ct) rule) +end + + + +(** quantifier proof rules **) + +(* P ?x = Q ?x ==> (ALL x. P x) = (ALL x. Q x) + P ?x = Q ?x ==> (EX x. P x) = (EX x. Q x) *) +local + val rules = [ + @{lemma "(!!x. P x == Q x) ==> (ALL x. P x) == (ALL x. Q x)" by simp}, + @{lemma "(!!x. P x == Q x) ==> (EX x. P x) == (EX x. Q x)" by simp}] +in +fun quant_intro ctxt vars p ct = + let + val thm = meta_eq_of p + val rules' = Old_Z3_Proof_Tools.varify vars thm :: rules + val cu = Old_Z3_Proof_Tools.as_meta_eq ct + val tac = REPEAT_ALL_NEW (match_tac ctxt rules') + in MetaEq (Old_Z3_Proof_Tools.by_tac ctxt tac cu) end +end + + +(* |- ((ALL x. P x) | Q) = (ALL x. P x | Q) *) +fun pull_quant ctxt = Thm o try_apply ctxt [] [ + named ctxt "fast" (Old_Z3_Proof_Tools.by_tac ctxt (HOL_fast_tac ctxt))] + (* FIXME: not very well tested *) + + +(* |- (ALL x. P x & Q x) = ((ALL x. P x) & (ALL x. Q x)) *) +fun push_quant ctxt = Thm o try_apply ctxt [] [ + named ctxt "fast" (Old_Z3_Proof_Tools.by_tac ctxt (HOL_fast_tac ctxt))] + (* FIXME: not very well tested *) + + +(* |- (ALL x1 ... xn y1 ... yn. P x1 ... xn) = (ALL x1 ... xn. P x1 ... xn) *) +local + val elim_all = @{lemma "P = Q ==> (ALL x. P) = Q" by fast} + val elim_ex = @{lemma "P = Q ==> (EX x. 
P) = Q" by fast} + + fun elim_unused_tac ctxt i st = ( + match_tac ctxt [@{thm refl}] + ORELSE' (match_tac ctxt [elim_all, elim_ex] THEN' elim_unused_tac ctxt) + ORELSE' ( + match_tac ctxt [@{thm iff_allI}, @{thm iff_exI}] + THEN' elim_unused_tac ctxt)) i st +in + +fun elim_unused_vars ctxt = Thm o Old_Z3_Proof_Tools.by_tac ctxt (elim_unused_tac ctxt) + +end + + +(* |- (ALL x1 ... xn. ~(x1 = t1 & ... xn = tn) | P x1 ... xn) = P t1 ... tn *) +fun dest_eq_res ctxt = Thm o try_apply ctxt [] [ + named ctxt "fast" (Old_Z3_Proof_Tools.by_tac ctxt (HOL_fast_tac ctxt))] + (* FIXME: not very well tested *) + + +(* |- ~(ALL x1...xn. P x1...xn) | P a1...an *) +local + val rule = @{lemma "~ P x | Q ==> ~(ALL x. P x) | Q" by fast} +in +fun quant_inst ctxt = Thm o Old_Z3_Proof_Tools.by_tac ctxt ( + REPEAT_ALL_NEW (match_tac ctxt [rule]) + THEN' resolve_tac ctxt @{thms excluded_middle}) +end + + +(* |- (EX x. P x) = P c |- ~(ALL x. P x) = ~ P c *) +local + val forall = + Old_SMT_Utils.mk_const_pat @{theory} @{const_name Pure.all} + (Old_SMT_Utils.destT1 o Old_SMT_Utils.destT1) + fun mk_forall cv ct = + Thm.apply (Old_SMT_Utils.instT' cv forall) (Thm.lambda cv ct) + + fun get_vars f mk pred ctxt t = + Term.fold_aterms f t [] + |> map_filter (fn v => + if pred v then SOME (Thm.cterm_of ctxt (mk v)) else NONE) + + fun close vars f ct ctxt = + let + val frees_of = get_vars Term.add_frees Free (member (op =) vars o fst) + val vs = frees_of ctxt (Thm.term_of ct) + val (thm, ctxt') = f (fold_rev mk_forall vs ct) ctxt + val vars_of = get_vars Term.add_vars Var (K true) ctxt' + in + (Thm.instantiate ([], map (dest_Var o Thm.term_of) (vars_of (Thm.prop_of thm)) ~~ vs) thm, + ctxt') + end + + val sk_rules = @{lemma + "c = (SOME x. P x) ==> (EX x. P x) = P c" + "c = (SOME x. ~P x) ==> (~(ALL x. P x)) = (~P c)" + by (metis someI_ex)+} +in + +fun skolemize vars = + apfst Thm oo close vars (yield_singleton Assumption.add_assumes) + +fun discharge_sk_tac ctxt i st = ( + resolve_tac ctxt @{thms trans} i + THEN resolve_tac ctxt sk_rules i + THEN (resolve_tac ctxt @{thms refl} ORELSE' discharge_sk_tac ctxt) (i+1) + THEN resolve_tac ctxt @{thms refl} i) st + +end + + + +(** theory proof rules **) + +(* theory lemmas: linear arithmetic, arrays *) +fun th_lemma ctxt simpset thms = Thm o try_apply ctxt thms [ + Old_Z3_Proof_Tools.by_abstraction 0 (false, true) ctxt thms (fn ctxt' => + Old_Z3_Proof_Tools.by_tac ctxt' ( + NAMED ctxt' "arith" (Arith_Data.arith_tac ctxt') + ORELSE' NAMED ctxt' "simp+arith" ( + Simplifier.asm_full_simp_tac (put_simpset simpset ctxt') + THEN_ALL_NEW Arith_Data.arith_tac ctxt')))] + + +(* rewriting: prove equalities: + * ACI of conjunction/disjunction + * contradiction, excluded middle + * logical rewriting rules (for negation, implication, equivalence, + distinct) + * normal forms for polynoms (integer/real arithmetic) + * quantifier elimination over linear arithmetic + * ... ? 
**) +local + fun spec_meta_eq_of thm = + (case try (fn th => th RS @{thm spec}) thm of + SOME thm' => spec_meta_eq_of thm' + | NONE => mk_meta_eq thm) + + fun prep (Thm thm) = spec_meta_eq_of thm + | prep (MetaEq thm) = thm + | prep (Literals (thm, _)) = spec_meta_eq_of thm + + fun unfold_conv ctxt ths = + Conv.arg_conv (Conv.binop_conv (Old_Z3_Proof_Tools.unfold_eqs ctxt + (map prep ths))) + + fun with_conv _ [] prv = prv + | with_conv ctxt ths prv = + Old_Z3_Proof_Tools.with_conv (unfold_conv ctxt ths) prv + + val unfold_conv = + Conv.arg_conv (Conv.binop_conv + (Conv.try_conv Old_Z3_Proof_Tools.unfold_distinct_conv)) + val prove_conj_disj_eq = + Old_Z3_Proof_Tools.with_conv unfold_conv Old_Z3_Proof_Literals.prove_conj_disj_eq + + fun declare_hyps ctxt thm = + (thm, snd (Assumption.add_assumes (Thm.chyps_of thm) ctxt)) +in + +val abstraction_depth = 3 + (* + This value was chosen large enough to potentially catch exceptions, + yet small enough to not cause too much harm. The value might be + increased in the future, if reconstructing 'rewrite' fails on problems + that get too much abstracted to be reconstructable. + *) + +fun rewrite simpset ths ct ctxt = + apfst Thm (declare_hyps ctxt (with_conv ctxt ths (try_apply ctxt [] [ + named ctxt "conj/disj/distinct" prove_conj_disj_eq, + named ctxt "pull-ite" Old_Z3_Proof_Methods.prove_ite ctxt, + Old_Z3_Proof_Tools.by_abstraction 0 (true, false) ctxt [] (fn ctxt' => + Old_Z3_Proof_Tools.by_tac ctxt' ( + NAMED ctxt' "simp (logic)" (Simplifier.simp_tac (put_simpset simpset ctxt')) + THEN_ALL_NEW NAMED ctxt' "fast (logic)" (fast_tac ctxt'))), + Old_Z3_Proof_Tools.by_abstraction 0 (false, true) ctxt [] (fn ctxt' => + Old_Z3_Proof_Tools.by_tac ctxt' ( + (resolve_tac ctxt' @{thms iff_allI} ORELSE' K all_tac) + THEN' NAMED ctxt' "simp (theory)" (Simplifier.simp_tac (put_simpset simpset ctxt')) + THEN_ALL_NEW ( + NAMED ctxt' "fast (theory)" (HOL_fast_tac ctxt') + ORELSE' NAMED ctxt' "arith (theory)" (Arith_Data.arith_tac ctxt')))), + Old_Z3_Proof_Tools.by_abstraction 0 (true, true) ctxt [] (fn ctxt' => + Old_Z3_Proof_Tools.by_tac ctxt' ( + (resolve_tac ctxt' @{thms iff_allI} ORELSE' K all_tac) + THEN' NAMED ctxt' "simp (full)" (Simplifier.simp_tac (put_simpset simpset ctxt')) + THEN_ALL_NEW ( + NAMED ctxt' "fast (full)" (HOL_fast_tac ctxt') + ORELSE' NAMED ctxt' "arith (full)" (Arith_Data.arith_tac ctxt')))), + named ctxt "injectivity" (Old_Z3_Proof_Methods.prove_injectivity ctxt), + Old_Z3_Proof_Tools.by_abstraction abstraction_depth (true, true) ctxt [] + (fn ctxt' => + Old_Z3_Proof_Tools.by_tac ctxt' ( + (resolve_tac ctxt' @{thms iff_allI} ORELSE' K all_tac) + THEN' NAMED ctxt' "simp (deepen)" (Simplifier.simp_tac (put_simpset simpset ctxt')) + THEN_ALL_NEW ( + NAMED ctxt' "fast (deepen)" (HOL_fast_tac ctxt') + ORELSE' NAMED ctxt' "arith (deepen)" (Arith_Data.arith_tac + ctxt'))))]) ct)) + +end + + + +(* proof reconstruction *) + +(** tracing and checking **) + +fun trace_before ctxt idx = Old_SMT_Config.trace_msg ctxt (fn r => + "Z3: #" ^ string_of_int idx ^ ": " ^ Old_Z3_Proof_Parser.string_of_rule r) + +fun check_after idx r ps ct (p, (ctxt, _)) = + if not (Config.get ctxt Old_SMT_Config.trace) then () + else + let val thm = thm_of p |> tap (Thm.join_proofs o single) + in + if (Thm.cprop_of thm) aconvc ct then () + else + z3_exn (Pretty.string_of (Pretty.big_list + ("proof step failed: " ^ quote (Old_Z3_Proof_Parser.string_of_rule r) ^ + " (#" ^ string_of_int idx ^ ")") + (pretty_goal ctxt (map (thm_of o fst) ps) (Thm.prop_of thm) @ + 
[Pretty.block [Pretty.str "expected: ", + Syntax.pretty_term ctxt (Thm.term_of ct)]]))) + end + + +(** overall reconstruction procedure **) + +local + fun not_supported r = raise Fail ("Z3: proof rule not implemented: " ^ + quote (Old_Z3_Proof_Parser.string_of_rule r)) + + fun prove_step simpset vars r ps ct (cxp as (cx, ptab)) = + (case (r, ps) of + (* core rules *) + (Old_Z3_Proof_Parser.True_Axiom, _) => (Thm Old_Z3_Proof_Literals.true_thm, cxp) + | (Old_Z3_Proof_Parser.Asserted, _) => raise Fail "bad assertion" + | (Old_Z3_Proof_Parser.Goal, _) => raise Fail "bad assertion" + | (Old_Z3_Proof_Parser.Modus_Ponens, [(p, _), (q, _)]) => + (mp q (thm_of p), cxp) + | (Old_Z3_Proof_Parser.Modus_Ponens_Oeq, [(p, _), (q, _)]) => + (mp q (thm_of p), cxp) + | (Old_Z3_Proof_Parser.And_Elim, [(p, i)]) => + and_elim (p, i) ct ptab ||> pair cx + | (Old_Z3_Proof_Parser.Not_Or_Elim, [(p, i)]) => + not_or_elim (p, i) ct ptab ||> pair cx + | (Old_Z3_Proof_Parser.Hypothesis, _) => (Thm (Thm.assume ct), cxp) + | (Old_Z3_Proof_Parser.Lemma, [(p, _)]) => (lemma (thm_of p) ct, cxp) + | (Old_Z3_Proof_Parser.Unit_Resolution, (p, _) :: ps) => + (unit_resolution (thm_of p) (map (thm_of o fst) ps) ct, cxp) + | (Old_Z3_Proof_Parser.Iff_True, [(p, _)]) => (iff_true (thm_of p), cxp) + | (Old_Z3_Proof_Parser.Iff_False, [(p, _)]) => (iff_false (thm_of p), cxp) + | (Old_Z3_Proof_Parser.Distributivity, _) => (distributivity cx ct, cxp) + | (Old_Z3_Proof_Parser.Def_Axiom, _) => (def_axiom cx ct, cxp) + | (Old_Z3_Proof_Parser.Intro_Def, _) => intro_def ct cx ||> rpair ptab + | (Old_Z3_Proof_Parser.Apply_Def, [(p, _)]) => (apply_def (thm_of p), cxp) + | (Old_Z3_Proof_Parser.Iff_Oeq, [(p, _)]) => (p, cxp) + | (Old_Z3_Proof_Parser.Nnf_Pos, _) => (nnf cx vars (map fst ps) ct, cxp) + | (Old_Z3_Proof_Parser.Nnf_Neg, _) => (nnf cx vars (map fst ps) ct, cxp) + + (* equality rules *) + | (Old_Z3_Proof_Parser.Reflexivity, _) => (refl ct, cxp) + | (Old_Z3_Proof_Parser.Symmetry, [(p, _)]) => (symm p, cxp) + | (Old_Z3_Proof_Parser.Transitivity, [(p, _), (q, _)]) => (trans p q, cxp) + | (Old_Z3_Proof_Parser.Monotonicity, _) => (monotonicity (map fst ps) ct, cxp) + | (Old_Z3_Proof_Parser.Commutativity, _) => (commutativity ct, cxp) + + (* quantifier rules *) + | (Old_Z3_Proof_Parser.Quant_Intro, [(p, _)]) => (quant_intro cx vars p ct, cxp) + | (Old_Z3_Proof_Parser.Pull_Quant, _) => (pull_quant cx ct, cxp) + | (Old_Z3_Proof_Parser.Push_Quant, _) => (push_quant cx ct, cxp) + | (Old_Z3_Proof_Parser.Elim_Unused_Vars, _) => (elim_unused_vars cx ct, cxp) + | (Old_Z3_Proof_Parser.Dest_Eq_Res, _) => (dest_eq_res cx ct, cxp) + | (Old_Z3_Proof_Parser.Quant_Inst, _) => (quant_inst cx ct, cxp) + | (Old_Z3_Proof_Parser.Skolemize, _) => skolemize vars ct cx ||> rpair ptab + + (* theory rules *) + | (Old_Z3_Proof_Parser.Th_Lemma _, _) => (* FIXME: use arguments *) + (th_lemma cx simpset (map (thm_of o fst) ps) ct, cxp) + | (Old_Z3_Proof_Parser.Rewrite, _) => rewrite simpset [] ct cx ||> rpair ptab + | (Old_Z3_Proof_Parser.Rewrite_Star, ps) => + rewrite simpset (map fst ps) ct cx ||> rpair ptab + + | (Old_Z3_Proof_Parser.Nnf_Star, _) => not_supported r + | (Old_Z3_Proof_Parser.Cnf_Star, _) => not_supported r + | (Old_Z3_Proof_Parser.Transitivity_Star, _) => not_supported r + | (Old_Z3_Proof_Parser.Pull_Quant_Star, _) => not_supported r + + | _ => raise Fail ("Z3: proof rule " ^ + quote (Old_Z3_Proof_Parser.string_of_rule r) ^ + " has an unexpected number of arguments.")) + + fun lookup_proof ptab idx = + (case Inttab.lookup ptab idx of + SOME p => (p, 
idx) + | NONE => z3_exn ("unknown proof id: " ^ quote (string_of_int idx))) + + fun prove simpset vars (idx, step) (_, cxp as (ctxt, ptab)) = + let + val Old_Z3_Proof_Parser.Proof_Step {rule=r, prems, prop, ...} = step + val ps = map (lookup_proof ptab) prems + val _ = trace_before ctxt idx r + val (thm, (ctxt', ptab')) = + cxp + |> prove_step simpset vars r ps prop + |> tap (check_after idx r ps prop) + in (thm, (ctxt', Inttab.update (idx, thm) ptab')) end + + fun make_discharge_rules rules = rules @ [@{thm allI}, @{thm refl}, + @{thm reflexive}, Old_Z3_Proof_Literals.true_thm] + + fun discharge_assms_tac ctxt rules = + REPEAT (HEADGOAL (resolve_tac ctxt rules ORELSE' SOLVED' (discharge_sk_tac ctxt))) + + fun discharge_assms ctxt rules thm = + if Thm.nprems_of thm = 0 then Goal.norm_result ctxt thm + else + (case Seq.pull (discharge_assms_tac ctxt rules thm) of + SOME (thm', _) => Goal.norm_result ctxt thm' + | NONE => raise THM ("failed to discharge premise", 1, [thm])) + + fun discharge rules outer_ctxt (p, (inner_ctxt, _)) = + thm_of p + |> singleton (Proof_Context.export inner_ctxt outer_ctxt) + |> discharge_assms outer_ctxt (make_discharge_rules rules) +in + +fun reconstruct outer_ctxt recon output = + let + val {context=ctxt, typs, terms, rewrite_rules, assms} = recon + val (asserted, steps, vars, ctxt1) = + Old_Z3_Proof_Parser.parse ctxt typs terms output + + val simpset = + Old_Z3_Proof_Tools.make_simpset ctxt1 (Named_Theorems.get ctxt1 @{named_theorems old_z3_simp}) + + val ((is, rules), cxp as (ctxt2, _)) = + add_asserted outer_ctxt rewrite_rules assms asserted ctxt1 + in + if Config.get ctxt2 Old_SMT_Config.filter_only_facts then (is, @{thm TrueI}) + else + (Thm @{thm TrueI}, cxp) + |> fold (prove simpset vars) steps + |> discharge rules outer_ctxt + |> pair [] + end + +end + +end diff --git a/src/test-gen/src/main/version.thy b/src/test-gen/src/main/version.thy new file mode 100644 index 0000000..60b7b48 --- /dev/null +++ b/src/test-gen/src/main/version.thy @@ -0,0 +1,91 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * version.thy --- Version information for HOL-TestGen + * This file is part of HOL-TestGen. + * + * Copyright (c) 2013 Achim D. Brucker, Germany + * 2016 The University of Sheffield, UK + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id: log_thy.thy 9570 2013-02-02 14:47:51Z brucker $ *) + +theory + version +imports + Main +begin + +ML_file "config.sml" + +ML {* +val testgen_version = Int.toString(testgen_ver_major)^"."^Int.toString(testgen_ver_minor)^"." + ^Int.toString(testgen_ver_micro)^testgen_ver_tag +val version = "HOL-TestGen "^testgen_version^" , based on "^(Distribution.version); +val _ = if (Distribution.version = (isabelle_version)) + then () + else let + val line0 = "Unsupported Isabelle version: \""^(Distribution.version)^"\"" + val line1 = "Only \""^isabelle_version^"\" is supported." + in + warning line0; + warning line1; + Output.physical_stderr("###\n"); + Output.physical_stderr("### "^line0^"\n"); + Output.physical_stderr("### "^line1^"\n"); + Output.physical_stderr("###\n") + end +*} + + +ML {* + let + val today = (Date.toString(Date.fromTimeUniv (Time.now())))^" (UTC)"; + val tex_header = "% This file is generated automatically. Do not edit.\n" + ^"% Generated by HOL-TestGen"^testgen_version^"\n" + ^"% on "^today^".\n" + ^"% \n"; + val tex_version = "\\newcommand{\\testgen}{HOL-TestGen}\n" + ^"\\newcommand{\\testgenFW}{HOL-TestGen/FW}\n" + ^"\\newcommand{\\isabelleversion}{"^(Distribution.version)^"}\n" + ^"\\newcommand{\\testgenversion}{"^testgen_version^"}\n" + ^"\\newcommand{\\testgenversiontag}{"^testgen_ver_tag^"}\n" + ^"\\newcommand{\\testgenurl}{\\url{"^testgen_url^"}}\n" + in + File.write (Path.explode "document/version.tex") (tex_header^tex_version) + handle (IO.Io{name=name,...}) => warning ("Could not create \""^name + ^"\". 
Document preparation might fail.") + end +*} + +end From 05a0074685853809152ca8ae70890d2045245772 Mon Sep 17 00:00:00 2001 From: Serguei Mokhov Date: Fri, 25 Feb 2022 11:47:36 -0500 Subject: [PATCH 5/8] [i2021-1] move adjusted ROOT files to their proper place --- examples/ROOT | 2 +- src/ROOT | 9 ++++- src/test-gen/examples/ROOT | 83 -------------------------------------- src/test-gen/src/ROOT | 22 ---------- 4 files changed, 8 insertions(+), 108 deletions(-) delete mode 100644 src/test-gen/examples/ROOT delete mode 100644 src/test-gen/src/ROOT diff --git a/examples/ROOT b/examples/ROOT index 1fe7b08..aa6c5c6 100644 --- a/examples/ROOT +++ b/examples/ROOT @@ -60,7 +60,7 @@ session "HOL-TestGen-MyKeOS" in "concurrency/MyKeOS" = "HOL-TestGen" + "main.tex" "titlepage.tex" -session "HOL-TestGen-MyKeOS2" in "concurrency/MyKeOS" = "HOL-TestGen" + +session "HOL-TestGen-MyKeOS2" in "concurrency/MyKeOS2" = "HOL-TestGen" + options [quick_and_dirty(*, document = pdf,document_variants="document:outline=/proof,/ML",document_output=output*)] theories MyKeOS diff --git a/src/ROOT b/src/ROOT index 570641d..fe0b41f 100644 --- a/src/ROOT +++ b/src/ROOT @@ -1,11 +1,16 @@ -session "HOL-TestGenLib" = HOL + +session "HOL-TestGenLib" in "test" = "HOL-Library" + description {* HOL-TestGen Basic Libraries without Code-Generators and Testgenerator *} + (*directories "/cygdrive/c/Program Files (x86)/Isabelle HOL/Isabelle2021/hol-testgen-1.9.1/hol-testgen-1.9.1/src"*) + theories "TestLib" -session "HOL-TestGen" (main) = "HOL-TestGenLib" + +session "HOL-TestGen" (main) in "main" = "HOL-TestGenLib" + description {* HOL-TestGen *} + (*directories "/cygdrive/c/Program Files (x86)/Isabelle HOL/Isabelle2021/hol-testgen-1.9.1/hol-testgen-1.9.1/src"*) + (*directories "../../../../Isabelle-linux/Isabelle2021-1/src/HOL"*) + directories "codegen_fsharp" "codegen_gdb" theories "codegen_fsharp/Code_String_Fsharp" "codegen_fsharp/Code_Char_chr_Fsharp" diff --git a/src/test-gen/examples/ROOT b/src/test-gen/examples/ROOT deleted file mode 100644 index aa6c5c6..0000000 --- a/src/test-gen/examples/ROOT +++ /dev/null @@ -1,83 +0,0 @@ -session "HOL-TestGen-Max" in "unit/Max" = "HOL-TestGen" + - theories - Max_test - -session "HOL-TestGen-Triangle" in "unit/Triangle" = "HOL-TestGen" + - theories - Triangle - Triangle_test - -session "HOL-TestGen-List" in "unit/List" = "HOL-TestGen" + - options [document = pdf,document_variants="document:outline=/proof,/ML",document_output=output] - theories - "List_test" - document_files - "root.tex" - "root.bib" - "main.tex" - "titlepage.tex" - -session "HOL-TestGen-ListVerified" in "unit/ListVerified" = "HOL-TestGen" + - theories - List_Verified_test - -session "HOL-TestGen-RBT" in "unit/RBT" = "HOL-TestGen" + - theories - RBT_def - RBT_test - -session "HOL-TestGen-AVL" in "unit/AVL" = "HOL-TestGen" + - theories - AVL_def - AVL_test - -session "HOL-TestGen-SharedMemory" in "sequence/SharedMemory" = "HOL-TestGen" + - options [quick_and_dirty] - theories - SharedMemory_test - -session "HOL-TestGen-Bank" in "sequence/Bank" = "HOL-TestGen" + - options [quick_and_dirty, document = pdf,document_variants="document:outline=/proof,/ML",document_output=output] - theories - Bank - NonDetBank - document_files - "root.tex" - "root.bib" - "main.tex" - "titlepage.tex" - -session "HOL-TestGen-MyKeOS" in "concurrency/MyKeOS" = "HOL-TestGen" + - options [quick_and_dirty, document = pdf,document_variants="document:outline=/proof,/ML",document_output=output] - theories - MyKeOS - MyKeOS_test - MyKeOS_test_conc - - 
document_files - "root.tex" - "root.bib" - "main.tex" - "titlepage.tex" - -session "HOL-TestGen-MyKeOS2" in "concurrency/MyKeOS2" = "HOL-TestGen" + - options [quick_and_dirty(*, document = pdf,document_variants="document:outline=/proof,/ML",document_output=output*)] - theories - MyKeOS - MyKeOS_test_conc - document_files - "root.tex" - "root.bib" - "main.tex" - "titlepage.tex" - - -session "HOL-TestGen-Sequence" in "sequence/RBT" = "HOL-TestGen" + - theories - "../../unit/RBT/RBT_def" - RBT_seq_test - RBT_pfenning_seq_test - -session "HOL-TestGen-MiniFTP" in "reactive_sequence" = "HOL-TestGen" + - theories - "MiniFTP_test" diff --git a/src/test-gen/src/ROOT b/src/test-gen/src/ROOT deleted file mode 100644 index fe0b41f..0000000 --- a/src/test-gen/src/ROOT +++ /dev/null @@ -1,22 +0,0 @@ - -session "HOL-TestGenLib" in "test" = "HOL-Library" + - description {* HOL-TestGen Basic Libraries without Code-Generators and Testgenerator *} - (*directories "/cygdrive/c/Program Files (x86)/Isabelle HOL/Isabelle2021/hol-testgen-1.9.1/hol-testgen-1.9.1/src"*) - - theories - "TestLib" - -session "HOL-TestGen" (main) in "main" = "HOL-TestGenLib" + - description {* HOL-TestGen *} - (*directories "/cygdrive/c/Program Files (x86)/Isabelle HOL/Isabelle2021/hol-testgen-1.9.1/hol-testgen-1.9.1/src"*) - (*directories "../../../../Isabelle-linux/Isabelle2021-1/src/HOL"*) - directories "codegen_fsharp" "codegen_gdb" - theories - "codegen_fsharp/Code_String_Fsharp" - "codegen_fsharp/Code_Char_chr_Fsharp" - "codegen_fsharp/Code_Integer_Fsharp" - "codegen_fsharp/Code_Char_Fsharp" - "codegen_gdb/Code_gdb_script" - "Testing" - "IOCO" - "SharedMemory" From fea4137cefaf2f6c4c1d41d008cad68ee1a20361 Mon Sep 17 00:00:00 2001 From: Serguei Mokhov Date: Fri, 25 Feb 2022 11:50:54 -0500 Subject: [PATCH 6/8] [i2021-1] move re-arranged thy files into proper session dirs --- src/QuickCheckBackend.thy | 113 -- src/RandomBackend.thy | 243 --- src/{ => main}/Automata.thy | 0 src/{ => main}/BackendUtils.thy | 0 src/{ => main}/EFSM_Toolkit.thy | 0 src/{ => main}/IOCO.thy | 0 src/{ => main}/Interleaving.thy | 0 src/{ => main}/Monads.thy | 0 src/{ => main}/Observers.thy | 0 .../src => }/main/Old_SMT/old_smt_builtin.ML | 0 .../src => }/main/Old_SMT/old_smt_config.ML | 0 .../main/Old_SMT/old_smt_datatypes.ML | 0 .../src => }/main/Old_SMT/old_smt_failure.ML | 0 .../main/Old_SMT/old_smt_normalize.ML | 0 .../src => }/main/Old_SMT/old_smt_real.ML | 0 .../main/Old_SMT/old_smt_setup_solvers.ML | 0 .../src => }/main/Old_SMT/old_smt_solver.ML | 0 .../main/Old_SMT/old_smt_translate.ML | 0 .../src => }/main/Old_SMT/old_smt_utils.ML | 0 .../src => }/main/Old_SMT/old_smt_word.ML | 0 .../main/Old_SMT/old_smtlib_interface.ML | 0 .../src => }/main/Old_SMT/old_z3_interface.ML | 0 .../src => }/main/Old_SMT/old_z3_model.ML | 0 .../main/Old_SMT/old_z3_proof_literals.ML | 0 .../main/Old_SMT/old_z3_proof_methods.ML | 0 .../main/Old_SMT/old_z3_proof_parser.ML | 0 .../Old_SMT/old_z3_proof_reconstruction.ML | 0 .../main/Old_SMT/old_z3_proof_tools.ML | 0 .../src => }/main/QuickCheckBackend.thy | 0 src/{test-gen/src => }/main/RandomBackend.thy | 0 .../src => }/main/SMT/z3_replay_util.ML | 0 src/{ => main}/SMTBackend.thy | 0 src/{ => main}/SharedMemory.thy | 0 src/{ => main}/Term_Tactics.thy | 0 src/{ => main}/TestEnv.ML | 0 src/{ => main}/TestEnv.thy | 0 src/{test-gen/src => }/main/TestGen.thy | 0 src/{ => main}/TestLib.thy | 0 src/{ => main}/TestRefinements.thy | 0 src/{ => main}/TestScript.thy | 0 src/{ => main}/TestSequence.thy | 0 src/{ => main}/Testing.thy | 0 
src/{ => main}/clocks.ML | 0 src/{ => main}/clocks.thy | 0 .../codegen_C_pthread/Code_C_pthread.thy | 0 .../codegen_fsharp/Code_Char_Fsharp.thy | 0 .../codegen_fsharp/Code_Char_chr_Fsharp.thy | 0 .../codegen_fsharp/Code_Integer_Fsharp.thy | 0 .../codegen_fsharp/Code_String_Fsharp.thy | 0 src/{ => main}/codegen_fsharp/code_fsharp.ML | 0 src/{ => main}/codegen_fsharp/code_fsharp.thy | 0 src/{ => main}/codegen_fsharp/examples/AQ.thy | 0 .../codegen_fsharp/examples/SemiG.thy | 0 .../codegen_fsharp/upstream/code_ml.ML | 0 .../codegen_gdb/Code_gdb_script.thy | 0 src/{ => main}/config.sml | 0 src/{ => main}/debug/profiling_begin.thy | 0 src/{ => main}/debug/profiling_end.thy | 0 src/{ => main}/isar_setup.ML | 0 src/{ => main}/log.thy | 0 .../src => }/main/new_smt_patch/SMT_patch.thy | 0 .../new_smt_patch/smt_config_patch.ML | 0 .../new_smt_patch/smt_normalize_patch.ML | 0 .../new_smt_patch/smt_solver_patch.ML | 0 .../new_smt_patch/smt_systems_patch.ML | 0 .../new_smt_patch/smtlib_interface_patch.ML | 0 src/{ => main}/new_smt_patch/smtlib_patch.ML | 0 src/{ => main}/new_smt_patch/z3_model.ML | 0 .../new_smt_patch/z3_replay_patch.ML | 0 .../src => }/main/smt_patch/Old_SMT_patch.thy | 0 .../smt_patch/old_smt_config_patch.ML | 0 .../smt_patch/old_smt_failure_patch.ML | 0 .../smt_patch/old_smt_normalize_patch.ML | 0 .../smt_patch/old_smt_setup_solvers_patch.ML | 0 .../smt_patch/old_smt_solver_patch.ML | 0 .../smt_patch/old_smt_translate_patch.ML | 0 .../smt_patch/old_z3_interface_patch.ML | 0 .../smt_patch/old_z3_model_patch.ML | 0 .../smt_patch/old_z3_proof_parser_patch.ML | 0 .../old_z3_proof_reconstruction_patch.ML | 0 src/{test-gen/src => }/main/version.thy | 0 src/new_smt_patch/SMT_patch.thy | 435 ----- src/smt_patch/Old_SMT_patch.thy | 431 ----- src/test-gen/src/main/BackendUtils.thy | 93 - src/test-gen/src/main/IOCO.thy | 126 -- src/test-gen/src/main/SMTBackend.thy | 450 ----- src/test-gen/src/main/SharedMemory.thy | 1389 -------------- src/test-gen/src/main/Term_Tactics.thy | 334 ---- src/test-gen/src/main/clocks.ML | 352 ---- src/test-gen/src/main/clocks.thy | 370 ---- .../main/codegen_C_pthread/Code_C_pthread.thy | 86 - .../main/codegen_fsharp/Code_Char_Fsharp.thy | 103 - .../codegen_fsharp/Code_Char_chr_Fsharp.thy | 83 - .../codegen_fsharp/Code_Integer_Fsharp.thy | 101 - .../codegen_fsharp/Code_String_Fsharp.thy | 68 - .../src/main/codegen_fsharp/code_fsharp.ML | 618 ------ .../src/main/codegen_fsharp/code_fsharp.thy | 130 -- .../src/main/codegen_fsharp/examples/AQ.thy | 71 - .../main/codegen_fsharp/examples/SemiG.thy | 92 - .../main/codegen_fsharp/upstream/code_ml.ML | 898 --------- .../src/main/codegen_gdb/Code_gdb_script.thy | 322 ---- src/test-gen/src/main/config.sml | 93 - .../src/main/debug/profiling_begin.thy | 49 - src/test-gen/src/main/debug/profiling_end.thy | 51 - src/test-gen/src/main/isar_setup.ML | 178 -- src/test-gen/src/main/log.thy | 132 -- .../main/new_smt_patch/smt_config_patch.ML | 265 --- .../main/new_smt_patch/smt_normalize_patch.ML | 556 ------ .../main/new_smt_patch/smt_solver_patch.ML | 346 ---- .../main/new_smt_patch/smt_systems_patch.ML | 159 -- .../new_smt_patch/smtlib_interface_patch.ML | 172 -- .../src/main/new_smt_patch/smtlib_patch.ML | 202 -- .../src/main/new_smt_patch/z3_model.ML | 113 -- .../src/main/new_smt_patch/z3_replay_patch.ML | 262 --- .../main/smt_patch/old_smt_config_patch.ML | 254 --- .../main/smt_patch/old_smt_failure_patch.ML | 61 - .../main/smt_patch/old_smt_normalize_patch.ML | 652 ------- .../smt_patch/old_smt_setup_solvers_patch.ML | 189 -- 
.../main/smt_patch/old_smt_solver_patch.ML | 378 ---- .../main/smt_patch/old_smt_translate_patch.ML | 589 ------ .../main/smt_patch/old_z3_interface_patch.ML | 239 --- .../src/main/smt_patch/old_z3_model_patch.ML | 337 ---- .../smt_patch/old_z3_proof_parser_patch.ML | 446 ----- .../old_z3_proof_reconstruction_patch.ML | 891 --------- src/test-gen/src/test/Automata.thy | 391 ---- src/test-gen/src/test/EFSM_Toolkit.thy | 167 -- src/test-gen/src/test/Interleaving.thy | 244 --- src/test-gen/src/test/Monads.thy | 1256 ------------ src/test-gen/src/test/Observers.thy | 200 -- src/test-gen/src/test/TestEnv.ML | 458 ----- src/test-gen/src/test/TestEnv.thy | 726 ------- src/test-gen/src/test/TestGen.thy | 1704 ----------------- src/test-gen/src/test/TestLib.thy | 57 - src/test-gen/src/test/TestRefinements.thy | 248 --- src/test-gen/src/test/TestScript.thy | 164 -- src/test-gen/src/test/TestSequence.thy | 1001 ---------- src/test-gen/src/test/Testing.thy | 57 - src/{test-gen/src/main => test}/Automata.thy | 0 .../src/main => test}/EFSM_Toolkit.thy | 0 .../src/main => test}/Interleaving.thy | 0 src/{test-gen/src/main => test}/Monads.thy | 0 src/{test-gen/src/main => test}/Observers.thy | 0 src/{test-gen/src/main => test}/TestEnv.ML | 0 src/{test-gen/src/main => test}/TestEnv.thy | 0 src/{ => test}/TestGen.thy | 0 src/{test-gen/src/main => test}/TestLib.thy | 0 .../src/main => test}/TestRefinements.thy | 0 .../src/main => test}/TestScript.thy | 0 .../src/main => test}/TestSequence.thy | 0 src/{test-gen/src/main => test}/Testing.thy | 0 src/version.thy | 91 - 151 files changed, 20286 deletions(-) delete mode 100644 src/QuickCheckBackend.thy delete mode 100644 src/RandomBackend.thy rename src/{ => main}/Automata.thy (100%) rename src/{ => main}/BackendUtils.thy (100%) rename src/{ => main}/EFSM_Toolkit.thy (100%) rename src/{ => main}/IOCO.thy (100%) rename src/{ => main}/Interleaving.thy (100%) rename src/{ => main}/Monads.thy (100%) rename src/{ => main}/Observers.thy (100%) rename src/{test-gen/src => }/main/Old_SMT/old_smt_builtin.ML (100%) rename src/{test-gen/src => }/main/Old_SMT/old_smt_config.ML (100%) rename src/{test-gen/src => }/main/Old_SMT/old_smt_datatypes.ML (100%) rename src/{test-gen/src => }/main/Old_SMT/old_smt_failure.ML (100%) rename src/{test-gen/src => }/main/Old_SMT/old_smt_normalize.ML (100%) rename src/{test-gen/src => }/main/Old_SMT/old_smt_real.ML (100%) rename src/{test-gen/src => }/main/Old_SMT/old_smt_setup_solvers.ML (100%) rename src/{test-gen/src => }/main/Old_SMT/old_smt_solver.ML (100%) rename src/{test-gen/src => }/main/Old_SMT/old_smt_translate.ML (100%) rename src/{test-gen/src => }/main/Old_SMT/old_smt_utils.ML (100%) rename src/{test-gen/src => }/main/Old_SMT/old_smt_word.ML (100%) rename src/{test-gen/src => }/main/Old_SMT/old_smtlib_interface.ML (100%) rename src/{test-gen/src => }/main/Old_SMT/old_z3_interface.ML (100%) rename src/{test-gen/src => }/main/Old_SMT/old_z3_model.ML (100%) rename src/{test-gen/src => }/main/Old_SMT/old_z3_proof_literals.ML (100%) rename src/{test-gen/src => }/main/Old_SMT/old_z3_proof_methods.ML (100%) rename src/{test-gen/src => }/main/Old_SMT/old_z3_proof_parser.ML (100%) rename src/{test-gen/src => }/main/Old_SMT/old_z3_proof_reconstruction.ML (100%) rename src/{test-gen/src => }/main/Old_SMT/old_z3_proof_tools.ML (100%) rename src/{test-gen/src => }/main/QuickCheckBackend.thy (100%) rename src/{test-gen/src => }/main/RandomBackend.thy (100%) rename src/{test-gen/src => }/main/SMT/z3_replay_util.ML (100%) rename src/{ => 
main}/SMTBackend.thy (100%) rename src/{ => main}/SharedMemory.thy (100%) rename src/{ => main}/Term_Tactics.thy (100%) rename src/{ => main}/TestEnv.ML (100%) rename src/{ => main}/TestEnv.thy (100%) rename src/{test-gen/src => }/main/TestGen.thy (100%) rename src/{ => main}/TestLib.thy (100%) rename src/{ => main}/TestRefinements.thy (100%) rename src/{ => main}/TestScript.thy (100%) rename src/{ => main}/TestSequence.thy (100%) rename src/{ => main}/Testing.thy (100%) rename src/{ => main}/clocks.ML (100%) rename src/{ => main}/clocks.thy (100%) rename src/{ => main}/codegen_C_pthread/Code_C_pthread.thy (100%) rename src/{ => main}/codegen_fsharp/Code_Char_Fsharp.thy (100%) rename src/{ => main}/codegen_fsharp/Code_Char_chr_Fsharp.thy (100%) rename src/{ => main}/codegen_fsharp/Code_Integer_Fsharp.thy (100%) rename src/{ => main}/codegen_fsharp/Code_String_Fsharp.thy (100%) rename src/{ => main}/codegen_fsharp/code_fsharp.ML (100%) rename src/{ => main}/codegen_fsharp/code_fsharp.thy (100%) rename src/{ => main}/codegen_fsharp/examples/AQ.thy (100%) mode change 100644 => 100755 rename src/{ => main}/codegen_fsharp/examples/SemiG.thy (100%) mode change 100644 => 100755 rename src/{ => main}/codegen_fsharp/upstream/code_ml.ML (100%) rename src/{ => main}/codegen_gdb/Code_gdb_script.thy (100%) rename src/{ => main}/config.sml (100%) rename src/{ => main}/debug/profiling_begin.thy (100%) rename src/{ => main}/debug/profiling_end.thy (100%) rename src/{ => main}/isar_setup.ML (100%) rename src/{ => main}/log.thy (100%) rename src/{test-gen/src => }/main/new_smt_patch/SMT_patch.thy (100%) rename src/{ => main}/new_smt_patch/smt_config_patch.ML (100%) rename src/{ => main}/new_smt_patch/smt_normalize_patch.ML (100%) rename src/{ => main}/new_smt_patch/smt_solver_patch.ML (100%) rename src/{ => main}/new_smt_patch/smt_systems_patch.ML (100%) rename src/{ => main}/new_smt_patch/smtlib_interface_patch.ML (100%) rename src/{ => main}/new_smt_patch/smtlib_patch.ML (100%) rename src/{ => main}/new_smt_patch/z3_model.ML (100%) rename src/{ => main}/new_smt_patch/z3_replay_patch.ML (100%) rename src/{test-gen/src => }/main/smt_patch/Old_SMT_patch.thy (100%) rename src/{ => main}/smt_patch/old_smt_config_patch.ML (100%) rename src/{ => main}/smt_patch/old_smt_failure_patch.ML (100%) rename src/{ => main}/smt_patch/old_smt_normalize_patch.ML (100%) rename src/{ => main}/smt_patch/old_smt_setup_solvers_patch.ML (100%) rename src/{ => main}/smt_patch/old_smt_solver_patch.ML (100%) rename src/{ => main}/smt_patch/old_smt_translate_patch.ML (100%) rename src/{ => main}/smt_patch/old_z3_interface_patch.ML (100%) rename src/{ => main}/smt_patch/old_z3_model_patch.ML (100%) rename src/{ => main}/smt_patch/old_z3_proof_parser_patch.ML (100%) rename src/{ => main}/smt_patch/old_z3_proof_reconstruction_patch.ML (100%) rename src/{test-gen/src => }/main/version.thy (100%) delete mode 100644 src/new_smt_patch/SMT_patch.thy delete mode 100644 src/smt_patch/Old_SMT_patch.thy delete mode 100644 src/test-gen/src/main/BackendUtils.thy delete mode 100644 src/test-gen/src/main/IOCO.thy delete mode 100644 src/test-gen/src/main/SMTBackend.thy delete mode 100644 src/test-gen/src/main/SharedMemory.thy delete mode 100644 src/test-gen/src/main/Term_Tactics.thy delete mode 100644 src/test-gen/src/main/clocks.ML delete mode 100644 src/test-gen/src/main/clocks.thy delete mode 100644 src/test-gen/src/main/codegen_C_pthread/Code_C_pthread.thy delete mode 100644 src/test-gen/src/main/codegen_fsharp/Code_Char_Fsharp.thy delete mode 
100644 src/test-gen/src/main/codegen_fsharp/Code_Char_chr_Fsharp.thy delete mode 100644 src/test-gen/src/main/codegen_fsharp/Code_Integer_Fsharp.thy delete mode 100644 src/test-gen/src/main/codegen_fsharp/Code_String_Fsharp.thy delete mode 100644 src/test-gen/src/main/codegen_fsharp/code_fsharp.ML delete mode 100644 src/test-gen/src/main/codegen_fsharp/code_fsharp.thy delete mode 100755 src/test-gen/src/main/codegen_fsharp/examples/AQ.thy delete mode 100755 src/test-gen/src/main/codegen_fsharp/examples/SemiG.thy delete mode 100644 src/test-gen/src/main/codegen_fsharp/upstream/code_ml.ML delete mode 100644 src/test-gen/src/main/codegen_gdb/Code_gdb_script.thy delete mode 100644 src/test-gen/src/main/config.sml delete mode 100644 src/test-gen/src/main/debug/profiling_begin.thy delete mode 100644 src/test-gen/src/main/debug/profiling_end.thy delete mode 100644 src/test-gen/src/main/isar_setup.ML delete mode 100644 src/test-gen/src/main/log.thy delete mode 100644 src/test-gen/src/main/new_smt_patch/smt_config_patch.ML delete mode 100644 src/test-gen/src/main/new_smt_patch/smt_normalize_patch.ML delete mode 100644 src/test-gen/src/main/new_smt_patch/smt_solver_patch.ML delete mode 100644 src/test-gen/src/main/new_smt_patch/smt_systems_patch.ML delete mode 100644 src/test-gen/src/main/new_smt_patch/smtlib_interface_patch.ML delete mode 100644 src/test-gen/src/main/new_smt_patch/smtlib_patch.ML delete mode 100644 src/test-gen/src/main/new_smt_patch/z3_model.ML delete mode 100644 src/test-gen/src/main/new_smt_patch/z3_replay_patch.ML delete mode 100644 src/test-gen/src/main/smt_patch/old_smt_config_patch.ML delete mode 100644 src/test-gen/src/main/smt_patch/old_smt_failure_patch.ML delete mode 100644 src/test-gen/src/main/smt_patch/old_smt_normalize_patch.ML delete mode 100644 src/test-gen/src/main/smt_patch/old_smt_setup_solvers_patch.ML delete mode 100644 src/test-gen/src/main/smt_patch/old_smt_solver_patch.ML delete mode 100644 src/test-gen/src/main/smt_patch/old_smt_translate_patch.ML delete mode 100644 src/test-gen/src/main/smt_patch/old_z3_interface_patch.ML delete mode 100644 src/test-gen/src/main/smt_patch/old_z3_model_patch.ML delete mode 100644 src/test-gen/src/main/smt_patch/old_z3_proof_parser_patch.ML delete mode 100644 src/test-gen/src/main/smt_patch/old_z3_proof_reconstruction_patch.ML delete mode 100644 src/test-gen/src/test/Automata.thy delete mode 100644 src/test-gen/src/test/EFSM_Toolkit.thy delete mode 100644 src/test-gen/src/test/Interleaving.thy delete mode 100644 src/test-gen/src/test/Monads.thy delete mode 100644 src/test-gen/src/test/Observers.thy delete mode 100644 src/test-gen/src/test/TestEnv.ML delete mode 100644 src/test-gen/src/test/TestEnv.thy delete mode 100644 src/test-gen/src/test/TestGen.thy delete mode 100644 src/test-gen/src/test/TestLib.thy delete mode 100644 src/test-gen/src/test/TestRefinements.thy delete mode 100644 src/test-gen/src/test/TestScript.thy delete mode 100644 src/test-gen/src/test/TestSequence.thy delete mode 100644 src/test-gen/src/test/Testing.thy rename src/{test-gen/src/main => test}/Automata.thy (100%) rename src/{test-gen/src/main => test}/EFSM_Toolkit.thy (100%) rename src/{test-gen/src/main => test}/Interleaving.thy (100%) rename src/{test-gen/src/main => test}/Monads.thy (100%) rename src/{test-gen/src/main => test}/Observers.thy (100%) rename src/{test-gen/src/main => test}/TestEnv.ML (100%) rename src/{test-gen/src/main => test}/TestEnv.thy (100%) rename src/{ => test}/TestGen.thy (100%) rename src/{test-gen/src/main => 
test}/TestLib.thy (100%) rename src/{test-gen/src/main => test}/TestRefinements.thy (100%) rename src/{test-gen/src/main => test}/TestScript.thy (100%) rename src/{test-gen/src/main => test}/TestSequence.thy (100%) rename src/{test-gen/src/main => test}/Testing.thy (100%) delete mode 100644 src/version.thy diff --git a/src/QuickCheckBackend.thy b/src/QuickCheckBackend.thy deleted file mode 100644 index c17034b..0000000 --- a/src/QuickCheckBackend.thy +++ /dev/null @@ -1,113 +0,0 @@ -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * TestGen.thy --- the core of the HOL-TestGen System. - * This file is part of HOL-TestGen. - * - * Copyright (c) 2005-2010 ETH Zurich, Switzerland - * 2008-2013 Achim D. Brucker, Germany - * 2009-2013 Universite Paris-Sud, France - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ******************************************************************************) -(* $Id:$ *) - -chapter {* The QuickCheck backend *} - -theory QuickCheckBackend -imports - HOL - Int - List - TestEnv - BackendUtils - -begin - - -ML{* - -structure QuickCheckBackend = -struct - -fun list_abs_var t [] = t - | list_abs_var t ((x as Var(_, T))::vars) = Abs(Name.internal Name.uu, T, abstract_over(x, list_abs_var t vars)) - -fun iterate f 0 = NONE - | iterate f k = case f () handle Match => NONE - of NONE => iterate f (k - 1) | SOME q => SOME q; - -fun quickcheck_tac' ctxt iters n thm = - let - val size = 15 - val thy = Proof_Context.theory_of ctxt - val vars = BackendUtils.premvars n thm - val prem = Logic.nth_prem(n, Thm.prop_of thm) - val neg = @{term Not} $ (HOLogic.dest_Trueprop prem) - val neg' = list_abs_var neg vars - - (* TODO: use new code generation - - val tester = Codegen.test_term ctxt n - - *) - fun tester x = NONE - fun with_tester k = iterate (fn () => tester k) iters - fun with_size k = if k > size then NONE - else - (case with_tester k - of NONE => with_size (k + 1) - | SOME q => SOME q); - in case with_size 1 of - SOME insts => let - val instantiated = Drule.instantiate_normalize - ([], BackendUtils.certify_pairs ctxt (vars ~~ insts)) thm - in - full_simp_tac ctxt n instantiated - end - | NONE => Seq.empty - end - -fun quickcheck_tac ctxt iters n thm = let - val tac = Object_Logic.full_atomize_tac ctxt THEN' (quickcheck_tac' ctxt iters) -in - (case (Seq.pull (tac n thm)) of - SOME (x, xq) => Seq.single x - | NONE => Seq.empty) - handle ERROR _ => Seq.empty (* Catch "unable to generate code" exceptions *) -end - -end - -*} - -end diff --git a/src/RandomBackend.thy b/src/RandomBackend.thy deleted file mode 100644 index c1b8a1f..0000000 --- a/src/RandomBackend.thy +++ /dev/null @@ -1,243 +0,0 @@ -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * TestGen.thy --- the core of the HOL-TestGen System. - * This file is part of HOL-TestGen. - * - * Copyright (c) 2005-2010 ETH Zurich, Switzerland - * 2008-2013 Achim D. Brucker, Germany - * 2009-2013 Universite Paris-Sud, France - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ******************************************************************************) -(* $Id:$ *) - -chapter {* The random solver *} - -theory RandomBackend -imports - HOL - Int - List - TestEnv - BackendUtils - -begin - - -ML{* - -structure RandomBackend = -struct - -open HOLogic - -(* Backported from Isabelle2011-1 module Pure/Library.ML *) -fun frequency xs = - let - val sum = Library.foldl op + (0, map fst xs); - fun pick n ((k: int, x) :: xs) = if n <= k then x else pick (n - k) xs - in pick (Random.random_range 1 sum) xs end; - - -val (trace, trace_setup) = Attrib.config_bool @{binding testgen_trace} (K false); - -fun calc_constr_list tgt descr = -let val recTs = Old_Datatype_Aux.get_rec_types descr; - val newTs = Library.take (length descr) recTs; - val (_,(_,insts,tgt_constrs)) = hd(filter (fn (_,(n,_,_)) => n = tgt) descr) - val T = hd(filter (fn (Type(n,_)) => n = tgt) newTs) - val typ_of_dtyp = Old_Datatype_Aux.typ_of_dtyp descr - val constr_decls = map (fn (cname, cargs) => - Const(cname, map typ_of_dtyp cargs ---> T)) - (tgt_constrs) -in (map Old_Datatype_Aux.dest_DtTFree insts, constr_decls) end; - - -(* Getting the information associated to an extended record type name *) -fun is_record thy s = - let - fun remove_prefix [] = [] - | remove_prefix (c::cs) = if c = #"." then cs else remove_prefix cs - fun remove_suffix s = String.implode (List.rev (remove_prefix (List.rev (String.explode s)))) - in - if String.isSuffix "_ext" s then - Record.get_info thy (remove_suffix s) - else - NONE - end - - -(* Random value generator for user-defined records - Note: it does not work for extended records *) -fun random_record w n max ctxt cod_term_tab i = - let - (* Generating random values for the fields *) - val fields = #fields(i) - val random_fields = List.map (fn (_,ty) => random_term' w (n+1) max ctxt cod_term_tab ty) fields - - (* Getting the record maker. 
Another way would be to generate a Const whose name is the same - as the name of the type *) - fun head (a $ _) = head a - | head t = t - val (_ $ app_make $ _) = Thm.concl_of (hd (#defs(i))) - val make = head app_make - - (* Building the record *) - val res = List.foldl (fn (f,h) => h $ f) make random_fields - in - res - end - -(* Random value generator for user-defined data-types *) -and random_datatype w n max ctxt cod_term_tab s S i = - let - val descr = #descr(i) - - val (insts,constrs) = calc_constr_list s descr - - val weighed_constrs = - let - fun ar args = (length (filter (fn t => - case t of - Old_Datatype_Aux.DtRec _ => true - | _ => false ) args) ) - val constr_arity_list = map (fn (f,args) => (f,(ar args))) - (maps (#3 o snd) descr) - in - map (fn (f,a) => if a = 0 then (1,f) else (a * w,f)) - constr_arity_list - end - - val weighed_constrs = if (n >= max) - then filter (fn (w,_) => w =1) weighed_constrs - else weighed_constrs - fun weight_of t = fst(hd ((filter (fn (_,ty) => ty=t)) weighed_constrs)) - - fun frequency' xs = - let - val max = List.foldl Int.max 0 (map fst xs); - val xs' = map (fn (x,a) => (max-x+1,a)) xs - in - frequency xs' - end - - (* the default is a random bias towards constants *) - val constr = frequency' weighed_constrs - val Const(h,ty) = hd (filter (fn Const(h,ty) => h = constr) constrs) - val w = weight_of h - val ty_binds = insts ~~ S - fun ty_inst s = the (AList.lookup (op =) ty_binds s) - val instantiated_ty = map_type_tfree ty_inst ty - val const_head = Const(h,instantiated_ty) - val arg_ty = binder_types instantiated_ty - in list_comb(const_head, - (map(random_term' w (n+1) max ctxt cod_term_tab)(arg_ty))) - end - -(* Random value generator for various types *) -and random_term' w n max ctxt cod_term_tab (Type(s,S)) = -(* w => the weight on the actual level, initial value 1 - n => level counter, inital value 0 - max => maximal allowed number of levels -*) - let val thy = Proof_Context.theory_of ctxt - in - (case Symtab.lookup cod_term_tab s of - NONE => (* default random term generator ... - TODO : should do also something for functions ... 
*) - (case Type(s,S) of - Type(@{type_name int},_) => mk_number intT (IntInf.fromInt((Random.random_range 0 20) - 10)) - | Type(@{type_name nat},_) => mk_nat (IntInf.fromInt((Random.random_range 0 40))) - | Type(@{type_name set},_) => Const(@{const_name set},dummyT) $ - (random_term' w n max ctxt cod_term_tab (Type(@{type_name list},S))) - | Type(@{type_name fun},[T, Type(@{type_name bool}, [])]) => - Const(@{const_name set}, Type(@{type_name fun}, [Type(@{type_name list}, [T]), Type(s,S)])) $ - (random_term' w n max ctxt cod_term_tab (Type(@{type_name list},[T]))) - | Type(@{type_name fun},[T,U]) => absdummy T (random_term' w n max ctxt cod_term_tab U) - | _ => - (case is_record thy s of - (* The type is a user-defined record *) - SOME i => random_record w n max ctxt cod_term_tab i - | NONE => - (case BNF_LFP_Compat.get_info thy [] s of - (* The type is a user-defined data-type *) - SOME i => random_datatype w n max ctxt cod_term_tab s S i - | NONE => error("Cannot generate random value for type:" ^s^"\nCan only generate random values for int, nat, set, fun, and user-defined records and datatypes") - ) - ) - ) - | SOME R => R S) - end - |random_term' _ _ _ _ _ _ = error "Internal error in random_term: type not ground"; - - -fun random_term thy cod_term_tab typ = random_term' 1 0 10000 thy cod_term_tab typ -(* test section: - -val ttt = [HOLogic.intT,HOLogic.unitT,HOLogic.boolT, - HOLogic.mk_setT HOLogic.intT, - HOLogic.listT HOLogic.intT]; -map (random_term Symtab.empty) ttt; - - *) - -fun random_insts ctxt cod_tab vars () = - map(fn(x as Var(s,t))=> - (Thm.cterm_of ctxt x,Thm.cterm_of ctxt (random_term ctxt cod_tab t))) vars - -fun single_rand_inst_tac ctxt vars thm = let - val te = TestEnv.get_testenv ctxt - val cod_tab = #cod_term_tab(TestEnv.rep_testenv te) - val to_var_index = (fn Var(s,t) => (s,t)) o Thm.term_of - val insts = map (fn(x,y)=> (to_var_index x,y)) (random_insts ctxt cod_tab vars ()) -in - Seq.single (Drule.instantiate_normalize ([], insts) thm) -end - -fun random_inst_tac ctxt iters n thm = let - val _ = if iters > 0 andalso Config.get ctxt trace then tracing ("Random solving subgoal "^Int.toString(n)) else () - val single_tac = (* print_tac "A" THEN *) - (single_rand_inst_tac ctxt (BackendUtils.premvars n thm)) THEN - (* print_tac "B" THEN *) - (BackendUtils.solve_by_simp_tac ctxt n) -in - (FIRST (replicate iters single_tac)) thm -end - -end - -*} - - - -end diff --git a/src/Automata.thy b/src/main/Automata.thy similarity index 100% rename from src/Automata.thy rename to src/main/Automata.thy diff --git a/src/BackendUtils.thy b/src/main/BackendUtils.thy similarity index 100% rename from src/BackendUtils.thy rename to src/main/BackendUtils.thy diff --git a/src/EFSM_Toolkit.thy b/src/main/EFSM_Toolkit.thy similarity index 100% rename from src/EFSM_Toolkit.thy rename to src/main/EFSM_Toolkit.thy diff --git a/src/IOCO.thy b/src/main/IOCO.thy similarity index 100% rename from src/IOCO.thy rename to src/main/IOCO.thy diff --git a/src/Interleaving.thy b/src/main/Interleaving.thy similarity index 100% rename from src/Interleaving.thy rename to src/main/Interleaving.thy diff --git a/src/Monads.thy b/src/main/Monads.thy similarity index 100% rename from src/Monads.thy rename to src/main/Monads.thy diff --git a/src/Observers.thy b/src/main/Observers.thy similarity index 100% rename from src/Observers.thy rename to src/main/Observers.thy diff --git a/src/test-gen/src/main/Old_SMT/old_smt_builtin.ML b/src/main/Old_SMT/old_smt_builtin.ML similarity index 100% rename from 
src/test-gen/src/main/Old_SMT/old_smt_builtin.ML rename to src/main/Old_SMT/old_smt_builtin.ML diff --git a/src/test-gen/src/main/Old_SMT/old_smt_config.ML b/src/main/Old_SMT/old_smt_config.ML similarity index 100% rename from src/test-gen/src/main/Old_SMT/old_smt_config.ML rename to src/main/Old_SMT/old_smt_config.ML diff --git a/src/test-gen/src/main/Old_SMT/old_smt_datatypes.ML b/src/main/Old_SMT/old_smt_datatypes.ML similarity index 100% rename from src/test-gen/src/main/Old_SMT/old_smt_datatypes.ML rename to src/main/Old_SMT/old_smt_datatypes.ML diff --git a/src/test-gen/src/main/Old_SMT/old_smt_failure.ML b/src/main/Old_SMT/old_smt_failure.ML similarity index 100% rename from src/test-gen/src/main/Old_SMT/old_smt_failure.ML rename to src/main/Old_SMT/old_smt_failure.ML diff --git a/src/test-gen/src/main/Old_SMT/old_smt_normalize.ML b/src/main/Old_SMT/old_smt_normalize.ML similarity index 100% rename from src/test-gen/src/main/Old_SMT/old_smt_normalize.ML rename to src/main/Old_SMT/old_smt_normalize.ML diff --git a/src/test-gen/src/main/Old_SMT/old_smt_real.ML b/src/main/Old_SMT/old_smt_real.ML similarity index 100% rename from src/test-gen/src/main/Old_SMT/old_smt_real.ML rename to src/main/Old_SMT/old_smt_real.ML diff --git a/src/test-gen/src/main/Old_SMT/old_smt_setup_solvers.ML b/src/main/Old_SMT/old_smt_setup_solvers.ML similarity index 100% rename from src/test-gen/src/main/Old_SMT/old_smt_setup_solvers.ML rename to src/main/Old_SMT/old_smt_setup_solvers.ML diff --git a/src/test-gen/src/main/Old_SMT/old_smt_solver.ML b/src/main/Old_SMT/old_smt_solver.ML similarity index 100% rename from src/test-gen/src/main/Old_SMT/old_smt_solver.ML rename to src/main/Old_SMT/old_smt_solver.ML diff --git a/src/test-gen/src/main/Old_SMT/old_smt_translate.ML b/src/main/Old_SMT/old_smt_translate.ML similarity index 100% rename from src/test-gen/src/main/Old_SMT/old_smt_translate.ML rename to src/main/Old_SMT/old_smt_translate.ML diff --git a/src/test-gen/src/main/Old_SMT/old_smt_utils.ML b/src/main/Old_SMT/old_smt_utils.ML similarity index 100% rename from src/test-gen/src/main/Old_SMT/old_smt_utils.ML rename to src/main/Old_SMT/old_smt_utils.ML diff --git a/src/test-gen/src/main/Old_SMT/old_smt_word.ML b/src/main/Old_SMT/old_smt_word.ML similarity index 100% rename from src/test-gen/src/main/Old_SMT/old_smt_word.ML rename to src/main/Old_SMT/old_smt_word.ML diff --git a/src/test-gen/src/main/Old_SMT/old_smtlib_interface.ML b/src/main/Old_SMT/old_smtlib_interface.ML similarity index 100% rename from src/test-gen/src/main/Old_SMT/old_smtlib_interface.ML rename to src/main/Old_SMT/old_smtlib_interface.ML diff --git a/src/test-gen/src/main/Old_SMT/old_z3_interface.ML b/src/main/Old_SMT/old_z3_interface.ML similarity index 100% rename from src/test-gen/src/main/Old_SMT/old_z3_interface.ML rename to src/main/Old_SMT/old_z3_interface.ML diff --git a/src/test-gen/src/main/Old_SMT/old_z3_model.ML b/src/main/Old_SMT/old_z3_model.ML similarity index 100% rename from src/test-gen/src/main/Old_SMT/old_z3_model.ML rename to src/main/Old_SMT/old_z3_model.ML diff --git a/src/test-gen/src/main/Old_SMT/old_z3_proof_literals.ML b/src/main/Old_SMT/old_z3_proof_literals.ML similarity index 100% rename from src/test-gen/src/main/Old_SMT/old_z3_proof_literals.ML rename to src/main/Old_SMT/old_z3_proof_literals.ML diff --git a/src/test-gen/src/main/Old_SMT/old_z3_proof_methods.ML b/src/main/Old_SMT/old_z3_proof_methods.ML similarity index 100% rename from src/test-gen/src/main/Old_SMT/old_z3_proof_methods.ML rename to 
src/main/Old_SMT/old_z3_proof_methods.ML diff --git a/src/test-gen/src/main/Old_SMT/old_z3_proof_parser.ML b/src/main/Old_SMT/old_z3_proof_parser.ML similarity index 100% rename from src/test-gen/src/main/Old_SMT/old_z3_proof_parser.ML rename to src/main/Old_SMT/old_z3_proof_parser.ML diff --git a/src/test-gen/src/main/Old_SMT/old_z3_proof_reconstruction.ML b/src/main/Old_SMT/old_z3_proof_reconstruction.ML similarity index 100% rename from src/test-gen/src/main/Old_SMT/old_z3_proof_reconstruction.ML rename to src/main/Old_SMT/old_z3_proof_reconstruction.ML diff --git a/src/test-gen/src/main/Old_SMT/old_z3_proof_tools.ML b/src/main/Old_SMT/old_z3_proof_tools.ML similarity index 100% rename from src/test-gen/src/main/Old_SMT/old_z3_proof_tools.ML rename to src/main/Old_SMT/old_z3_proof_tools.ML diff --git a/src/test-gen/src/main/QuickCheckBackend.thy b/src/main/QuickCheckBackend.thy similarity index 100% rename from src/test-gen/src/main/QuickCheckBackend.thy rename to src/main/QuickCheckBackend.thy diff --git a/src/test-gen/src/main/RandomBackend.thy b/src/main/RandomBackend.thy similarity index 100% rename from src/test-gen/src/main/RandomBackend.thy rename to src/main/RandomBackend.thy diff --git a/src/test-gen/src/main/SMT/z3_replay_util.ML b/src/main/SMT/z3_replay_util.ML similarity index 100% rename from src/test-gen/src/main/SMT/z3_replay_util.ML rename to src/main/SMT/z3_replay_util.ML diff --git a/src/SMTBackend.thy b/src/main/SMTBackend.thy similarity index 100% rename from src/SMTBackend.thy rename to src/main/SMTBackend.thy diff --git a/src/SharedMemory.thy b/src/main/SharedMemory.thy similarity index 100% rename from src/SharedMemory.thy rename to src/main/SharedMemory.thy diff --git a/src/Term_Tactics.thy b/src/main/Term_Tactics.thy similarity index 100% rename from src/Term_Tactics.thy rename to src/main/Term_Tactics.thy diff --git a/src/TestEnv.ML b/src/main/TestEnv.ML similarity index 100% rename from src/TestEnv.ML rename to src/main/TestEnv.ML diff --git a/src/TestEnv.thy b/src/main/TestEnv.thy similarity index 100% rename from src/TestEnv.thy rename to src/main/TestEnv.thy diff --git a/src/test-gen/src/main/TestGen.thy b/src/main/TestGen.thy similarity index 100% rename from src/test-gen/src/main/TestGen.thy rename to src/main/TestGen.thy diff --git a/src/TestLib.thy b/src/main/TestLib.thy similarity index 100% rename from src/TestLib.thy rename to src/main/TestLib.thy diff --git a/src/TestRefinements.thy b/src/main/TestRefinements.thy similarity index 100% rename from src/TestRefinements.thy rename to src/main/TestRefinements.thy diff --git a/src/TestScript.thy b/src/main/TestScript.thy similarity index 100% rename from src/TestScript.thy rename to src/main/TestScript.thy diff --git a/src/TestSequence.thy b/src/main/TestSequence.thy similarity index 100% rename from src/TestSequence.thy rename to src/main/TestSequence.thy diff --git a/src/Testing.thy b/src/main/Testing.thy similarity index 100% rename from src/Testing.thy rename to src/main/Testing.thy diff --git a/src/clocks.ML b/src/main/clocks.ML similarity index 100% rename from src/clocks.ML rename to src/main/clocks.ML diff --git a/src/clocks.thy b/src/main/clocks.thy similarity index 100% rename from src/clocks.thy rename to src/main/clocks.thy diff --git a/src/codegen_C_pthread/Code_C_pthread.thy b/src/main/codegen_C_pthread/Code_C_pthread.thy similarity index 100% rename from src/codegen_C_pthread/Code_C_pthread.thy rename to src/main/codegen_C_pthread/Code_C_pthread.thy diff --git 
a/src/codegen_fsharp/Code_Char_Fsharp.thy b/src/main/codegen_fsharp/Code_Char_Fsharp.thy similarity index 100% rename from src/codegen_fsharp/Code_Char_Fsharp.thy rename to src/main/codegen_fsharp/Code_Char_Fsharp.thy diff --git a/src/codegen_fsharp/Code_Char_chr_Fsharp.thy b/src/main/codegen_fsharp/Code_Char_chr_Fsharp.thy similarity index 100% rename from src/codegen_fsharp/Code_Char_chr_Fsharp.thy rename to src/main/codegen_fsharp/Code_Char_chr_Fsharp.thy diff --git a/src/codegen_fsharp/Code_Integer_Fsharp.thy b/src/main/codegen_fsharp/Code_Integer_Fsharp.thy similarity index 100% rename from src/codegen_fsharp/Code_Integer_Fsharp.thy rename to src/main/codegen_fsharp/Code_Integer_Fsharp.thy diff --git a/src/codegen_fsharp/Code_String_Fsharp.thy b/src/main/codegen_fsharp/Code_String_Fsharp.thy similarity index 100% rename from src/codegen_fsharp/Code_String_Fsharp.thy rename to src/main/codegen_fsharp/Code_String_Fsharp.thy diff --git a/src/codegen_fsharp/code_fsharp.ML b/src/main/codegen_fsharp/code_fsharp.ML similarity index 100% rename from src/codegen_fsharp/code_fsharp.ML rename to src/main/codegen_fsharp/code_fsharp.ML diff --git a/src/codegen_fsharp/code_fsharp.thy b/src/main/codegen_fsharp/code_fsharp.thy similarity index 100% rename from src/codegen_fsharp/code_fsharp.thy rename to src/main/codegen_fsharp/code_fsharp.thy diff --git a/src/codegen_fsharp/examples/AQ.thy b/src/main/codegen_fsharp/examples/AQ.thy old mode 100644 new mode 100755 similarity index 100% rename from src/codegen_fsharp/examples/AQ.thy rename to src/main/codegen_fsharp/examples/AQ.thy diff --git a/src/codegen_fsharp/examples/SemiG.thy b/src/main/codegen_fsharp/examples/SemiG.thy old mode 100644 new mode 100755 similarity index 100% rename from src/codegen_fsharp/examples/SemiG.thy rename to src/main/codegen_fsharp/examples/SemiG.thy diff --git a/src/codegen_fsharp/upstream/code_ml.ML b/src/main/codegen_fsharp/upstream/code_ml.ML similarity index 100% rename from src/codegen_fsharp/upstream/code_ml.ML rename to src/main/codegen_fsharp/upstream/code_ml.ML diff --git a/src/codegen_gdb/Code_gdb_script.thy b/src/main/codegen_gdb/Code_gdb_script.thy similarity index 100% rename from src/codegen_gdb/Code_gdb_script.thy rename to src/main/codegen_gdb/Code_gdb_script.thy diff --git a/src/config.sml b/src/main/config.sml similarity index 100% rename from src/config.sml rename to src/main/config.sml diff --git a/src/debug/profiling_begin.thy b/src/main/debug/profiling_begin.thy similarity index 100% rename from src/debug/profiling_begin.thy rename to src/main/debug/profiling_begin.thy diff --git a/src/debug/profiling_end.thy b/src/main/debug/profiling_end.thy similarity index 100% rename from src/debug/profiling_end.thy rename to src/main/debug/profiling_end.thy diff --git a/src/isar_setup.ML b/src/main/isar_setup.ML similarity index 100% rename from src/isar_setup.ML rename to src/main/isar_setup.ML diff --git a/src/log.thy b/src/main/log.thy similarity index 100% rename from src/log.thy rename to src/main/log.thy diff --git a/src/test-gen/src/main/new_smt_patch/SMT_patch.thy b/src/main/new_smt_patch/SMT_patch.thy similarity index 100% rename from src/test-gen/src/main/new_smt_patch/SMT_patch.thy rename to src/main/new_smt_patch/SMT_patch.thy diff --git a/src/new_smt_patch/smt_config_patch.ML b/src/main/new_smt_patch/smt_config_patch.ML similarity index 100% rename from src/new_smt_patch/smt_config_patch.ML rename to src/main/new_smt_patch/smt_config_patch.ML diff --git a/src/new_smt_patch/smt_normalize_patch.ML 
b/src/main/new_smt_patch/smt_normalize_patch.ML similarity index 100% rename from src/new_smt_patch/smt_normalize_patch.ML rename to src/main/new_smt_patch/smt_normalize_patch.ML diff --git a/src/new_smt_patch/smt_solver_patch.ML b/src/main/new_smt_patch/smt_solver_patch.ML similarity index 100% rename from src/new_smt_patch/smt_solver_patch.ML rename to src/main/new_smt_patch/smt_solver_patch.ML diff --git a/src/new_smt_patch/smt_systems_patch.ML b/src/main/new_smt_patch/smt_systems_patch.ML similarity index 100% rename from src/new_smt_patch/smt_systems_patch.ML rename to src/main/new_smt_patch/smt_systems_patch.ML diff --git a/src/new_smt_patch/smtlib_interface_patch.ML b/src/main/new_smt_patch/smtlib_interface_patch.ML similarity index 100% rename from src/new_smt_patch/smtlib_interface_patch.ML rename to src/main/new_smt_patch/smtlib_interface_patch.ML diff --git a/src/new_smt_patch/smtlib_patch.ML b/src/main/new_smt_patch/smtlib_patch.ML similarity index 100% rename from src/new_smt_patch/smtlib_patch.ML rename to src/main/new_smt_patch/smtlib_patch.ML diff --git a/src/new_smt_patch/z3_model.ML b/src/main/new_smt_patch/z3_model.ML similarity index 100% rename from src/new_smt_patch/z3_model.ML rename to src/main/new_smt_patch/z3_model.ML diff --git a/src/new_smt_patch/z3_replay_patch.ML b/src/main/new_smt_patch/z3_replay_patch.ML similarity index 100% rename from src/new_smt_patch/z3_replay_patch.ML rename to src/main/new_smt_patch/z3_replay_patch.ML diff --git a/src/test-gen/src/main/smt_patch/Old_SMT_patch.thy b/src/main/smt_patch/Old_SMT_patch.thy similarity index 100% rename from src/test-gen/src/main/smt_patch/Old_SMT_patch.thy rename to src/main/smt_patch/Old_SMT_patch.thy diff --git a/src/smt_patch/old_smt_config_patch.ML b/src/main/smt_patch/old_smt_config_patch.ML similarity index 100% rename from src/smt_patch/old_smt_config_patch.ML rename to src/main/smt_patch/old_smt_config_patch.ML diff --git a/src/smt_patch/old_smt_failure_patch.ML b/src/main/smt_patch/old_smt_failure_patch.ML similarity index 100% rename from src/smt_patch/old_smt_failure_patch.ML rename to src/main/smt_patch/old_smt_failure_patch.ML diff --git a/src/smt_patch/old_smt_normalize_patch.ML b/src/main/smt_patch/old_smt_normalize_patch.ML similarity index 100% rename from src/smt_patch/old_smt_normalize_patch.ML rename to src/main/smt_patch/old_smt_normalize_patch.ML diff --git a/src/smt_patch/old_smt_setup_solvers_patch.ML b/src/main/smt_patch/old_smt_setup_solvers_patch.ML similarity index 100% rename from src/smt_patch/old_smt_setup_solvers_patch.ML rename to src/main/smt_patch/old_smt_setup_solvers_patch.ML diff --git a/src/smt_patch/old_smt_solver_patch.ML b/src/main/smt_patch/old_smt_solver_patch.ML similarity index 100% rename from src/smt_patch/old_smt_solver_patch.ML rename to src/main/smt_patch/old_smt_solver_patch.ML diff --git a/src/smt_patch/old_smt_translate_patch.ML b/src/main/smt_patch/old_smt_translate_patch.ML similarity index 100% rename from src/smt_patch/old_smt_translate_patch.ML rename to src/main/smt_patch/old_smt_translate_patch.ML diff --git a/src/smt_patch/old_z3_interface_patch.ML b/src/main/smt_patch/old_z3_interface_patch.ML similarity index 100% rename from src/smt_patch/old_z3_interface_patch.ML rename to src/main/smt_patch/old_z3_interface_patch.ML diff --git a/src/smt_patch/old_z3_model_patch.ML b/src/main/smt_patch/old_z3_model_patch.ML similarity index 100% rename from src/smt_patch/old_z3_model_patch.ML rename to src/main/smt_patch/old_z3_model_patch.ML diff --git 
a/src/smt_patch/old_z3_proof_parser_patch.ML b/src/main/smt_patch/old_z3_proof_parser_patch.ML similarity index 100% rename from src/smt_patch/old_z3_proof_parser_patch.ML rename to src/main/smt_patch/old_z3_proof_parser_patch.ML diff --git a/src/smt_patch/old_z3_proof_reconstruction_patch.ML b/src/main/smt_patch/old_z3_proof_reconstruction_patch.ML similarity index 100% rename from src/smt_patch/old_z3_proof_reconstruction_patch.ML rename to src/main/smt_patch/old_z3_proof_reconstruction_patch.ML diff --git a/src/test-gen/src/main/version.thy b/src/main/version.thy similarity index 100% rename from src/test-gen/src/main/version.thy rename to src/main/version.thy diff --git a/src/new_smt_patch/SMT_patch.thy b/src/new_smt_patch/SMT_patch.thy deleted file mode 100644 index 300816b..0000000 --- a/src/new_smt_patch/SMT_patch.thy +++ /dev/null @@ -1,435 +0,0 @@ -(* Title: HOL/SMT.thy - Author: Sascha Boehme, TU Muenchen -*) - -section \Bindings to Satisfiability Modulo Theories (SMT) solvers based on SMT-LIB 2\ - -theory SMT_patch -imports Divides -keywords "smt_status_patch" :: diag -begin - -subsection \A skolemization tactic and proof method\ - -lemma choices: - "\Q. \x. \y ya. Q x y ya \ \f fa. \x. Q x (f x) (fa x)" - "\Q. \x. \y ya yb. Q x y ya yb \ \f fa fb. \x. Q x (f x) (fa x) (fb x)" - "\Q. \x. \y ya yb yc. Q x y ya yb yc \ \f fa fb fc. \x. Q x (f x) (fa x) (fb x) (fc x)" - "\Q. \x. \y ya yb yc yd. Q x y ya yb yc yd \ - \f fa fb fc fd. \x. Q x (f x) (fa x) (fb x) (fc x) (fd x)" - "\Q. \x. \y ya yb yc yd ye. Q x y ya yb yc yd ye \ - \f fa fb fc fd fe. \x. Q x (f x) (fa x) (fb x) (fc x) (fd x) (fe x)" - "\Q. \x. \y ya yb yc yd ye yf. Q x y ya yb yc yd ye yf \ - \f fa fb fc fd fe ff. \x. Q x (f x) (fa x) (fb x) (fc x) (fd x) (fe x) (ff x)" - "\Q. \x. \y ya yb yc yd ye yf yg. Q x y ya yb yc yd ye yf yg \ - \f fa fb fc fd fe ff fg. \x. Q x (f x) (fa x) (fb x) (fc x) (fd x) (fe x) (ff x) (fg x)" - by metis+ - -lemma bchoices: - "\Q. \x \ S. \y ya. Q x y ya \ \f fa. \x \ S. Q x (f x) (fa x)" - "\Q. \x \ S. \y ya yb. Q x y ya yb \ \f fa fb. \x \ S. Q x (f x) (fa x) (fb x)" - "\Q. \x \ S. \y ya yb yc. Q x y ya yb yc \ \f fa fb fc. \x \ S. Q x (f x) (fa x) (fb x) (fc x)" - "\Q. \x \ S. \y ya yb yc yd. Q x y ya yb yc yd \ - \f fa fb fc fd. \x \ S. Q x (f x) (fa x) (fb x) (fc x) (fd x)" - "\Q. \x \ S. \y ya yb yc yd ye. Q x y ya yb yc yd ye \ - \f fa fb fc fd fe. \x \ S. Q x (f x) (fa x) (fb x) (fc x) (fd x) (fe x)" - "\Q. \x \ S. \y ya yb yc yd ye yf. Q x y ya yb yc yd ye yf \ - \f fa fb fc fd fe ff. \x \ S. Q x (f x) (fa x) (fb x) (fc x) (fd x) (fe x) (ff x)" - "\Q. \x \ S. \y ya yb yc yd ye yf yg. Q x y ya yb yc yd ye yf yg \ - \f fa fb fc fd fe ff fg. \x \ S. Q x (f x) (fa x) (fb x) (fc x) (fd x) (fe x) (ff x) (fg x)" - by metis+ - -ML \ -fun moura_tac ctxt = - Atomize_Elim.atomize_elim_tac ctxt THEN' - SELECT_GOAL (Clasimp.auto_tac (ctxt addSIs @{thms choice choices bchoice bchoices}) THEN - ALLGOALS (Metis_Tactic.metis_tac (take 1 ATP_Proof_Reconstruct.partial_type_encs) - ATP_Proof_Reconstruct.default_metis_lam_trans ctxt [] ORELSE' - blast_tac ctxt)) -\ - -method_setup moura = \ - Scan.succeed (SIMPLE_METHOD' o moura_tac) -\ "solve skolemization goals, especially those arising from Z3 proofs" - -hide_fact (open) choices bchoices - - -subsection \Triggers for quantifier instantiation\ - -text \ -Some SMT solvers support patterns as a quantifier instantiation -heuristics. 
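The choices and bchoices lemmas above are ordinary skolemization facts: each pulls a block of existentials out from under a universal quantifier by introducing choice functions. In conventional notation (this is only a reading of the first choices rule, with Q and the function names as in the lemma), the two-existential case says

\[
  \bigl(\forall x.\ \exists y\,\mathit{ya}.\ Q\,x\,y\,\mathit{ya}\bigr)
  \;\Longrightarrow\;
  \bigl(\exists f\,\mathit{fa}.\ \forall x.\ Q\,x\,(f\,x)\,(\mathit{fa}\,x)\bigr)
\]

and the moura method defined alongside these lemmas combines them with auto, metis and blast to discharge exactly such skolemization goals, notably those arising from Z3 proofs.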
Patterns may either be positive terms (tagged by "pat") -triggering quantifier instantiations -- when the solver finds a -term matching a positive pattern, it instantiates the corresponding -quantifier accordingly -- or negative terms (tagged by "nopat") -inhibiting quantifier instantiations. A list of patterns -of the same kind is called a multipattern, and all patterns in a -multipattern are considered conjunctively for quantifier instantiation. -A list of multipatterns is called a trigger, and their multipatterns -act disjunctively during quantifier instantiation. Each multipattern -should mention at least all quantified variables of the preceding -quantifier block. -\ - -typedecl 'a symb_list - -consts - Symb_Nil :: "'a symb_list" - Symb_Cons :: "'a \ 'a symb_list \ 'a symb_list" - -typedecl pattern - -consts - pat :: "'a \ pattern" - nopat :: "'a \ pattern" - -definition trigger :: "pattern symb_list symb_list \ bool \ bool" where - "trigger _ P = P" - - -subsection \Higher-order encoding\ - -text \ -Application is made explicit for constants occurring with varying -numbers of arguments. This is achieved by the introduction of the -following constant. -\ - -definition fun_app :: "'a \ 'a" where "fun_app f = f" - -text \ -Some solvers support a theory of arrays which can be used to encode -higher-order functions. The following set of lemmas specifies the -properties of such (extensional) arrays. -\ - -lemmas array_rules = ext fun_upd_apply fun_upd_same fun_upd_other fun_upd_upd fun_app_def - - -subsection \Normalization\ - -lemma case_bool_if[abs_def]: "case_bool x y P = (if P then x else y)" - by simp - -lemmas Ex1_def_raw = Ex1_def[abs_def] -lemmas Ball_def_raw = Ball_def[abs_def] -lemmas Bex_def_raw = Bex_def[abs_def] -lemmas abs_if_raw = abs_if[abs_def] -lemmas min_def_raw = min_def[abs_def] -lemmas max_def_raw = max_def[abs_def] - - -subsection \Integer division and modulo for Z3\ - -text \ -The following Z3-inspired definitions are overspecified for the case where \l = 0\. This -Schönheitsfehler is corrected in the \div_as_z3div\ and \mod_as_z3mod\ theorems. -\ - -definition z3div :: "int \ int \ int" where - "z3div k l = (if l \ 0 then k div l else - (k div - l))" - -definition z3mod :: "int \ int \ int" where - "z3mod k l = k mod (if l \ 0 then l else - l)" - -lemma div_as_z3div: - "\k l. k div l = (if l = 0 then 0 else if l > 0 then z3div k l else z3div (- k) (- l))" - by (simp add: z3div_def) - -lemma mod_as_z3mod: - "\k l. 
k mod l = (if l = 0 then k else if l > 0 then z3mod k l else - z3mod (- k) (- l))" - by (simp add: z3mod_def) - - -subsection \Setup\ - -ML_file "~~/src/HOL/Tools/SMT/smt_util.ML" -ML_file "~~/src/HOL/Tools/SMT/smt_failure.ML" -ML_file "smt_config_patch.ML" -ML_file "~~/src/HOL/Tools/SMT/smt_builtin.ML" -ML_file "~~/src/HOL/Tools/SMT/smt_datatypes.ML" -ML_file "smt_normalize_patch.ML" -ML_file "~~/src/HOL/Tools/SMT/smt_translate.ML" -ML_file "smtlib_patch.ML" -ML_file "smtlib_interface_patch.ML" -ML_file "~~/src/HOL/Tools/SMT/smtlib_proof.ML" -ML_file "~~/src/HOL/Tools/SMT/smtlib_isar.ML" -ML_file "~~/src/HOL/Tools/SMT/z3_proof.ML" -ML_file "~~/src/HOL/Tools/SMT/z3_isar.ML" -ML_file "smt_solver_patch.ML" -(* We currently do not use CVC4 nor veriT so we remove them from the patch *) -(* ML_file "~~/src/HOL/Tools/SMT/cvc4_interface.ML" -ML_file "~~/src/HOL/Tools/SMT/cvc4_proof_parse.ML" -ML_file "~~/src/HOL/Tools/SMT/verit_proof.ML" -ML_file "~~/src/HOL/Tools/SMT/verit_isar.ML" -ML_file "~~/src/HOL/Tools/SMT/verit_proof_parse.ML" *) -ML_file "~~/src/HOL/Tools/SMT/conj_disj_perm.ML" -ML_file "~~/src/HOL/Tools/SMT/z3_interface.ML" -ML_file "~~/src/HOL/Tools/SMT/z3_replay_util.ML" -ML_file "~~/src/HOL/Tools/SMT/z3_replay_rules.ML" -ML_file "~~/src/HOL/Tools/SMT/z3_replay_methods.ML" -ML_file "z3_replay_patch.ML" -ML_file "z3_model.ML" -ML_file "smt_systems_patch.ML" - -method_setup smt = \ - Scan.optional Attrib.thms [] >> - (fn thms => fn ctxt => - METHOD (fn facts => HEADGOAL (SMT_patch_Solver.smt_tac ctxt (thms @ facts)))) -\ "apply an SMT solver to the current goal" - - -subsection \Configuration\ - -text \ -The current configuration can be printed by the command -\smt_status_patch\, which shows the values of most options. -\ - - -subsection \General configuration options\ - -text \ -The option \smt_solver\ can be used to change the target SMT -solver. The possible values can be obtained from the \smt_status_patch\ -command. -\ - -declare [[smt_solver = z3]] - -text \ -Since SMT solvers are potentially nonterminating, there is a timeout -(given in seconds) to restrict their runtime. -\ - -declare [[smt_timeout = 20]] - -text \ -SMT solvers apply randomized heuristics. In case a problem is not -solvable by an SMT solver, changing the following option might help. -\ - -declare [[smt_random_seed = 1]] - -text \ -In general, the binding to SMT solvers runs as an oracle, i.e, the SMT -solvers are fully trusted without additional checks. The following -option can cause the SMT solver to run in proof-producing mode, giving -a checkable certificate. This is currently only implemented for Z3. -\ - -declare [[smt_oracle = false]] - -text \ -Each SMT solver provides several commandline options to tweak its -behaviour. They can be passed to the solver by setting the following -options. -\ - -declare [[cvc3_options = ""]] -declare [[cvc4_options = "--full-saturate-quant --inst-when=full-last-call --inst-no-entail --term-db-mode=relevant"]] -declare [[verit_options = ""]] -declare [[z3_options = ""]] - -text \ -The SMT method provides an inference mechanism to detect simple triggers -in quantified formulas, which might increase the number of problems -solvable by SMT solvers (note: triggers guide quantifier instantiations -in the SMT solver). To turn it on, set the following option. -\ - -declare [[smt_infer_triggers = false]] - -text \ -Enable the following option to use built-in support for datatypes, -codatatypes, and records in CVC4. Currently, this is implemented only -in oracle mode. 
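Coming back to the integer-division setup earlier in this theory: z3mod reduces modulo the absolute value of the divisor, and z3div flips the sign of the quotient when the divisor is negative, so both can differ from HOL's flooring div and mod only when the divisor is negative. The Standard ML sketch below mirrors the two definitions (SML's div and mod use the same flooring convention as HOL's integer division); the function names merely shadow the HOL constants and the concrete numbers are just an example, not part of HOL-TestGen.

(* Standard ML mirror of the z3div/z3mod definitions above. *)
fun z3div (k, l) = if l >= 0 then k div l else ~ (k div ~l)
fun z3mod (k, l) = k mod (if l >= 0 then l else ~l)

(* With a negative divisor the two conventions differ: *)
val _ = (7 div ~2, 7 mod ~2)              (* (~4, ~1)  flooring div/mod *)
val _ = (z3div (7, ~2), z3mod (7, ~2))    (* (~3,  1)  Z3-style         *)

The div_as_z3div and mod_as_z3mod lemmas stated with the definitions are there to bridge precisely this discrepancy when goals are handed to the solver.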
-\ - -declare [[cvc4_extensions = false]] - -text \ -Enable the following option to use built-in support for div/mod, datatypes, -and records in Z3. Currently, this is implemented only in oracle mode. -\ - -declare [[z3_extensions = false]] - - -subsection \Certificates\ - -text \ -By setting the option \smt_certificates\ to the name of a file, -all following applications of an SMT solver a cached in that file. -Any further application of the same SMT solver (using the very same -configuration) re-uses the cached certificate instead of invoking the -solver. An empty string disables caching certificates. - -The filename should be given as an explicit path. It is good -practice to use the name of the current theory (with ending -\.certs\ instead of \.thy\) as the certificates file. -Certificate files should be used at most once in a certain theory context, -to avoid race conditions with other concurrent accesses. -\ - -declare [[smt_certificates = ""]] - -text \ -The option \smt_read_only_certificates\ controls whether only -stored certificates are should be used or invocation of an SMT solver -is allowed. When set to \true\, no SMT solver will ever be -invoked and only the existing certificates found in the configured -cache are used; when set to \false\ and there is no cached -certificate for some proposition, then the configured SMT solver is -invoked. -\ - -declare [[smt_read_only_certificates = false]] - - -subsection \Tracing\ - -text \ -The SMT method, when applied, traces important information. To -make it entirely silent, set the following option to \false\. -\ - -declare [[smt_verbose = true]] - -text \ -For tracing the generated problem file given to the SMT solver as -well as the returned result of the solver, the option -\smt_trace\ should be set to \true\. -\ - -declare [[smt_trace = false]] - - -subsection \Schematic rules for Z3 proof reconstruction\ - -text \ -Several prof rules of Z3 are not very well documented. There are two -lemma groups which can turn failing Z3 proof reconstruction attempts -into succeeding ones: the facts in \z3_rule\ are tried prior to -any implemented reconstruction procedure for all uncertain Z3 proof -rules; the facts in \z3_simp\ are only fed to invocations of -the simplifier when reconstructing theory-specific proof steps. 
-\ - -lemmas [z3_rule] = - refl eq_commute conj_commute disj_commute simp_thms nnf_simps - ring_distribs field_simps times_divide_eq_right times_divide_eq_left - if_True if_False not_not - NO_MATCH_def - -lemma [z3_rule]: - "(P \ Q) = (\ (\ P \ \ Q))" - "(P \ Q) = (\ (\ Q \ \ P))" - "(\ P \ Q) = (\ (P \ \ Q))" - "(\ P \ Q) = (\ (\ Q \ P))" - "(P \ \ Q) = (\ (\ P \ Q))" - "(P \ \ Q) = (\ (Q \ \ P))" - "(\ P \ \ Q) = (\ (P \ Q))" - "(\ P \ \ Q) = (\ (Q \ P))" - by auto - -lemma [z3_rule]: - "(P \ Q) = (Q \ \ P)" - "(\ P \ Q) = (P \ Q)" - "(\ P \ Q) = (Q \ P)" - "(True \ P) = P" - "(P \ True) = True" - "(False \ P) = True" - "(P \ P) = True" - "(\ (A \ \ B)) \ (A \ B)" - by auto - -lemma [z3_rule]: - "((P = Q) \ R) = (R | (Q = (\ P)))" - by auto - -lemma [z3_rule]: - "(\ True) = False" - "(\ False) = True" - "(x = x) = True" - "(P = True) = P" - "(True = P) = P" - "(P = False) = (\ P)" - "(False = P) = (\ P)" - "((\ P) = P) = False" - "(P = (\ P)) = False" - "((\ P) = (\ Q)) = (P = Q)" - "\ (P = (\ Q)) = (P = Q)" - "\ ((\ P) = Q) = (P = Q)" - "(P \ Q) = (Q = (\ P))" - "(P = Q) = ((\ P \ Q) \ (P \ \ Q))" - "(P \ Q) = ((\ P \ \ Q) \ (P \ Q))" - by auto - -lemma [z3_rule]: - "(if P then P else \ P) = True" - "(if \ P then \ P else P) = True" - "(if P then True else False) = P" - "(if P then False else True) = (\ P)" - "(if P then Q else True) = ((\ P) \ Q)" - "(if P then Q else True) = (Q \ (\ P))" - "(if P then Q else \ Q) = (P = Q)" - "(if P then Q else \ Q) = (Q = P)" - "(if P then \ Q else Q) = (P = (\ Q))" - "(if P then \ Q else Q) = ((\ Q) = P)" - "(if \ P then x else y) = (if P then y else x)" - "(if P then (if Q then x else y) else x) = (if P \ (\ Q) then y else x)" - "(if P then (if Q then x else y) else x) = (if (\ Q) \ P then y else x)" - "(if P then (if Q then x else y) else y) = (if P \ Q then x else y)" - "(if P then (if Q then x else y) else y) = (if Q \ P then x else y)" - "(if P then x else if P then y else z) = (if P then x else z)" - "(if P then x else if Q then x else y) = (if P \ Q then x else y)" - "(if P then x else if Q then x else y) = (if Q \ P then x else y)" - "(if P then x = y else x = z) = (x = (if P then y else z))" - "(if P then x = y else y = z) = (y = (if P then x else z))" - "(if P then x = y else z = y) = (y = (if P then x else z))" - by auto - -lemma [z3_rule]: - "0 + (x::int) = x" - "x + 0 = x" - "x + x = 2 * x" - "0 * x = 0" - "1 * x = x" - "x + y = y + x" - by (auto simp add: mult_2) - -lemma [z3_rule]: (* for def-axiom *) - "P = Q \ P \ Q" - "P = Q \ \ P \ \ Q" - "(\ P) = Q \ \ P \ Q" - "(\ P) = Q \ P \ \ Q" - "P = (\ Q) \ \ P \ Q" - "P = (\ Q) \ P \ \ Q" - "P \ Q \ P \ \ Q" - "P \ Q \ \ P \ Q" - "P \ (\ Q) \ P \ Q" - "(\ P) \ Q \ P \ Q" - "P \ Q \ P \ (\ Q)" - "P \ Q \ (\ P) \ Q" - "P \ \ Q \ P \ Q" - "\ P \ Q \ P \ Q" - "P \ y = (if P then x else y)" - "P \ (if P then x else y) = y" - "\ P \ x = (if P then x else y)" - "\ P \ (if P then x else y) = x" - "P \ R \ \ (if P then Q else R)" - "\ P \ Q \ \ (if P then Q else R)" - "\ (if P then Q else R) \ \ P \ Q" - "\ (if P then Q else R) \ P \ R" - "(if P then Q else R) \ \ P \ \ Q" - "(if P then Q else R) \ P \ \ R" - "(if P then \ Q else R) \ \ P \ Q" - "(if P then Q else \ R) \ P \ R" - by auto - -hide_type (open) symb_list pattern -hide_const (open) Symb_Nil Symb_Cons trigger pat nopat fun_app z3div z3mod - -end diff --git a/src/smt_patch/Old_SMT_patch.thy b/src/smt_patch/Old_SMT_patch.thy deleted file mode 100644 index 1bd40c3..0000000 --- a/src/smt_patch/Old_SMT_patch.thy +++ /dev/null @@ -1,431 +0,0 
@@ -(* Title: HOL/Library/Old_SMT.thy - Author: Sascha Boehme, TU Muenchen -*) - -section \Old Version of Bindings to Satisfiability Modulo Theories (SMT) solvers\ - -theory Old_SMT_patch -imports Real (* "~~/src/HOL/Word/Word" *) -keywords "old_smt_status" :: diag -begin - -ML_file "~~/src/HOL/Library/Old_SMT/old_smt_utils.ML" -ML_file "old_smt_failure_patch.ML" -ML_file "old_smt_config_patch.ML" - - -subsection \Triggers for quantifier instantiation\ - -text \ -Some SMT solvers support patterns as a quantifier instantiation -heuristics. Patterns may either be positive terms (tagged by "pat") -triggering quantifier instantiations -- when the solver finds a -term matching a positive pattern, it instantiates the corresponding -quantifier accordingly -- or negative terms (tagged by "nopat") -inhibiting quantifier instantiations. A list of patterns -of the same kind is called a multipattern, and all patterns in a -multipattern are considered conjunctively for quantifier instantiation. -A list of multipatterns is called a trigger, and their multipatterns -act disjunctively during quantifier instantiation. Each multipattern -should mention at least all quantified variables of the preceding -quantifier block. -\ - -typedecl pattern - -consts - pat :: "'a \ pattern" - nopat :: "'a \ pattern" - -definition trigger :: "pattern list list \ bool \ bool" where "trigger _ P = P" - - -subsection \Quantifier weights\ - -text \ -Weight annotations to quantifiers influence the priority of quantifier -instantiations. They should be handled with care for solvers, which support -them, because incorrect choices of weights might render a problem unsolvable. -\ - -definition weight :: "int \ bool \ bool" where "weight _ P = P" - -text \ -Weights must be non-negative. The value \0\ is equivalent to providing -no weight at all. - -Weights should only be used at quantifiers and only inside triggers (if the -quantifier has triggers). Valid usages of weights are as follows: - -\begin{itemize} -\item -@{term "\x. trigger [[pat (P x)]] (weight 2 (P x))"} -\item -@{term "\x. weight 3 (P x)"} -\end{itemize} -\ - - -subsection \Higher-order encoding\ - -text \ -Application is made explicit for constants occurring with varying -numbers of arguments. This is achieved by the introduction of the -following constant. -\ - -definition fun_app where "fun_app f = f" - -text \ -Some solvers support a theory of arrays which can be used to encode -higher-order functions. The following set of lemmas specifies the -properties of such (extensional) arrays. -\ - -lemmas array_rules = ext fun_upd_apply fun_upd_same fun_upd_other - fun_upd_upd fun_app_def - - -subsection \First-order logic\ - -text \ -Some SMT solvers only accept problems in first-order logic, i.e., -where formulas and terms are syntactically separated. When -translating higher-order into first-order problems, all -uninterpreted constants (those not built-in in the target solver) -are treated as function symbols in the first-order sense. Their -occurrences as head symbols in atoms (i.e., as predicate symbols) are -turned into terms by logically equating such atoms with @{term True}. -For technical reasons, @{term True} and @{term False} occurring inside -terms are replaced by the following constants. 
-\ - -definition term_true where "term_true = True" -definition term_false where "term_false = False" - - -subsection \Integer division and modulo for Z3\ - -definition z3div :: "int \ int \ int" where - "z3div k l = (if 0 \ l then k div l else -(k div (-l)))" - -definition z3mod :: "int \ int \ int" where - "z3mod k l = (if 0 \ l then k mod l else k mod (-l))" - - -subsection \Setup\ - -ML_file "~~/src/HOL/Library/Old_SMT/old_smt_builtin.ML" -ML_file "~~/src/HOL/Library/Old_SMT/old_smt_datatypes.ML" -ML_file "old_smt_normalize_patch.ML" -ML_file "old_smt_translate_patch.ML" -(* This is the file that is actually patched.*) -ML_file "old_smt_solver_patch.ML" -ML_file "~~/src/HOL/Library/Old_SMT/old_smtlib_interface.ML" -ML_file "old_z3_interface_patch.ML" -ML_file "old_z3_proof_parser_patch.ML" -ML_file "~~/src/HOL/Library/Old_SMT/old_z3_proof_tools.ML" -ML_file "~~/src/HOL/Library/Old_SMT/old_z3_proof_literals.ML" -ML_file "~~/src/HOL/Library/Old_SMT/old_z3_proof_methods.ML" -named_theorems old_z3_simp "simplification rules for Z3 proof reconstruction" -ML_file "old_z3_proof_reconstruction_patch.ML" -ML_file "old_z3_model_patch.ML" -ML_file "old_smt_setup_solvers_patch.ML" - -setup \ - Old_SMT_Config.setup #> - Old_SMT_Normalize.setup #> - Old_SMTLIB_Interface.setup #> - Old_Z3_Interface.setup #> - Old_SMT_Setup_Solvers.setup -\ - -method_setup old_smt = \ - Scan.optional Attrib.thms [] >> - (fn thms => fn ctxt => - METHOD (fn facts => HEADGOAL (Old_SMT_patch_Solver.smt_tac ctxt (thms @ facts)))) -\ "apply an SMT solver to the current goal" - - -subsection \Configuration\ - -text \ -The current configuration can be printed by the command -\old_smt_status\, which shows the values of most options. -\ - - - -subsection \General configuration options\ - -text \ -The option \old_smt_solver\ can be used to change the target SMT -solver. The possible values can be obtained from the \old_smt_status\ -command. - -Due to licensing restrictions, Yices and Z3 are not installed/enabled -by default. Z3 is free for non-commercial applications and can be enabled -by setting the \OLD_Z3_NON_COMMERCIAL\ environment variable to -\yes\. -\ - -declare [[ old_smt_solver = z3 ]] - -text \ -Since SMT solvers are potentially non-terminating, there is a timeout -(given in seconds) to restrict their runtime. A value greater than -120 (seconds) is in most cases not advisable. -\ - -declare [[ old_smt_timeout = 20 ]] - -text \ -SMT solvers apply randomized heuristics. In case a problem is not -solvable by an SMT solver, changing the following option might help. -\ - -declare [[ old_smt_random_seed = 1 ]] - -text \ -In general, the binding to SMT solvers runs as an oracle, i.e, the SMT -solvers are fully trusted without additional checks. The following -option can cause the SMT solver to run in proof-producing mode, giving -a checkable certificate. This is currently only implemented for Z3. -\ - -declare [[ old_smt_oracle = false ]] - -text \ -Each SMT solver provides several commandline options to tweak its -behaviour. They can be passed to the solver by setting the following -options. -\ - -declare [[ old_cvc3_options = "" ]] -declare [[ old_yices_options = "" ]] -declare [[ old_z3_options = "" ]] - -text \ -Enable the following option to use built-in support for datatypes and -records. Currently, this is only implemented for Z3 running in oracle -mode. 
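The first-order encoding described above can be pictured on a tiny, purely illustrative example (P, f, a and b are arbitrary names, and the rendering only conveys the idea, not the exact output of the ML translation loaded in the Setup section). A predicate atom built from an uninterpreted constant becomes a term-level equation,

\[ P\,a \;\;\leadsto\;\; P(a) = \mathit{term\_true}, \]

while a Boolean constant occurring inside a term is replaced by its term-level copy,

\[ f\,\mathrm{True} = b \;\;\leadsto\;\; f(\mathit{term\_true}) = b. \]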
-\ - -declare [[ old_smt_datatypes = false ]] - -text \ -The SMT method provides an inference mechanism to detect simple triggers -in quantified formulas, which might increase the number of problems -solvable by SMT solvers (note: triggers guide quantifier instantiations -in the SMT solver). To turn it on, set the following option. -\ - -declare [[ old_smt_infer_triggers = false ]] - -text \ -The SMT method monomorphizes the given facts, that is, it tries to -instantiate all schematic type variables with fixed types occurring -in the problem. This is a (possibly nonterminating) fixed-point -construction whose cycles are limited by the following option. -\ - -declare [[ monomorph_max_rounds = 5 ]] - -text \ -In addition, the number of generated monomorphic instances is limited -by the following option. -\ - -declare [[ monomorph_max_new_instances = 500 ]] - - - -subsection \Certificates\ - -text \ -By setting the option \old_smt_certificates\ to the name of a file, -all following applications of an SMT solver a cached in that file. -Any further application of the same SMT solver (using the very same -configuration) re-uses the cached certificate instead of invoking the -solver. An empty string disables caching certificates. - -The filename should be given as an explicit path. It is good -practice to use the name of the current theory (with ending -\.certs\ instead of \.thy\) as the certificates file. -Certificate files should be used at most once in a certain theory context, -to avoid race conditions with other concurrent accesses. -\ - -declare [[ old_smt_certificates = "" ]] - -text \ -The option \old_smt_read_only_certificates\ controls whether only -stored certificates are should be used or invocation of an SMT solver -is allowed. When set to \true\, no SMT solver will ever be -invoked and only the existing certificates found in the configured -cache are used; when set to \false\ and there is no cached -certificate for some proposition, then the configured SMT solver is -invoked. -\ - -declare [[ old_smt_read_only_certificates = false ]] - - - -subsection \Tracing\ - -text \ -The SMT method, when applied, traces important information. To -make it entirely silent, set the following option to \false\. -\ - -declare [[ old_smt_verbose = true ]] - -text \ -For tracing the generated problem file given to the SMT solver as -well as the returned result of the solver, the option -\old_smt_trace\ should be set to \true\. -\ - -declare [[ old_smt_trace = false ]] - -text \ -From the set of assumptions given to the SMT solver, those assumptions -used in the proof are traced when the following option is set to -@{term true}. This only works for Z3 when it runs in non-oracle mode -(see options \old_smt_solver\ and \old_smt_oracle\ above). -\ - -declare [[ old_smt_trace_used_facts = false ]] - - - -subsection \Schematic rules for Z3 proof reconstruction\ - -text \ -Several prof rules of Z3 are not very well documented. There are two -lemma groups which can turn failing Z3 proof reconstruction attempts -into succeeding ones: the facts in \z3_rule\ are tried prior to -any implemented reconstruction procedure for all uncertain Z3 proof -rules; the facts in \z3_simp\ are only fed to invocations of -the simplifier when reconstructing theory-specific proof steps. 
-\ - -lemmas [old_z3_rule] = - refl eq_commute conj_commute disj_commute simp_thms nnf_simps - ring_distribs field_simps times_divide_eq_right times_divide_eq_left - if_True if_False not_not - -lemma [old_z3_rule]: - "(P \ Q) = (\(\P \ \Q))" - "(P \ Q) = (\(\Q \ \P))" - "(\P \ Q) = (\(P \ \Q))" - "(\P \ Q) = (\(\Q \ P))" - "(P \ \Q) = (\(\P \ Q))" - "(P \ \Q) = (\(Q \ \P))" - "(\P \ \Q) = (\(P \ Q))" - "(\P \ \Q) = (\(Q \ P))" - by auto - -lemma [old_z3_rule]: - "(P \ Q) = (Q \ \P)" - "(\P \ Q) = (P \ Q)" - "(\P \ Q) = (Q \ P)" - "(True \ P) = P" - "(P \ True) = True" - "(False \ P) = True" - "(P \ P) = True" - by auto - -lemma [old_z3_rule]: - "((P = Q) \ R) = (R | (Q = (\P)))" - by auto - -lemma [old_z3_rule]: - "(\True) = False" - "(\False) = True" - "(x = x) = True" - "(P = True) = P" - "(True = P) = P" - "(P = False) = (\P)" - "(False = P) = (\P)" - "((\P) = P) = False" - "(P = (\P)) = False" - "((\P) = (\Q)) = (P = Q)" - "\(P = (\Q)) = (P = Q)" - "\((\P) = Q) = (P = Q)" - "(P \ Q) = (Q = (\P))" - "(P = Q) = ((\P \ Q) \ (P \ \Q))" - "(P \ Q) = ((\P \ \Q) \ (P \ Q))" - by auto - -lemma [old_z3_rule]: - "(if P then P else \P) = True" - "(if \P then \P else P) = True" - "(if P then True else False) = P" - "(if P then False else True) = (\P)" - "(if P then Q else True) = ((\P) \ Q)" - "(if P then Q else True) = (Q \ (\P))" - "(if P then Q else \Q) = (P = Q)" - "(if P then Q else \Q) = (Q = P)" - "(if P then \Q else Q) = (P = (\Q))" - "(if P then \Q else Q) = ((\Q) = P)" - "(if \P then x else y) = (if P then y else x)" - "(if P then (if Q then x else y) else x) = (if P \ (\Q) then y else x)" - "(if P then (if Q then x else y) else x) = (if (\Q) \ P then y else x)" - "(if P then (if Q then x else y) else y) = (if P \ Q then x else y)" - "(if P then (if Q then x else y) else y) = (if Q \ P then x else y)" - "(if P then x else if P then y else z) = (if P then x else z)" - "(if P then x else if Q then x else y) = (if P \ Q then x else y)" - "(if P then x else if Q then x else y) = (if Q \ P then x else y)" - "(if P then x = y else x = z) = (x = (if P then y else z))" - "(if P then x = y else y = z) = (y = (if P then x else z))" - "(if P then x = y else z = y) = (y = (if P then x else z))" - by auto - -lemma [old_z3_rule]: - "0 + (x::int) = x" - "x + 0 = x" - "x + x = 2 * x" - "0 * x = 0" - "1 * x = x" - "x + y = y + x" - by auto - -lemma [old_z3_rule]: (* for def-axiom *) - "P = Q \ P \ Q" - "P = Q \ \P \ \Q" - "(\P) = Q \ \P \ Q" - "(\P) = Q \ P \ \Q" - "P = (\Q) \ \P \ Q" - "P = (\Q) \ P \ \Q" - "P \ Q \ P \ \Q" - "P \ Q \ \P \ Q" - "P \ (\Q) \ P \ Q" - "(\P) \ Q \ P \ Q" - "P \ Q \ P \ (\Q)" - "P \ Q \ (\P) \ Q" - "P \ \Q \ P \ Q" - "\P \ Q \ P \ Q" - "P \ y = (if P then x else y)" - "P \ (if P then x else y) = y" - "\P \ x = (if P then x else y)" - "\P \ (if P then x else y) = x" - "P \ R \ \(if P then Q else R)" - "\P \ Q \ \(if P then Q else R)" - "\(if P then Q else R) \ \P \ Q" - "\(if P then Q else R) \ P \ R" - "(if P then Q else R) \ \P \ \Q" - "(if P then Q else R) \ P \ \R" - "(if P then \Q else R) \ \P \ Q" - "(if P then Q else \R) \ P \ R" - by auto - -ML_file "~~/src/HOL/Library/Old_SMT/old_smt_real.ML" -(* ML_file "~~/src/HOL/Library/Old_SMT/old_smt_word.ML" *) - -hide_type (open) pattern -hide_const fun_app term_true term_false z3div z3mod -hide_const (open) trigger pat nopat weight - -end diff --git a/src/test-gen/src/main/BackendUtils.thy b/src/test-gen/src/main/BackendUtils.thy deleted file mode 100644 index 6faee28..0000000 --- a/src/test-gen/src/main/BackendUtils.thy +++ 
/dev/null @@ -1,93 +0,0 @@ -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * TestGen.thy --- the core of the HOL-TestGen System. - * This file is part of HOL-TestGen. - * - * Copyright (c) 2005-2010 ETH Zurich, Switzerland - * 2008-2013 Achim D. Brucker, Germany - * 2009-2013 Universite Paris-Sud, France - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ******************************************************************************) -(* $Id:$ *) - -chapter {* Utilities for the various backends *} - -theory BackendUtils -imports TestEnv - -begin - -ML{* - -structure BackendUtils = -struct - -fun certify_pairs ctxt l = map (fn (Var(x,t),y) => ((x, t), Thm.cterm_of ctxt y)) l -fun uncertify_pairs l = map (fn (x,(a,t)) => (Thm.term_of x, Var (a,t))) l - -fun solve_by_simp_tac ctxt = SOLVED' (full_simp_tac ctxt) -fun solve_by_simp_or_auto_tac ctxt = - let - val pon = Config.get ctxt TestEnv.pon - val solved' = if (pon = 0) then SOLVED' else (fn x => x) - val full_simp = full_simp_tac ctxt - val clarsimp = clarsimp_tac ctxt - (* val metis = Metis_Tactic.metis_tac [] ATP_Proof_Reconstruct.metis_default_lam_trans ctxt (TestEnv.get_smt_facts ctxt) *) - (* We can use SMT from the standard library, no need for the patch here *) - val smt = SMT_Solver.smt_tac ctxt (TestEnv.get_smt_facts ctxt) - (* val use_metis = Config.get ctxt TestEnv.use_metis *) - val use_smt = Config.get ctxt TestEnv.use_smt - val tactic = if use_smt then ( - fn y => (SOLVE (full_simp y)) - ORELSE (SOLVE (clarsimp y)) - ORELSE (smt y) - ) else ( - fn y => (SOLVE (full_simp y)) - ORELSE (SOLVE (clarsimp y)) - ORELSE (auto_tac ctxt) - ) - in solved' tactic - end - -fun premvars n thm = let - val prem = Logic.nth_prem(n, Thm.prop_of thm) -in - map Var (Term.add_vars prem []) -end - -end - -*} - -end diff --git a/src/test-gen/src/main/IOCO.thy b/src/test-gen/src/main/IOCO.thy deleted file mode 100644 index cf2e5d0..0000000 --- a/src/test-gen/src/main/IOCO.thy +++ /dev/null @@ -1,126 +0,0 @@ -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * IOCO --- formalizing the IOCO theory - * This file is part of HOL-TestGen. - * - * Copyright (c) 2005-2007 ETH Zurich, Switzerland - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
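The solve_by_simp_or_auto_tac tactic above tries a cascade of provers in order (full_simp, then clarsimp, then SMT or auto, depending on use_smt) and keeps the first that closes the goal. The ORELSE pattern it relies on can be re-rendered in a few lines of plain Standard ML over option-valued provers; the names and the toy "goals" below are illustrative only and not part of HOL-TestGen.

(* First-success cascade, the shape behind the ORELSE chain in
   solve_by_simp_or_auto_tac above (illustration only).          *)
fun first_success []        _    = NONE
  | first_success (p :: ps) goal =
      (case p goal of
         SOME result => SOME result
       | NONE        => first_success ps goal)

(* Hypothetical usage: a cheap prover is tried before an expensive one. *)
val provers = [fn n => if n mod 2 = 0 then SOME "closed by simp" else NONE,
               fn n => if n > 10 then SOME "closed by smt" else NONE]
val r = first_success provers 13          (* SOME "closed by smt" *)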
- ******************************************************************************) -(* $Id: IOCO.thy 8455 2009-04-08 07:58:38Z wolff $ *) - -chapter {* Basic Testing Setup *} - -theory IOCO imports Main begin - -section{* A Bit of IOCO Theory *} -text{* See Jan Tretmanns, Axel Belinfante: Automatic Testing with Formal - Methods. We follow more or less the notation here, but are more - detailed wrt. concepts such as ``initial states'' which are part of - the concept of a transition system. *} - - -text{* Transition systems and IO-Transition Systems *} - -record ('\, '\) TS = - init :: "'\ set" - trans :: "('\\'\\'\) set" - -type_synonym ('\,'o,'\) io_lts = "('\ + 'o,'\) TS" - - - -inductive_set mult :: "('\, '\) TS \ ('\ \ '\ list \ '\) set" -for TS :: "('\, '\) TS" -where refl: "(s,[],s) \ mult(TS)" - | step: "\ (s,a,s') \ (trans TS); (s',R,s'') \ mult(TS)\ \ (s,a#R,s'') \ mult(TS)" - -definition Straces :: "('\,'\) TS \ '\ list set" where - "Straces TS \ {l. \ \\<^sub>0 \(init TS). \ s'. (\\<^sub>0,l,s') \ mult(TS)}" -definition after :: "[('\,'\) TS, '\ list] \ '\ set" (infixl "after" 100) where - "TS after l \ {s' . \ \\<^sub>0 \(init TS). (\\<^sub>0,l,s') \ mult(TS)}" - (* again, we make the set of initial states explicit here *) - -definition out :: "[('\,'o ,'\) io_lts,'\ set] \ ('o ) set" where - "out TS ss \ {a. \ s \ ss. \ s'. (s,Inr a,s') \ (trans TS)}" - -definition ioco :: "[('\,'o ,'\)io_lts,('\,'o ,'\)io_lts] \ bool" (infixl "ioco" 200) where - "i ioco s \ (\ t \ Straces(s). out i (i after t) \ out s (s after t))" - - - -(* The following notation is based on a concrete ts. *) -consts ts :: "('\, '\) TS" -(* underspecified *) - - -syntax "_ts" :: "['\,'\,'\] \ bool" ("_ --<_>--> _" [0,0,60] 60) - -syntax (xsymbols) - "_tc" :: "['\,'\,'\] \ bool" ("_ --<_>\ _" [0,0,60] 60) - -syntax "_tsm" :: "['\,'\,'\] \ bool" ("_ ==<_>==> _" [0,0,60] 60) - -syntax (xsymbols) - "_tc," :: "['\,'\,'\] \ bool" ("_ =<_>\ _" [0,0,60] 60) - -translations "s ----> s'" == "(s,c,s') \ CONST ts" - -text{* Purpose: Prove under which conditions Mfold-Test is equivalent - to ioco, i.e. under which conditions do we actually test ioco. - I foresee two problems: - \begin{enumerate} - \item \textbf{Quiescense} IOCO theory assumes in the notion of - output elements "quit actions" $\delta$ which were treated - as "non-observable" conceptually. Actually, in our testing approach, - we will assume that termination of a program under test is - observable, so the test harness will always at least deliver - "None" (representing $\delta$). - \item \textbf{Deep Nondeterminism}. IOCO theory assumes the possibilty - of a branching of the LTS whose concequences can be observed - in terms of output actions much later; i.e. there are transitions - such as $(s,a,s') \isasymin (snd TS)$ and $(s,a,s'') \isasymin (snd TS)$ - with $s' \isasymnoteq s''$. - \item \textbf{IO Nondeterminism}. A system under test should always - accept one (possibly trivial) input and produce an output; there - should be no possibility for it to decide non-deterministically - to produce input or output. - \end{enumerate} - - \textbf{Conjecture}: Our Mfold-Testing corresponds exactly to IOCO testing - if the underlying transition systems are deterministic and - quiescence is observable. 
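The after and out operators above are the two observations ioco is built from: run a trace from the initial states, then compare the outputs enabled afterwards. The small Standard ML rendering below, over finite transition lists, gives the intuition; the state numbers, label strings and the vending-machine example are made up for illustration, and outputs are crudely marked with a leading "!" instead of the Inl/Inr sum type used in the theory.

(* Finite, executable caricature of 'after' and 'out' from the IOCO theory
   above.  Sets become lists; an output label starts with "!".            *)
type ts = {init : int list, trans : (int * string * int) list}

fun member x xs = List.exists (fn y => y = x) xs

(* one step: successors of any state in ss under label a *)
fun step trans ss a =
  List.mapPartial
    (fn (s, b, s') => if member s ss andalso a = b then SOME s' else NONE) trans

(* TS after trace: states reachable from an initial state via the trace *)
fun after ({init, trans} : ts) trace =
  List.foldl (fn (a, ss) => step trans ss a) init trace

(* out TS ss: output labels enabled in some state of ss *)
fun out ({init = _, trans} : ts) ss =
  List.filter (String.isPrefix "!")
    (List.mapPartial (fn (s, a, _) => if member s ss then SOME a else NONE) trans)

(* Hypothetical vending machine: "coin" is an input, "!tea"/"!coffee" are outputs. *)
val vm : ts = {init = [0], trans = [(0, "coin", 1), (1, "!tea", 2), (1, "!coffee", 2)]}
val outs = out vm (after vm ["coin"])     (* ["!tea", "!coffee"] *)

ioco itself then demands that, for every trace of the specification, the implementation's out set after that trace is a subset of the specification's.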
-*} - - -end diff --git a/src/test-gen/src/main/SMTBackend.thy b/src/test-gen/src/main/SMTBackend.thy deleted file mode 100644 index c9df909..0000000 --- a/src/test-gen/src/main/SMTBackend.thy +++ /dev/null @@ -1,450 +0,0 @@ -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * TestGen.thy --- the core of the HOL-TestGen System. - * This file is part of HOL-TestGen. - * - * Copyright (c) 2005-2010 ETH Zurich, Switzerland - * 2008-2013 Achim D. Brucker, Germany - * 2009-2013 Universite Paris-Sud, France - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ******************************************************************************) -(* $Id:$ *) - -chapter {* The SMT backend *} - -theory SMTBackend -imports - clocks - BackendUtils - "smt_patch/Old_SMT_patch" - "new_smt_patch/SMT_patch" - -begin - - -ML{* - -signature SMTBACKEND = sig - (* The main tactic to call the SMT backend on a PO *) - val testgen_smt_tac : Proof.context -> int -> tactic -end - -*} - -(* Common part between the two interfaces *) - -lemma add_bound: "\ P \ (size x) <= k \ \ P" by(auto) - -ML{* - -(* Backported from Isabelle 13-2 *) -fun matches_subterm thy (pat, obj) = - let - fun msub bounds obj = Pattern.matches thy (pat, obj) orelse - (case obj of - Abs (x, T, t) => msub (bounds + 1) (snd (Term.dest_abs (Name.bound bounds, T, t))) - | t $ u => msub bounds t orelse msub bounds u - | _ => false) - in msub 0 obj end; - -*} - - -(* Old interface (deprecated, will be removed in next versions) *) - -declare [[old_smt_solver = z3]] -declare [[old_z3_options = "AUTO_CONFIG=false MBQI=false"]] -declare [[old_smt_oracle = true]] -declare [[old_z3_with_extensions = true]] -declare [[old_smt_datatypes = true]] - -(* abbreviation "trigger == Old_SMT_patch.trigger" -abbreviation "pat == Old_SMT_patch.pat" *) - -lemma old_trigger_intr: "(x = y) \ Old_SMT_patch.trigger [[Old_SMT_patch.pat (x)]] (x = y)" -by(simp add: Old_SMT_patch.trigger_def) - - -ML{* - -structure OldSMTBackend : SMTBACKEND = struct - -fun trig_thm thy thm = - let val is_trig = matches_subterm thy (@{const Old_SMT_patch.trigger}, Thm.prop_of thm) - orelse not (exists_subterm is_Var (Thm.prop_of thm)) - in - if is_trig then thm else thm RS @{thm "old_trigger_intr"} handle THM _ => thm - end - -fun pretty_insts ctxt msg insts = let - fun mkeq (x,y) = Const(@{const_name HOL.eq}, dummyT) $ x $ y - val pretty_insts = map (fn inst => (Syntax.pretty_term ctxt (mkeq inst))) insts -in - Pretty.string_of (Pretty.big_list (msg^":") pretty_insts) -end - -fun scan_def (Const (@{const_name HOL.eq}, _) $ t $ u) = (t,u) - | scan_def _ = error ("Unexpected SMT counterexample format") - -fun mkinsts thy vars ce tr = let - val defs = map scan_def ce - val _ = if tr then tracing (pretty_insts @{context} "Raw SMT counterexample" defs) else () - val defs' = defs @ (map swap defs) - - (* The defs that are directly usable *) - fun is_Free_or_Var t = (is_Free t) orelse (is_Var t) - val real_defs = filter (fn (x,_) => is_Free_or_Var x) defs' - val _ = if tr then tracing (pretty_insts @{context} "SMT counterexample after filtering real defs" real_defs) else () - - (* The defs of functions that we have to infer from their partial definition via equalities - (might want to change this by looking directly at ce) *) - fun insert (t1, t2, u) acc = case acc of - [] => [(t1, [t2], [u])] - | (t1', t2s, us)::acc => if t1' = t1 then (t1', t2::t2s, u::us)::acc else (t1', t2s, us)::(insert (t1, t2, u) acc) - fun aggregate_fun_defs acc l = case l of - [] => acc - | (t1 $ t2, u)::l => if is_Free_or_Var t1 then aggregate_fun_defs (insert (t1, t2, u) acc) l else (aggregate_fun_defs acc l) - | _::l => aggregate_fun_defs acc l - val aggregates = aggregate_fun_defs [] defs' - fun mk_body x Tx Tu t2s us = case t2s of - [t2] => (case us of [u] => u) - | t2::t2s => (case us of u::us => (Const (@{const_name HOL.If}, HOLogic.boolT --> Tu --> Tu --> Tu)) $ ((Const (@{const_name HOL.eq}, Tx --> Tx --> HOLogic.boolT)) $ x $ t2) $ u $ (mk_body x Tx Tu t2s us)) (* if x = t2 then u else mk_body x t2s us *) - fun mk_fun_defs l = case l of - [] => [] - | (t1, t2::t2s, 
u::us)::l => let - val Tx = fastype_of t2 - val name = "xMkFun" - val x = (name, Tx) - val Tu = fastype_of u - in (t1, absfree x (mk_body (Free x) Tx Tu (t2::t2s) (u::us)))::(mk_fun_defs l) end - val fun_defs = mk_fun_defs aggregates - val _ = if tr then tracing (pretty_insts @{context} "SMT counterexample with partial defs of functions" fun_defs) else () - - (* Generation of the instantiation *) - val all_defs = real_defs @ fun_defs - val var_insts = map (fn x => (x, subst_atomic all_defs x)) vars - val _ = if tr then tracing (pretty_insts @{context} "Generated instantiation from SMT counterexample" var_insts) else () -in - BackendUtils.certify_pairs thy var_insts -end - -val (profiling, profiling_setup) = Attrib.config_bool @{binding testgen_profiling} (K false); - -(* a wrapper around smt_tac' that fixes the provided counterexamples *) -fun smt_ce_tac ctxt rules = -Subgoal.FOCUS(fn {context, prems, schematics, ...} => (fn thm => -let - val prof_name = "SMT" - val start_tac = (if Config.get ctxt profiling - then Clocks.start_clock_tac prof_name - else all_tac) - val stop_tac = (if Config.get ctxt profiling - then Clocks.stop_clock_tac prof_name - else all_tac) -in - (start_tac - THEN (Old_SMT_patch_Solver.smt_tac' context (rules @ prems) 1) - THEN stop_tac) thm - |> Seq.hd - |> Seq.single - - handle Old_SMT_patch_Failure.SMT (Old_SMT_patch_Failure.Counterexample {is_real_cex = is_real_cex', - free_constraints, - const_defs}) => - let - val _ = if Config.get ctxt profiling then Clocks.stop_clock prof_name else () - val inv_insts = map swap (snd schematics) - val term_insts = BackendUtils.uncertify_pairs inv_insts - val free_constraints' = map (subst_atomic term_insts) free_constraints - val const_defs' = map (subst_atomic term_insts) const_defs - in - raise Old_SMT_patch_Failure.SMT - (Old_SMT_patch_Failure.Counterexample {is_real_cex = is_real_cex', - free_constraints = free_constraints', - const_defs = const_defs'}) - end - - | exc => let val _ = if Config.get ctxt profiling - then Clocks.stop_clock prof_name - else () - in - raise exc - end - -end)) ctxt - - -fun try_inst_tac ctxt insts n thm = let - val thm' = Drule.instantiate_normalize ([], insts) thm -in - BackendUtils.solve_by_simp_or_auto_tac ctxt n thm' -end - -fun smt_inst_tac ctxt rules n thm = let - val prem = Logic.nth_prem(n, Thm.prop_of thm) - val neg = @{const Pure.imp} - $ prem - $ (@{const "Trueprop"} $ @{const "False"}) - val goal = Goal.init (Thm.cterm_of ctxt neg) -in - (Seq.hd (smt_ce_tac ctxt rules 1 goal); Seq.empty) - handle Old_SMT_patch_Failure.SMT (Old_SMT_patch_Failure.Counterexample {free_constraints, const_defs, ...}) - => try_inst_tac ctxt (mkinsts ctxt (BackendUtils.premvars n thm) (free_constraints @ const_defs) (Config.get ctxt TestEnv.smt_model)) n thm - | Old_SMT_patch_Failure.SMT _ => Seq.empty -end - -(* fun add_bound_tac ctxt bound t = - Subgoal.FOCUS_PARAMS(fn {context, schematics, ...} => let - val thy = Proof_Context.theory_of context - val ct = Thm.instantiate_cterm schematics (cterm_of thy t) - val bound_term = cterm_of thy (HOLogic.mk_nat bound) - val xtype = ctyp_of thy (TVar (("'a",0), @{sort "Nat.size"})) - val tinst = (xtype, ctyp_of_term ct) - val k = cterm_of thy (Var (("k",0), @{typ "nat"})) - val x = cterm_of thy (Var (("x",0), type_of t)) - val inst1 = (k, bound_term) - val inst2 = (x, ct) - in - TacticPatch.res_terminst_tac [tinst] [inst1, inst2] @{thm add_bound} 1 - end) ctxt - -fun add_bounds_tac ctxt bound ts = let - fun next (t, tac) = tac THEN' (add_bound_tac ctxt bound t) -in 
- List.foldl next (K all_tac) ts -end - -fun bounded_smt_tac ctxt bound rules = - Subgoal.FOCUS_PARAMS(fn {context, ...} => - (fn thm => let - val thy = Proof_Context.theory_of context - val datatype_vars = filter (fn x => isDataType thy (type_of x)) - (premvars 1 thm) - in - EVERY[add_bounds_tac context bound datatype_vars 1, - smt_inst_tac context rules 1, - ALLGOALS (full_simp_tac context)] thm - end)) ctxt *) - -fun unbounded_smt_tac ctxt rules = - Subgoal.FOCUS_PARAMS(fn {context, ...} => - (fn thm => - EVERY[smt_inst_tac context rules 1, - ALLGOALS (full_simp_tac context)] thm - )) ctxt - -fun testgen_smt_tac ctxt = - let - val thy = Proof_Context.theory_of ctxt - val smt_facts = map (trig_thm thy) (TestEnv.get_smt_facts ctxt) - in - unbounded_smt_tac ctxt smt_facts - (* bounded_smt_tac ctxt (Config.get ctxt TestEnv.depth) smt_facts *) - end - -end - -*} - - -(* New interface *) - -declare [[smt_solver = z3]] -declare [[z3_options = "AUTO_CONFIG=false smt.mbqi=false"]] -declare [[smt_oracle = true]] -declare [[z3_extensions = true]] - -lemma trigger_intr: "(x = y) \ SMT_patch.trigger (SMT_patch.Symb_Cons (SMT_patch.Symb_Cons (SMT_patch.pat (x)) SMT_patch.Symb_Nil) SMT_patch.Symb_Nil) (x = y)" -by(simp add: SMT_patch.trigger_def) - - -ML{* - -structure NewSMTBackend : SMTBACKEND = struct - -fun trig_thm thy thm = - let val is_trig = matches_subterm thy (@{const SMT_patch.trigger}, Thm.prop_of thm) - orelse not (exists_subterm is_Var (Thm.prop_of thm)) - in - if is_trig then thm else thm RS @{thm "trigger_intr"} handle THM _ => thm - end - -fun pretty_insts ctxt msg insts = let - fun mkeq (x,y) = Const(@{const_name HOL.eq}, dummyT) $ x $ y - val pretty_insts = map (fn inst => (Syntax.pretty_term ctxt (mkeq inst))) insts -in - Pretty.string_of (Pretty.big_list (msg^":") pretty_insts) -end - -fun mkinsts thy vars defs tr = let - val _ = if tr then tracing (pretty_insts @{context} "Raw SMT counterexample" defs) else () - val defs' = defs @ (map swap defs) - - (* The defs that are directly usable *) - fun is_Free_or_Var t = (is_Free t) orelse (is_Var t) - val real_defs = filter (fn (x,_) => is_Free_or_Var x) defs' - val _ = if tr then tracing (pretty_insts @{context} "SMT counterexample after filtering real defs" real_defs) else () - - (* The defs of functions that we have to infer from their partial definition via equalities - (might want to change this by looking directly at ce) *) - fun insert (t1, t2, u) acc = case acc of - [] => [(t1, [t2], [u])] - | (t1', t2s, us)::acc => if t1' = t1 then (t1', t2::t2s, u::us)::acc else (t1', t2s, us)::(insert (t1, t2, u) acc) - fun aggregate_fun_defs acc l = case l of - [] => acc - | (t1 $ t2, u)::l => if is_Free_or_Var t1 then aggregate_fun_defs (insert (t1, t2, u) acc) l else (aggregate_fun_defs acc l) - | _::l => aggregate_fun_defs acc l - val aggregates = aggregate_fun_defs [] defs' - fun mk_body x Tx Tu t2s us = case t2s of - [t2] => (case us of [u] => u) - | t2::t2s => (case us of u::us => (Const (@{const_name HOL.If}, HOLogic.boolT --> Tu --> Tu --> Tu)) $ ((Const (@{const_name HOL.eq}, Tx --> Tx --> HOLogic.boolT)) $ x $ t2) $ u $ (mk_body x Tx Tu t2s us)) (* if x = t2 then u else mk_body x t2s us *) - fun mk_fun_defs l = case l of - [] => [] - | (t1, t2::t2s, u::us)::l => let - val Tx = fastype_of t2 - val name = "xMkFun" - val x = (name, Tx) - val Tu = fastype_of u - in (t1, absfree x (mk_body (Free x) Tx Tu (t2::t2s) (u::us)))::(mk_fun_defs l) end - val fun_defs = mk_fun_defs aggregates - val _ = if tr then tracing (pretty_insts 
@{context} "SMT counterexample with partial defs of functions" fun_defs) else () - - (* Generation of the instantiation *) - val all_defs = real_defs @ fun_defs - val var_insts = map (fn x => (x, subst_atomic all_defs x)) vars - val _ = if tr then tracing (pretty_insts @{context} "Generated instantiation from SMT counterexample" var_insts) else () -in - BackendUtils.certify_pairs thy var_insts -end - -val (profiling, profiling_setup) = Attrib.config_bool @{binding testgen_profiling} (K false); - -(* a wrapper around smt_tac' that fixes the provided counterexamples *) -fun smt_ce_tac ctxt rules = -Subgoal.FOCUS(fn {context, prems, schematics, ...} => (fn thm => -let - val prof_name = "SMT" - val start_tac = (if Config.get ctxt profiling - then Clocks.start_clock_tac prof_name - else all_tac) - val stop_tac = (if Config.get ctxt profiling - then Clocks.stop_clock_tac prof_name - else all_tac) -in - (start_tac - THEN (SMT_patch_Solver.smt_get_model_tac context (rules @ prems) 1) - THEN stop_tac) thm - |> Seq.hd - |> Seq.single - - handle SMT_patch_Solver.SMT_Model {const_defs} => - let - val _ = if Config.get ctxt profiling then Clocks.stop_clock prof_name else () - val inv_insts = map swap (snd schematics) - val term_insts = BackendUtils.uncertify_pairs inv_insts - (* val free_constraints' = map (subst_atomic term_insts) free_constraints *) - val const_defs' = map (apply2 (subst_atomic term_insts)) const_defs - in - raise SMT_patch_Solver.SMT_Model {const_defs = const_defs'} - end - - | exc => let val _ = if Config.get ctxt profiling - then Clocks.stop_clock prof_name - else () - in - raise exc - end - -end)) ctxt - - -fun try_inst_tac ctxt insts n thm = let - val thm' = Drule.instantiate_normalize ([], insts) thm -in - BackendUtils.solve_by_simp_or_auto_tac ctxt n thm' -end - -fun smt_inst_tac ctxt rules n thm = let - val prem = Logic.nth_prem(n, Thm.prop_of thm) - val neg = @{const Pure.imp} - $ prem - $ (@{const "Trueprop"} $ @{const "False"}) - val goal = Goal.init (Thm.cterm_of ctxt neg) -in - (Seq.hd (smt_ce_tac ctxt rules 1 goal); Seq.empty) - handle SMT_patch_Solver.SMT_Model {const_defs} - => try_inst_tac ctxt (mkinsts ctxt (BackendUtils.premvars n thm) ((* free_constraints @ *) const_defs) (Config.get ctxt TestEnv.smt_model)) n thm -end - -fun unbounded_smt_tac ctxt rules = - Subgoal.FOCUS_PARAMS(fn {context, ...} => - (fn thm => - EVERY[smt_inst_tac context rules 1, - ALLGOALS (full_simp_tac context)] thm - )) ctxt - -fun testgen_smt_tac ctxt = - let - val thy = Proof_Context.theory_of ctxt - val smt_facts = map (trig_thm thy) (TestEnv.get_smt_facts ctxt) - in - unbounded_smt_tac ctxt smt_facts - end - -end - -*} - - -(* Choice of the interface *) - -ML{* - -structure SMTBackend : SMTBACKEND = struct - - fun testgen_smt_tac ctxt = - if Config.get ctxt TestEnv.SMT then - OldSMTBackend.testgen_smt_tac ctxt - else if Config.get ctxt TestEnv.SMT2 then - NewSMTBackend.testgen_smt_tac ctxt - else - K no_tac - -end - -*} - - - -end diff --git a/src/test-gen/src/main/SharedMemory.thy b/src/test-gen/src/main/SharedMemory.thy deleted file mode 100644 index 3ded9c4..0000000 --- a/src/test-gen/src/main/SharedMemory.thy +++ /dev/null @@ -1,1389 +0,0 @@ -chapter{* A Shared-Memory-Model*} - -theory SharedMemory -imports Main -begin -section {*Shared Memory Model\label{SharedMemoryThy}*} -subsection{* Prerequisites *} - -text{* Prerequisite: a generalization of @{thm [source] fun_upd_def}: @{thm fun_upd_def}. - It represents updating modulo a sharing equivalence, i.e. 
an equivalence relation - on parts of the domain of a memory. *} - -definition fun_upd_equivp :: "('\ \ '\ \ bool) \ ('\ \ '\) \ '\ \ '\ \ ('\ \ '\)" -where "fun_upd_equivp eq f a b = (\x. if eq x a then b else f x)" - - ---{*This lemma is the same as @{thm [source] Fun.fun_upd_same}: @{thm Fun.fun_upd_same}; applied - on our genralization @{thm fun_upd_equivp_def} of @{thm fun_upd_def}. This proof tell - if our function @{term "fun_upd_equivp (op =) f x y" } is equal to @{term f} this is equivalent - to the fact that @{term "f x = y"}*} - -lemma fun_upd_equivp_iff: "((fun_upd_equivp (op =) f x y) = f) = (f x = y)" - by (simp add :fun_upd_equivp_def, safe, erule subst, auto) - ---{*Now we try to proof the same lemma applied on any equivalent relation @{term "equivp eqv"} - instead of the equivalent relation @{term "op ="}. For this case, we had split the lemma to 2 - parts. the lemma @{term "fun_upd_equivp_iff_part1"} to proof the case when - @{term "eq (f a) b \ eq (fun_upd_equivp eqv f a b z) (f z) "}, and the second part is - the lemma @{term "fun_upd_equivp_iff_part2"} to proof the case - @{term "equivp eqv \ fun_upd_equivp eqv f a b = f \ f a = b"}. *} - -lemma fun_upd_equivp_iff_part1: - assumes is_equivp: "equivp R" - and 2: "(\z. R x z \ R (f z) y) " - shows "R (fun_upd_equivp R f x y z) (f z)" - using assms - unfolding fun_upd_equivp_def - by (auto simp: Equiv_Relations.equivp_reflp Equiv_Relations.equivp_symp) - -lemma fun_upd_equivp_iff_part2: - assumes is_equivp: "equivp R" - shows "fun_upd_equivp R f x y = f \ f x = y" - using assms - apply (simp add :fun_upd_equivp_def, safe) - apply (erule subst, auto simp: Equiv_Relations.equivp_reflp) -done - ---{*Just anotther way to formalise @{thm fun_upd_equivp_iff_part2} without using the strong equality*} - -lemma - assumes is_equivp:"equivp R" - and 2: "(\z. R x z \ R (fun_upd_equivp R f x y z) (f z))" - shows "R y (f x)" - using assms - by (simp add: fun_upd_equivp_def Equiv_Relations.equivp_symp equivp_reflp) - -text{*this lemma is the same in @{thm fun_upd_equivp_iff_part1} where @{term "(op =)"} is - generalized by another equivalence relation*} - -lemma fun_upd_equivp_idem: - assumes image:"f x = y" - shows "(fun_upd_equivp (op =) f x y) = f" - using assms - by (simp only: fun_upd_equivp_iff) - -lemma fun_upd_equivp_triv : - "fun_upd_equivp (op =) f x (f x) = f " - by (simp only: fun_upd_equivp_iff) - ---{*This is the generalization of @{thm fun_upd_equivp_triv} on a given equivalence relation*} - -lemma fun_upd_equivp_triv_part1 : - "equivp R \ (\z. R x z \fun_upd_equivp (R') f x (f x) z) \ f x " - apply (auto simp:fun_upd_equivp_def) - apply (metis equivp_reflp) -done - -lemma fun_upd_equivp_triv_part2 : - "equivp R \ (\z. 
R x z \ f z ) \ fun_upd_equivp (R') f x (f x) x " - by (simp add:fun_upd_equivp_def equivp_reflp split: if_split) - -lemma fun_upd_equivp_apply [simp]: - "(fun_upd_equivp (op =) f x y) z = (if z = x then y else f z)" - by (simp only: fun_upd_equivp_def) - ---{*This is the generalization of @{thm fun_upd_equivp_apply} with e given equivalence relation and - not only with @{term "op ="}*} - -lemma fun_upd_equivp_apply1 [simp]: - "equivp R \(fun_upd_equivp R f x y) z = (if R z x then y else f z)" - by (simp add: fun_upd_equivp_def) - -lemma fun_upd_equivp_same: - "(fun_upd_equivp (op =) f x y) x = y" - by (simp only: fun_upd_equivp_def)simp - ---{*This is the generalization of @{thm fun_upd_equivp_same} with a given equivalence relation*} - -lemma fun_upd_equivp_same1: - assumes is_equivp:"equivp R" - shows "(fun_upd_equivp R f x y) x = y" - using assms - by (simp add: fun_upd_equivp_def equivp_reflp) - - -text{* For the special case that {@term eq} is just the equality {@term "op ="}, sharing -update and classical update are identical.*} - -lemma fun_upd_equivp_vs_fun_upd: "(fun_upd_equivp (op =)) = fun_upd" - by(rule ext, rule ext, rule ext,simp add:fun_upd_def fun_upd_equivp_def) - - - -subsection{* Definition of the shared-memory type*} - -typedef ('\, '\) memory = "{(\::'\ \'\, R). equivp R \ (\x y. R x y \ \ x = \ y)}" -proof - show "(Map.empty, (op =)) \ ?memory" - by (auto simp: identity_equivp) -qed - -fun memory_inv :: "('\ \ '\ option) \ ('\ \ '\ \ bool) \ bool" -where "memory_inv (Pair f R) = (equivp R \ (\x y. R x y \ f x = f y))" - - - -lemma Abs_Rep_memory [simp]: - "Abs_memory (Rep_memory \) = \" - by (simp add:Rep_memory_inverse) - -lemma memory_invariant [simp]: - "memory_inv \_rep = (Rep_memory (Abs_memory \_rep) = \_rep)" - using Rep_memory [of "Abs_memory \_rep"] Abs_memory_inverse mem_Collect_eq - case_prodE case_prodI2 memory_inv.simps - by smt - -lemma Pair_code_eq : - "Rep_memory \ = Pair (fst (Rep_memory \)) (snd (Rep_memory \))" - by (simp add: Product_Type.surjective_pairing) - -lemma snd_memory_equivp [simp]: "equivp(snd(Rep_memory \))" - by(insert Rep_memory[of \], auto) - -subsection{* Operations on Shared-Memory *} - -setup_lifting type_definition_memory (*Mandatory for lift_definition*) - -abbreviation mem_init :: "('a \ 'b option) \ ('a \ 'a \ bool)" -where - "mem_init \ (Map.empty, (op =))" - -lemma memory_init_eq_sound: - "mem_init \ {(\, R). equivp R \ (\x y. R x y \ \ x = \ y)}" -proof - - obtain mem and R - where Pair: "(mem, R) =mem_init " and Eq: "equivp R" - using identity_equivp by auto - have D1: "R = (op =)" - and D2: "mem = Map.empty " - using Pair prod.inject - by auto - moreover have inv_part2: "\ x y . R x y \ mem x = mem y" - unfolding D1 D2 by auto - ultimately show ?thesis - using Eq Abs_memory_cases Pair_inject Rep_memory_cases Rep_memory_inverse - identity_equivp memory_inv.elims(3) memory_invariant - by auto -qed - -lift_definition init :: "('\, '\) memory" - is "mem_init :: ('\ \ '\ option) \ ('\ \ '\ \ bool)" - using memory_init_eq_sound by simp - -(*code generation test*) -value "init::(nat,int)memory" -value "map (\x. the (fst (Rep_memory init)x)) [1 .. 10]" -value "take (10) (map (Pair Map.empty) [(op =) ])" -value "replicate 10 init" -term "Rep_memory \" -term "[(\::nat \ int, R )<-xs . equivp R \ (\x y. R x y \ \ x = \ y)]" - -(* deprecated >>>> *) -definition init_mem_list :: "'\ list \ (nat, '\) memory" -where "init_mem_list s = Abs_memory (let h = zip (map nat [0 .. int(length s)]) s - in foldl (\x (y,z). 
fun_upd x y (Some z)) - Map.empty h, - op =)" - -(* <<<<<<<<<<<<<<<< *) - - -subsubsection{* Memory Read Operation*} - -definition lookup :: "('\, '\) memory \ '\ \ '\" (infixl "$" 100) -where "\ $ x = the (fst (Rep_memory \) x)" - -subsubsection{* Memory Update Operation*} - -fun Pair_upd_lifter:: "('\ \ '\ option) \ ('\ \ '\ \ bool) \ '\ \ '\ \ - ('\ \ '\ option) \ ('\ \ '\ \ bool)" - where "Pair_upd_lifter ((f, R)) x y = (fun_upd_equivp R f x (Some y), R)" - -lemma update\<^sub>_sound': - assumes "\ \ {(\, R). equivp R \ (\x y. R x y \ \ x = \ y)}" - shows "Pair_upd_lifter \ x y \ {(\, R). equivp R \ (\x y. R x y \ \ x = \ y)}" -proof - - obtain mem and R - where Pair: "(mem, R) = \" and Eq: "equivp R" and Mem: "\ x y . R x y \ mem x = mem y" - using assms equivpE by auto - obtain mem' and R' - where Pair': "(mem', R') = Pair_upd_lifter \ x y" - using surjective_pairing by metis - have Def1: "mem' = fun_upd_equivp R mem x (Some y)" - and Def2: "R' = R" - using Pair Pair' by auto - have Eq': "equivp R'" - using Def2 Eq by auto - moreover have "\ y z . R' y z \ mem' y = mem' z" - using Mem equivp_symp equivp_transp - unfolding Def1 Def2 by (metis Eq fun_upd_equivp_def) - ultimately show ?thesis - using Pair' by auto -qed - -lemma memory_inv_update_rep: - "memory_inv (Pair_upd_lifter (Rep_memory \) x y)" -proof - - have *:"(equivp o snd) (Pair_upd_lifter (Rep_memory \) x y)" - and **:"(\w z. snd (Pair_upd_lifter (Rep_memory \) x y) w z \ - fst (Pair_upd_lifter (Rep_memory \) x y) w = - fst (Pair_upd_lifter (Rep_memory \) x y) z)" - using update\<^sub>_sound'[OF Rep_memory,of \ x y] - by auto - have ***:"memory_inv (Pair_upd_lifter (Rep_memory \) x y) = - memory_inv (fst (Pair_upd_lifter (Rep_memory \) x y), - snd (Pair_upd_lifter (Rep_memory \) x y))" - using surjective_pairing[of "(Pair_upd_lifter (Rep_memory \) x y)"] - by simp - show ?thesis - apply (simp only: * ** *** memory_inv.simps) - using * ** - apply simp - done -qed - -lift_definition update :: " ('\, '\) memory \'\ \ '\ \ ('\, '\) memory" ("_ '(_ :=\<^sub>$ _')" 100) - is Pair_upd_lifter - using update\<^sub>_sound' - by simp - -lemma update': "\ (x :=\<^sub>$ y) = Abs_memory (fun_upd_equivp (snd (Rep_memory \)) - (fst (Rep_memory \)) x (Some y), (snd (Rep_memory \)))" - using Rep_memory_inverse surjective_pairing Pair_upd_lifter.simps update.rep_eq - by metis - -(*update on list*) - -fun update_list_rep :: "('\ \ '\) \ ('\ \ '\ \ bool) \ ('\ \ '\ )list \ - ('\ \ '\) \ ('\ \ '\ \ bool)" -where "update_list_rep (f, R) nlist = - (foldl (\(f, R)(addr,val). - Pair_upd_lifter (f, R) addr val) (f, R) nlist)" - -lemma update_list_rep_p: - assumes 1: "P \" - and 2: "\src dst \. P \ \ P (Pair_upd_lifter \ src dst)" - shows "P (update_list_rep \ list)" - using 1 2 - apply (induct "list" arbitrary: \) - apply (force,safe) - apply (simp del: Pair_upd_lifter.simps) - using surjective_pairing - apply simp -done - -lemma update_list_rep_sound: - assumes 1: "\ \ {(\, R). equivp R \ (\x y. R x y \ \ x = \ y)}" - shows "update_list_rep \ (nlist) \ {(\, R). equivp R \ (\x y. 
R x y \ \ x = \ y)}" - using 1 - apply (elim update_list_rep_p) - apply (erule update\<^sub>_sound') -done - -lift_definition update_list :: "('\, '\) memory \ ('\ \ '\ )list \ ('\, '\) memory" (infixl "'/:=\<^sub>$" 30) - is update_list_rep - using update_list_rep_sound by simp - -lemma update_list_Nil[simp]: "(\ /:=\<^sub>$ []) = \" - unfolding update_list_def - by(simp,subst surjective_pairing[of "Rep_memory \"], - subst update_list_rep.simps, simp) - -lemma update_list_Cons[simp] : "(\ /:=\<^sub>$ ((a,b)#S)) = (\(a :=\<^sub>$ b) /:=\<^sub>$ S)" - unfolding update_list_def - apply(simp,subst surjective_pairing[of "Rep_memory \"], - subst update_list_rep.simps, simp) - apply(subst surjective_pairing[of "Rep_memory (\ (a :=\<^sub>$ b))"], - subst update_list_rep.simps, simp) - apply(simp add: update_def) - apply(subst Abs_memory_inverse) - apply (metis (lifting, mono_tags) Rep_memory update\<^sub>_sound') - apply simp -done - -text{* Type-invariant: *} - -lemma update\<^sub>_sound: - assumes "Rep_memory \ = (\', eq)" - shows "(fun_upd_equivp eq \' x (Some y), eq) \ - {(\, R). equivp R \ (\x y. R x y \ \ x = \ y)}" - using assms insert Rep_memory[of "\"] - apply(auto simp: fun_upd_equivp_def) - apply(rename_tac "xa" "xb", erule contrapos_np) - apply(rule_tac R=eq and y=xa in equivp_transp,simp) - apply(erule equivp_symp, simp_all) - apply(rename_tac "xa" "xb", erule contrapos_np) - apply(rule_tac R=eq and y=xb in equivp_transp,simp_all) -done - -subsubsection{* Memory Transfer Based on Sharing Transformation*} - -(*ref: def by Oto Havle *) - -fun transfer_rep :: "('\ \ '\) \ ('\\'\ \ bool) \ '\ \ '\ \ ('\\'\) \ ('\\'\ \ bool)" -where "transfer_rep (m, r) src dst = - (m o (id (dst := src)), - (\ x y . r ((id (dst := src)) x) ((id (dst := src)) y)))" - -lemma transfer_rep_simp : - "transfer_rep X src dst = - ((fst X) o (id (dst := src)), - (\ x y . (snd X) ((id (dst := src)) x) ((id (dst := src)) y)))" - by(subst surjective_pairing[of "X"], - subst transfer_rep.simps, simp) - -(*ref: proof by Oto Havle *) - -lemma transfer_rep_sound: - assumes "\ \ {(\, R). equivp R \ (\x y. R x y \ \ x = \ y)}" - shows "transfer_rep \ src dst \ {(\, R). equivp R \ (\x y. R x y \ \ x = \ y)}" -proof - - obtain mem and R - where P: "(mem, R) = \" and E: "equivp R" and M: "\ x y . R x y \ mem x = mem y" - using assms equivpE by auto - obtain mem' and R' - where P': "(mem', R') = transfer_rep \ src dst" - by (metis surj_pair) - have D1: "mem' = (mem o (id (dst := src)))" - and D2: "R' = (\ x y . R ((id (dst := src)) x) ((id (dst := src)) y))" - using P P' by auto - have "equivp R'" - using E unfolding D2 equivp_def by metis - moreover have "\ y z . R' y z \ mem' y = mem' z" - using M unfolding D1 D2 by auto - ultimately show ?thesis - using P' by auto -qed - -lift_definition - transfer :: "('\,'\)memory \ '\ \ '\ \ ('\, '\)memory" ("_ '(_ \ _')" [0,111,111]110) - is transfer_rep - using transfer_rep_sound - by simp - -lemma transfer_rep_sound2 : - "transfer_rep (Rep_memory \) a b \ - {(\, R). equivp R \ (\x y. R x y \ \ x = \ y)}" - by (metis (lifting, mono_tags) Rep_memory transfer_rep_sound) - -(* the following share_list construction is pretty indirect and motivated by code-generation - principles; why not a definition which is more direct ? e.g. :*) - -fun share_list2 :: "('\, '\) memory \ ('\ \ '\ )list \ ('\, '\) memory" (infix "'/\" 60) -where "\ /\ S = (foldl (\ \ (a,b). 
(\ (a\b))) \ S)" - -lemma sharelist2_Nil[simp] : "\ /\ [] = \" by simp - -lemma sharelist2_Cons[simp] : "\ /\ ((a,b)#S) = (\(a\b) /\ S)" by simp - -(* deprecated ??? >>> *) - -fun share_list_rep :: "('\ \ '\) \ ('\ \ '\ \ bool) \ ('\ \ '\ )list \ - ('\ \ '\) \ ('\ \ '\ \ bool)" -where "share_list_rep (f, R) nlist = - (foldl (\(f, R) (src,dst). transfer_rep (f, R) src dst) (f, R) nlist)" - -fun share_list_rep' :: "('\ \ '\) \ ('\ \ '\ \ bool) \ ('\ \ '\)list \ - ('\ \ '\) \ ('\ \ '\ \ bool)" -where "share_list_rep' (f, R) [] = (f, R)" - | "share_list_rep' (f, R) (n#nlist) = share_list_rep' (transfer_rep(f,R)(fst n)(snd n)) nlist" - -lemma share_list_rep'_p: - assumes 1: "P \" - and 2: " \src dst \. P \ \ P (transfer_rep \ src dst)" - shows "P (share_list_rep' \ list)" - using 1 2 - apply (induct "list" arbitrary: \ P) - apply force - apply safe - apply (simp del: transfer_rep.simps) - using surjective_pairing - apply metis -done - -lemma foldl_preserve_p: - assumes 1: "P mem" - and 2: "\y z mem . P mem \ P (f mem y z)" - shows "P (foldl (\a (y, z). f mem y z) mem list)" - using 1 2 - apply (induct "list" arbitrary: f mem , auto) - apply metis -done - -lemma share_list_rep_p: - assumes 1: "P \" - and 2: "\src dst \. P \ \ P (transfer_rep \ src dst)" - shows "P (share_list_rep \ list)" - using 1 2 - apply (induct "list" arbitrary: \) - apply force - apply safe - apply (simp del: transfer_rep.simps) - using surjective_pairing - apply metis -done - -text{* The modification of the underlying equivalence relation on adresses is only defined - on very strong conditions --- which are fulfilled for the empty memory, but difficult to - establish on a non-empty-one. And of course, the given relation must be proven to - be an equivalence relation. So, the case is geared towards shared-memory scenarios - where the sharing is defined initially once and for all. *} - -definition update\<^sub>R :: "('\, '\)memory \ ('\ \ '\ \ bool) \ ('\, '\)memory" ("_ :=\<^sub>R _" 100) -where "\ :=\<^sub>R R \ Abs_memory (fst(Rep_memory \), R)" - -definition lookup\<^sub>R :: "('\, '\)memory \ ('\ \ '\ \ bool)" ("$\<^sub>R _" 100) -where "$\<^sub>R \ \ (snd(Rep_memory \))" - -lemma update\<^sub>R_comp_lookup\<^sub>R: -assumes equiv : "equivp R" - and sharing_conform : " \ x y. R x y \ fst(Rep_memory \) x = fst(Rep_memory \) y" -shows "($\<^sub>R (\ :=\<^sub>R R)) = R" -unfolding lookup\<^sub>R_def update\<^sub>R_def -by(subst Abs_memory_inverse, simp_all add: equiv sharing_conform) - -subsection{* Sharing Relation Definition*} - -definition sharing :: "'\ \ ('\, '\)memory \ '\ \ bool" - ("(_ shares()\<^bsub>_\<^esub>/ _)" [201, 0, 201] 200) -where "(x shares\<^bsub>\\<^esub> y) \ (snd(Rep_memory \) x y)" - -definition Sharing :: "'\ set \ ('\, '\)memory \ '\ set \ bool" - ("(_ Shares()\<^bsub>_\<^esub>/ _)" [201, 0, 201] 200) -where "(X Shares\<^bsub>\\<^esub> Y) \ (\ x\X. \ y\Y. x shares\<^bsub>\\<^esub> y)" - -subsection{* Properties on Sharing Relation*} - -lemma sharing_charn: - "equivp (snd (Rep_memory \))" - by auto - -lemma sharing_charn': - assumes 1: "(x shares\<^bsub>\\<^esub> y)" - shows" (\R. equivp R \ R x y)" - by (auto simp add: snd_def equivp_def) - -lemma sharing_charn2: - shows"\x y. 
(equivp (snd (Rep_memory \)) \ (snd (Rep_memory \)) x y) " - using sharing_charn [THEN equivp_reflp ] - by (simp)fast - ---{*Lemma to show that @{thm sharing_def} is reflexive*} -lemma sharing_refl: "(x shares\<^bsub>\\<^esub> x)" - using insert Rep_memory[of "\"] - by (auto simp: sharing_def elim: equivp_reflp) - ---{*Lemma to show that @{thm sharing_def} is symetric*} -lemma sharing_sym [sym]: - assumes 1: "x shares\<^bsub>\\<^esub> y" - shows "y shares\<^bsub>\\<^esub> x" - using 1 Rep_memory[of "\"] - by (auto simp: sharing_def elim: equivp_symp) - -lemma sharing_commute : "x shares\<^bsub>\\<^esub> y = (y shares\<^bsub>\\<^esub> x)" - by(auto intro: sharing_sym) - ---{*Lemma to show that @{thm sharing_def} is transitive*} - -lemma sharing_trans [trans]: - assumes 1: "x shares\<^bsub>\\<^esub> y" - and 2: "y shares\<^bsub>\\<^esub> z" - shows "x shares\<^bsub>\\<^esub> z" - using assms insert Rep_memory[of "\"] - by(auto simp: sharing_def elim: equivp_transp) - -lemma shares_result: - assumes 1: "x shares\<^bsub>\\<^esub> y" - shows "fst (Rep_memory \) x = fst (Rep_memory \) y" - using 1 - unfolding sharing_def - using Rep_memory[of "\"] - by auto - -lemma sharing_init: - assumes 1: "i \ k" - shows "\(i shares\<^bsub>init\<^esub> k)" - unfolding sharing_def init_def - using 1 - by (auto simp: Abs_memory_inverse identity_equivp) - -lemma shares_init[simp]: "(x shares\<^bsub>init\<^esub> y) = (x=y)" - unfolding sharing_def init_def - by (metis init_def sharing_init sharing_def sharing_refl) - -lemma sharing_init_mem_list: - assumes 1: "i \ k" - shows "\(i shares\<^bsub>init_mem_list S\<^esub> k)" - unfolding sharing_def init_mem_list_def - using 1 - by (auto simp: Abs_memory_inverse identity_equivp) - -(* experimental: a simultaneous update to None for all elements in X and their equivalents. *) -definition reset :: "('\, '\) memory \ '\ set\ ('\, '\)memory" ("_ '(reset _')" 100) -where "\ (reset X) = (let (\',eq) = Rep_memory \; - eq' = \ a b. eq a b \ (\x\X. eq a x \ eq b x) - in if X={} then \ - else Abs_memory (fun_upd_equivp eq' \' (SOME x. x\X) None, eq))" - -lemma reset_mt : "\ (reset {}) = \" - unfolding reset_def Let_def - by simp - -lemma reset_sh : -assumes * : "(x shares\<^bsub>\\<^esub> y)" - and **: "x \ X" -shows "\ (reset X) $ y = None" -oops - -subsection{* Memory Domain Definition*} - -definition Domain :: "('\, '\)memory \ '\ set" -where "Domain \ = dom (fst (Rep_memory \))" - -subsection{* Properties on Memory Domain*} - -lemma Domain_charn: - assumes 1:"x \ Domain \" - shows "\ y. Some y = fst (Rep_memory \) x" - using 1 - by(auto simp: Domain_def) - -lemma Domain_charn1: - assumes 1:"x \ Domain \" - shows "\ y. 
the (Some y) = \ $ x" - using 1 - by(auto simp: Domain_def lookup_def) - ---{*This lemma says that if @{term "x"} and @{term "y"} are quivalent this - means that they are in the same set of equivalent classes*} - -lemma shares_dom [code_unfold, intro]: - assumes 1:"x shares\<^bsub>\\<^esub> y" - shows "(x \ Domain \) = (y \ Domain \)" - using insert Rep_memory[of "\"] 1 - by (auto simp: sharing_def Domain_def) - -lemma Domain_mono: - assumes 1: "x \ Domain \" - and 2: "(x shares\<^bsub>\\<^esub> y)" - shows "y \ Domain \" - using 1 2 Rep_memory[of "\"] - by (auto simp add: sharing_def Domain_def ) - -corollary Domain_nonshares : - assumes 1: "x \ Domain \" - and 2: "y \ Domain \ " - shows "\(x shares\<^bsub>\\<^esub> y)" - using 1 2 Domain_mono - by fast - -lemma Domain_init[simp] : "Domain init = {}" - unfolding init_def Domain_def - by(simp_all add:identity_equivp Abs_memory_inverse) - -lemma Domain_update[simp] :"Domain (\ (x :=\<^sub>$ y)) = (Domain \) \ {y . y shares\<^bsub>\\<^esub> x}" -unfolding update_def Domain_def sharing_def -proof (simp_all) - have * : "Pair_upd_lifter (Rep_memory \) x y \ {(\, R). equivp R \ (\x y. R x y \ \ x = \ y)}" - by (simp, metis (lifting, mono_tags) Rep_memory mem_Collect_eq update\<^sub>_sound') - have ** : "snd (Rep_memory \) x x" - by(metis equivp_reflp sharing_charn2) - show "dom (fst (Rep_memory (Abs_memory (Pair_upd_lifter (Rep_memory \) x y)))) = - dom (fst (Rep_memory \)) \ {y. snd (Rep_memory \) y x}" - apply(simp_all add: Abs_memory_inverse[OF *] ) - apply(subst surjective_pairing [of "(Rep_memory \)"]) - apply(subst Pair_upd_lifter.simps, simp) - apply(auto simp: ** fun_upd_equivp_def) - done -qed - -lemma Domain_share1: -assumes 1 : "a \ Domain \" - and 2 : "b \ Domain \" -shows "Domain (\(a\b)) = Domain \" -proof(simp_all add:Set.set_eq_iff, rule allI) - fix x - have ***: "transfer_rep (Rep_memory \) (id a) (id b) \ - {(\, R). equivp R \ (\x y. R x y \ \ x = \ y)}" - by (metis (lifting, mono_tags) Rep_memory transfer_rep_sound) - show "(x \ Domain (\ (a \ b))) = (x \ Domain \)" - unfolding sharing_def Domain_def transfer_def map_fun_def o_def - apply(subst Abs_memory_inverse[OF ***]) - apply(insert 1 2, simp add: o_def transfer_rep_simp Domain_def ) - apply(auto split: if_split if_split_asm ) - done -qed - -lemma Domain_share_tgt : - assumes 1:"a \ Domain \" - shows " b \ Domain (\ (a \ b))" - using 1 - unfolding sharing_def Domain_def transfer_def map_fun_def o_def id_def - apply(subst Abs_memory_inverse[OF transfer_rep_sound2]) - unfolding sharing_def Domain_def transfer_def map_fun_def o_def id_def - apply(simp add: o_def transfer_rep_simp Domain_def ) - apply(auto split: if_split) - done - -lemma Domain_share2 : -assumes 1 : "a \ Domain \" - and 2 : "b \ Domain \" -shows "Domain (\(a\b)) = (Domain \ - {x. 
x shares\<^bsub>\\<^esub> b} \ {b})" -proof(simp_all add:Set.set_eq_iff, auto) - fix x - assume 3 : "x \ SharedMemory.Domain (\ (a \ b))" - and 4 : "x \ b" - show "x \ SharedMemory.Domain \" - apply(insert 3 4) - unfolding sharing_def Domain_def transfer_def map_fun_def o_def id_def - apply(subst (asm) Abs_memory_inverse[OF transfer_rep_sound2]) - apply(insert 1 , simp add: o_def transfer_rep_simp Domain_def ) - apply(auto split: if_split if_split_asm ) - done -next - fix x - assume 3 : "x \ Domain (\ (a \ b))" - and 4 : "x \ b" - and 5 : "x shares\<^bsub>\\<^esub> b" - have ** : "x \ Domain \" using "2" "5" Domain_mono by (fast ) - show "False" - apply(insert 3 4 5, erule contrapos_pp, simp) - unfolding sharing_def Domain_def transfer_def map_fun_def o_def id_def - apply(subst Abs_memory_inverse[OF transfer_rep_sound2]) - apply(insert 1 , simp add: o_def transfer_rep_simp Domain_def ) - apply(auto split: if_split if_split_asm ) - using "**" SharedMemory.Domain_def domI apply fast - done -next - show "b \ Domain (\ (a \ b))" - using 1 Domain_share_tgt by fast -next - fix x - assume 3 : "x \ Domain \" - and 4 : "\ x shares\<^bsub>\\<^esub> b " - show " x \ Domain (\ (a \ b))" - unfolding sharing_def Domain_def transfer_def map_fun_def o_def id_def - apply(subst Abs_memory_inverse[OF transfer_rep_sound2]) - apply(insert 1 , simp add: o_def transfer_rep_simp Domain_def ) - apply(auto split: if_split if_split_asm ) - using "3" SharedMemory.Domain_def domD - apply fast - done -qed - -lemma Domain_share3: -assumes 1 : "a \ Domain \" -shows "Domain (\(a\b)) = (Domain \ - {b}) " -proof(simp_all add:Set.set_eq_iff, auto) - fix x - assume 3: "x \ Domain (\ (a \ b))" - show "x \ Domain \" - apply(insert 3) - unfolding sharing_def Domain_def transfer_def map_fun_def o_def id_def - apply(subst (asm) Abs_memory_inverse[OF transfer_rep_sound2]) - apply(insert 1 , simp add: o_def transfer_rep_simp Domain_def ) - apply(auto split: if_split if_split_asm ) - done -next - assume 3: "b \ Domain (\ (a \ b))" - show False - apply(insert 1 3) - apply(erule contrapos_pp[of "b \ SharedMemory.Domain (\ (a \ b))"], simp) - unfolding sharing_def Domain_def transfer_def map_fun_def o_def id_def - apply(subst Abs_memory_inverse[OF transfer_rep_sound2]) - apply(insert 1 , simp add: o_def transfer_rep_simp Domain_def ) - apply(auto split: if_split ) - done -next - fix x - assume 3: "x \ Domain \ " - and 4: "x \ b" - show "x \ Domain (\ (a \ b))" - apply(insert 3 4) - unfolding sharing_def Domain_def transfer_def map_fun_def o_def id_def - apply(subst Abs_memory_inverse[OF transfer_rep_sound2]) - apply(insert 1 , simp add: o_def transfer_rep_simp Domain_def ) - apply(auto split: if_split if_split_asm ) - done -qed - -lemma Domain_transfer : -"Domain (\(a\b)) = (if a \ Domain \ - then (Domain \ - {b}) - else if b \ Domain \ - then (Domain \ - {x. 
x shares\<^bsub>\\<^esub> b} \ {b}) - else Domain \ )" - using Domain_share1 Domain_share2 Domain_share3 - by metis - -lemma Domain_transfer_approx: - "Domain (\(a\b)) \ Domain (\) \ {b}" - by(auto simp: Domain_transfer) - -lemma Domain_update1: - "add \ Domain (\(add :=\<^sub>$ val))" - by (simp add: sharing_refl) - -subsection{* Sharing Relation and Memory Update*} - -lemma sharing_upd: "x shares\<^bsub>(\(a :=\<^sub>$ b))\<^esub> y = x shares\<^bsub>\\<^esub> y" - using insert Rep_memory[of "\"] - by(auto simp: sharing_def update_def Abs_memory_inverse[OF update\<^sub>_sound]) - ---{*this lemma says that if we do an update on an adress @{term "x"} all the elements that are - equivalent of @{term "x"} are updated*} - - -lemma update'': - "\ (x :=\<^sub>$ y) = Abs_memory(fun_upd_equivp (\x y. x shares\<^bsub>\\<^esub> y) (fst (Rep_memory \)) x (Some y), - snd (Rep_memory \))" - unfolding update_def sharing_def - by (metis update' update_def) - -theorem update_cancel: -assumes "x shares\<^bsub>\\<^esub> x'" -shows "\(x :=\<^sub>$ y)(x' :=\<^sub>$ z) = (\(x' :=\<^sub>$ z))" - proof - - have * : "(fun_upd_equivp(snd(Rep_memory \))(fst(Rep_memory \)) x (Some y),snd (Rep_memory \)) - \ {(\, R). equivp R \ (\x y. R x y \ \ x = \ y)}" - unfolding fun_upd_equivp_def - by(rule update\<^sub>_sound[simplified fun_upd_equivp_def], simp) - have ** : "\ R \. equivp R \ R x x' \ - fun_upd_equivp R (fun_upd_equivp R \ x (Some y)) x' (Some z) - = fun_upd_equivp R \ x' (Some z)" - unfolding fun_upd_equivp_def - apply(rule ext) - apply(case_tac "R xa x'", auto) - apply(erule contrapos_np, erule equivp_transp, simp_all) - done - show ?thesis - apply(simp add: update') - apply(insert sharing_charn assms[simplified sharing_def]) - apply(simp add: Abs_memory_inverse [OF *] **) - done -qed - -theorem update_commute: - assumes 1:"\ (x shares\<^bsub>\\<^esub> x')" - shows "(\(x :=\<^sub>$ y))(x' :=\<^sub>$ z) = (\(x':=\<^sub>$ z)(x :=\<^sub>$ y))" - proof - - have * : "\ x y.(fun_upd_equivp(snd(Rep_memory \))(fst(Rep_memory \)) x (Some y),snd (Rep_memory \)) - \ {(\, R). equivp R \ (\x y. R x y \ \ x = \ y)}" - unfolding fun_upd_equivp_def - by(rule update\<^sub>_sound[simplified fun_upd_equivp_def], simp) - have ** : "\ R \. 
equivp R \ \ R x x' \ - fun_upd_equivp R (fun_upd_equivp R \ x (Some y)) x' (Some z) = - fun_upd_equivp R (fun_upd_equivp R \ x' (Some z)) x (Some y)" - unfolding fun_upd_equivp_def - apply(rule ext) - apply(case_tac "R xa x'", auto) - apply(erule contrapos_np) - apply(frule equivp_transp, simp_all) - apply(erule equivp_symp, simp_all) - done - show ?thesis - apply(simp add: update') - apply(insert assms[simplified sharing_def]) - apply(simp add: Abs_memory_inverse [OF *] **) - done -qed - -subsection{* Properties on lookup and update wrt the Sharing Relation*} - -lemma update_triv: - assumes 1: "x shares\<^bsub>\\<^esub> y" - and 2: "y \ Domain \" - shows "\ (x :=\<^sub>$ (\ $ y)) = \" -proof - - { - fix z - assume zx: "z shares\<^bsub>\\<^esub> x" - then have zy: "z shares\<^bsub>\\<^esub> y" - using 1 by (rule sharing_trans) - have F: "y \ Domain \ \ x shares\<^bsub>\\<^esub> y - \ Some (the (fst (Rep_memory \) x)) = fst (Rep_memory \) y" - by(auto simp: Domain_def dest: shares_result) - have "Some (the (fst (Rep_memory \) y)) = fst (Rep_memory \) z" - using zx and shares_result [OF zy] shares_result [OF zx] - using F [OF 2 1] - by simp - } note 3 = this - show ?thesis - unfolding update'' lookup_def fun_upd_equivp_def - by (simp add: 3 Rep_memory_inverse if_cong) -qed - -lemma update_idem' : - assumes 1: "x shares\<^bsub>\\<^esub> y" - and 2: "x \ Domain \" - and 3: "\ $ x = z" - shows "\(y:=\<^sub>$ z) = \" -proof - - have * : "y \ Domain \" - by(simp add: shares_dom[OF 1, symmetric] 2) - have **: "\ (x :=\<^sub>$ (\ $ y)) = \" - using 1 2 * - by (simp add: update_triv) - also have "(\ $ y) = \ $ x" - by (simp only: lookup_def shares_result [OF 1]) - finally show ?thesis - using 1 2 3 sharing_sym update_triv - by fast -qed - -lemma update_idem : - assumes 2: "x \ Domain \" - and 3: "\ $ x = z" - shows "\(x:=\<^sub>$ z) = \" -proof - - show ?thesis - using 2 3 sharing_refl update_triv - by fast -qed - -lemma update_apply: "(\(x :=\<^sub>$ y)) $ z = (if z shares\<^bsub>\\<^esub> x then y else \ $ z)" -proof - - have *: "(\z. if z shares\<^bsub>\\<^esub> x then Some y else fst (Rep_memory \) z, snd (Rep_memory \)) - \ {(\, R). equivp R \ (\x y. 
R x y \ \ x = \ y)}" - unfolding sharing_def - by(rule update\<^sub>_sound[simplified fun_upd_equivp_def], simp) - show ?thesis - proof (cases "z shares\<^bsub>\\<^esub> x") - case True - assume A: "z shares\<^bsub>\\<^esub> x" - show "\ (x :=\<^sub>$ y) $ z = (if z shares\<^bsub>\\<^esub> x then y else \ $ z)" - unfolding update'' lookup_def fun_upd_equivp_def - by(simp add: Abs_memory_inverse [OF *]) - next - case False - assume A: "\ z shares\<^bsub>\\<^esub> x " - show "\ (x :=\<^sub>$ y) $ z = (if z shares\<^bsub>\\<^esub> x then y else \ $ z)" - unfolding update'' lookup_def fun_upd_equivp_def - by(simp add: Abs_memory_inverse [OF *]) - qed -qed - -lemma update_share: - assumes "z shares\<^bsub>\\<^esub> x" - shows "\(x :=\<^sub>$ a) $ z = a" - using assms - by (simp only: update_apply if_True) - -lemma update_other: - assumes "\(z shares\<^bsub>\\<^esub> x)" - shows "\(x :=\<^sub>$ a) $ z = \ $ z" - using assms - by (simp only: update_apply if_False) - -lemma lookup_update_rep: - assumes 1: "(snd (Rep_memory \')) x y" - shows "(fst (Pair_upd_lifter (Rep_memory \') src dst)) x = - (fst (Pair_upd_lifter (Rep_memory \') src dst)) y" - using 1 shares_result sharing_def sharing_upd update.rep_eq - by (metis (hide_lams, no_types) ) - -lemma lookup_update_rep'': - assumes 1: "x shares\<^bsub>\\<^esub> y" - shows " (\ (src :=\<^sub>$ dst)) $ x = (\ (src :=\<^sub>$ dst)) $ y" - using 1 lookup_def lookup_update_rep sharing_def update.rep_eq - by metis - -theorem memory_ext : - assumes * : "\ x y. (x shares\<^bsub>\\<^esub> y) = (x shares\<^bsub>\'\<^esub> y)" - and ** : "Domain \ = Domain \'" - and *** : "\ x. \ $ x = \' $ x" - shows "\ = \'" -apply(subst Rep_memory_inverse[symmetric]) -apply(subst (3) Rep_memory_inverse[symmetric]) -apply(rule arg_cong[of _ _ "Abs_memory"]) -apply(auto simp:Product_Type.prod_eq_iff) -proof - - show "fst (Rep_memory \) = fst (Rep_memory \')" - apply(rule ext, insert ** ***, simp add: SharedMemory.lookup_def Domain_def) - apply (metis domIff option.expand) - done -next - show "snd (Rep_memory \) = snd (Rep_memory \')" - by(rule ext, rule ext, insert *, simp add: sharing_def) -qed - -text{* Nice connection between sharing relation, domain of the memory and content equaltiy - on the one hand and equality on the other; this proves that our memory model is fully - abstract in these three operations. *} -corollary memory_ext2: "(\ = \') = ((\ x y. (x shares\<^bsub>\\<^esub> y) = (x shares\<^bsub>\'\<^esub> y)) - \ Domain \ = Domain \' - \ (\ x. \ $ x = \' $ x))" -by(auto intro: memory_ext) - -subsection{* Rules On Sharing and Memory Transfer *} - -(*memory transfer*) - -lemma transfer_rep_inv_E: - assumes 1 : "\ \ {(\, R). equivp R \ (\x y. R x y \ \ x = \ y)}" - and 2 : "memory_inv (transfer_rep \ src dst) \ Q" - shows Q - using assms transfer_rep_sound[of \] - by (auto simp: Abs_memory_inverse) - -lemma transfer_rep_fst1: - assumes 1: "\ = fst(transfer_rep (Rep_memory \') src dst)" - shows "\x. x = dst \ \ x = (fst (Rep_memory \')) src" - using 1 unfolding transfer_rep_simp - by simp - -lemma transfer_rep_fst2: - assumes 1: "\ = fst(transfer_rep (Rep_memory \') src dst)" - shows "\x. 
x \ dst \ \ x = (fst (Rep_memory \')) (id x)" - using 1 unfolding transfer_rep_simp - by simp - -lemma lookup_transfer_rep': - "(fst (transfer_rep (Rep_memory \') src dst)) src = - (fst (transfer_rep (Rep_memory \') src dst)) dst" - using Rep_memory [of "\'"] - apply (erule_tac src= "src" and dst = "dst" in transfer_rep_inv_E) - apply (rotate_tac 1) - apply (subst (asm) surjective_pairing[of "(transfer_rep (Rep_memory \') src dst)"]) - unfolding memory_inv.simps - apply (erule conjE) - apply (erule allE)+ - apply (erule impE) - unfolding transfer_rep_simp - apply auto - using equivp_reflp snd_memory_equivp - apply metis -done - -theorem share_transfer: - "x shares\<^bsub>\(a \ b)\<^esub> y = ( (y = b \ (x = b - \ (x \ b \ x shares\<^bsub>\\<^esub> a))) \ - (y \ b \ ((x = b \ a shares\<^bsub>\\<^esub> y) - \ (x \ b \ x shares\<^bsub>\\<^esub> y))))" -unfolding sharing_def transfer_def -unfolding transfer_def map_fun_def o_def id_def -apply(subst Abs_memory_inverse[OF transfer_rep_sound2], - simp add: transfer_rep_simp) -apply (metis equivp_reflp sharing_charn2) -done - -lemma transfer_share:"a shares\<^bsub>\(a \ b)\<^esub> b" - by(simp add: share_transfer sharing_refl) - -lemma transfer_share_sym:"a shares\<^bsub>\ (b \ a)\<^esub> b" - by(simp add: share_transfer sharing_refl) - -lemma transfer_share_mono:"x shares\<^bsub>\\<^esub> y \ \(x shares\<^bsub>\\<^esub> b) \ (x shares\<^bsub>\ (a \ b)\<^esub> y)" - by(auto simp: share_transfer sharing_refl) - -lemma transfer_share_charn: - "\(x shares\<^bsub>\\<^esub> b) \ \(y shares\<^bsub>\\<^esub> b) \ x shares\<^bsub>\(a \ b)\<^esub> y = x shares\<^bsub>\\<^esub> y" - by(auto simp: share_transfer sharing_refl) - -lemma transfer_share_trans:"(a shares\<^bsub>\\<^esub> x) \ (x shares\<^bsub>\(a \ b)\<^esub> b)" - by(auto simp: share_transfer sharing_refl sharing_sym) - -lemma transfer_share_trans_sym:"(a shares\<^bsub>\\<^esub> y) \ (b shares\<^bsub>(\(a \ b))\<^esub> y)" - using transfer_share_trans sharing_sym - by fast - -lemma transfer_share_trans': "(a shares\<^bsub>(\(a \ b))\<^esub> z) \ (b shares\<^bsub>(\(a \ b))\<^esub> z)" - using transfer_share sharing_sym sharing_trans - by fast - -lemma transfer_tri : "x shares\<^bsub>\ (a \ b)\<^esub> y \ x shares\<^bsub>\\<^esub> b \ b shares\<^bsub>\\<^esub> y \ x shares\<^bsub>\\<^esub> y" -by (metis sharing_sym transfer_share_charn) - -lemma transfer_tri' : "\ x shares\<^bsub>\ (a \ b)\<^esub> y \ y shares\<^bsub>\\<^esub> b \ \ x shares\<^bsub>\\<^esub> y" -by (metis sharing_sym sharing_trans transfer_share_mono) - -lemma transfer_dest' : -assumes 1: "a shares\<^bsub>\ (a \ b)\<^esub> y" - and 2: "b \ y" - shows "a shares\<^bsub>\\<^esub> y" - using assms - by(auto simp: share_transfer sharing_refl sharing_sym) - -lemma transfer_dest : -assumes 1: "\(x shares\<^bsub>\\<^esub> a)" - and 2: "x \ b" - and 3: "x shares\<^bsub>\\<^esub> b" - shows "\(x shares\<^bsub>\ (a \ b)\<^esub> b)" - using assms - by(auto simp: share_transfer sharing_refl sharing_sym) - -lemma transfer_dest'':"x = b \ y shares\<^bsub>\\<^esub> a \ x shares\<^bsub>\(a \ b)\<^esub> y" -by (metis sharing_sym transfer_share_trans_sym) - -thm share_transfer (* the universal catch-all *) - transfer_share - transfer_share_sym - sharing_sym [THEN transfer_share_trans] - (* transfer_share_trans *) - sharing_sym [THEN transfer_share_trans_sym] - (* transfer_share_trans_sym *) - transfer_share_trans' - transfer_dest'' - transfer_dest' - transfer_tri' - transfer_share_mono - transfer_tri - transfer_share_charn - transfer_dest - 
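
(Editorial aside, not part of this patch or of the Isabelle sources.) The catalogue of transfer/sharing lemmas listed just above compresses a lot of operational intuition: update writes through an entire sharing class, transfer re-routes a single target address into the class of the source, and lookup is invariant under sharing. The following small Python sketch models that intended semantics of a memory as a partial map together with a sharing equivalence generated from the identity relation by transfer. Every name in it (Memory, route, store, lookup, update, transfer, shares) is an illustrative invention chosen to echo the Isabelle operations, not an identifier from the theory.

# Illustrative executable model of the shared-memory semantics above.
# Assumption: the sharing relation is only the one generated from the
# identity relation by successive transfers, as with init and (_ ~> _).
class Memory:
    def __init__(self):
        self.route = {}   # cumulative address redirection; identity by default
        self.store = {}   # values, keyed by routed addresses (partial map)

    def _r(self, x):
        # routed ("canonical") address of x
        return self.route.get(x, x)

    def lookup(self, x):                 # models  sigma $ x  (None = undefined)
        return self.store.get(self._r(x))

    def update(self, x, v):              # models  sigma (x :=$ v)
        # writes through the whole sharing class of x (cf. fun_upd_equivp)
        self.store[self._r(x)] = v

    def transfer(self, src, dst):        # models  sigma (src ~> dst)
        # dst joins src's class; dst's old sharing links are carved out
        self.route[dst] = self._r(src)

    def shares(self, x, y):              # models  x shares_sigma y
        return self._r(x) == self._r(y)

# A few sanity checks mirroring the lemmas listed above:
m = Memory()
m.update(("t1", 0), 7)
m.transfer(("t1", 0), ("t2", 5))
assert m.shares(("t1", 0), ("t2", 5))        # transfer_share
assert m.lookup(("t2", 5)) == 7              # transfer_share_lookup2
m.update(("t2", 5), 9)                       # write through the shared class
assert m.lookup(("t1", 0)) == 9              # update_share / shares_result
assert m.lookup(("t9", 9)) is None           # address outside the Domain

Keying the store by a routed address makes update affect an entire sharing class at once, which is the behaviour expressed by update_share and shares_result, while transfer only re-routes the single target address, mirroring how transfer_rep composes the memory map with id(dst := src). The richer operations of the theory (installing an arbitrary equivalence with :=R, reset, Frame) are deliberately not covered by this sketch.
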
-subsection{* Properties on Memory Transfer and Lookup *} - -lemma transfer_share_lookup1: "(\(x \ y)) $ x = \ $ x" - using lookup_transfer_rep' transfer_rep_fst1 - unfolding lookup_def transfer.rep_eq - by metis - -lemma transfer_share_lookup2: - "(\(x \ y)) $ y = \ $ x" - using transfer_rep_fst1 - unfolding transfer.rep_eq lookup_def - by metis - -lemma add\<^sub>e_not_share_lookup: - assumes 1: "\(x shares\<^bsub>\\<^esub> z)" - and 2: "\(y shares\<^bsub>\\<^esub> z)" - shows "\ (x \ y) $ z = \ $ z" - using assms - unfolding sharing_def lookup_def transfer.rep_eq - using id_def sharing_def sharing_refl transfer_rep_fst2 - by metis - -lemma transfer_share_dom: - assumes 1: "z \ Domain \" - and 2: "\(y shares\<^bsub>\\<^esub> z)" - shows "(\(x \ y)) $ z = \ $ z" - using assms - unfolding Domain_def sharing_def lookup_def - using 2 transfer.rep_eq id_apply sharing_refl transfer_rep_fst2 - by metis - -lemma shares_result': - assumes 1: "(x shares\<^bsub>\\<^esub> y)" - shows " \ $ x = \ $ y" - using assms lookup_def shares_result - by metis - -lemma transfer_share_cancel1: - assumes 1: "(x shares\<^bsub>\\<^esub> z)" - shows "(\(x \ y)) $ z = \ $ x" - using 1 transfer.rep_eq transfer_share_trans lookup_def - transfer_rep_fst1 shares_result - by (metis) - -subsection{* Test on Sharing and Transfer via smt ... *} - -(*test to see the needed lemmas by smt*) -lemma "\x y. x \ y \ \(x shares\<^bsub>\\<^esub> y) \ - \ $ x > \ $ y \ \(3 \ (4::nat))= \' \ - \'' = (\'(3 :=\<^sub>$ ((\' $ 4) + 2))) \ - x \ 3 \ x \ 4 \ y \ 3 \ y \ 4 \ \'' $ x > \'' $ y" -by (smt add\<^sub>e_not_share_lookup share_transfer update_apply) - -subsection{* Instrumentation of the smt Solver*} - -lemma transfer_share_charn_smt : - "\(i shares\<^bsub>\\<^esub> k') \ - \(k shares\<^bsub>\\<^esub> k') \ - i shares\<^bsub>\(i' \ k')\<^esub> k = i shares\<^bsub>\\<^esub> k" - using transfer_share_charn - by fast - -lemma add\<^sub>e_not_share_lookup_smt: - "\(x shares\<^bsub>\\<^esub> z)\ \(y shares\<^bsub>\\<^esub> z)\ (\ (x \ y) $ z) = (\ $ z)" - using add\<^sub>e_not_share_lookup - by auto - -lemma transfer_share_dom_smt: - "z \ Domain \ \ \(y shares\<^bsub>\\<^esub> z)\ (\(x \ y)) $ z = \ $ z" - using transfer_share_dom - by auto - -lemma transfer_share_cancel1_smt: - "(x shares\<^bsub>\\<^esub> z)\ (\(x \ y)) $ z = \ $ x" - using transfer_share_cancel1 - by auto - -lemma lookup_update_rep''_smt: - "x shares\<^bsub>\\<^esub> y\(\ (src :=\<^sub>$ dst)) $ x = (\ (src :=\<^sub>$ dst)) $ y" - using lookup_update_rep'' - by auto - -theorem update_commute_smt: - "\ (x shares\<^bsub>\\<^esub> x') \ ((\(x :=\<^sub>$ y))(x' :=\<^sub>$ z)) = (\(x':=\<^sub>$ z)(x :=\<^sub>$ y))" - using update_commute - by auto - -theorem update_cancel_smt: - "(x shares\<^bsub>\\<^esub> x')\ (\(x :=\<^sub>$ y)(x' :=\<^sub>$ z)) = (\(x' :=\<^sub>$ z))" - using update_cancel - by auto - -lemma update_other_smt: - "\(z shares\<^bsub>\\<^esub> x)\ (\(x :=\<^sub>$ a) $ z) = \ $ z" - using update_other - by auto - -lemma update_share_smt: - "(z shares\<^bsub>\\<^esub> x) \ (\(x :=\<^sub>$ a) $ z) = a" - using update_share - by auto - -lemma update_idem_smt : - "(x shares\<^bsub>\\<^esub> y)\ x \ Domain \ \ (\ $ x = z) \ (\(x:=\<^sub>$ z)) = \" - using update_idem - by fast - -lemma update_triv_smt: - "(x shares\<^bsub>\\<^esub> y) \ y \ Domain \ \ (\ (x :=\<^sub>$ (\ $ y))) = \" - using update_triv - by auto - -lemma shares_result_smt: - "x shares\<^bsub>\\<^esub> y\ \ $ x = \ $ y" - using shares_result' - by fast - -lemma shares_dom_smt : - "x 
shares\<^bsub>\\<^esub> y \ (x \ Domain \) = (y \ Domain \)" - using shares_dom by fast - -lemma sharing_sym_smt : - "x shares\<^bsub>\\<^esub> y\y shares\<^bsub>\\<^esub> x" - using sharing_sym - by auto - -lemma sharing_trans_smt: - "x shares\<^bsub>\\<^esub> y \ y shares\<^bsub>\\<^esub> z \ x shares\<^bsub>\\<^esub> z" - using sharing_trans - by auto - -lemma nat_0_le_smt: "0 \ z \ int (nat z) = z" - by transfer clarsimp - -lemma nat_le_0_smt: "0 > z \ int (nat z) = 0" - by transfer clarsimp - -lemma transfer_share_trans_smt: - "(x shares\<^bsub>\\<^esub> z) \(z shares\<^bsub>\(x \ y)\<^esub> y)" - using transfer_share_trans - by fast - -lemma transfer_share_mono_smt: - "(x shares\<^bsub>\\<^esub> y)\ \(x shares\<^bsub>\\<^esub> y')\ (x shares\<^bsub>\ (x' \ y')\<^esub> y)" - using transfer_share_mono - by fast - -lemma transfer_share_trans'_smt: - "(x shares\<^bsub>(\(x \ y))\<^esub> z)\(y shares\<^bsub>(\(x \ y))\<^esub> z) " - using transfer_share_trans' - by fast - -lemma transfer_share_old_new_trans_smt: - "(x shares\<^bsub>\\<^esub> z)\(y shares\<^bsub>(\(x \ y))\<^esub> z) " - using transfer_share_trans_sym - by fast - -lemma transfer_share_old_new_trans1_smt: - "a shares\<^bsub>\\<^esub> b \ a shares\<^bsub>\\<^esub> c \ - (c shares\<^bsub>(\ (a \ d))\<^esub> b ) " - using transfer_share_trans_smt sharing_sym_smt sharing_trans_smt - by metis - -lemma Domain_mono_smt: - "x \ Domain \ \ (x shares\<^bsub>\\<^esub> y)\y \ Domain \" - using Domain_mono - by fast - -lemma sharing_upd_smt: "x shares\<^bsub>(\(a :=\<^sub>$ b))\<^esub> y = x shares\<^bsub>\\<^esub> y" - using sharing_upd - by fast - -lemma sharing_init_mem_list_smt : - "i \ k \ \(i shares\<^bsub>init_mem_list S\<^esub> k)" - using sharing_init_mem_list - by fast - -lemma mem1_smt: - "(\(a\b) $ a) = (\(a\b) $ b)" - using transfer_share_lookup1 transfer_share_lookup2 - by metis - -lemmas sharing_smt = sharing_refl transfer_share - sharing_commute nat_le_0_smt - nat_0_le_smt sharing_sym_smt - transfer_share_lookup1 transfer_share_lookup2 - sharing_init_mem_list_smt sharing_upd_smt - shares_result_smt transfer_share_old_new_trans_smt - transfer_share_trans_smt mem1_smt - update_share_smt shares_dom_smt - Domain_mono_smt sharing_trans_smt - transfer_share_cancel1_smt transfer_share_trans'_smt - update_apply update_other_smt - update_cancel_smt transfer_share_old_new_trans1_smt - lookup_update_rep''_smt update_triv_smt - transfer_share_mono_smt update_commute_smt - transfer_share_dom_smt add\<^sub>e_not_share_lookup_smt - update_idem_smt transfer_share_charn_smt -(* @Chantal : if you want, you could add a generic smt config here ... *) - -subsection {*Tools for the initialization of the memory*} - -definition update_memory\<^sub>i\<^sub>n\<^sub>i\<^sub>t :: "'address list \ 'value list \ ('address, 'value)memory" -where "update_memory\<^sub>i\<^sub>n\<^sub>i\<^sub>t ADD VAL = - (foldl (\ m (x, y). (m (x:=\<^sub>$y))) init (zip ADD VAL))" - - -definition share_memory\<^sub>i\<^sub>n\<^sub>i\<^sub>t :: "'address list \ 'address list \ - ('address, 'value)memory \('address,'value)memory" -where "share_memory\<^sub>i\<^sub>n\<^sub>i\<^sub>t SRC DST m = - (foldl (\m (x, y). (m (x\y))) m (zip SRC DST))" - -definition memory\<^sub>i\<^sub>n\<^sub>i\<^sub>t :: "'address list \ 'value list \ 'address list \ - ('address,'value)memory" -where "memory\<^sub>i\<^sub>n\<^sub>i\<^sub>t SRC VAL DST = - foldl (\ m (SRC, DST). 
share_memory\<^sub>i\<^sub>n\<^sub>i\<^sub>t SRC DST m) - (update_memory\<^sub>i\<^sub>n\<^sub>i\<^sub>t SRC VAL) [(SRC, DST)]" - -lemmas sharing_refl_smt = sharing_refl (* legacy *) - - -subsection{* An Intrastructure for Global Memory Spaces *} -text{* Memory spaces are common concepts in Operating System (OS) design since it is - a major objective of OS kernels to separate logical, linear memory spaces - belonging to different processes (or in other terminologies such as PiKeOS: tasks) - from each other. We achieve this goal by modeling the adresses of memory spaces - as a \emph{pair} of a subject (e.g. process or task, denominated by a process-id or - task-id) and a location (a conventional adress). *} -text{* - Our model is still generic - we do not impose a particular type for subjects - or locations (which could be modeled in a concrete context by an enumeration type as well as - integers of bitvector representations); for the latter, however, we require that - they are instances of the type class @{typ "'\::comm_semiring_1"} assuring that there - is a minimum of infrastructure for address calculation: there must exist a - @{term 0}-element, a distinct @{term 1}-element and an addition operation with - the usual properties. -*} - -fun init\<^sub>g\<^sub>l\<^sub>o\<^sub>b\<^sub>a\<^sub>l\<^sub>m\<^sub>e\<^sub>m :: "(('sub\'loc::comm_semiring_1), '\) memory - \ ('sub\'loc) \ '\ list - \ (('sub\'loc), '\) memory" ("_ |> _ <| _" [60,60,60] 70) -where "\ |> start <| [] = \" - | "\ |> (sub,loc) <| (a # S) = ((\((sub,loc):=\<^sub>$ a)) |> (sub, loc+1)<| S)" - -lemma Domain_mem_init_Nil : "Domain(\ |> start <| []) = Domain \" -by simp - -subsubsection{* Example *} - -type_synonym task_id = int -type_synonym loc = int - -type_synonym global_mem = "((task_id\loc), int)memory" - -definition \\<^sub>0 :: "global_mem" -where "\\<^sub>0 \ init |> (0,0) <| [0,0,0,0] - |> (2,0) <| [0,0] - |> (4,0) <| [2,0]" - -(* why does this not work ? -value "(\\<^sub>0 ((4, 0)\(2, 1))) $ (4, 0)" -*) - -lemma \\<^sub>0_Domain: "Domain \\<^sub>0 = {(4, 1), (4, 0), (2, 1), (2, 0), (0, 3), (0, 2), (0, 1), (0, 0)}" -unfolding \\<^sub>0_def -by(simp add: sharing_upd) - -subsection{* Memory Transfer Based on Sharing Closure (Experimental) *} - -text{* One might have a fundamentally different understanding on memory transfer --- at least as -far as the sharing relation is concerned. The prior definition of sharing is based on the idea that -the overridden part is ``carved out'' of the prior equivalence. Instead of transforming the -equivalence relation, one might think of transfer as an operation where the to be shared memory is -synchronized and then the equivalence relation closed via reflexive-transitive closure. *} - -definition transfer' :: "('a,'b)memory \ 'a \ 'a \ ('a, 'b)memory" ("_ '(_ \\\ _')" [0,111,111]110) -where "\(i \\\ k) = - (\(i :=\<^sub>$ (\ $ k)) :=\<^sub>R (rtranclp(\x y. ($\<^sub>R \) x y \ (x=i \ y = k) \ (x=k \ y = i))))" - - -lemma transfer'_rep_sound: - "(fst(Rep_memory (\(i:=\<^sub>$(\ $ k)))),(\xa ya. ($\<^sub>R \) xa ya \ xa = x \ ya = y \ xa = y \ ya = x)\<^sup>*\<^sup>*) - \ - {(\, R). equivp R \ (\x y. R x y \ \ x = \ y)}" -unfolding update_def -proof(auto) - let ?R' = "((\xa ya. 
($\<^sub>R \) xa ya \ xa = x \ ya = y \ xa = y \ ya = x)\<^sup>*\<^sup>*)" - have E : "equivp ($\<^sub>R \)" unfolding lookup\<^sub>R_def by (metis snd_memory_equivp) - have fact1 : "symp ?R'" - unfolding symp_def - apply (auto) - apply (erule Transitive_Closure.rtranclp_induct,auto) - apply (drule E[THEN equivp_symp]) - by (metis (lifting, full_types) converse_rtranclp_into_rtranclp)+ - have fact2 : "transp ?R'" - unfolding transp_def - by (metis (lifting, no_types) rtranclp_trans) - have fact3 : "reflp ?R'" - unfolding reflp_def - by (metis (lifting) rtranclp.rtrancl_refl) - show "equivp (\xa ya. ($\<^sub>R \) xa ya \ xa = x \ ya = y \ xa = y \ ya = x)\<^sup>*\<^sup>*" - using fact1 fact2 fact3 equivpI by auto -next - fix xa ya - assume H : "(\xa ya. ($\<^sub>R \) xa ya \ xa = x \ ya = y \ xa = y \ ya = x)\<^sup>*\<^sup>* xa ya" - have * : "(fun_upd_equivp (snd (Rep_memory \)) (fst (Rep_memory \)) i (Some (\ $ k)), - snd (Rep_memory \)) - \ {(\, R). equivp R \ (\x y. R x y \ \ x = \ y)}" oops -(* - show "fst (Rep_memory (Abs_memory (Pair_upd_lifter (Rep_memory \) i (\ $ k)))) xa = - fst (Rep_memory (Abs_memory (Pair_upd_lifter (Rep_memory \) i (\ $ k)))) ya" - apply(subst surjective_pairing[of "(Rep_memory \)"]) - apply(subst Pair_upd_lifter.simps) - apply(subst (4)surjective_pairing[of "(Rep_memory \)"]) - apply(subst Pair_upd_lifter.simps) - apply(auto simp: Abs_memory_inverse[OF *]) - apply(simp add: SharedMemory.lookup_def) - apply(insert H, simp add: SharedMemory.lookup\<^sub>R_def) -oops -*) - -subsection{* Framing Conditions on Shared Memories (Experimental)*} - -text{* The Frame of an action --- or a monadic operation --- is the smallest possible subset of the -domain of a memory, in which the action has effect, i.e. it modifies only locations -in this set.*} - - -(* Experimental. Known problem: should run over all memory-maps, - but only one fixed sharing relation R, in which also the - equivs of x in R were collected... Frame\<^bsub>R\<^esub> A ? Fibered Framing ?*) -definition Frame :: "(('\, '\)memory \ ('\, '\)memory) \ '\ set" -where "Frame A \ Least(\X. \ \. (\(reset X)) = ((A \)(reset X)))" - -(* hard. *) -lemma Frame_update : "Frame (\\. \(x :=\<^sub>$ y)) = {x}" -oops - -(* hard *) -lemma Frame_compose : "Frame (A o B) \ Frame A \ Frame B" -oops - - -notation transfer ("add\<^sub>e") (* legacy *) -lemmas add\<^sub>e_def = transfer_def (* legacy *) -lemmas add\<^sub>e_rep_eq = transfer.rep_eq (* legacy, was add\<^sub>e.rep_eq *) -lemmas transfer_share_old_new_trans = transfer_share_trans_sym (* legacy *) -lemmas sharing_commute_smt = sharing_commute (*legacy *) -lemmas update_apply_smt = update_apply (* legacy *) -lemmas transfer_share_lookup2_smt = transfer_share_lookup2 (* legacy *) -lemmas transfer_share_lookup1_smt = transfer_share_lookup1 (* legacy *) -lemmas transfer_share_smt = SharedMemory.transfer_share (* legacy *) - - -end diff --git a/src/test-gen/src/main/Term_Tactics.thy b/src/test-gen/src/main/Term_Tactics.thy deleted file mode 100644 index fcd632f..0000000 --- a/src/test-gen/src/main/Term_Tactics.thy +++ /dev/null @@ -1,334 +0,0 @@ - -(***************************************************************************** - * HOL-OCL --- an interactive theorem-prover for for UML/OCL - * http://www.brucker.ch/projects/hol-ocl/ - * - * isabelle2009_kernel_patch.ML --- Isabelle kernel extensions - * This file is part of HOL-OCL. - * - * Copyright (c) 2003-2007 ETH Zurich, Switzerland - * 2008-2009 Achim D. Brucker, Germany - * - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ******************************************************************************) -(* $Id: isabelle2009_kernel_patch.ML 9289 2012-01-30 18:22:21Z krieger $ *) - - -theory Term_Tactics -imports Main -begin - - -(* Code for Isabelle2004/5-Kernel. Should go to Tactic - structure. *) -(* (up to make_elim_preserve, which is already there ... *) -(* >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> *) - -(* -Like lift_inst_rule but takes terms, not strings, where the terms may contain -Free variables referring to parameters of the subgoal (following the -conventions of the string-based version). - -insts: [...,(vj,tj),...] - -Both vj and tj must be type correct and have the same types as in the -string-based version (i.e. have the types *before lifting* over the -context of subgoal i. In particular, tj may not contain loose bound -variables. In order to use lift_inst_rule with subterms of the subgoal, -these have to be substituted by free variables before. - -NB: the types in insts must be correctly instantiated already, - i.e. Tinsts is not applied to insts. - - -An Example in HOL: -================== - -We assume st = [| [] @ y = y @ []; - !!a list. list @ y = y @ list ==> (a # list) @ y = y @ a # list |] - ==> (x @ y = y @ x)" : Thm.thm, -i = 2, rule = sym and a standard test-based substitution -sinsts = [("t","(a # list) @ y")]. - -Then standard lift_inst_rule (st, i, sinsts, rule) yields: - - "(!!a list. list @ y = y @ list ==> ?s1 a list = (a # list) @ y) - ==> (!!a list. list @ y = y @ list ==> (a # list) @ y = ?s1 a list)" - -i.e. a lifted version of 'sym'. - -Internally, the variables were set to: - - val params = [("a", "'a"), ("list", "'a List.list")]; - val inc = 1; - val used = ["'a"]:: string list; - val Tinsts = [(("'a", 0), "'a list")] : (Term.indexname * Thm.ctyp) list; - val insts = [("?t", "(a # list) @ y")] : (Thm.cterm * Thm.cterm) list; - -in this case. 
- -Now we emulate the effect of "lift_inst_rule" by "term_lift_inst_rule", -we simply have to convert the substitutions: - - val Tinsts'= map (fn(x,y) => (x,#T(rep_ctyp y))) Tinsts; - (*val Tinst' = [(("'a", 0), "'a List.list")]:(Term.indexname*Term.typ)list*) - val insts' = map (fn(x,y)=>(dest_Var(term_of x), term_of y)) insts; - (*[((("t", 0), "'a List.list"), - Const ("List.op @", "['a List.list, 'a List.list] => 'a List.list") - $(Const("List.list.Cons","['a, 'a List.list] => 'a List.list") $ - Free ("a", "'a") $ Free ("list", "'a List.list")) $ - Free ("y", "'a List.list"))] - :((Term.indexname * Term.typ) * Term.term) list *) - -Thus, we get: - - lift_inst_rule (st, i, sinsts, rule) - = term_lift_inst_rule (st, i, Tinsts', insts', rule) - - -where (Tinsts', insts') = read_insts_in_state (st, i, sinsts, rule). -This explains the connection between string- and term-based -versions. - -Unfortunately, the term_lift_inst_rule exported from the -structure Tactics (in Isabelle/src/Pure/tactic.ML) -DOES NOT satisfy the desired equality - in subtle special -cases related to parameters of a subgoal in st, it behaves -differently. Therefore, a re-implementation based on -lift_inst_rule-code is done here. - -On top of this, the definition of term-based substitution -tactic variants for res_inst_tac, eres_inst_tac, dres_inst_tac is -straightforward. - -COULD BE REALIZED BY MORE GENERAL VERSION OF gen_compose_inst_tac, TOO. - -*) -ML{* -signature TERM_TACTICS = -sig -val params_of_state : thm -> int -> (string * typ) list -(* -val read_insts_in_state : thm * int * (indexname * string) list * thm - -> (ctyp * ctyp) list * (cterm * cterm) list -*) -val term_lift_inst_rule : Proof.context - -> thm * int * (ctyp * ctyp) list * (cterm * cterm) list * thm -> thm -val compose_terminst_tac: Proof.context - -> (ctyp * ctyp) list - -> (cterm * cterm) list -> bool * thm * int -> int -> tactic -val res_terminst_tac : Proof.context - -> (ctyp * ctyp) list - -> (cterm * cterm) list -> thm -> int -> tactic -val eres_terminst_tac : Proof.context - -> (ctyp * ctyp) list - -> (cterm * cterm) list -> thm -> int -> tactic -val make_elim_preserve : Proof.context -> thm -> thm -val cut_terminst_tac : Proof.context - ->(ctyp * ctyp) list - -> (cterm * cterm) list -> thm -> int -> tactic -val forw_terminst_tac : Proof.context - ->(ctyp * ctyp) list - -> (cterm * cterm) list -> thm -> int -> tactic -val dres_terminst_tac : Proof.context - -> (ctyp * ctyp) list - -> (cterm * cterm) list -> thm -> int -> tactic -(* -val convert_tinsts : ((indexname * sort) * typ) list -> theory -> (ctyp * ctyp) list -val convert_substs : ((indexname * typ) * term) list -> theory -> (cterm * cterm) list -*) -val subgoal_terminst_tac: Proof.context - -> (ctyp * ctyp) list - -> term -> int -> tactic -end; - -*} - - -ML{* -structure Term_Tactics : TERM_TACTICS = -struct - -open Thm; -(* copied code from Isabelle/src/Pure/tactic.ML, - essentially for debugging purposes ... 
(version 2005) - >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> - *) - -(*Determine print names of goal parameters (reversed)*) -fun innermost_params i st = (fn goal => - Term.rename_wrt_term goal (Logic.strip_params goal)) (Logic.get_goal (Thm.prop_of st) i); -(*params of subgoal i as they are printed*) -fun params_of_state st i = rev (innermost_params i st); -fun cterm_fun f ct = Thm.global_cterm_of (Thm.theory_of_cterm ct) (f (Thm.term_of ct)); -(********* - -(*read instantiations with respect to subgoal i of proof state st*) - fun read_insts_in_state (st, i, sinsts, rule) = - let val thy = Thm.theory_of_thm st - and params = params_of_state st i - and rts = Drule.types_sorts rule and (types,sorts) = Drule.types_sorts st - fun types'(a, ~1) = (case AList.lookup (op =) params a of NONE => types (a, ~1) | sm => sm) - | types' ixn = types ixn; - val used = Drule.add_used rule (Drule.add_used st []); - in read_insts thy rts (types',sorts) used sinsts end; - -*************) - - - -(* copied code from Isabelle/src/Pure/tactic.ML, - but modified. (deletion of its first line - and expansion of the parameters) ... (version 2005) - >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> - *) - -fun term_lift_inst_rule ctxt (st, i, Tinsts, insts, rule) = -let (*val {maxidx,...} = rep_thm st*) - val maxidx = Thm.maxidx_of st - and params = params_of_state st i - val paramTs = map #2 params - and inc = maxidx+1 - fun ctyp_fun f cT = Thm.ctyp_of ctxt (f (Thm.typ_of cT)); - fun liftvar (Var ((a,j), T)) = Var((a, j+inc), paramTs---> Logic.incr_tvar inc T) - | liftvar t = raise TERM("Variable expected", [t]); - fun liftterm t - = fold_rev absfree params (Logic.incr_indexes([],paramTs,inc) t) - (*Lifts instantiation pair over params*) - (*fun liftpair (cv,ct) = (cterm_fun liftvar cv, cterm_fun liftterm ct)*) - - val to_var_index = (fn Var(s,t) => (s,t)) o Thm.term_of - val to_tvar_index = (fn TVar(s,t) => (s,t)) o Thm.typ_of - - fun liftpair (cv,ct) = ((to_var_index o (cterm_fun liftvar)) cv, - cterm_fun liftterm ct) - fun lifttvar (c,tt) = ((to_tvar_index o ctyp_fun (Logic.incr_tvar inc)) c, - ctyp_fun (Logic.incr_tvar inc) tt) -in Drule.instantiate_normalize (map lifttvar Tinsts, map liftpair insts) - (Thm.lift_rule (Thm.cprem_of st i) rule) -end; - - -(* copied code from Isabelle/src/Pure/tactic.ML, (gen_compose_inst_tac) - but modified. (definition unfolding, exchange of lifting function, - adoption of parameters) ... (version 2005) - >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> - *) - -fun compose_terminst_tac ctxt Tinsts insts (bires_flg, rule, nsubgoal) i st = - if i > nprems_of st then no_tac st - else st |> - (compose_tac ctxt - (bires_flg, term_lift_inst_rule ctxt (st, i, Tinsts, insts, rule), nsubgoal) - i - handle TERM (msg,_) => (warning msg; no_tac) - | THM (msg,_,_) => (warning msg; no_tac)); - - -(*"Resolve" version. Note: res_inst_tac cannot behave sensibly if the - terms that are substituted contain (term or type) unknowns from the - goal, because it is unable to instantiate goal unknowns at the same time. - - The type checker is instructed not to freeze flexible type vars that - were introduced during type inference and still remain in the term at the - end. This increases flexibility but can introduce schematic type vars in - goals. -*) - -(* copied code from Isabelle/src/Pure/tactic.ML, (res_inst_tac etc.) - but modified. (definition unfolding, exchange of lifting function, - adoption of parameters) ... 
(version 2005) - >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> - *) - -fun res_terminst_tac ctxt Tinsts insts rule i = - compose_terminst_tac ctxt Tinsts insts (false, rule, nprems_of rule) i; - -(*eresolve elimination version*) -fun eres_terminst_tac ctxt Tinsts insts rule i = - compose_terminst_tac ctxt Tinsts insts (true, rule, nprems_of rule) i; - - -(*For forw_inst_tac and dres_inst_tac. Preserve Var indexes of rl; - increment revcut_rl instead. *) -(* COPIED FROM TACTIC STRUCTURE. SUPERFLUOUS HERE IF IT WOULD BE EXPORTED !!! *) - -fun make_elim_preserve rl = Rule_Insts.make_elim_preserve rl - -(* -fun make_elim_preserve rl = - let val {maxidx,...} = rep_thm rl - val thy = Thm.theory_of_thm rl - fun cvar ixn = cterm_of (thy) (Var(ixn,propT)); - val revcut_rl' = - Drule.instantiate_normalize ([], [(cvar("V",0), cvar("V",maxidx+1)), - (cvar("W",0), cvar("W",maxidx+1))]) revcut_rl - val arg = (false, rl, nprems_of rl) - val [th] = Seq.list_of (Thm.bicompose false arg 1 revcut_rl') - in th end - handle Bind => raise THM("make_elim_preserve", 1, [rl]); -*) - -(*instantiate and cut -- for a FACT, anyway...*) -fun cut_terminst_tac ctxt Tinsts insts rule = res_terminst_tac ctxt Tinsts insts (make_elim_preserve ctxt rule); - -(*forward tactic applies a RULE to an assumption without deleting it*) -fun forw_terminst_tac ctxt Tinsts insts rule = cut_terminst_tac ctxt Tinsts insts rule THEN' assume_tac ctxt; - -(*dresolve tactic applies a RULE to replace an assumption*) -fun dres_terminst_tac ctxt Tinsts insts rule = eres_terminst_tac ctxt Tinsts insts (make_elim_preserve ctxt rule); - -(* conversions to handle deprecated versions of this module : - >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> *) - -fun convert_tinsts Tinsts thy = map (fn(x,y) => (Thm.ctyp_of thy (TVar x), Thm.ctyp_of thy y)) Tinsts; -fun convert_substs Subst thy = map (fn(x,y) => (Thm.cterm_of thy (Var x), Thm.cterm_of thy y)) Subst; - -(* Of course, some code duplication can be avoided by introducing - higher-order variants. *) - - -fun subgoal_terminst_tac ctxt insts sprop goal st = - (DETERM o (res_terminst_tac ctxt insts) - (convert_substs [((("psi",0),propT), sprop)] ctxt) cut_rl THEN' - SUBGOAL (fn (prop, _) => - let val concl' = Logic.strip_assums_concl prop in - if null (Term.add_tvars concl' []) then () - else warning"Type variables in new subgoal: add a type constraint?"; - all_tac - end)) goal st; -end; - -*} - - - -ML{* Term_Tactics.subgoal_terminst_tac *} -end diff --git a/src/test-gen/src/main/clocks.ML b/src/test-gen/src/main/clocks.ML deleted file mode 100644 index 0c506cd..0000000 --- a/src/test-gen/src/main/clocks.ML +++ /dev/null @@ -1,352 +0,0 @@ - -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * clocks.ML --- time measurements - * This file is part of HOL-TestGen. - * - * Copyright (c) 2010 University Paris-Sud, France - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ******************************************************************************) - -signature CLOCKENV = -sig - type clockenv - - val mt_clockenv : clockenv - val merge_clockenv : clockenv * clockenv -> clockenv - - val text_stats : clockenv -> string - val latex_stats : clockenv -> string - - val start_clock : string -> clockenv -> clockenv - val stop_clock : string -> clockenv -> clockenv - val next_clock : clockenv -> clockenv - - val rename_clock : string -> string -> clockenv -> clockenv -end - -structure ClockEnv : CLOCKENV = -struct - -structure IDtab = - Table(type key = (string * int) list val ord = list_ord (prod_ord fast_string_ord int_ord)); - -val next_time_id = Unsynchronized.ref 0 - -fun add_time tab key time = let - val fresh_id = !next_time_id - val _ = next_time_id := !next_time_id + 1 -in - IDtab.map_default (key, Inttab.empty) (fn stats => Inttab.update_new (fresh_id, time) stats) tab -end - -fun sum_times tab = let - fun fold_tab stats = Inttab.fold (fn (_,time) => fn sum => time + sum) stats Time.zeroTime - val tab' = IDtab.map (K fold_tab) tab -in - IDtab.dest tab' -end - -type clockinfo = { - timer_stack : Timer.real_timer list, - id_stack : (string * int) list, - timetab : Time.time Inttab.table IDtab.table, - error_occurred : bool -} - -datatype clockenv = Clockenv of clockinfo - -fun rep_clockenv (Clockenv X) = X; -fun mk_clockenv (X:clockinfo)= Clockenv X - -val mt_clockenv = Clockenv{timer_stack = [], - id_stack = [], - timetab = IDtab.empty, - error_occurred = false}; - -fun merge_clockenv - (Clockenv{timer_stack = ts, - id_stack = ids, - timetab = tt, - error_occurred = e}, - Clockenv{timer_stack = ts', - id_stack = ids', - timetab = tt', - error_occurred = e'}) = let - - fun merge_stats tab1 tab2 = Inttab.join (fn time_id => fn (x, y) => x) (tab1, tab2) - (* here we always have x = y for the same time_id *) - -in - Clockenv{timer_stack = [], - id_stack = [], - timetab = IDtab.join (fn key => fn (x, y) => merge_stats x y) (tt, tt'), - error_occurred = e orelse e'} -end - -fun clean str = String.translate (fn #" " => "_" | ch => String.str ch) str - -fun string_of_id (name,n) = - if n = 0 then - clean name - else - (clean name) ^ "_" ^ (Int.toString n) - -fun text_stats' (Clockenv{timetab = tt,...}) = let - fun string_of_ids ids = String.concatWith 
"/" (map string_of_id (rev ids)) - val maxlen = List.foldl Int.max 0 (map (size o string_of_ids) (IDtab.keys tt)) - val string_of_ids' = (StringCvt.padRight #" " maxlen) o string_of_ids - fun string_of_entry (ids, time) = "Total time spent in " ^ (string_of_ids' ids) ^": " ^ (Time.toString time) -in - String.concatWith "\n" (map string_of_entry (sum_times tt)) -end - -fun latex_tab entries = let - fun string_of_ids ids = "\\protect\\path{" ^ (String.concatWith "/" (map string_of_id (rev ids))) ^ "}" - fun string_of_entry (ids, time) = (string_of_ids ids) ^ " & " ^ (Time.toString time) ^ "\\\\ \n" - val inner = String.concat (map string_of_entry entries) - val headers = "Location & Time\\\\ \\hline\n" -in - "\\begin{tabular}{l|r}\n" ^ headers ^ inner ^ "\\end{tabular}\n" -end - -fun latex_stats' (Clockenv{timetab = tt,...}) = let - val entries = sum_times tt - fun toplevel_name entry = (fst o List.last o fst) entry - val toplevel_names = distinct (op =) (map toplevel_name entries) - fun has_name name entry = (toplevel_name entry) = name - val sorted_entries = map (fn name => (name, filter (has_name name) entries)) toplevel_names - fun latex_unit (name, entries) = "\\begin{table}\n\\centering"^ (latex_tab entries) - ^"\\caption{Time consumed by \\protect\\path{" ^ (clean name) ^ "}" - ^"\\label{tab:" ^ (clean name) ^ "}}\n\\end{table}\n%%%\n" - -in - (String.concatWith "\n%%%\n" (map latex_unit sorted_entries))^"\n" -end - -fun check_error f clockenv = - if #error_occurred (rep_clockenv clockenv) then - "An error occurred during profiling." - else - f clockenv - -val text_stats = check_error text_stats' -val latex_stats = check_error latex_stats' - -fun start_id (Clockenv{timer_stack = ts, id_stack = ids, timetab= tt, error_occurred = e}) newid = - Clockenv{timer_stack = (Timer.startRealTimer ()) :: ts, - id_stack = newid :: ids, - timetab = tt, - error_occurred = e}; - -fun stop_id (Clockenv{timer_stack = ts, id_stack = ids, timetab= tt, error_occurred = e}) = let - val (timer::timers) = ts - val elapsed = Timer.checkRealTimer timer - val (id::remaining_ids) = ids -in - Clockenv{timer_stack = timers, - id_stack = remaining_ids, - timetab = add_time tt ids elapsed, - error_occurred = e} -end - -fun rewrite_list find replace x = let - val r = rev x - val prefix = List.take (r, Int.min(length find, length r)) -in - if prefix = find then - rev (replace @ (List.drop (r, length find))) - else - x -end - -fun rename_id str (Clockenv{timer_stack = ts, id_stack = ids, timetab= tt, error_occurred = e}) = let - val ((name, n)::remaining_ids) = ids - val new_ids = (str,n)::remaining_ids - fun rewrite_entry (key, value) = (rewrite_list ids new_ids key, value) -in - Clockenv{timer_stack = ts, - id_stack = new_ids, - timetab = IDtab.make (map rewrite_entry (IDtab.dest tt)), - error_occurred = e} -end - - -fun add_error (Clockenv{timer_stack = ts, id_stack = ids, timetab= tt, error_occurred = e}) = - Clockenv{timer_stack = ts, - id_stack = ids, - timetab = tt, - error_occurred = true}; - -fun start_clock clockname clockenv = start_id clockenv (clockname, 0) - -fun stop_clock clockname clockenv = - if null (#timer_stack (rep_clockenv clockenv)) then - add_error clockenv - else let - val ((idname,_)::ids) = #id_stack (rep_clockenv clockenv) - in - if idname = clockname then - stop_id clockenv - else - add_error clockenv - end - -fun next_clock clockenv = - if null (#timer_stack (rep_clockenv clockenv)) then - add_error clockenv - else let - val ((clockname,n)::ids) = #id_stack (rep_clockenv clockenv) - in - 
start_id (stop_id clockenv)(clockname,n+1) - end - -fun rename_clock oldname newname clockenv = - if null (#timer_stack (rep_clockenv clockenv)) then - add_error clockenv - else let - val ((idname,_)::ids) = #id_stack (rep_clockenv clockenv) - in - if idname = oldname then - rename_id newname clockenv - else - add_error clockenv - end - -end; - -structure Clocks_DataManagement = Generic_Data -( - type T = ClockEnv.clockenv - val empty = ClockEnv.mt_clockenv - fun extend T = T - val merge = ClockEnv.merge_clockenv -); - -(* FIXME ignored - fun print sg ce = - (writeln "Runtime statistics:"; - writeln (ClockEnv.text_stats ce)); -*) - -signature CLOCKS = -sig - val init_clocks : theory -> unit - val flush_clocks : theory -> theory - - val start_clock : string -> unit - val stop_clock : string -> unit - val next_clock : unit -> unit - - val start_clock_tac : string -> tactic - val stop_clock_tac : string -> tactic - val next_clock_tac : unit -> tactic - - val string_of_clocks: theory -> string - val write_clocks : theory -> string -> unit - - val rename_clock : string -> string -> unit -end - -structure Clocks : CLOCKS = -struct - -val env_ref = Unsynchronized.ref ClockEnv.mt_clockenv - -fun init_clocks thy = env_ref := Clocks_DataManagement.get(Context.Theory thy) - -fun flush_clocks thy = Context.theory_of (Clocks_DataManagement.put (!env_ref) (Context.Theory thy)) - -fun start_clock name = env_ref := ClockEnv.start_clock name (!env_ref) - -fun stop_clock name = env_ref := ClockEnv.stop_clock name (!env_ref) - -fun next_clock () = env_ref := ClockEnv.next_clock (!env_ref) - -fun start_clock_tac clockname thm = (start_clock clockname; all_tac thm) - -fun stop_clock_tac clockname thm = (stop_clock clockname; all_tac thm) - -fun next_clock_tac () thm = (next_clock (); all_tac thm) - -fun rename_clock oldname newname = env_ref := ClockEnv.rename_clock oldname newname (!env_ref) - -fun write_clocks thy fname = let - val _ = init_clocks thy - val to_write = ClockEnv.latex_stats (!env_ref) - val _ = File.write (Path.explode fname) to_write; -in - () -end - -fun string_of_clocks thy = (init_clocks thy; ClockEnv.text_stats (!env_ref)) - -fun start_clock_command clockname thy = let - val result = Context.theory_of (Clocks_DataManagement.map (ClockEnv.start_clock clockname) (Context.Theory thy)) -in - (init_clocks result; result) -end - -val _ = Outer_Syntax.command @{command_spec "start_clock"} "starts a clock for measuring runtime" - (Parse.string >> (Toplevel.theory o start_clock_command)); - -fun stop_clock_command clockname thy = let - val result = Context.theory_of (Clocks_DataManagement.map (ClockEnv.stop_clock clockname) (Context.Theory thy)) -in - (init_clocks result; result) -end - -val _ = Outer_Syntax.command @{command_spec "stop_clock"} "stops a clock for measuring runtime" - (Parse.string >> (Toplevel.theory o stop_clock_command)); - -fun next_clock_command thy = let - val result = Context.theory_of (Clocks_DataManagement.map ClockEnv.next_clock (Context.Theory thy)) -in - (init_clocks result; result) -end - -val _ = Outer_Syntax.command @{command_spec "next_clock"} "increments the ID of the current clock" - (Scan.succeed (Toplevel.theory next_clock_command)); - -fun print_clocks_command thy = (writeln (string_of_clocks thy); init_clocks thy; thy) - -val _ = Outer_Syntax.command @{command_spec "print_clocks"} "print runtime statistics" - (Scan.succeed (Toplevel.theory print_clocks_command)); - -fun write_clocks_command fname thy = (write_clocks thy fname; init_clocks thy; thy) - -val _ = 
Outer_Syntax.command @{command_spec "write_clocks"} "write a table with the total runtimes measured to a file" - (Parse.string >> (Toplevel.theory o write_clocks_command)); - -end; diff --git a/src/test-gen/src/main/clocks.thy b/src/test-gen/src/main/clocks.thy deleted file mode 100644 index 7a61495..0000000 --- a/src/test-gen/src/main/clocks.thy +++ /dev/null @@ -1,370 +0,0 @@ -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * clocks.ML --- time measurements - * This file is part of HOL-TestGen. - * - * Copyright (c) 2010 University Paris-Sud, France - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ******************************************************************************) - -theory clocks -imports Main -keywords "start_clock" "stop_clock" "next_clock" "print_clocks" "write_clocks" :: thy_decl -begin - - -ML{* -signature CLOCKENV = -sig - type clockenv - - val mt_clockenv : clockenv - val merge_clockenv : clockenv * clockenv -> clockenv - - val text_stats : clockenv -> string - val latex_stats : clockenv -> string - - val start_clock : string -> clockenv -> clockenv - val stop_clock : string -> clockenv -> clockenv - val next_clock : clockenv -> clockenv - - val rename_clock : string -> string -> clockenv -> clockenv -end -*} - - -ML{* - -structure ClockEnv : CLOCKENV = -struct - -structure IDtab = - Table(type key = (string * int) list val ord = list_ord (prod_ord fast_string_ord int_ord)); - -val next_time_id = Unsynchronized.ref 0 - -fun add_time tab key time = let - val fresh_id = !next_time_id - val _ = next_time_id := !next_time_id + 1 -in - IDtab.map_default (key, Inttab.empty) (fn stats => Inttab.update_new (fresh_id, time) stats) tab -end - -fun sum_times tab = let - fun fold_tab stats = Inttab.fold (fn (_,time) => fn sum => time + sum) stats Time.zeroTime - val tab' = IDtab.map (K fold_tab) tab -in - IDtab.dest tab' -end - -type clockinfo = { - timer_stack : Timer.real_timer list, - id_stack : (string * int) list, - timetab : Time.time Inttab.table IDtab.table, - error_occurred : bool -} - -datatype clockenv = Clockenv of clockinfo - -fun rep_clockenv (Clockenv X) = X; -fun mk_clockenv (X:clockinfo)= Clockenv X - -val mt_clockenv = Clockenv{timer_stack = [], - id_stack = [], - timetab = IDtab.empty, - error_occurred = false}; - -fun merge_clockenv - (Clockenv{timer_stack = ts, - id_stack = ids, - timetab = tt, - error_occurred = e}, - Clockenv{timer_stack = ts', - id_stack = ids', - timetab = tt', - error_occurred = e'}) = let - - fun merge_stats tab1 tab2 = Inttab.join (fn time_id => fn (x, y) => x) (tab1, tab2) - (* here we always have x = y for the same time_id *) - -in - Clockenv{timer_stack = [], - id_stack = [], - timetab = IDtab.join (fn key => fn (x, y) => merge_stats x y) (tt, tt'), - error_occurred = e orelse e'} -end - -fun clean str = String.translate (fn #" " => "_" | ch => String.str ch) str - -fun string_of_id (name,n) = - if n = 0 then - clean name - else - (clean name) ^ "_" ^ (Int.toString n) - -fun text_stats' (Clockenv{timetab = tt,...}) = let - fun string_of_ids ids = String.concatWith "/" (map string_of_id (rev ids)) - val maxlen = List.foldl Int.max 0 (map (size o string_of_ids) (IDtab.keys tt)) - val string_of_ids' = (StringCvt.padRight #" " maxlen) o string_of_ids - fun string_of_entry (ids, time) = "Total time spent in " ^ (string_of_ids' ids) ^": " ^ (Time.toString time) -in - String.concatWith "\n" (map string_of_entry (sum_times tt)) -end - -fun latex_tab entries = let - fun string_of_ids ids = "\\protect\\path{" ^ (String.concatWith "/" (map string_of_id (rev ids))) ^ "}" - fun string_of_entry (ids, time) = (string_of_ids ids) ^ " & " ^ (Time.toString time) ^ "\\\\ \n" - val inner = String.concat (map string_of_entry entries) - val headers = "Location & Time\\\\ \\hline\n" -in - "\\begin{tabular}{l|r}\n" ^ headers ^ inner ^ "\\end{tabular}\n" -end - -fun latex_stats' (Clockenv{timetab = tt,...}) = let - val entries = sum_times tt - fun toplevel_name entry = (fst o List.last o fst) entry - val toplevel_names = distinct (op =) (map toplevel_name entries) - fun has_name name entry = (toplevel_name entry) = name - val sorted_entries = map (fn 
name => (name, filter (has_name name) entries)) toplevel_names - fun latex_unit (name, entries) = "\\begin{table}\n\\centering"^ (latex_tab entries) - ^"\\caption{Time consumed by \\protect\\path{" ^ (clean name) ^ "}" - ^"\\label{tab:" ^ (clean name) ^ "}}\n\\end{table}\n%%%\n" - -in - (String.concatWith "\n%%%\n" (map latex_unit sorted_entries))^"\n" -end - -fun check_error f clockenv = - if #error_occurred (rep_clockenv clockenv) then - "An error occurred during profiling." - else - f clockenv - -val text_stats = check_error text_stats' -val latex_stats = check_error latex_stats' - -fun start_id (Clockenv{timer_stack = ts, id_stack = ids, timetab= tt, error_occurred = e}) newid = - Clockenv{timer_stack = (Timer.startRealTimer ()) :: ts, - id_stack = newid :: ids, - timetab = tt, - error_occurred = e}; - -fun stop_id (Clockenv{timer_stack = ts, id_stack = ids, timetab= tt, error_occurred = e}) = let - val (timer::timers) = ts - val elapsed = Timer.checkRealTimer timer - val (id::remaining_ids) = ids -in - Clockenv{timer_stack = timers, - id_stack = remaining_ids, - timetab = add_time tt ids elapsed, - error_occurred = e} -end - -fun rewrite_list find replace x = let - val r = rev x - val prefix = List.take (r, Int.min(length find, length r)) -in - if prefix = find then - rev (replace @ (List.drop (r, length find))) - else - x -end - -fun rename_id str (Clockenv{timer_stack = ts, id_stack = ids, timetab= tt, error_occurred = e}) = let - val ((name, n)::remaining_ids) = ids - val new_ids = (str,n)::remaining_ids - fun rewrite_entry (key, value) = (rewrite_list ids new_ids key, value) -in - Clockenv{timer_stack = ts, - id_stack = new_ids, - timetab = IDtab.make (map rewrite_entry (IDtab.dest tt)), - error_occurred = e} -end - - -fun add_error (Clockenv{timer_stack = ts, id_stack = ids, timetab= tt, error_occurred = e}) = - Clockenv{timer_stack = ts, - id_stack = ids, - timetab = tt, - error_occurred = true}; - -fun start_clock clockname clockenv = start_id clockenv (clockname, 0) - -fun stop_clock clockname clockenv = - if null (#timer_stack (rep_clockenv clockenv)) then - add_error clockenv - else let - val ((idname,_)::ids) = #id_stack (rep_clockenv clockenv) - in - if idname = clockname then - stop_id clockenv - else - add_error clockenv - end - -fun next_clock clockenv = - if null (#timer_stack (rep_clockenv clockenv)) then - add_error clockenv - else let - val ((clockname,n)::ids) = #id_stack (rep_clockenv clockenv) - in - start_id (stop_id clockenv)(clockname,n+1) - end - -fun rename_clock oldname newname clockenv = - if null (#timer_stack (rep_clockenv clockenv)) then - add_error clockenv - else let - val ((idname,_)::ids) = #id_stack (rep_clockenv clockenv) - in - if idname = oldname then - rename_id newname clockenv - else - add_error clockenv - end - -end; -*} - - -ML{* -structure Clocks_DataManagement = Generic_Data -( - type T = ClockEnv.clockenv - val empty = ClockEnv.mt_clockenv - fun extend T = T - val merge = ClockEnv.merge_clockenv -); - -(* FIXME ignored - fun print sg ce = - (writeln "Runtime statistics:"; - writeln (ClockEnv.text_stats ce)); -*) - -signature CLOCKS = -sig - val init_clocks : theory -> unit - val flush_clocks : theory -> theory - - val start_clock : string -> unit - val stop_clock : string -> unit - val next_clock : unit -> unit - - val start_clock_tac : string -> tactic - val stop_clock_tac : string -> tactic - val next_clock_tac : unit -> tactic - - val string_of_clocks: theory -> string - val write_clocks : theory -> string -> unit - - val rename_clock 
: string -> string -> unit -end -*} - -ML{* -structure Clocks : CLOCKS = -struct - -val env_ref = Unsynchronized.ref ClockEnv.mt_clockenv - -fun init_clocks thy = env_ref := Clocks_DataManagement.get(Context.Theory thy) - -fun flush_clocks thy = Context.theory_of (Clocks_DataManagement.put (!env_ref) (Context.Theory thy)) - -fun start_clock name = env_ref := ClockEnv.start_clock name (!env_ref) - -fun stop_clock name = env_ref := ClockEnv.stop_clock name (!env_ref) - -fun next_clock () = env_ref := ClockEnv.next_clock (!env_ref) - -fun start_clock_tac clockname thm = (start_clock clockname; all_tac thm) - -fun stop_clock_tac clockname thm = (stop_clock clockname; all_tac thm) - -fun next_clock_tac () thm = (next_clock (); all_tac thm) - -fun rename_clock oldname newname = env_ref := ClockEnv.rename_clock oldname newname (!env_ref) - -fun write_clocks thy fname = let - val _ = init_clocks thy - val to_write = ClockEnv.latex_stats (!env_ref) - val _ = File.write (Path.explode fname) to_write; -in - () -end - -fun string_of_clocks thy = (init_clocks thy; ClockEnv.text_stats (!env_ref)) - -fun start_clock_command clockname thy = let - val result = Context.theory_of (Clocks_DataManagement.map (ClockEnv.start_clock clockname) (Context.Theory thy)) -in - (init_clocks result; result) -end - -val _ = Outer_Syntax.command @{command_keyword start_clock} "starts a clock for measuring runtime" - (Parse.string >> (Toplevel.theory o start_clock_command)); - -fun stop_clock_command clockname thy = let - val result = Context.theory_of (Clocks_DataManagement.map (ClockEnv.stop_clock clockname) (Context.Theory thy)) -in - (init_clocks result; result) -end - -val _ = Outer_Syntax.command @{command_keyword stop_clock} "stops a clock for measuring runtime" - (Parse.string >> (Toplevel.theory o stop_clock_command)); - -fun next_clock_command thy = let - val result = Context.theory_of (Clocks_DataManagement.map ClockEnv.next_clock (Context.Theory thy)) -in - (init_clocks result; result) -end - -val _ = Outer_Syntax.command @{command_keyword next_clock} "increments the ID of the current clock" - (Scan.succeed (Toplevel.theory next_clock_command)); - -fun print_clocks_command thy = (writeln (string_of_clocks thy); init_clocks thy; thy) - -val _ = Outer_Syntax.command @{command_keyword print_clocks} "print runtime statistics" - (Scan.succeed (Toplevel.theory print_clocks_command)); - -fun write_clocks_command fname thy = (write_clocks thy fname; init_clocks thy; thy) - -val _ = Outer_Syntax.command @{command_keyword write_clocks} "write a table with the total runtimes measured to a file" - (Parse.string >> (Toplevel.theory o write_clocks_command)); - -end; -*} - -end diff --git a/src/test-gen/src/main/codegen_C_pthread/Code_C_pthread.thy b/src/test-gen/src/main/codegen_C_pthread/Code_C_pthread.thy deleted file mode 100644 index 5d2bb3c..0000000 --- a/src/test-gen/src/main/codegen_C_pthread/Code_C_pthread.thy +++ /dev/null @@ -1,86 +0,0 @@ -theory Code_C_pthread -imports Main "../TestLib" -keywords "gen_C_pthread" :: "qed_global" -begin - -subsection {*C pthread Header term*} - -ML {* - -val next_line = @{term "''~''"}; - -val stdio_term = @{term "''#include ''"}; -val stdlib_term = @{term "''#include ''"}; -val pthread_term = @{term "''#include ''"} - -val C_pthread_header = stdio_term $ stdlib_term $ pthread_term; - -*} - -subsection {*C instructions term*} -(*A C instruction can be a variable declaration or a call to another existing function or - affectation or conditional or a loop*) - -subsection {*C functions 
term*} - -ML {* -val next_instr = @{term "'';''"}; -val open_bracket = @{term "''{''"}; -val close_bracket = @{term "''}''"}; -val next_arg = @{term "'',''"} -val open_par = @{term "''(''"}; -val close_par = @{term "'')''"}; - - -fun C_function_header fun_type fun_name fun_args = fun_type $ fun_name $ fun_args; - -fun discharge_intrs [] = @{term"''/**/''"} -| discharge_intrs [C_instr] = C_instr $ next_instr $ next_line -| discharge_intrs (C_instr::C_instrs) = C_instr $ next_instr $ next_line $ - discharge_intrs C_instrs; - -fun discharge_args [] = @{term"''/**/''"} -| discharge_args [C_arg] = C_arg $ next_arg -| discharge_args (C_arg::C_args) = C_arg $ next_arg $ - discharge_args C_args; - -fun C_function fun_type fun_name fun_args C_instrs = - C_function_header fun_type fun_name (open_par $ discharge_args fun_args $ close_par) $ next_line $ - open_bracket $ next_line $ - discharge_intrs C_instrs $ next_line $ - close_bracket $ next_line ; - -fun C_void_function fun_name fun_args C_instrs = - C_function @{term"''void''"} fun_name fun_args C_instrs; - -fun C_int_function fun_name fun_args C_instrs = - C_function @{term"''int''"} fun_name fun_args C_instrs; - -fun C_string_function fun_name fun_args C_instrs = - C_function @{term"''string''"} fun_name fun_args C_instrs; - -*} - - -subsection {*C File term*} - -ML {* - -fun discharge_funs [] = @{term"''/**/''"} -| discharge_funs [C_fun] = C_fun $ next_line -| discharge_funs (C_fun::C_funs) = C_fun $ next_line $ - discharge_funs C_funs; - -fun C_file C_header C_funs = C_header $ discharge_funs C_funs ; - -*} - -subsection {*Jump to the next line*} - -ML {* fun replace_next_line nil = [] - | replace_next_line (x::xs) = (if x = #"~" - then replace_next_line (#"\n"::xs) - else x::replace_next_line xs); - *} - -end diff --git a/src/test-gen/src/main/codegen_fsharp/Code_Char_Fsharp.thy b/src/test-gen/src/main/codegen_fsharp/Code_Char_Fsharp.thy deleted file mode 100644 index 2b33d07..0000000 --- a/src/test-gen/src/main/codegen_fsharp/Code_Char_Fsharp.thy +++ /dev/null @@ -1,103 +0,0 @@ -(* Title: HOL/Library/Code_Char.thy - Author: Florian Haftmann -*) - -chapter {* Code generation of pretty characters (and strings) *} - -theory Code_Char_Fsharp -imports Main (* Char_ord *) -begin - -code_printing - type_constructor char \ - (SML) "char" - and (OCaml) "char" - and (Haskell) "Prelude.Char" - and (Scala) "Char" - -setup {* - fold String_Code.add_literal_char ["SML", "OCaml", "Haskell", "Scala"] - #> String_Code.add_literal_list_string "Haskell" -*} - -code_printing - class_instance char :: equal \ - (Haskell) - -| constant "HOL.equal :: char \ char \ bool" \ - (SML) "!((_ : char) = _)" - and (OCaml) "!((_ : char) = _)" - and (Haskell) infix 4 "==" - and (Scala) infixl 5 "==" -| constant "Code_Evaluation.term_of :: char \ term" \ - (Eval) "HOLogic.mk'_char/ (IntInf.fromInt/ (Char.ord/ _))" - -code_reserved SML - char - -code_reserved OCaml - char - -code_reserved Scala - char - -definition implode :: "string \ String.literal" where - "implode = STR" - -code_reserved SML String - -code_printing - constant String.implode \ - (SML) "String.implode" - and (OCaml) "!(let l = _ in let res = String.create (List.length l) in let rec imp i = function | [] -> res | c :: l -> String.set res i c; imp (i + 1) l in imp 0 l)" - and (Haskell) "_" - and (Scala) "!(\"\" ++/ _)" -| constant String.explode \ - (SML) "String.explode" - and (OCaml) "!(let s = _ in let rec exp i l = if i < 0 then l else exp (i - 1) (String.get s i :: l) in exp (String.length s - 1) [])" - 
and (Haskell) "_" - and (Scala) "!(_.toList)" - - -definition integer_of_char :: "char \ integer" -where - "integer_of_char = integer_of_nat o nat_of_char" - -definition char_of_integer :: "integer \ char" -where - "char_of_integer = char_of_nat \ nat_of_integer" - -lemma [code]: - "nat_of_char = nat_of_integer o integer_of_char" - by (simp add: integer_of_char_def fun_eq_iff) - -lemma [code]: - "char_of_nat = char_of_integer o integer_of_nat" - by (simp add: char_of_integer_def fun_eq_iff) - - (* -code_printing - constant integer_of_char \ - (SML) "!(IntInf.fromInt o Char.ord)" - and (OCaml) "Big'_int.big'_int'_of'_int (Char.code _)" - and (Haskell) "Prelude.toInteger (Prelude.fromEnum (_ :: Prelude.Char))" - and (Scala) "BigInt(_.toInt)" -| constant char_of_integer \ - (SML) "!(Char.chr o IntInf.toInt)" - and (OCaml) "Char.chr (Big'_int.int'_of'_big'_int _)" - and (Haskell) "!(let chr k | (0 <= k && k < 256) = Prelude.toEnum k :: Prelude.Char in chr . Prelude.fromInteger)" - and (Scala) "!((k: BigInt) => if (BigInt(0) <= k && k < BigInt(256)) k.charValue else error(\"character value out of range\"))" -| constant "Orderings.less_eq :: char \ char \ bool" \ - (SML) "!((_ : char) <= _)" - and (OCaml) "!((_ : char) <= _)" - and (Haskell) infix 4 "<=" - and (Scala) infixl 4 "<=" - and (Eval) infixl 6 "<=" -| constant "Orderings.less :: char \ char \ bool" \ - (SML) "!((_ : char) < _)" - and (OCaml) "!((_ : char) < _)" - and (Haskell) infix 4 "<" - and (Scala) infixl 4 "<" - and (Eval) infixl 6 "<" -*) -end - diff --git a/src/test-gen/src/main/codegen_fsharp/Code_Char_chr_Fsharp.thy b/src/test-gen/src/main/codegen_fsharp/Code_Char_chr_Fsharp.thy deleted file mode 100644 index 74271fd..0000000 --- a/src/test-gen/src/main/codegen_fsharp/Code_Char_chr_Fsharp.thy +++ /dev/null @@ -1,83 +0,0 @@ -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * Code_Char_chr_Fsharp.thy --- Isar setup for HOL-TestGen - * This file is part of HOL-TestGen. - * - * Copyright (c) 2010-2012 ETH Zurich, Switzerland - * 2010-2013 Achim D. Brucker, Germany - * 2016 The University of Sheffield, UK - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ******************************************************************************) -(* $Id: Code_Char_chr_Fsharp.thy 12648 2016-06-17 09:26:51Z brucker $ *) - -theory Code_Char_chr_Fsharp -imports -(* "~~/src/HOL/Library/Char_ord" *) - Code_Char_Fsharp - code_fsharp -begin - -definition - "int_of_char = int o nat_of_char" - -lemma [code]: - "nat_of_char = nat o int_of_char" - unfolding int_of_char_def by (simp add: fun_eq_iff) - -definition - "char_of_int = char_of_nat o nat" - -lemma [code]: - "char_of_nat = char_of_int o int" - unfolding char_of_int_def by (simp add: fun_eq_iff) - - -code_printing - constant "Unity" \ - (Fsharp) "()" - -code_printing - constant int_of_char \ - (SML) "!(IntInf.fromInt o Char.ord)" and - (OCaml) "Big'_int.big'_int'_of'_int (Char.code _)" and - (Fsharp) "Big'_int.big'_int'_of'_int (Char.code _)" and - (Haskell) "toInteger (fromEnum (_ :: Char))" and - (Scala) "BigInt(_.toInt)" -| constant char_of_int \ - (SML) "!(Char.chr o IntInf.toInt)" and - (OCaml) "Char.chr (Big'_int.int'_of'_big'_int _)" and - (Fsharp) "Char.chr (Big'_int.int'_of'_big'_int _)" and - (Haskell) "!(let chr k | (0 <= k && k < 256) = toEnum k :: Char in chr . fromInteger)" and - (Scala) "!((k: BigInt) => if (BigInt(0) <= k && k < BigInt(256)) k.charValue else error(\"character value out of range\"))" - -end diff --git a/src/test-gen/src/main/codegen_fsharp/Code_Integer_Fsharp.thy b/src/test-gen/src/main/codegen_fsharp/Code_Integer_Fsharp.thy deleted file mode 100644 index 1ce7463..0000000 --- a/src/test-gen/src/main/codegen_fsharp/Code_Integer_Fsharp.thy +++ /dev/null @@ -1,101 +0,0 @@ -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * Code_Integer_Fsharp.thy --- - * This file is part of HOL-TestGen. - * - * Copyright (c) 2010-2012 ETH Zurich, Switzerland - * 2010-2012 Achim D. Brucker, Germany - * 2016 The University of Sheffield, UK - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ******************************************************************************) -(* $Id: Code_Integer_Fsharp.thy 12803 2016-09-01 08:38:47Z brucker $ *) - - -theory Code_Integer_Fsharp -imports - code_fsharp -begin -text {* - Representation-ignorant code equations for conversions. -*} - -text {* - HOL numeral expressions are mapped to integer literals - in target languages, using predefined target language - operations for abstract integer operations. -*} - -code_printing - type_constructor integer \ - (Fsharp) "int" - -code_printing - constant "0::integer" \ - (Fsharp) "0" - -setup \ - fold (fn target => - Numeral.add_code @{const_name Code_Numeral.Pos} I Code_Printer.literal_numeral target - #> Numeral.add_code @{const_name Code_Numeral.Neg} (op ~) Code_Printer.literal_numeral target) - ["SML", "OCaml", "Haskell", "Scala", "Fsharp"] -\ - -code_printing - constant "plus :: integer \ _ \ _" \ - (Fsharp) infixl 8 "+" -| constant "uminus :: integer \ _" \ - (Fsharp) "-/ _" -| constant "minus :: integer \ _" \ - (Fsharp) infixl 8 "-" -| constant Code_Numeral.dup \ - (Fsharp) "failwith/ \"dup\"" -| constant Code_Numeral.sub \ - (Fsharp) "failwith/ \"sub\"" -| constant "times :: integer \ _ \ _" \ - (Fsharp) infixl 9 "*" -| constant Code_Numeral.divmod_abs \ - (SML) "IntInf.divMod/ (IntInf.abs _,/ IntInf.abs _)" -| constant "HOL.equal :: integer \ _ \ bool" \ - (Fsharp) infixl 6 "=" -| constant "less_eq :: integer \ _ \ bool" \ - (Fsharp) infixl 6 "<=" -| constant "less :: integer \ _ \ bool" \ - (Fsharp) infixl 6 "<" - - -code_identifier - code_module Int \ (SML) Arith and (OCaml) Arith and (Haskell) Arith - and (Fsharp) - -end diff --git a/src/test-gen/src/main/codegen_fsharp/Code_String_Fsharp.thy b/src/test-gen/src/main/codegen_fsharp/Code_String_Fsharp.thy deleted file mode 100644 index 2590967..0000000 --- a/src/test-gen/src/main/codegen_fsharp/Code_String_Fsharp.thy +++ /dev/null @@ -1,68 +0,0 @@ -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * Code_String_Fsharp.thy --- - * This file is part of HOL-TestGen. - * - * Copyright (c) 2010-2012 ETH Zurich, Switzerland - * 2010-2012 Achim D. Brucker, Germany - * 2016 The University of Sheffield, UK - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ******************************************************************************) -(* $Id: Code_String_Fsharp.thy 12984 2017-01-06 09:39:05Z brucker $ *) - - -chapter {* Character and string types *} - -theory Code_String_Fsharp -imports - code_fsharp -begin - - -subsection {* Code generator *} - -code_printing type_constructor "String.literal" \ - (Fsharp) "string" - -setup \ - fold String_Code.add_literal_string ["SML", "OCaml", "Haskell", "Scala", "Fsharp"] -\ - -code_printing - constant "HOL.equal :: String.literal \ String.literal \ bool" \ - (Fsharp) "!((_ : string) = _)" - -code_printing constant Code.abort \ - (Fsharp) "failwith" -end diff --git a/src/test-gen/src/main/codegen_fsharp/code_fsharp.ML b/src/test-gen/src/main/codegen_fsharp/code_fsharp.ML deleted file mode 100644 index 5dacaef..0000000 --- a/src/test-gen/src/main/codegen_fsharp/code_fsharp.ML +++ /dev/null @@ -1,618 +0,0 @@ -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * code_fsharp.ML --- main file - * This file is part of HOL-TestGen. - * - * Copyright (c) 2010-2012 ETH Zurich, Switzerland - * Copyright (c) 2010-2013 Achim D. Brucker, Germany - * Copyright (c) 2016 The University of Sheffield, UK - * - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ******************************************************************************) -(* $Id: code_fsharp.ML 9990 2013-11-16 16:30:56Z brucker $ *) - -(* - This implementation is based the OCaml code generator that is part - of the Isabelle distribution. -*) - -signature CODE_FSharp = -sig - val target_Fsharp: string -end; - -structure Code_FSharp : CODE_FSharp = -struct - -open Basic_Code_Symbol; -open Basic_Code_Thingol; -open Code_Printer; - -infixr 5 @@; -infixr 5 @|; - - -(** generic **) - -open Code_ML; - -val target_Fsharp = "Fsharp"; - - -fun string_of_int i = - if i < 0 then ("-"^(Int.toString (~1*i))) - else Int.toString i; - -datatype ml_binding = - ML_Function of string * (typscheme * ((iterm list * iterm) * (thm option * bool)) list) - | ML_Instance of (string * class) * { class: class, tyco: string, vs: (vname * sort) list, - superinsts: (class * dict list list) list, - inst_params: ((string * (const * int)) * (thm * bool)) list, - superinst_params: ((string * (const * int)) * (thm * bool)) list }; - -datatype ml_stmt = - ML_Exc of string * (typscheme * int) - | ML_Val of ml_binding - | ML_Funs of (Code_Namespace.export * ml_binding) list * Code_Symbol.T list - | ML_Datas of (string * (vname list * ((string * vname list) * itype list) list)) list - | ML_Class of string * (vname * ((class * class) list * (string * itype) list)); - -fun print_product _ [] = NONE - | print_product print [x] = SOME (print x) - | print_product print xs = (SOME o enum " *" "" "") (map print xs); - -fun tuplify _ _ [] = NONE - | tuplify print fxy [x] = SOME (print fxy x) - | tuplify print _ xs = SOME (enum "," "(" ")" (map (print NOBR) xs)); - -(** Fsharp serializer **) - - -fun print_fsharp_stmt tyco_syntax const_syntax reserved is_constr deresolve = - let - val deresolve_const = deresolve o Constant; - val deresolve_class = deresolve o Type_Class; - val deresolve_classrel = deresolve o Class_Relation; - val deresolve_inst = deresolve o Class_Instance; - fun print_tyco_expr (sym, []) = (str o deresolve) sym - | print_tyco_expr (sym, [ty]) = - concat [print_typ BR ty, (str o deresolve) sym] - | print_tyco_expr (sym, tys) = - concat [enum "," "(" ")" (map (print_typ BR) tys), (str o deresolve) sym] - and print_typ fxy (tyco `%% tys) = (case tyco_syntax tyco - of NONE => print_tyco_expr (Type_Constructor tyco, tys) - | SOME (_, print) => print print_typ fxy tys) - | print_typ fxy (ITyVar v) = str ("'" ^ v); - fun print_dicttyp (class, ty) = print_tyco_expr (Type_Class class, [ty]); - fun print_typscheme_prefix (vs, p) = enum " ->" "" "" - (map_filter (fn (v, sort) => - (print_product (fn class => print_dicttyp (class, ITyVar v)) sort)) vs @| p); - fun print_typscheme (vs, ty) = print_typscheme_prefix (vs, print_typ 
NOBR ty); - fun print_dicttypscheme (vs, class_ty) = print_typscheme_prefix (vs, print_dicttyp class_ty); - val print_classrels = - fold_rev (fn classrel => fn p => Pretty.block [p, str ".", (str o deresolve_classrel) classrel]) - fun print_dict is_pseudo_fun fxy (Dict (classrels, x)) = - print_plain_dict is_pseudo_fun fxy x - |> print_classrels classrels - and print_plain_dict is_pseudo_fun fxy (Dict_Const (inst, dss)) = - brackify BR ((str o deresolve_inst) inst :: - (if is_pseudo_fun (Class_Instance inst) then [str "()"] - else map_filter (print_dicts is_pseudo_fun BR) dss)) - | print_plain_dict is_pseudo_fun fxy (Dict_Var {var = v, index = i, length = k, unique = u, class = c}) = - str (if k = 1 then "_" ^ Name.enforce_case true v - else "_" ^ Name.enforce_case true v ^ string_of_int (i+1)) - and print_dicts is_pseudo_fun = tuplify (print_dict is_pseudo_fun); - val print_dict_args = map_filter (fn (v, sort) => print_dicts (K false) BR - (map_index (fn (i, c) => Dict ([], Dict_Var {var = v, index = i, length = length sort, unique = false, class = c})) sort)); - fun print_term is_pseudo_fun some_thm vars fxy (IConst const) = - print_app is_pseudo_fun some_thm vars fxy (const, []) - | print_term is_pseudo_fun some_thm vars fxy (IVar NONE) = - str "_" - | print_term is_pseudo_fun some_thm vars fxy (IVar (SOME v)) = - str (lookup_var vars v) - | print_term is_pseudo_fun some_thm vars fxy (t as t1 `$ t2) = - (case Code_Thingol.unfold_const_app t - of SOME app => print_app is_pseudo_fun some_thm vars fxy app - | NONE => brackify fxy [print_term is_pseudo_fun some_thm vars NOBR t1, - print_term is_pseudo_fun some_thm vars BR t2]) - | print_term is_pseudo_fun some_thm vars fxy (t as _ `|=> _) = - let - val (binds, t') = Code_Thingol.unfold_pat_abs t; - val (ps, vars') = fold_map (print_bind is_pseudo_fun some_thm BR o fst) binds vars; - in brackets (str "fun" :: ps @ str "->" @@ print_term is_pseudo_fun some_thm vars' NOBR t') end - | print_term is_pseudo_fun some_thm vars fxy (ICase case_expr) = - (case Code_Thingol.unfold_const_app (#primitive case_expr) - of SOME (app as ({ sym = Constant const, ... }, _)) => - if is_none (const_syntax const) - then print_case is_pseudo_fun some_thm vars fxy case_expr - else print_app is_pseudo_fun some_thm vars fxy app - | NONE => print_case is_pseudo_fun some_thm vars fxy case_expr) - and print_app_expr is_pseudo_fun some_thm vars (app as ({ sym, dicts = dss, dom = dom, ... }, ts)) = - if is_constr sym then - let val k = length dom in - if length ts = k - then (str o deresolve) sym - :: the_list (tuplify (print_term is_pseudo_fun some_thm vars) BR ts) - else [print_term is_pseudo_fun some_thm vars BR (Code_Thingol.eta_expand k app)] - end - else if is_pseudo_fun sym - then (str o deresolve) sym @@ str "()" - else (str o deresolve) sym :: map_filter (print_dicts is_pseudo_fun BR) dss - @ map (print_term is_pseudo_fun some_thm vars BR) ts - and print_app is_pseudo_fun some_thm vars = gen_print_app (print_app_expr is_pseudo_fun) - (print_term is_pseudo_fun) const_syntax some_thm vars - and print_bind is_pseudo_fun = gen_print_bind (print_term is_pseudo_fun) - and print_case is_pseudo_fun some_thm vars fxy { clauses = [], ... } = - (concat o map str) ["failwith", "\"empty case\""] - | print_case is_pseudo_fun some_thm vars fxy (case_expr as { clauses = [_], ... 
}) = - let - val (binds, body) = Code_Thingol.unfold_let (ICase case_expr); - fun print_let ((pat, _), t) vars = - vars - |> print_bind is_pseudo_fun some_thm NOBR pat - |>> (fn p => concat - [str "let", p, str "=", print_term is_pseudo_fun some_thm vars NOBR t, str "in"]) - val (ps, vars') = fold_map print_let binds vars; - in - brackets [Pretty.chunks ps, print_term is_pseudo_fun some_thm vars' NOBR body] - end - | print_case is_pseudo_fun some_thm vars fxy { term = t, typ = ty, clauses = clauses, ... } = - let - fun print_select delim (pat, body) = - let - val (p, vars') = print_bind is_pseudo_fun some_thm NOBR pat vars; - in concat [str delim, p, str "->", print_term is_pseudo_fun some_thm vars' NOBR body] end; - in - brackets ( - str "match" - :: print_term is_pseudo_fun some_thm vars NOBR t - :: str "with\n" - :: map (print_select "|") clauses - ) - end - | print_case is_pseudo_fun some_thm vars fxy ({clauses=[], ...}) = - (concat o map str) ["failwith", "\"empty case\""]; - fun print_val_decl print_typscheme (sym, typscheme) = concat - [str "val", str (deresolve sym), str ":", print_typscheme typscheme]; - fun print_datatype_decl definer (tyco, (vs, cos)) = - let - fun print_co ((co, _), []) = str (deresolve_const co) - | print_co ((co, _), tys) = concat [str (deresolve_const co), str "of", - enum " *" "" "" (map (print_typ (INFX (2, X))) tys)]; - fun separateByList l (x::y::xs) = (x :: l) @ separateByList l (y::xs) - | separateByList l x = x - in - concat ( - str definer - :: print_tyco_expr (Type_Constructor tyco, map ITyVar vs) - :: str "=" - :: [Pretty.blk (0, - separateByList [Pretty.brk 1, str "| "] (map print_co cos) - )] - ) - end; - fun print_def is_pseudo_fun needs_typ definer - (ML_Function (const, (vs_ty as (vs, ty), eqs))) = - let - fun print_eqn ((ts, t), (some_thm, _)) = - let - val vars = reserved - |> intro_base_names_for (is_none o const_syntax) - deresolve (t :: ts) - |> intro_vars ((fold o Code_Thingol.fold_varnames) - (insert (op =)) ts []); - in concat [ - (Pretty.block o commas) - (map (print_term is_pseudo_fun some_thm vars NOBR) ts), - str "->", - print_term is_pseudo_fun some_thm vars NOBR t - ] end; - fun print_eqns is_pseudo [((ts, t), (some_thm, _))] = - let - val vars = reserved - |> intro_base_names_for (is_none o const_syntax) - deresolve (t :: ts) - |> intro_vars ((fold o Code_Thingol.fold_varnames) - (insert (op =)) ts []); - in - concat ( - (if is_pseudo then [str "()"] - else map (print_term is_pseudo_fun some_thm vars BR) ts) - @ str "=" - @@ print_term is_pseudo_fun some_thm vars NOBR t - ) - end - | print_eqns _ eqs = - let - val vars = reserved - |> intro_base_names_for (is_none o const_syntax) - deresolve (map (snd o fst) eqs) - val dummy_parms = (map str o aux_params vars o map (fst o fst)) eqs; - in - Pretty.block ( - Pretty.breaks dummy_parms - @ Pretty.brk 1 - :: str "=" - :: Pretty.brk 1 - :: Pretty.blk (2, - str "match" - :: Pretty.brk 1 - :: (Pretty.block o commas) dummy_parms - :: Pretty.brk 1 - :: str "with" - :: maps (append [Pretty.fbrk, str "|", Pretty.brk 1] - o single o print_eqn) eqs - ) - :: [] - ) - end; - val prolog = if needs_typ then - concat [str definer, (str o deresolve_const) const, str ":", print_typ NOBR ty] - else (concat o map str) [definer, deresolve_const const]; - in pair - (print_val_decl print_typscheme (Constant const, vs_ty)) - (concat ( - prolog - :: print_dict_args vs - @| print_eqns (is_pseudo_fun (Constant const)) eqs - )) - end - | print_def is_pseudo_fun _ definer - (ML_Instance (inst as (tyco, class), 
{ vs, superinsts, inst_params, ... })) = - let - fun print_super_instance (super_class, x) = - concat [ - (str o deresolve_classrel) (class, super_class), - str "=", - print_dict is_pseudo_fun NOBR (Dict ([], Dict_Const ((tyco, super_class), x))) - ]; - fun print_classparam_instance ((classparam, (const, _)), (thm, _)) = - concat [ - (str o deresolve_const) classparam, - str "=", - print_app (K false) (SOME thm) reserved NOBR (const, []) - ]; - in pair - (print_val_decl print_dicttypscheme - (Class_Instance inst, (vs, (class, tyco `%% map (ITyVar o fst) vs)))) - (concat ( - str definer - :: (str o deresolve_inst) inst - :: (if is_pseudo_fun (Class_Instance inst) then [str "()"] - else print_dict_args vs) - @ str "=" - @@ brackets [ - enum_default "()" ";" "{" "}" (map print_super_instance superinsts - @ map print_classparam_instance inst_params), - str ":", - print_dicttyp (class, tyco `%% map (ITyVar o fst) vs) - ] - )) - end; - fun print_stmt _ (ML_Exc (const, (vs_ty, n))) = pair - [print_val_decl print_typscheme (Constant const, vs_ty)] - ((doublesemicolon o map str) ( - "let" - :: deresolve_const const - :: replicate n "_" - @ "=" - :: "failwith" - @@ ML_Syntax.print_string const - )) - | print_stmt _ (ML_Val binding) = - let - val (sig_p, p) = print_def (K false) true "let" binding - in pair - [sig_p] - (doublesemicolon [p]) - end - | print_stmt _ (ML_Funs ((export, binding) :: exports_bindings, pseudo_funs)) = - let - val print_def' = print_def (member (op =) pseudo_funs) false; - fun print_pseudo_fun sym = concat [ - str "let", - (str o deresolve) sym, - str "=", - (str o deresolve) sym, - str "();;" - ]; - val (sig_ps, (ps, p)) = (apsnd split_last o split_list) - (print_def' "let rec" binding :: map (print_def' "and" o snd) exports_bindings); - val pseudo_ps = map print_pseudo_fun pseudo_funs; - in pair - (map_filter (fn (export, p) => if Code_Namespace.not_private export then SOME p else NONE) - ((export :: map fst exports_bindings) ~~ sig_ps)) - (Pretty.chunks (ps @ doublesemicolon [p] :: pseudo_ps)) - end - | print_stmt _ (ML_Datas [(tyco, (vs, []))]) = - let - val ty_p = print_tyco_expr (Type_Constructor tyco, map ITyVar vs); - in - pair - [concat [str "type", ty_p]] - (doublesemicolon [str "type", ty_p, str "=", str "EMPTY__"]) - end - | print_stmt export (ML_Datas (data :: datas)) = - let - val decl_ps = print_datatype_decl "type" data - :: map (print_datatype_decl "and") datas; - val (ps, p) = split_last decl_ps; - in pair - (if Code_Namespace.is_public export - then decl_ps - else map (fn (tyco, (vs, _)) => - concat [str "type", print_tyco_expr (Type_Constructor tyco, map ITyVar vs)]) - (data :: datas)) - (Pretty.chunks (ps @| doublesemicolon [p])) - end - | print_stmt export (ML_Class (class, (v, (classrels, classparams)))) = - let - fun print_field s p = concat [str s, str ":", p]; - fun print_super_class_field (classrel as (_, super_class)) = - print_field (deresolve_classrel classrel) (print_dicttyp (super_class, ITyVar v)); - fun print_classparam_decl (classparam, ty) = - print_val_decl print_typscheme - (Constant classparam, ([(v, [class])], ty)); - fun print_classparam_field (classparam, ty) = - print_field (deresolve_const classparam) (print_typ NOBR ty); - val w = "_" ^ Name.enforce_case true v; - fun print_classparam_proj (classparam, _) = - (concat o map str) ["let", deresolve_const classparam, w, "=", - w ^ "." 
^ deresolve_const classparam ^ ";;"]; - val type_decl_p = concat [ - str "type", - print_dicttyp (class, ITyVar v), - str "=", - enum_default "unit" ";" "{" "}" ( - map print_super_class_field classrels - @ map print_classparam_field classparams - ) - ]; - in pair - (if Code_Namespace.is_public export - then type_decl_p :: map print_classparam_decl classparams - else [concat [str "type", print_dicttyp (class, ITyVar v)]]) - (Pretty.chunks ( - doublesemicolon [type_decl_p] - :: map print_classparam_proj classparams - )) - end; - in print_stmt end; - -fun print_fsharp_module name some_decls body = - Pretty.chunks2 ( - str ("module " ^ name) - :: body - ); - -val literals_fsharp = let - fun chr i = - let - val xs = string_of_int i; - val ys = replicate_string (3 - length (raw_explode xs)) "0"; - in "\\" ^ ys ^ xs end; - fun char_fsharp c = - let - val i = ord c; - val s = if i < 32 orelse i = 34 orelse i = 39 orelse i = 92 orelse i > 126 - then chr i else c - in s end; - fun numeral_fsharp k = string_of_int k -(* - fun numeral_fsharp k = if k < 0 - then "(Big_int.minus_big_int " ^ numeral_fsharp (~ k) ^ ")" - else if k <= 1073741823 - then "(Big_int.big_int_of_int " ^ string_of_int k ^ ")" - else "(Big_int.big_int_of_string " ^ quote (string_of_int k) ^ ")" -*) -in Literals { - literal_char = Library.enclose "'" "'" o char_fsharp, - literal_string = quote o translate_string char_fsharp, - literal_numeral = numeral_fsharp, - literal_list = enum ";" "[" "]", - infix_cons = (6, "::") -} end; - - - - -(* -val serializer_fsharp : Code_Target.serializer = - Code_Target.parse_args (Scan.optional (Args.$$$ "no_signatures" >> K false) true - >> (fn with_signatures => serialize_ml print_fsharp_module print_fsharp_stmt with_signatures)); -*) - -fun ml_program_of_program ctxt module_name reserved identifiers = - let - fun namify_const upper base (nsp_const, nsp_type) = - let - val (base', nsp_const') = Name.variant (Name.enforce_case upper base) nsp_const - in (base', (nsp_const', nsp_type)) end; - fun namify_type base (nsp_const, nsp_type) = - let - val (base', nsp_type') = Name.variant (Name.enforce_case false base) nsp_type - in (base', (nsp_const, nsp_type')) end; - fun namify_stmt (Code_Thingol.Fun _) = namify_const false - | namify_stmt (Code_Thingol.Datatype _) = namify_type - | namify_stmt (Code_Thingol.Datatypecons _) = namify_const true - | namify_stmt (Code_Thingol.Class _) = namify_type - | namify_stmt (Code_Thingol.Classrel _) = namify_const false - | namify_stmt (Code_Thingol.Classparam _) = namify_const false - | namify_stmt (Code_Thingol.Classinst _) = namify_const false; - fun ml_binding_of_stmt (sym as Constant const, (export, Code_Thingol.Fun ((tysm as (vs, ty), raw_eqs), _))) = - let - val eqs = filter (snd o snd) raw_eqs; - val (eqs', some_sym) = if null (filter_out (null o snd) vs) then case eqs - of [(([], t), some_thm)] => if (not o null o fst o Code_Thingol.unfold_fun) ty - then ([(([IVar (SOME "x")], t `$ IVar (SOME "x")), some_thm)], NONE) - else (eqs, SOME (sym, member (op =) (Code_Thingol.add_constsyms t []) sym)) - | _ => (eqs, NONE) - else (eqs, NONE) - in ((export, ML_Function (const, (tysm, eqs'))), some_sym) end - | ml_binding_of_stmt (sym as Class_Instance inst, (export, Code_Thingol.Classinst (stmt as { vs, ... 
}))) = - ((export, ML_Instance (inst, stmt)), - if forall (null o snd) vs then SOME (sym, false) else NONE) - | ml_binding_of_stmt (sym, _) = - error ("Binding block containing illegal statement: " ^ - Code_Symbol.quote ctxt sym) - fun modify_fun (sym, (export, stmt)) = - let - val ((export', binding), some_value_sym) = ml_binding_of_stmt (sym, (export, stmt)); - val ml_stmt = case binding - of ML_Function (const, ((vs, ty), [])) => - ML_Exc (const, ((vs, ty), - (length o filter_out (null o snd)) vs + (length o fst o Code_Thingol.unfold_fun) ty)) - | _ => case some_value_sym - of NONE => ML_Funs ([(export', binding)], []) - | SOME (sym, true) => ML_Funs ([(export, binding)], [sym]) - | SOME (sym, false) => ML_Val binding - in SOME (export, ml_stmt) end; - fun modify_funs stmts = single (SOME - (Code_Namespace.Opaque, ML_Funs (map_split ml_binding_of_stmt stmts |> (apsnd o map_filter o Option.map) fst))) - fun modify_datatypes stmts = - map_filter - (fn (Type_Constructor tyco, (export, Code_Thingol.Datatype stmt)) => SOME (export, (tyco, stmt)) | _ => NONE) stmts - |> split_list - |> apfst Code_Namespace.join_exports - |> apsnd ML_Datas - |> SOME - |> single; - fun modify_class stmts = - the_single (map_filter - (fn (Type_Class class, (export, Code_Thingol.Class stmt)) => SOME (export, (class, stmt)) | _ => NONE) stmts) - |> apsnd ML_Class - |> SOME - |> single; - fun modify_stmts ([stmt as (_, (_, stmt' as Code_Thingol.Fun _))]) = - if Code_Thingol.is_case stmt' then [] else [modify_fun stmt] - | modify_stmts ((stmts as (_, (_, Code_Thingol.Fun _)) :: _)) = - modify_funs (filter_out (Code_Thingol.is_case o snd o snd) stmts) - | modify_stmts ((stmts as (_, (_, Code_Thingol.Datatypecons _)) :: _)) = - modify_datatypes stmts - | modify_stmts ((stmts as (_, (_, Code_Thingol.Datatype _)) :: _)) = - modify_datatypes stmts - | modify_stmts ((stmts as (_, (_, Code_Thingol.Class _)) :: _)) = - modify_class stmts - | modify_stmts ((stmts as (_, (_, Code_Thingol.Classrel _)) :: _)) = - modify_class stmts - | modify_stmts ((stmts as (_, (_, Code_Thingol.Classparam _)) :: _)) = - modify_class stmts - | modify_stmts ([stmt as (_, (_, Code_Thingol.Classinst _))]) = - [modify_fun stmt] - | modify_stmts ((stmts as (_, (_, Code_Thingol.Classinst _)) :: _)) = - modify_funs stmts - | modify_stmts stmts = error ("Illegal mutual dependencies: " ^ - (Library.commas o map (Code_Symbol.quote ctxt o fst)) stmts); - in - Code_Namespace.hierarchical_program ctxt { - module_name = module_name, reserved = reserved, identifiers = identifiers, - empty_nsp = (reserved, reserved), namify_module = pair, namify_stmt = namify_stmt, - cyclic_modules = false, class_transitive = true, - class_relation_public = true, empty_data = (), - memorize_data = K I, modify_stmts = modify_stmts } - end; - -fun serialize_ml print_ml_module print_ml_stmt ctxt - { module_name, reserved_syms, identifiers, includes, - class_syntax, tyco_syntax, const_syntax } exports program = - let - - (* build program *) - val { deresolver, hierarchical_program = ml_program } = - ml_program_of_program ctxt module_name (Name.make_context reserved_syms) - identifiers exports program; - - (* print statements *) - fun print_stmt prefix_fragments (_, (export, stmt)) = print_ml_stmt - tyco_syntax const_syntax (make_vars reserved_syms) - (Code_Thingol.is_constr program) (deresolver prefix_fragments) export stmt - |> apfst (fn decl => if Code_Namespace.not_private export then SOME decl else NONE); - - (* print modules *) - fun print_module _ base _ xs = - let - val 
(raw_decls, body) = split_list xs; - val decls = maps these raw_decls - in (NONE, print_ml_module base decls body) end; - - (* serialization *) - val p = Pretty.chunks2 (map snd includes - @ map snd (Code_Namespace.print_hierarchical { - print_module = print_module, print_stmt = print_stmt, - lift_markup = apsnd } ml_program)); - fun write width NONE = writeln o format [] width - | write width (SOME p) = File.write p o format [] width; - fun prepare syms width p = ([("", format syms width p)], try (deresolver [])); - in - Code_Target.serialization write prepare p - end; - - -val serializer_fsharp : Code_Target.serializer = - Code_Target.parse_args (Scan.succeed ()) #> K (serialize_ml print_fsharp_module print_fsharp_stmt); - -(** Isar setup **) - -fun fun_syntax print_typ fxy [ty1, ty2] = - brackify_infix (1, R) fxy ( - print_typ (INFX (1, X)) ty1, - str "->", - print_typ (INFX (1, R)) ty2 - ); - - - -val _ = Theory.setup - (Code_Target.add_language - (target_Fsharp, { serializer = serializer_fsharp, literals = literals_fsharp, - check = { env_var = "EXEC_FSHARP", make_destination = fn p => Path.append p (Path.explode "ROOT.fsharp"), - make_command = fn fsharp => fsharp ^ " -w pu nums.cma ROOT.fsharp" } }) - #> Code_Target.set_printings (Code_Symbol.Type_Constructor ("fun", [(target_Fsharp, SOME (2, fun_syntax))])) -(* Source: http://msdn.microsoft.com/en-us/library/dd233249.aspx *) - #> fold (Code_Target.add_reserved target_Fsharp) [ - "abstract", "and", "as", "asr", "assert", "atomic", "base", "begin", "break", - "class", "checked", "component", "const", "constraint", "constructor", "continue", - "default", "delegate", "do", "done", "downcast", "downto", "eager", "elif", - "else", "end", "event", "exception", "extern", "external", "false", "fixed", - "for", "fun", "function", "functor", "global", "if", "in", "include", "inherit", - "inline", "int", "interface", "internal", "land", "lazy", "let", "lor", "lsl", "lsr", - "lxor", "match", "member", "method", "mixin", "mod", "module", "mutable", - "namespace", "new", "not", "null", "object", "of", "open", "or", "override", - "parallel", "private", "process", "protected", "public", "pure", "rec", - "return", "sealed", "sig", "static", "string", "struct", "tailcall", "then", "to", - "trait", "true", "try", "type", "upcast", "use", "val", "virtual", "void", - "volatile", "when", "while", "with", "yield" - ]); - -end; (*struct*) diff --git a/src/test-gen/src/main/codegen_fsharp/code_fsharp.thy b/src/test-gen/src/main/codegen_fsharp/code_fsharp.thy deleted file mode 100644 index e16d4ca..0000000 --- a/src/test-gen/src/main/codegen_fsharp/code_fsharp.thy +++ /dev/null @@ -1,130 +0,0 @@ -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * code_fsharp.thy --- - * This file is part of HOL-TestGen. - * - * Copyright (c) 2010-2012 ETH Zurich, Switzerland - * 2010-2013 Achim D. Brucker, Germany - * 2016 The University of Sheffield, UK - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ******************************************************************************) -(* $Id: code_fsharp.thy 12648 2016-06-17 09:26:51Z brucker $ *) - -theory code_fsharp -imports - Main -begin - -ML_file "code_fsharp.ML" - -(* In file HOL/HOL.thy *) - -code_printing - type_constructor bool \ - (Fsharp) "bool" - -code_printing - constant True \ - (Fsharp) "true" -| constant False \ - (Fsharp) "false" -| constant Not \ - (Fsharp) "not" -| constant HOL.conj \ - (Fsharp) infixl 3 "&&" -| constant HOL.disj \ - (Fsharp) infixl 2 "||" -| constant HOL.implies \ - (Fsharp) "!(if (_)/ then (_)/ else true)" -| constant If \ - (Fsharp) "!(if (_)/ then (_)/ else (_))" - -code_reserved Fsharp - bool - -code_printing - constant undefined \ - (Fsharp) "failwith/ \"undefined\"" - - -(* In file HOL/Option.thy *) -code_printing - type_constructor option \ - (Fsharp) "_ option" - -code_printing - constant None \ - (Fsharp) "None" -| constant Some \ - (Fsharp) "Some _" - -code_reserved Fsharp - option None Some - -(* In file HOL/List.thy *) -code_printing - type_constructor list \ - (Fsharp) "_ list" -| constant Nil \ - (Fsharp) "[]" -| constant Cons \ - (Fsharp) "(_ ::/ _)" - -code_reserved Fsharp - list - -code_printing - constant "op @" \ - (Fsharp) infixr 6 "@" - -code_printing - type_constructor "unit" \ - (Fsharp) "unit" - -code_printing - constant "Unity" \ - (Fsharp) "()" - -code_reserved Fsharp - unit - -code_printing - type_constructor prod \ - (Fsharp) infix 2 "*" - -code_printing - constant "Pair" \ - (Fsharp) "!((_),/ (_))" - -end diff --git a/src/test-gen/src/main/codegen_fsharp/examples/AQ.thy b/src/test-gen/src/main/codegen_fsharp/examples/AQ.thy deleted file mode 100755 index c5b70f0..0000000 --- a/src/test-gen/src/main/codegen_fsharp/examples/AQ.thy +++ /dev/null @@ -1,71 +0,0 @@ -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * AQ.thy thy --- - * This file is part of HOL-TestGen. - * - * Copyright (c) 2010-2012 ETH Zurich, Switzerland - * 2010-2012 Achim D. Brucker, Germany - * - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ******************************************************************************) -(* $Id: AQ.thy 9263 2011-12-25 15:49:36Z brucker $ *) - - -theory AQ -imports code_fsharp -begin - -datatype 'a queue = AQueue "'a list" "'a list" - -definition empty :: "'a queue" where - "empty = AQueue [] []" - -primrec enqueue :: "'a \ 'a queue \ 'a queue" where - "enqueue x (AQueue xs ys) = AQueue (x # xs) ys" - -fun dequeue :: "'a queue \ 'a option \ 'a queue" where - "dequeue (AQueue [] []) = (None, AQueue [] [])" - | "dequeue (AQueue xs (y # ys)) = (Some y, AQueue xs ys)" - | "dequeue (AQueue xs []) = - (case rev xs of y # ys \ (Some y, AQueue [] ys))" - -fun not :: "bool \ bool" where - "not True = False" - | "not False = True" - -fun head2 :: "('b list) list \ 'b option" where - "head2 [] = None" - | "head2 (x # xs) = (case x of [] \ None | (y # ys) \ Some y)" - -export_code empty dequeue enqueue not head2 in Fsharp - module_name Example file "test.fs" diff --git a/src/test-gen/src/main/codegen_fsharp/examples/SemiG.thy b/src/test-gen/src/main/codegen_fsharp/examples/SemiG.thy deleted file mode 100755 index c70368d..0000000 --- a/src/test-gen/src/main/codegen_fsharp/examples/SemiG.thy +++ /dev/null @@ -1,92 +0,0 @@ -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * SemiG.thy --- - * This file is part of HOL-TestGen. - * - * Copyright (c) 2010-2012 ETH Zurich, Switzerland - * 2010-2012 Achim D. Brucker, Germany - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ******************************************************************************) -(* $Id: SemiG.thy 9263 2011-12-25 15:49:36Z brucker $ *) - -theory SemiG -imports Main -begin - -class semigroup = - fixes mult :: "'a \ 'a \ 'a" (infixl "\" 70) - assumes assoc: "(x \ y) \ z = x \ (y \ z)" - -class monoid = semigroup + - fixes neutral :: "'a" ("\") - assumes neutl: "x \ \ = x" - and neutr: "\ \ x = x" - -instantiation nat :: monoid -begin - primrec mult_nat where - "0 \ n = (0::nat)" - | "Suc m \ n = n + m \ n" - - definition neutral_nat where - "\ = Suc 0" - - lemma add_mult_distrib: - fixes n m q :: nat - shows "(n + m) \ q = n \ q + m \ q" - by (induct n) simp_all - - instance proof - fix m n q :: nat - show "m \ n \ q = m \ (n \ q)" - by (induct m) (simp_all add: add_mult_distrib) - show "\ \ n = n" - by (simp add: neutral_nat_def) - show "m \ \ = m" - by (induct m) (simp_all add: neutral_nat_def) - qed -end - -primrec (in monoid) pow :: "nat \ 'a \ 'a" where - "pow 0 a = \" -| "pow (Suc n) a = a \ pow n a" - -definition bexp :: "nat \ nat" where - "bexp n = pow n (Suc (Suc 0))" - -export_code pow bexp in OCaml - module_name SemiG file "SemiG.ml" - -end - diff --git a/src/test-gen/src/main/codegen_fsharp/upstream/code_ml.ML b/src/test-gen/src/main/codegen_fsharp/upstream/code_ml.ML deleted file mode 100644 index 6f0e004..0000000 --- a/src/test-gen/src/main/codegen_fsharp/upstream/code_ml.ML +++ /dev/null @@ -1,898 +0,0 @@ -(* Title: Tools/Code/code_ml.ML - Author: Florian Haftmann, TU Muenchen - -Serializer for SML and OCaml. 
-*) - -(* -signature CODE_ML = -sig - val target_SML: string - val target_OCaml: string -end; -*) - -structure Code_ML (*: CODE_ML *) = -struct - -open Basic_Code_Symbol; -open Basic_Code_Thingol; -open Code_Printer; - -infixr 5 @@; -infixr 5 @|; - - -(** generic **) - -val target_SML = "SML"; -val target_OCaml = "OCaml"; - -datatype ml_binding = - ML_Function of string * (typscheme * ((iterm list * iterm) * (thm option * bool)) list) - | ML_Instance of (string * class) * { class: class, tyco: string, vs: (vname * sort) list, - superinsts: (class * dict list list) list, - inst_params: ((string * (const * int)) * (thm * bool)) list, - superinst_params: ((string * (const * int)) * (thm * bool)) list }; - -datatype ml_stmt = - ML_Exc of string * (typscheme * int) - | ML_Val of ml_binding - | ML_Funs of (Code_Namespace.export * ml_binding) list * Code_Symbol.T list - | ML_Datas of (string * (vname list * ((string * vname list) * itype list) list)) list - | ML_Class of string * (vname * ((class * class) list * (string * itype) list)); - -fun print_product _ [] = NONE - | print_product print [x] = SOME (print x) - | print_product print xs = (SOME o enum " *" "" "") (map print xs); - -fun tuplify _ _ [] = NONE - | tuplify print fxy [x] = SOME (print fxy x) - | tuplify print _ xs = SOME (enum "," "(" ")" (map (print NOBR) xs)); - - -(** SML serializer **) - -fun print_char_any_ml s = - if Symbol.is_char s then ML_Syntax.print_char s else "\\092" ^ unprefix "\\" s; - -val print_string_any_ml = quote o implode o map print_char_any_ml o Symbol.explode; - -fun print_sml_stmt tyco_syntax const_syntax reserved is_constr deresolve = - let - val deresolve_const = deresolve o Constant; - val deresolve_class = deresolve o Type_Class; - val deresolve_classrel = deresolve o Class_Relation; - val deresolve_inst = deresolve o Class_Instance; - fun print_tyco_expr (sym, []) = (str o deresolve) sym - | print_tyco_expr (sym, [ty]) = - concat [print_typ BR ty, (str o deresolve) sym] - | print_tyco_expr (sym, tys) = - concat [enum "," "(" ")" (map (print_typ BR) tys), (str o deresolve) sym] - and print_typ fxy (tyco `%% tys) = (case tyco_syntax tyco - of NONE => print_tyco_expr (Type_Constructor tyco, tys) - | SOME (_, print) => print print_typ fxy tys) - | print_typ fxy (ITyVar v) = str ("'" ^ v); - fun print_dicttyp (class, ty) = print_tyco_expr (Type_Class class, [ty]); - fun print_typscheme_prefix (vs, p) = enum " ->" "" "" - (map_filter (fn (v, sort) => - (print_product (fn class => print_dicttyp (class, ITyVar v)) sort)) vs @| p); - fun print_typscheme (vs, ty) = print_typscheme_prefix (vs, print_typ NOBR ty); - fun print_dicttypscheme (vs, class_ty) = print_typscheme_prefix (vs, print_dicttyp class_ty); - fun print_classrels fxy [] ps = brackify fxy ps - | print_classrels fxy [classrel] ps = brackify fxy [(str o deresolve_classrel) classrel, brackify BR ps] - | print_classrels fxy classrels ps = - brackify fxy [enum " o" "(" ")" (map (str o deresolve_classrel) classrels), brackify BR ps] - fun print_dict is_pseudo_fun fxy (Dict (classrels, x)) = - print_classrels fxy classrels (print_plain_dict is_pseudo_fun fxy x) - and print_plain_dict is_pseudo_fun fxy (Dict_Const (inst, dss)) = - ((str o deresolve_inst) inst :: - (if is_pseudo_fun (Class_Instance inst) then [str "()"] - else map_filter (print_dicts is_pseudo_fun BR) dss)) - | print_plain_dict is_pseudo_fun fxy (Dict_Var (v, (i, k))) = - [str (if k = 1 then Name.enforce_case true v ^ "_" - else Name.enforce_case true v ^ string_of_int (i+1) ^ "_")] - and 
print_dicts is_pseudo_fun = tuplify (print_dict is_pseudo_fun); - val print_dict_args = map_filter (fn (v, sort) => print_dicts (K false) BR - (map_index (fn (i, _) => Dict ([], Dict_Var (v, (i, length sort)))) sort)); - fun print_term is_pseudo_fun some_thm vars fxy (IConst const) = - print_app is_pseudo_fun some_thm vars fxy (const, []) - | print_term is_pseudo_fun some_thm vars fxy (IVar NONE) = - str "_" - | print_term is_pseudo_fun some_thm vars fxy (IVar (SOME v)) = - str (lookup_var vars v) - | print_term is_pseudo_fun some_thm vars fxy (t as t1 `$ t2) = - (case Code_Thingol.unfold_const_app t - of SOME app => print_app is_pseudo_fun some_thm vars fxy app - | NONE => brackify fxy [print_term is_pseudo_fun some_thm vars NOBR t1, - print_term is_pseudo_fun some_thm vars BR t2]) - | print_term is_pseudo_fun some_thm vars fxy (t as _ `|=> _) = - let - val (binds, t') = Code_Thingol.unfold_pat_abs t; - fun print_abs (pat, ty) = - print_bind is_pseudo_fun some_thm NOBR pat - #>> (fn p => concat [str "fn", p, str "=>"]); - val (ps, vars') = fold_map print_abs binds vars; - in brackets (ps @ [print_term is_pseudo_fun some_thm vars' NOBR t']) end - | print_term is_pseudo_fun some_thm vars fxy (ICase case_expr) = - (case Code_Thingol.unfold_const_app (#primitive case_expr) - of SOME (app as ({ sym = Constant const, ... }, _)) => - if is_none (const_syntax const) - then print_case is_pseudo_fun some_thm vars fxy case_expr - else print_app is_pseudo_fun some_thm vars fxy app - | NONE => print_case is_pseudo_fun some_thm vars fxy case_expr) - and print_app_expr is_pseudo_fun some_thm vars (app as ({ sym, dicts = dss, dom = dom, ... }, ts)) = - if is_constr sym then - let val k = length dom in - if k < 2 orelse length ts = k - then (str o deresolve) sym - :: the_list (tuplify (print_term is_pseudo_fun some_thm vars) BR ts) - else [print_term is_pseudo_fun some_thm vars BR (Code_Thingol.eta_expand k app)] - end - else if is_pseudo_fun sym - then (str o deresolve) sym @@ str "()" - else (str o deresolve) sym :: map_filter (print_dicts is_pseudo_fun BR) dss - @ map (print_term is_pseudo_fun some_thm vars BR) ts - and print_app is_pseudo_fun some_thm vars = gen_print_app (print_app_expr is_pseudo_fun) - (print_term is_pseudo_fun) const_syntax some_thm vars - and print_bind is_pseudo_fun = gen_print_bind (print_term is_pseudo_fun) - and print_case is_pseudo_fun some_thm vars fxy { clauses = [], ... } = - (concat o map str) ["raise", "Fail", "\"empty case\""] - | print_case is_pseudo_fun some_thm vars fxy (case_expr as { clauses = [_], ... }) = - let - val (binds, body) = Code_Thingol.unfold_let (ICase case_expr); - fun print_match ((pat, _), t) vars = - vars - |> print_bind is_pseudo_fun some_thm NOBR pat - |>> (fn p => semicolon [str "val", p, str "=", - print_term is_pseudo_fun some_thm vars NOBR t]) - val (ps, vars') = fold_map print_match binds vars; - in - Pretty.chunks [ - Pretty.block [str "let", Pretty.fbrk, Pretty.chunks ps], - Pretty.block [str "in", Pretty.fbrk, print_term is_pseudo_fun some_thm vars' NOBR body], - str "end" - ] - end - | print_case is_pseudo_fun some_thm vars fxy { term = t, typ = ty, clauses = clause :: clauses, ... 
} = - let - fun print_select delim (pat, body) = - let - val (p, vars') = print_bind is_pseudo_fun some_thm NOBR pat vars; - in - concat [str delim, p, str "=>", print_term is_pseudo_fun some_thm vars' NOBR body] - end; - in - brackets ( - str "case" - :: print_term is_pseudo_fun some_thm vars NOBR t - :: print_select "of" clause - :: map (print_select "|") clauses - ) - end; - fun print_val_decl print_typscheme (sym, typscheme) = concat - [str "val", str (deresolve sym), str ":", print_typscheme typscheme]; - fun print_datatype_decl definer (tyco, (vs, cos)) = - let - fun print_co ((co, _), []) = str (deresolve_const co) - | print_co ((co, _), tys) = concat [str (deresolve_const co), str "of", - enum " *" "" "" (map (print_typ (INFX (2, X))) tys)]; - in - concat ( - str definer - :: print_tyco_expr (Type_Constructor tyco, map ITyVar vs) - :: str "=" - :: separate (str "|") (map print_co cos) - ) - end; - fun print_def is_pseudo_fun needs_typ definer - (ML_Function (const, (vs_ty as (vs, ty), eq :: eqs))) = - let - fun print_eqn definer ((ts, t), (some_thm, _)) = - let - val vars = reserved - |> intro_base_names_for (is_none o const_syntax) - deresolve (t :: ts) - |> intro_vars ((fold o Code_Thingol.fold_varnames) - (insert (op =)) ts []); - val prolog = if needs_typ then - concat [str definer, (str o deresolve_const) const, str ":", print_typ NOBR ty] - else (concat o map str) [definer, deresolve_const const]; - in - concat ( - prolog - :: (if is_pseudo_fun (Constant const) then [str "()"] - else print_dict_args vs - @ map (print_term is_pseudo_fun some_thm vars BR) ts) - @ str "=" - @@ print_term is_pseudo_fun some_thm vars NOBR t - ) - end - val shift = if null eqs then I else - map (Pretty.block o single o Pretty.block o single); - in pair - (print_val_decl print_typscheme (Constant const, vs_ty)) - ((Pretty.block o Pretty.fbreaks o shift) ( - print_eqn definer eq - :: map (print_eqn "|") eqs - )) - end - | print_def is_pseudo_fun _ definer - (ML_Instance (inst as (tyco, class), { vs, superinsts, inst_params, ... 
})) = - let - fun print_super_instance (super_class, x) = - concat [ - (str o Long_Name.base_name o deresolve_classrel) (class, super_class), - str "=", - print_dict is_pseudo_fun NOBR (Dict ([], Dict_Const ((tyco, super_class), x))) - ]; - fun print_classparam_instance ((classparam, (const, _)), (thm, _)) = - concat [ - (str o Long_Name.base_name o deresolve_const) classparam, - str "=", - print_app (K false) (SOME thm) reserved NOBR (const, []) - ]; - in pair - (print_val_decl print_dicttypscheme - (Class_Instance inst, (vs, (class, tyco `%% map (ITyVar o fst) vs)))) - (concat ( - str definer - :: (str o deresolve_inst) inst - :: (if is_pseudo_fun (Class_Instance inst) then [str "()"] - else print_dict_args vs) - @ str "=" - :: enum "," "{" "}" - (map print_super_instance superinsts - @ map print_classparam_instance inst_params) - :: str ":" - @@ print_dicttyp (class, tyco `%% map (ITyVar o fst) vs) - )) - end; - fun print_stmt _ (ML_Exc (const, (vs_ty, n))) = pair - [print_val_decl print_typscheme (Constant const, vs_ty)] - ((semicolon o map str) ( - (if n = 0 then "val" else "fun") - :: deresolve_const const - :: replicate n "_" - @ "=" - :: "raise" - :: "Fail" - @@ print_string_any_ml const - )) - | print_stmt _ (ML_Val binding) = - let - val (sig_p, p) = print_def (K false) true "val" binding - in pair - [sig_p] - (semicolon [p]) - end - | print_stmt _ (ML_Funs ((export, binding) :: exports_bindings, pseudo_funs)) = - let - val print_def' = print_def (member (op =) pseudo_funs) false; - fun print_pseudo_fun sym = concat [ - str "val", - (str o deresolve) sym, - str "=", - (str o deresolve) sym, - str "();" - ]; - val (sig_ps, (ps, p)) = (apsnd split_last o split_list) - (print_def' "fun" binding :: map (print_def' "and" o snd) exports_bindings); - val pseudo_ps = map print_pseudo_fun pseudo_funs; - in pair - (map_filter (fn (export, p) => if Code_Namespace.not_private export then SOME p else NONE) - ((export :: map fst exports_bindings) ~~ sig_ps)) - (Pretty.chunks (ps @ semicolon [p] :: pseudo_ps)) - end - | print_stmt _ (ML_Datas [(tyco, (vs, []))]) = - let - val ty_p = print_tyco_expr (Type_Constructor tyco, map ITyVar vs); - in - pair - [concat [str "type", ty_p]] - (semicolon [str "datatype", ty_p, str "=", str "EMPTY__"]) - end - | print_stmt export (ML_Datas (data :: datas)) = - let - val decl_ps = print_datatype_decl "datatype" data - :: map (print_datatype_decl "and") datas; - val (ps, p) = split_last decl_ps; - in pair - (if Code_Namespace.is_public export - then decl_ps - else map (fn (tyco, (vs, _)) => - concat [str "type", print_tyco_expr (Type_Constructor tyco, map ITyVar vs)]) - (data :: datas)) - (Pretty.chunks (ps @| semicolon [p])) - end - | print_stmt export (ML_Class (class, (v, (classrels, classparams)))) = - let - fun print_field s p = concat [str s, str ":", p]; - fun print_proj s p = semicolon - (map str ["val", s, "=", "#" ^ s, ":"] @| p); - fun print_super_class_decl (classrel as (_, super_class)) = - print_val_decl print_dicttypscheme - (Class_Relation classrel, ([(v, [class])], (super_class, ITyVar v))); - fun print_super_class_field (classrel as (_, super_class)) = - print_field (deresolve_classrel classrel) (print_dicttyp (super_class, ITyVar v)); - fun print_super_class_proj (classrel as (_, super_class)) = - print_proj (deresolve_classrel classrel) - (print_dicttypscheme ([(v, [class])], (super_class, ITyVar v))); - fun print_classparam_decl (classparam, ty) = - print_val_decl print_typscheme - (Constant classparam, ([(v, [class])], ty)); - fun 
print_classparam_field (classparam, ty) = - print_field (deresolve_const classparam) (print_typ NOBR ty); - fun print_classparam_proj (classparam, ty) = - print_proj (deresolve_const classparam) - (print_typscheme ([(v, [class])], ty)); - in pair - (concat [str "type", print_dicttyp (class, ITyVar v)] - :: (if Code_Namespace.is_public export - then map print_super_class_decl classrels - @ map print_classparam_decl classparams - else [])) - (Pretty.chunks ( - concat [ - str "type", - print_dicttyp (class, ITyVar v), - str "=", - enum "," "{" "};" ( - map print_super_class_field classrels - @ map print_classparam_field classparams - ) - ] - :: map print_super_class_proj classrels - @ map print_classparam_proj classparams - )) - end; - in print_stmt end; - -fun print_sml_module name decls body = - Pretty.chunks2 ( - Pretty.chunks [ - str ("structure " ^ name ^ " : sig"), - (indent 2 o Pretty.chunks) decls, - str "end = struct" - ] - :: body - @| str ("end; (*struct " ^ name ^ "*)") - ); - -val literals_sml = Literals { - literal_char = prefix "#" o quote o ML_Syntax.print_char, - literal_string = print_string_any_ml, - literal_numeral = fn k => "(" ^ string_of_int k ^ " : IntInf.int)", - literal_list = enum "," "[" "]", - infix_cons = (7, "::") -}; - - -(** OCaml serializer **) - -fun print_ocaml_stmt tyco_syntax const_syntax reserved is_constr deresolve = - let - val deresolve_const = deresolve o Constant; - val deresolve_class = deresolve o Type_Class; - val deresolve_classrel = deresolve o Class_Relation; - val deresolve_inst = deresolve o Class_Instance; - fun print_tyco_expr (sym, []) = (str o deresolve) sym - | print_tyco_expr (sym, [ty]) = - concat [print_typ BR ty, (str o deresolve) sym] - | print_tyco_expr (sym, tys) = - concat [enum "," "(" ")" (map (print_typ BR) tys), (str o deresolve) sym] - and print_typ fxy (tyco `%% tys) = (case tyco_syntax tyco - of NONE => print_tyco_expr (Type_Constructor tyco, tys) - | SOME (_, print) => print print_typ fxy tys) - | print_typ fxy (ITyVar v) = str ("'" ^ v); - fun print_dicttyp (class, ty) = print_tyco_expr (Type_Class class, [ty]); - fun print_typscheme_prefix (vs, p) = enum " ->" "" "" - (map_filter (fn (v, sort) => - (print_product (fn class => print_dicttyp (class, ITyVar v)) sort)) vs @| p); - fun print_typscheme (vs, ty) = print_typscheme_prefix (vs, print_typ NOBR ty); - fun print_dicttypscheme (vs, class_ty) = print_typscheme_prefix (vs, print_dicttyp class_ty); - val print_classrels = - fold_rev (fn classrel => fn p => Pretty.block [p, str ".", (str o deresolve_classrel) classrel]) - fun print_dict is_pseudo_fun fxy (Dict (classrels, x)) = - print_plain_dict is_pseudo_fun fxy x - |> print_classrels classrels - and print_plain_dict is_pseudo_fun fxy (Dict_Const (inst, dss)) = - brackify BR ((str o deresolve_inst) inst :: - (if is_pseudo_fun (Class_Instance inst) then [str "()"] - else map_filter (print_dicts is_pseudo_fun BR) dss)) - | print_plain_dict is_pseudo_fun fxy (Dict_Var (v, (i, k))) = - str (if k = 1 then "_" ^ Name.enforce_case true v - else "_" ^ Name.enforce_case true v ^ string_of_int (i+1)) - and print_dicts is_pseudo_fun = tuplify (print_dict is_pseudo_fun); - val print_dict_args = map_filter (fn (v, sort) => print_dicts (K false) BR - (map_index (fn (i, _) => Dict ([], Dict_Var (v, (i, length sort)))) sort)); - fun print_term is_pseudo_fun some_thm vars fxy (IConst const) = - print_app is_pseudo_fun some_thm vars fxy (const, []) - | print_term is_pseudo_fun some_thm vars fxy (IVar NONE) = - str "_" - | print_term 
is_pseudo_fun some_thm vars fxy (IVar (SOME v)) = - str (lookup_var vars v) - | print_term is_pseudo_fun some_thm vars fxy (t as t1 `$ t2) = - (case Code_Thingol.unfold_const_app t - of SOME app => print_app is_pseudo_fun some_thm vars fxy app - | NONE => brackify fxy [print_term is_pseudo_fun some_thm vars NOBR t1, - print_term is_pseudo_fun some_thm vars BR t2]) - | print_term is_pseudo_fun some_thm vars fxy (t as _ `|=> _) = - let - val (binds, t') = Code_Thingol.unfold_pat_abs t; - val (ps, vars') = fold_map (print_bind is_pseudo_fun some_thm BR o fst) binds vars; - in brackets (str "fun" :: ps @ str "->" @@ print_term is_pseudo_fun some_thm vars' NOBR t') end - | print_term is_pseudo_fun some_thm vars fxy (ICase case_expr) = - (case Code_Thingol.unfold_const_app (#primitive case_expr) - of SOME (app as ({ sym = Constant const, ... }, _)) => - if is_none (const_syntax const) - then print_case is_pseudo_fun some_thm vars fxy case_expr - else print_app is_pseudo_fun some_thm vars fxy app - | NONE => print_case is_pseudo_fun some_thm vars fxy case_expr) - and print_app_expr is_pseudo_fun some_thm vars (app as ({ sym, dicts = dss, dom = dom, ... }, ts)) = - if is_constr sym then - let val k = length dom in - if length ts = k - then (str o deresolve) sym - :: the_list (tuplify (print_term is_pseudo_fun some_thm vars) BR ts) - else [print_term is_pseudo_fun some_thm vars BR (Code_Thingol.eta_expand k app)] - end - else if is_pseudo_fun sym - then (str o deresolve) sym @@ str "()" - else (str o deresolve) sym :: map_filter (print_dicts is_pseudo_fun BR) dss - @ map (print_term is_pseudo_fun some_thm vars BR) ts - and print_app is_pseudo_fun some_thm vars = gen_print_app (print_app_expr is_pseudo_fun) - (print_term is_pseudo_fun) const_syntax some_thm vars - and print_bind is_pseudo_fun = gen_print_bind (print_term is_pseudo_fun) - and print_case is_pseudo_fun some_thm vars fxy { clauses = [], ... } = - (concat o map str) ["failwith", "\"empty case\""] - | print_case is_pseudo_fun some_thm vars fxy (case_expr as { clauses = [_], ... }) = - let - val (binds, body) = Code_Thingol.unfold_let (ICase case_expr); - fun print_let ((pat, _), t) vars = - vars - |> print_bind is_pseudo_fun some_thm NOBR pat - |>> (fn p => concat - [str "let", p, str "=", print_term is_pseudo_fun some_thm vars NOBR t, str "in"]) - val (ps, vars') = fold_map print_let binds vars; - in - brackets [Pretty.chunks ps, print_term is_pseudo_fun some_thm vars' NOBR body] - end - | print_case is_pseudo_fun some_thm vars fxy { term = t, typ = ty, clauses = clause :: clauses, ... 
} = - let - fun print_select delim (pat, body) = - let - val (p, vars') = print_bind is_pseudo_fun some_thm NOBR pat vars; - in concat [str delim, p, str "->", print_term is_pseudo_fun some_thm vars' NOBR body] end; - in - brackets ( - str "match" - :: print_term is_pseudo_fun some_thm vars NOBR t - :: print_select "with" clause - :: map (print_select "|") clauses - ) - end; - fun print_val_decl print_typscheme (sym, typscheme) = concat - [str "val", str (deresolve sym), str ":", print_typscheme typscheme]; - fun print_datatype_decl definer (tyco, (vs, cos)) = - let - fun print_co ((co, _), []) = str (deresolve_const co) - | print_co ((co, _), tys) = concat [str (deresolve_const co), str "of", - enum " *" "" "" (map (print_typ (INFX (2, X))) tys)]; - in - concat ( - str definer - :: print_tyco_expr (Type_Constructor tyco, map ITyVar vs) - :: str "=" - :: separate (str "|") (map print_co cos) - ) - end; - fun print_def is_pseudo_fun needs_typ definer - (ML_Function (const, (vs_ty as (vs, ty), eqs))) = - let - fun print_eqn ((ts, t), (some_thm, _)) = - let - val vars = reserved - |> intro_base_names_for (is_none o const_syntax) - deresolve (t :: ts) - |> intro_vars ((fold o Code_Thingol.fold_varnames) - (insert (op =)) ts []); - in concat [ - (Pretty.block o commas) - (map (print_term is_pseudo_fun some_thm vars NOBR) ts), - str "->", - print_term is_pseudo_fun some_thm vars NOBR t - ] end; - fun print_eqns is_pseudo [((ts, t), (some_thm, _))] = - let - val vars = reserved - |> intro_base_names_for (is_none o const_syntax) - deresolve (t :: ts) - |> intro_vars ((fold o Code_Thingol.fold_varnames) - (insert (op =)) ts []); - in - concat ( - (if is_pseudo then [str "()"] - else map (print_term is_pseudo_fun some_thm vars BR) ts) - @ str "=" - @@ print_term is_pseudo_fun some_thm vars NOBR t - ) - end - | print_eqns _ ((eq as (([_], _), _)) :: eqs) = - Pretty.block ( - str "=" - :: Pretty.brk 1 - :: str "function" - :: Pretty.brk 1 - :: print_eqn eq - :: maps (append [Pretty.fbrk, str "|", Pretty.brk 1] - o single o print_eqn) eqs - ) - | print_eqns _ (eqs as eq :: eqs') = - let - val vars = reserved - |> intro_base_names_for (is_none o const_syntax) - deresolve (map (snd o fst) eqs) - val dummy_parms = (map str o aux_params vars o map (fst o fst)) eqs; - in - Pretty.block ( - Pretty.breaks dummy_parms - @ Pretty.brk 1 - :: str "=" - :: Pretty.brk 1 - :: str "match" - :: Pretty.brk 1 - :: (Pretty.block o commas) dummy_parms - :: Pretty.brk 1 - :: str "with" - :: Pretty.brk 1 - :: print_eqn eq - :: maps (append [Pretty.fbrk, str "|", Pretty.brk 1] - o single o print_eqn) eqs' - ) - end; - val prolog = if needs_typ then - concat [str definer, (str o deresolve_const) const, str ":", print_typ NOBR ty] - else (concat o map str) [definer, deresolve_const const]; - in pair - (print_val_decl print_typscheme (Constant const, vs_ty)) - (concat ( - prolog - :: print_dict_args vs - @| print_eqns (is_pseudo_fun (Constant const)) eqs - )) - end - | print_def is_pseudo_fun _ definer - (ML_Instance (inst as (tyco, class), { vs, superinsts, inst_params, ... 
})) = - let - fun print_super_instance (super_class, x) = - concat [ - (str o deresolve_classrel) (class, super_class), - str "=", - print_dict is_pseudo_fun NOBR (Dict ([], Dict_Const ((tyco, super_class), x))) - ]; - fun print_classparam_instance ((classparam, (const, _)), (thm, _)) = - concat [ - (str o deresolve_const) classparam, - str "=", - print_app (K false) (SOME thm) reserved NOBR (const, []) - ]; - in pair - (print_val_decl print_dicttypscheme - (Class_Instance inst, (vs, (class, tyco `%% map (ITyVar o fst) vs)))) - (concat ( - str definer - :: (str o deresolve_inst) inst - :: (if is_pseudo_fun (Class_Instance inst) then [str "()"] - else print_dict_args vs) - @ str "=" - @@ brackets [ - enum_default "()" ";" "{" "}" (map print_super_instance superinsts - @ map print_classparam_instance inst_params), - str ":", - print_dicttyp (class, tyco `%% map (ITyVar o fst) vs) - ] - )) - end; - fun print_stmt _ (ML_Exc (const, (vs_ty, n))) = pair - [print_val_decl print_typscheme (Constant const, vs_ty)] - ((doublesemicolon o map str) ( - "let" - :: deresolve_const const - :: replicate n "_" - @ "=" - :: "failwith" - @@ ML_Syntax.print_string const - )) - | print_stmt _ (ML_Val binding) = - let - val (sig_p, p) = print_def (K false) true "let" binding - in pair - [sig_p] - (doublesemicolon [p]) - end - | print_stmt _ (ML_Funs ((export, binding) :: exports_bindings, pseudo_funs)) = - let - val print_def' = print_def (member (op =) pseudo_funs) false; - fun print_pseudo_fun sym = concat [ - str "let", - (str o deresolve) sym, - str "=", - (str o deresolve) sym, - str "();;" - ]; - val (sig_ps, (ps, p)) = (apsnd split_last o split_list) - (print_def' "let rec" binding :: map (print_def' "and" o snd) exports_bindings); - val pseudo_ps = map print_pseudo_fun pseudo_funs; - in pair - (map_filter (fn (export, p) => if Code_Namespace.not_private export then SOME p else NONE) - ((export :: map fst exports_bindings) ~~ sig_ps)) - (Pretty.chunks (ps @ doublesemicolon [p] :: pseudo_ps)) - end - | print_stmt _ (ML_Datas [(tyco, (vs, []))]) = - let - val ty_p = print_tyco_expr (Type_Constructor tyco, map ITyVar vs); - in - pair - [concat [str "type", ty_p]] - (doublesemicolon [str "type", ty_p, str "=", str "EMPTY__"]) - end - | print_stmt export (ML_Datas (data :: datas)) = - let - val decl_ps = print_datatype_decl "type" data - :: map (print_datatype_decl "and") datas; - val (ps, p) = split_last decl_ps; - in pair - (if Code_Namespace.is_public export - then decl_ps - else map (fn (tyco, (vs, _)) => - concat [str "type", print_tyco_expr (Type_Constructor tyco, map ITyVar vs)]) - (data :: datas)) - (Pretty.chunks (ps @| doublesemicolon [p])) - end - | print_stmt export (ML_Class (class, (v, (classrels, classparams)))) = - let - fun print_field s p = concat [str s, str ":", p]; - fun print_super_class_field (classrel as (_, super_class)) = - print_field (deresolve_classrel classrel) (print_dicttyp (super_class, ITyVar v)); - fun print_classparam_decl (classparam, ty) = - print_val_decl print_typscheme - (Constant classparam, ([(v, [class])], ty)); - fun print_classparam_field (classparam, ty) = - print_field (deresolve_const classparam) (print_typ NOBR ty); - val w = "_" ^ Name.enforce_case true v; - fun print_classparam_proj (classparam, _) = - (concat o map str) ["let", deresolve_const classparam, w, "=", - w ^ "." 
^ deresolve_const classparam ^ ";;"]; - val type_decl_p = concat [ - str "type", - print_dicttyp (class, ITyVar v), - str "=", - enum_default "unit" ";" "{" "}" ( - map print_super_class_field classrels - @ map print_classparam_field classparams - ) - ]; - in pair - (if Code_Namespace.is_public export - then type_decl_p :: map print_classparam_decl classparams - else [concat [str "type", print_dicttyp (class, ITyVar v)]]) - (Pretty.chunks ( - doublesemicolon [type_decl_p] - :: map print_classparam_proj classparams - )) - end; - in print_stmt end; - -fun print_ocaml_module name decls body = - Pretty.chunks2 ( - Pretty.chunks [ - str ("module " ^ name ^ " : sig"), - (indent 2 o Pretty.chunks) decls, - str "end = struct" - ] - :: body - @| str ("end;; (*struct " ^ name ^ "*)") - ); - -val literals_ocaml = let - fun chr i = - let - val xs = string_of_int i; - val ys = replicate_string (3 - length (raw_explode xs)) "0"; - in "\\" ^ ys ^ xs end; - fun char_ocaml c = - let - val i = ord c; - val s = if i < 32 orelse i = 34 orelse i = 39 orelse i = 92 orelse i > 126 - then chr i else c - in s end; - fun numeral_ocaml k = if k < 0 - then "(Big_int.minus_big_int " ^ numeral_ocaml (~ k) ^ ")" - else if k <= 1073741823 - then "(Big_int.big_int_of_int " ^ string_of_int k ^ ")" - else "(Big_int.big_int_of_string " ^ quote (string_of_int k) ^ ")" -in Literals { - literal_char = Library.enclose "'" "'" o char_ocaml, - literal_string = quote o translate_string char_ocaml, - literal_numeral = numeral_ocaml, - literal_list = enum ";" "[" "]", - infix_cons = (6, "::") -} end; - - - -(** SML/OCaml generic part **) - -fun ml_program_of_program ctxt module_name reserved identifiers = - let - fun namify_const upper base (nsp_const, nsp_type) = - let - val (base', nsp_const') = Name.variant (Name.enforce_case upper base) nsp_const - in (base', (nsp_const', nsp_type)) end; - fun namify_type base (nsp_const, nsp_type) = - let - val (base', nsp_type') = Name.variant (Name.enforce_case false base) nsp_type - in (base', (nsp_const, nsp_type')) end; - fun namify_stmt (Code_Thingol.Fun _) = namify_const false - | namify_stmt (Code_Thingol.Datatype _) = namify_type - | namify_stmt (Code_Thingol.Datatypecons _) = namify_const true - | namify_stmt (Code_Thingol.Class _) = namify_type - | namify_stmt (Code_Thingol.Classrel _) = namify_const false - | namify_stmt (Code_Thingol.Classparam _) = namify_const false - | namify_stmt (Code_Thingol.Classinst _) = namify_const false; - fun ml_binding_of_stmt (sym as Constant const, (export, Code_Thingol.Fun ((tysm as (vs, ty), raw_eqs), _))) = - let - val eqs = filter (snd o snd) raw_eqs; - val (eqs', some_sym) = if null (filter_out (null o snd) vs) then case eqs - of [(([], t), some_thm)] => if (not o null o fst o Code_Thingol.unfold_fun) ty - then ([(([IVar (SOME "x")], t `$ IVar (SOME "x")), some_thm)], NONE) - else (eqs, SOME (sym, member (op =) (Code_Thingol.add_constsyms t []) sym)) - | _ => (eqs, NONE) - else (eqs, NONE) - in ((export, ML_Function (const, (tysm, eqs'))), some_sym) end - | ml_binding_of_stmt (sym as Class_Instance inst, (export, Code_Thingol.Classinst (stmt as { vs, ... 
}))) = - ((export, ML_Instance (inst, stmt)), - if forall (null o snd) vs then SOME (sym, false) else NONE) - | ml_binding_of_stmt (sym, _) = - error ("Binding block containing illegal statement: " ^ - Code_Symbol.quote ctxt sym) - fun modify_fun (sym, (export, stmt)) = - let - val ((export', binding), some_value_sym) = ml_binding_of_stmt (sym, (export, stmt)); - val ml_stmt = case binding - of ML_Function (const, ((vs, ty), [])) => - ML_Exc (const, ((vs, ty), - (length o filter_out (null o snd)) vs + (length o fst o Code_Thingol.unfold_fun) ty)) - | _ => case some_value_sym - of NONE => ML_Funs ([(export', binding)], []) - | SOME (sym, true) => ML_Funs ([(export, binding)], [sym]) - | SOME (sym, false) => ML_Val binding - in SOME (export, ml_stmt) end; - fun modify_funs stmts = single (SOME - (Code_Namespace.Opaque, ML_Funs (map_split ml_binding_of_stmt stmts |> (apsnd o map_filter o Option.map) fst))) - fun modify_datatypes stmts = - map_filter - (fn (Type_Constructor tyco, (export, Code_Thingol.Datatype stmt)) => SOME (export, (tyco, stmt)) | _ => NONE) stmts - |> split_list - |> apfst Code_Namespace.join_exports - |> apsnd ML_Datas - |> SOME - |> single; - fun modify_class stmts = - the_single (map_filter - (fn (Type_Class class, (export, Code_Thingol.Class stmt)) => SOME (export, (class, stmt)) | _ => NONE) stmts) - |> apsnd ML_Class - |> SOME - |> single; - fun modify_stmts ([stmt as (_, (_, stmt' as Code_Thingol.Fun _))]) = - if Code_Thingol.is_case stmt' then [] else [modify_fun stmt] - | modify_stmts ((stmts as (_, (_, Code_Thingol.Fun _)) :: _)) = - modify_funs (filter_out (Code_Thingol.is_case o snd o snd) stmts) - | modify_stmts ((stmts as (_, (_, Code_Thingol.Datatypecons _)) :: _)) = - modify_datatypes stmts - | modify_stmts ((stmts as (_, (_, Code_Thingol.Datatype _)) :: _)) = - modify_datatypes stmts - | modify_stmts ((stmts as (_, (_, Code_Thingol.Class _)) :: _)) = - modify_class stmts - | modify_stmts ((stmts as (_, (_, Code_Thingol.Classrel _)) :: _)) = - modify_class stmts - | modify_stmts ((stmts as (_, (_, Code_Thingol.Classparam _)) :: _)) = - modify_class stmts - | modify_stmts ([stmt as (_, (_, Code_Thingol.Classinst _))]) = - [modify_fun stmt] - | modify_stmts ((stmts as (_, (_, Code_Thingol.Classinst _)) :: _)) = - modify_funs stmts - | modify_stmts stmts = error ("Illegal mutual dependencies: " ^ - (Library.commas o map (Code_Symbol.quote ctxt o fst)) stmts); - in - Code_Namespace.hierarchical_program ctxt { - module_name = module_name, reserved = reserved, identifiers = identifiers, - empty_nsp = (reserved, reserved), namify_module = pair, namify_stmt = namify_stmt, - cyclic_modules = false, class_transitive = true, - class_relation_public = true, empty_data = (), - memorize_data = K I, modify_stmts = modify_stmts } - end; - -fun serialize_ml print_ml_module print_ml_stmt ctxt - { module_name, reserved_syms, identifiers, includes, - class_syntax, tyco_syntax, const_syntax } exports program = - let - - (* build program *) - val { deresolver, hierarchical_program = ml_program } = - ml_program_of_program ctxt module_name (Name.make_context reserved_syms) - identifiers exports program; - - (* print statements *) - fun print_stmt prefix_fragments (_, (export, stmt)) = print_ml_stmt - tyco_syntax const_syntax (make_vars reserved_syms) - (Code_Thingol.is_constr program) (deresolver prefix_fragments) export stmt - |> apfst (fn decl => if Code_Namespace.not_private export then SOME decl else NONE); - - (* print modules *) - fun print_module _ base _ xs = - let - val 
(raw_decls, body) = split_list xs; - val decls = maps these raw_decls - in (NONE, print_ml_module base decls body) end; - - (* serialization *) - val p = Pretty.chunks2 (map snd includes - @ map snd (Code_Namespace.print_hierarchical { - print_module = print_module, print_stmt = print_stmt, - lift_markup = apsnd } ml_program)); - fun write width NONE = writeln o format [] width - | write width (SOME p) = File.write p o format [] width; - fun prepare syms width p = ([("", format syms width p)], try (deresolver [])); - in - Code_Target.serialization write prepare p - end; - -val serializer_sml : Code_Target.serializer = - Code_Target.parse_args (Scan.succeed ()) #> K (serialize_ml print_sml_module print_sml_stmt); - -val serializer_ocaml : Code_Target.serializer = - Code_Target.parse_args (Scan.succeed ()) #> K (serialize_ml print_ocaml_module print_ocaml_stmt); - - -(** Isar setup **) - -fun fun_syntax print_typ fxy [ty1, ty2] = - brackify_infix (1, R) fxy ( - print_typ (INFX (1, X)) ty1, - str "->", - print_typ (INFX (1, R)) ty2 - ); - -(* -val _ = Theory.setup - (Code_Target.add_language - (target_SML, { serializer = serializer_sml, literals = literals_sml, - check = { env_var = "ISABELLE_PROCESS", - make_destination = fn p => Path.append p (Path.explode "ROOT.ML"), - make_command = fn _ => - "\"$ISABELLE_PROCESS\" -r -q -e 'datatype ref = datatype Unsynchronized.ref; use \"ROOT.ML\" handle _ => exit 1' Pure" } }) - #> Code_Target.add_language - (target_OCaml, { serializer = serializer_ocaml, literals = literals_ocaml, - check = { env_var = "ISABELLE_OCAML", - make_destination = fn p => Path.append p (Path.explode "ROOT.ocaml"), - make_command = fn _ => "\"$ISABELLE_OCAML\" -w pu nums.cma ROOT.ocaml" } }) - #> Code_Target.set_printings (Type_Constructor ("fun", - [(target_SML, SOME (2, fun_syntax)), (target_OCaml, SOME (2, fun_syntax))])) - #> fold (Code_Target.add_reserved target_SML) ML_Syntax.reserved_names - #> fold (Code_Target.add_reserved target_SML) - ["ref" (*rebinding is illegal*), "o" (*dictionary projections use it already*), - "Fail", "div", "mod" (*standard infixes*), "IntInf"] - #> fold (Code_Target.add_reserved target_OCaml) [ - "and", "as", "assert", "begin", "class", - "constraint", "do", "done", "downto", "else", "end", "exception", - "external", "false", "for", "fun", "function", "functor", "if", - "in", "include", "inherit", "initializer", "lazy", "let", "match", "method", - "module", "mutable", "new", "object", "of", "open", "or", "private", "rec", - "sig", "struct", "then", "to", "true", "try", "type", "val", - "virtual", "when", "while", "with" - ] - #> fold (Code_Target.add_reserved target_OCaml) ["failwith", "mod", "Big_int"]); -*) -end; (*struct*) diff --git a/src/test-gen/src/main/codegen_gdb/Code_gdb_script.thy b/src/test-gen/src/main/codegen_gdb/Code_gdb_script.thy deleted file mode 100644 index 115354a..0000000 --- a/src/test-gen/src/main/codegen_gdb/Code_gdb_script.thy +++ /dev/null @@ -1,322 +0,0 @@ -theory Code_gdb_script -imports Main "../TestLib" -(*keywords "gen_gdb_script" :: "qed_global"*) -begin - -datatype gdb_comand = - break string gdb_comand - | commands gdb_comand - | silent gdb_comand - | continue gdb_comand - | thread gdb_comand - | "end" gdb_comand - | sharp string - |start - -datatype gdb_option = - logging gdb_option - |on - |off - |pagination gdb_option - |"file" string - |print gdb_option - - - -subsection {*writing on file using Isabelle/ML*} -ML{* - val file_path_try = 
"../../add-ons/OS-IFP-test/OS_kernel_model/IPC/example_gdb_impl/c/yakoub.gdb" - |> Path.explode - |> Path.append (Resources.master_directory @{theory }); - val file_check = file_path_try |> File.exists; - (*val file_write = File.write file_path_office "#yakoub";*) - -*} - -(*Generation of a set of gdb files*) - -ML{* - fun writeFiles _ _ [] = [] - | writeFiles filePath fileExtension (gdb_script :: gdb_script_list) = - ([filePath] @ [(gdb_script :: gdb_script_list) |> length |> Int.toString] @ - [fileExtension] |> String.concat |> Path.explode |> File.write_list) gdb_script:: - writeFiles filePath fileExtension gdb_script_list; - *} -(*master parth*) -ML{* (*Thy_Load.master_directory*) - Resources.master_directory @{theory}; - *} -ML {*Resources.master_directory @{theory}; - fun masterPath_add theory Path = Path - |> Path.explode - |> Path.append (Resources.master_directory theory) - |> Path.implode; - *} - -subsection {*Printing a list of terms in column using Pretty*} -ML{* - fun pretty_terms' context terms = terms |> (Syntax.pretty_term context - |> List.map) - |> Pretty.chunks; - - Pretty.writeln (pretty_terms' @{context} [@{term "2::int"}, @{term "2::int"}]); - *} - -subsection {*Going from a list of terms to ASCII string*} -ML {*(*fun render_thm ctxt thm = - Print_Mode.setmp ["xsymbols"] - (fn _ => Display.pretty_thm ctxt thm - |> Pretty.str_of - |> YXML.parse_body - |> XML.content_of) (); - render_thm @{context} @{thm "conjI"};*) - fun render_term ctxt term = - Print_Mode.setmp ["xsymbols"] - (fn _ => Syntax.pretty_term ctxt term - |> Pretty.string_of - |> YXML.parse_body - |> XML.content_of) (); - - render_term @{context} @{term "1::int"}; - - fun render_term_list ctxt term = - Print_Mode.setmp ["xsymbols"] - (fn _ => pretty_terms' ctxt term - |> Pretty.string_of - |> YXML.parse_body - |> XML.content_of) (); - render_term_list @{context} [@{term "1::int"}, @{term "1::int"}]; -*} - -subsection {*GDB terms script to control scheduler*} - -ML {*val gdb_header = - @{term "''#setting gdb options''"} $ @{term "''{''"} $ - @{term "set"} $ @{term "logging (file ''Example_sequential.log'')"} $ @{term "''{''"} $ - @{term "set"} $ @{term "logging on"} $ @{term "''{''"} $ - @{term "set"} $ @{term "pagination off"} $ @{term "''{''"} $ - @{term "set ''target-async''"} $ @{term " on"} $ @{term "''{''"} $ - @{term "set ''non-stop''"} $ @{term " on"} $ @{term "''{''"} $ - @{term "set ''print thread-events off''"} $ @{term "''{''"} $ @{term "''{''"} - ; - - fun gdb_break_point_entry fun_nam_term thread_id_term = - @{term "''#setting thread entry''"} $ @{term "''{''"} $ - @{term "break"} $ fun_nam_term $ @{term "''{''"} $ - @{term "commands"} $ @{term "''{''"} $ - @{term "silent"} $ @{term "''{''"} $ - @{term "thread"} $ thread_id_term $ @{term "''{''"} $ - @{term "continue"} $ @{term "''{''"} $ - @{term "end"} $ @{term "''{''"} $ @{term "''{''"}; - - - fun gdb_break_point_exist line_number_term thread_id_term = - @{term "''#setting thread exit''"} $ @{term "''{''"} $ - @{term "break"} $ line_number_term $ @{term "''{''"} $ - @{term "commands"} $ @{term "''{''"} $ - @{term "silent"} $ @{term "''{''"} $ - @{term "thread"} $ thread_id_term $ @{term "''{''"} $ - @{term "continue"} $ @{term "''{''"} $ - @{term "end"} $ @{term "''{''"} $ @{term "''{''"}; - - fun gdb_break_main_entry fun_nam_term = - @{term "''#setting main thread entry''"} $ @{term "''{''"} $ - @{term "break"} $ fun_nam_term $ @{term "''{''"} $ - @{term "commands"} $ @{term "''{''"} $ - @{term "silent"} $ @{term "''{''"} $ - @{term "set"} 
$ @{term "''scheduler-locking''"} $ @{term " on"} $ @{term "''{''"} $ - @{term "continue"} $ @{term "''{''"} $ - @{term "end"} $ @{term "''{''"} $ @{term "''{''"}; - - fun gdb_break_main_exit line_number_term thread_id_term = - @{term "''#wait for thread creation''"} $ @{term "''{''"} $ - @{term "break"} $ line_number_term $ @{term "''{''"} $ - @{term "commands"} $ @{term "''{''"} $ - @{term "silent"} $ @{term "''{''"} $ - @{term "thread"} $ thread_id_term $ @{term "''{''"} $ - @{term "continue"} $ @{term "''{''"} $ - @{term "end"} $ @{term "''{''"} $ @{term "''{''"}; - - val gdb_start_term = @{term "start"} $ @{term "''{''"}; - - val gdb_endFile = @{term "''#endFile''"} - -*} - -ML {* gdb_header*} - -subsection {*removing quotes and parentheses from ASCII string*} -ML {* fun remove_char nil = [] - | remove_char (x::xs) = (if ((x = #"(" orelse x = #")") orelse x = #"'") - then remove_char xs - else x::remove_char xs); - *} - -subsection {*Jump to the next line*} - -ML {* fun next_line nil = [] - | next_line (x::xs) = (if x = #"{" - then next_line (#"\n"::xs) - else x::next_line xs); - *} - -subsection {*Going from a simple list to a list of terms*} - -ML {*render_term_list @{context} [@{term " ''{''"}]*} - -subsection {*Terms constructors and scheme destructors*} - -ML{* - fun thm_to_term thm = thm - |> Thm.concl_of |> HOLogic.dest_Trueprop; - fun thms_to_terms thms = thms - |> (thm_to_term |> map); - - fun dest_valid_SE_term terms = terms |> ((fn term => case term of - ((Const(@{const_name "valid_SE"},_) $ _) - $(Const(@{const_name "bind_SE"},_) $ T $ _)) => T - | _ => term) - |> map); - - fun dest_mbind_term terms = terms |> ((fn term => case term of - Const (@{const_name "mbind"}, _) - $ LIST $ _ => LIST - |_ => term ) - |> map); - - fun dest_mbind_term' terms = terms |> ((fn term => case term of - Const (@{const_name "mbind'"}, _) - $ LIST $ _ => LIST - |_ => term ) - |> map); - - fun dest_List_term terms = terms |> ((fn term => HOLogic.dest_list term) |> map); - - *} - -subsection {*From a test thm to terms of input sequences*} - -ML {*fun thm_to_inputSeqTerms test_facts = - test_facts - |> thms_to_terms |> dest_valid_SE_term - |> dest_mbind_term |> dest_List_term; - - fun thm_to_inputSeqTerms' test_facts = - test_facts - |> thms_to_terms |> dest_valid_SE_term - |> dest_mbind_term' |> dest_List_term; - *} -subsection {*from input seuquences to strings*} - -ML {* fun inputSeq_to_gdbStrings actTerm_to_gdbTerm inputSeqTerms = - inputSeqTerms - |> ((fn terms => [gdb_header] - @(terms |> (actTerm_to_gdbTerm |> map)) - @[gdb_start_term] - |> (render_term @{context} |> map)) - |> map); - - fun - breakpoint_setup (term::terms) = - ((term::terms) |> length) :: (terms |> breakpoint_setup) ; - - *} - -ML {*open List*} -ML {*open HOLogic;*} -subsection {*from sequeces of strings to a gdb script*} - -ML {* fun gdbStrings_to_gdbScripts gdbStrings = - gdbStrings - |> ((fn strings => strings - |> (String.implode o next_line o - remove_char o String.explode |> map)) - |> map); - *} - - -subsection{*concat terms*} -ML {* -fun add_entry_exist_terms [] [] = [] - | add_entry_exist_terms terms [] = terms - | add_entry_exist_terms [] terms = terms - | add_entry_exist_terms (term :: terms) (term'::terms') = - term $ term':: add_entry_exist_terms terms terms'; - - fun add_entry_exist_termsS [] [] = [] - | add_entry_exist_termsS termsS [] = termsS - | add_entry_exist_termsS [] termsS = termsS - | add_entry_exist_termsS (terms :: termsS) (terms'::termsS') = - add_entry_exist_terms terms 
terms'::add_entry_exist_termsS termsS termsS'; - - fun add_entry_exist_termsS' [] [] = [] - | add_entry_exist_termsS' termsS [] = termsS - | add_entry_exist_termsS' [] termsS = termsS - | add_entry_exist_termsS' (terms :: termsS) (terms'::termsS') = - (terms @ terms')::add_entry_exist_termsS' termsS termsS'; - -*} - -subsection {*from thms to gdb scripts*} - -ML {* -fun thms_to_gdbScripts inputSeq_to_gdbEn inputSeq_to_gdbEx inputSeq_to_gdbMain infos thms = - thms - |> thm_to_inputSeqTerms - |> ((fn terms => inputSeq_to_gdbMain infos terms) |> map) - |> add_entry_exist_termsS' - (thms |> thm_to_inputSeqTerms |> ((fn terms => inputSeq_to_gdbEx infos terms)|> map)) - |> add_entry_exist_termsS - (thms |> thm_to_inputSeqTerms |> ((fn terms => inputSeq_to_gdbEn infos terms)|> map)) - |> inputSeq_to_gdbStrings (fn term => term) - |> gdbStrings_to_gdbScripts; - -fun thms_to_gdbScripts' inputSeq_to_gdbEn inputSeq_to_gdbEx inputSeq_to_gdbMain infos thms = - thms - |> thm_to_inputSeqTerms' - |> ((fn terms => inputSeq_to_gdbMain infos terms) |> map) - |> add_entry_exist_termsS' - (thms |> thm_to_inputSeqTerms' |> ((fn terms => inputSeq_to_gdbEx infos terms)|> map)) - |> add_entry_exist_termsS - (thms |> thm_to_inputSeqTerms' |> ((fn terms => inputSeq_to_gdbEn infos terms)|> map)) - |> inputSeq_to_gdbStrings (fn term => term) - |> gdbStrings_to_gdbScripts; - -*} - - - -subsection {*isa markup*} - -ML {* - - fun gen_gdb_scripts - inputSeq_to_gdbEn inputSeq_to_gdbEx inputSeq_to_gdbMain infos theory path thms = - thms - |> thms_to_gdbScripts inputSeq_to_gdbEn inputSeq_to_gdbEx inputSeq_to_gdbMain infos - |> writeFiles (path |> masterPath_add theory) ".gdb"; - - - (*For mbind'*) - fun gen_gdb_scripts' - inputSeq_to_gdbEn inputSeq_to_gdbEx inputSeq_to_gdbMain infos theory path thms = - thms - |> thms_to_gdbScripts' inputSeq_to_gdbEn inputSeq_to_gdbEx inputSeq_to_gdbMain infos - |> writeFiles (path |> masterPath_add theory) ".gdb"; - - - (* val _ = Outer_Syntax.command - @{command_spec "gen_gdb_script"} - "store test state (theorem)" - ;*) - - (*For mbind*) - - (*val gen_gdb_script = @{thms mykeos_simple.test_data} - |> thm_to_inputSeqTerms - |> inputSeq_to_gdbStrings actTerm_to_gdbTerm - |> gdbStrings_to_gdbScripts*) - -*} - -end diff --git a/src/test-gen/src/main/config.sml b/src/test-gen/src/main/config.sml deleted file mode 100644 index 9e7fa19..0000000 --- a/src/test-gen/src/main/config.sml +++ /dev/null @@ -1,93 +0,0 @@ -(***************************************************************************** - * W A R N I N G - * - * Information in this file will be updated by the packaging process, - * respectively the owner of the build process. - * Please, DO NOT EDIT THIS FILE MANUALLY. - ******************************************************************************) - -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * config.sml --- main configuration file for HOL-TestGen - * This file is part of HOL-TestGen. - * - * Copyright (c) 2005-2007 ETH Zurich, Switzerland - * 2013 Achim D. Brucker, Germany - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
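(* Illustration only (not from the original sources): how the post-processing in
   Code_gdb_script.thy above turns a rendered term string into gdb syntax.
   remove_char and next_line are the functions defined in that theory; the input
   string is an invented example. *)
val rendered = "break ''main'' ''{'' commands ''{''";
val cleaned  = String.implode (remove_char (String.explode rendered));
(* cleaned = "break main { commands {"  -- quotes and parentheses stripped *)
val script   = String.implode (next_line (String.explode cleaned));
(* script  = "break main \n commands \n"  -- each brace marker becomes a line break *)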
- * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ******************************************************************************) - - -(** Major version number. - * - * Modify when incompatible changes are made to published interfaces. - *) -val testgen_ver_major = 1 - -(* Minor version number. - * - * Modify when new functionality is added or new interfaces are defined, - * but all changes are backward compatible. - *) -val testgen_ver_minor = 9 - -(** Patch number. - * - * Modify for every released patch. - *) -val testgen_ver_micro= 1 - -(** Version tag: a string describing the version. - * - * This tag remains " (dev build: )" in the repository so that we can - * always see from "version" that the software has been built from the - * repository rather than a "blessed" distribution. - * - * When rolling a tarball, we automatically replace this text with - * " (build: " for final releases; in prereleases, it becomes - * " (Alpha: )", " (Beta )", etc., as appropriate. - *) -val testgen_ver_tag = " (development build)" - -(** Supported Isabelle version. - * - * If build with a different version, a warning during the build - * process is displayed. - *) - -val isabelle_version = "Isabelle2016-1: December 2016" - -(** URL of the HOL-TestGen Homepage. - * - *) -val testgen_url = "http://www.brucker.ch/projects/hol-testgen/" diff --git a/src/test-gen/src/main/debug/profiling_begin.thy b/src/test-gen/src/main/debug/profiling_begin.thy deleted file mode 100644 index b002a26..0000000 --- a/src/test-gen/src/main/debug/profiling_begin.thy +++ /dev/null @@ -1,49 +0,0 @@ -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * profiling_begin.thy --- workaround for profiling - * This file is part of HOL-TestGen. - * - * Copyright (c) 2005-2007, ETH Zurich, Switzerland - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
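(* The version numbers and tag above are presumably combined into the string
   testgen_version used by log.thy later in this patch; a minimal sketch of that
   composition, with the exact format being an assumption: *)
val testgen_version =
  Int.toString testgen_ver_major ^ "." ^
  Int.toString testgen_ver_minor ^ "." ^
  Int.toString testgen_ver_micro ^ testgen_ver_tag;
(* e.g. "1.9.1 (development build)" *)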
- * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ******************************************************************************) - -theory - profiling_begin -imports - Testing -begin - -declare [[testgen_profiling]] - -end diff --git a/src/test-gen/src/main/debug/profiling_end.thy b/src/test-gen/src/main/debug/profiling_end.thy deleted file mode 100644 index a48e5b3..0000000 --- a/src/test-gen/src/main/debug/profiling_end.thy +++ /dev/null @@ -1,51 +0,0 @@ -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * profiling_end.thy --- workaround for profiling - * This file is part of HOL-TestGen. - * - * Copyright (c) 2005-2007, ETH Zurich, Switzerland - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ******************************************************************************) - -theory - profiling_end -imports - max_test List_test List_Verified_test Triangle_test AVL_test RBT_test Sequence_test -begin - -write_clocks "document/time_stats.tex" - -declare [[testgen_profiling = false]] - -end \ No newline at end of file diff --git a/src/test-gen/src/main/isar_setup.ML b/src/test-gen/src/main/isar_setup.ML deleted file mode 100644 index 4cef168..0000000 --- a/src/test-gen/src/main/isar_setup.ML +++ /dev/null @@ -1,178 +0,0 @@ -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * isar_setup.sml --- Isar setup for HOL-TestGen - * This file is part of HOL-TestGen. - * - * Copyright (c) 2005-2007 ETH Zurich, Switzerland - * 2008-2013 Achim D. Brucker, Germany - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
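(* The isar_setup.ML code below drives the profiling clocks; a standalone sketch of
   that protocol, assuming the Clocks signature implied by the call sites in this
   patch (init_clocks/start_clock/stop_clock used for their effect,
   flush_clocks : theory -> theory). *)
fun profiled_phase name thy =
  let
    val _ = Clocks.init_clocks thy;
    val _ = Clocks.start_clock name;
    (* ... run the measured test-generation phase here ... *)
    val _ = Clocks.stop_clock name;
  in Clocks.flush_clocks thy end;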
- ******************************************************************************) -(* $Id: isar_setup.ML 9653 2013-04-17 23:17:08Z abderrahmane.feliachi $ *) - -(* at the moment, calling store_test_thm without the optional argument works only - in an ProofGeneral session because otherweise the call of Toplevel.pretty_state - results in an empty list \ -*) - -fun store_test_thm name = Toplevel.end_proof (fn _ => fn state => - let - val ctxt = Proof.context_of state; - val _ = if Config.get ctxt TestGen.profiling then - (Clocks.start_clock "unnamed_test_thm"; - Clocks.rename_clock "unnamed_test_thm" name; - Clocks.stop_clock name) - else - () - - val _ = Proof.assert_backward state; - val {context = goal_ctxt, goal, ...} = Proof.goal state - fun up_thy thy = - let - fun add_to_dm name thm thy = - let - val thy = TestEnv.map_data_global (TestEnv.add_test_case (name,thm)) thy - val t = LogThy.get_tc_delta () - val num_of_tc = List.length - (List.filter (fn tc => TestGen.is_test tc) (prems_of thm)) - val _ = writeln (String.concat ["Test theorem (gen_test_cases) '",name,"': ", - Int.toString num_of_tc, - " test cases in ",Time.toString t, " seconds"]) - - val _ = LogThy.append (String.concat [Context.theory_name thy, ", " - ,name, ", " - ,"test case, " - ,Int.toString num_of_tc,", " - , Time.toString t,"\n"]) - in - thy - end - in - thy |> Sign.add_path (space_implode "_" [name]) - |> (Global_Theory.add_thms [((@{binding test_thm}, goal), [])]) - |> snd - |> Sign.parent_path - |> add_to_dm (space_implode "_" [name]) goal - |> Clocks.flush_clocks - end - - in goal_ctxt |> Local_Theory.background_theory up_thy end); - -(*val _ = - Outer_Syntax.command ("store_test_thm", (Keyword.tag_proof Keyword.qed)) "store test state (theorem)" - (Parse.name >> store_test_thm);*) - -val _ = - Outer_Syntax.command @{command_spec "store_test_thm"} "store test state (theorem)" - (Parse.name >> store_test_thm); - - - - - - -(**********************) -fun gen_test_dataT name thy = - let - fun gen_test_data name = - let - val _ = LogThy.start_td_timer () - val _ = if Config.get_global thy TestGen.profiling then - (Clocks.init_clocks thy; - Clocks.start_clock name; - Clocks.start_clock "gen_test_data") - else - () - - val thy = Context.theory_map (TestGen.gen_test_data name) thy - val t = LogThy.get_td_delta () - val thy = if Config.get_global thy TestGen.profiling then - (Clocks.stop_clock "gen_test_data"; - Clocks.stop_clock name; - Clocks.flush_clocks thy) - else - thy - - val data = TestGen.get_test_data thy name - val hyps = TestGen.get_test_hyps thy name - val pos = TestGen.get_pos thy name - val thy = Sign.add_path (space_implode "_" [name]) thy; - val thy = snd(Global_Theory.add_thmss [((@{binding test_hyps},hyps),[])] (thy)); - val thy = snd(Global_Theory.add_thmss [((@{binding test_data},data),[])] (thy)) - val thy = snd(Global_Theory.add_thmss [((@{binding pos},pos),[])] (thy)) - val _ = writeln (String.concat ["Test theorem (gen_test_data) '",name,"': ", - Int.toString (List.length data), - " test cases in ",Time.toString t, " seconds"]) - val _ = if not (null pos) then - writeln ("Warning: There were unsolved POs.") - else - () - - val _ = LogThy.append (String.concat [Context.theory_name thy, ", " - ,name, ", " - ,"test data, " - ,Int.toString (List.length data),", " - ,Time.toString t,"\n"]) - - val thy = Sign.parent_path thy; - in - thy - end - val thy = gen_test_data name - in - thy - end - -(*val _ = - Outer_Syntax.command ("gen_test_data", Keyword.thy_script) "generate test data" - (Parse.name >> 
(Toplevel.theory o gen_test_dataT));*) - -val _ = - Outer_Syntax.command @{command_spec "gen_test_data"} "generate test data" - (Parse.name >> (Toplevel.theory o gen_test_dataT)); - -(**********************) - -val _ = - (* Outer_Syntax.local_theory_to_proof ("test_spec", Keyword.thy_schematic_goal) "define test specification"*) - Outer_Syntax.local_theory_to_proof @{command_spec "test_spec"} "define test specification" - (Scan.optional (Parse_Spec.opt_thm_name ":" --| - Scan.ahead (Parse_Spec.includes >> K "" || - Parse_Spec.locale_keyword || Parse_Spec.statement_keyword)) Attrib.empty_binding -- - Scan.optional Parse_Spec.includes [] -- - Parse_Spec.general_statement >> (fn ((a, includes), (elems, concl)) => fn lthy => - let val _ = if Config.get lthy TestGen.profiling then Clocks.init_clocks (Proof_Context.theory_of lthy) else () - in - Specification.schematic_theorem_cmd "test_spec" NONE (K I) a includes elems concl false lthy - end)); - diff --git a/src/test-gen/src/main/log.thy b/src/test-gen/src/main/log.thy deleted file mode 100644 index dab6bd7..0000000 --- a/src/test-gen/src/main/log.thy +++ /dev/null @@ -1,132 +0,0 @@ -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * log_thy.thy --- Simple Logging Framework for HOL-TestGen - * This file is part of HOL-TestGen. - * - * Copyright (c) 2005-2009 ETH Zurich, Switzerland - * 2009-2013 Achim D. Brucker, Germany - * 2010-2013 Université Paris-Sud, France - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ******************************************************************************) -(* $Id: log.thy 11032 2015-01-04 10:02:45Z wolff $ *) - -theory - log -imports - version -begin - -text{* Attempts/Elements for a reform: replace the ref's by a proper Isabelle state- -management. 
by bu*} -ML {* val tc_timer = Attrib.setup_config_int @{binding tc_timer} (K 0) *} -ML_val {* @{assert} (Config.get @{context} tc_timer = 0) *} -ML{* Config.put tc_timer 4 @{context} *} -ML{* - -type timer_config = {tc_timer : Time.time, - spec_time : Time.time, - td_time : Time.time, - log_file : string} - -(* val tc_timer_raw = Config.declare_option "tc_timer"; -ROOT.ML:val quick_and_dirty = Config.bool quick_and_dirty_raw; -goal.ML: if Config.get ctxt quick_and_dirty then *) -*} - -ML{* open Config ; Int 3; *} -(* Conclusion - bu: must be done with a global functor instatiation on - timer_config. I temporarily leave the ref's here in order not - to break to many interfaces ... bu -*) - -text{* Back to the original ...*} - -ML {* -structure LogThy = -struct - - -val tc_timer = Unsynchronized.ref (Timer.startRealTimer ()) -val spec_time = Unsynchronized.ref (Timer.checkRealTimer (!tc_timer)) - -val td_time = Unsynchronized.ref (Timer.checkRealTimer (!tc_timer)) - -val log_file = Unsynchronized.ref ""; - -fun start () = (spec_time := Timer.checkRealTimer (!tc_timer)) - -fun get_tc_delta () = Time.-(Timer.checkRealTimer (!tc_timer),!spec_time) -fun get_td_delta () = Time.-(Timer.checkRealTimer (!tc_timer),!td_time) - -fun start_td_timer () = (td_time := Timer.checkRealTimer (!tc_timer)) - - -fun set_log_file ctxt n = let - val _ = if Config.get ctxt quick_and_dirty - then () - else ((log_file := n);()) - val today = (Date.toString(Date.fromTimeUniv (Time.now())))^" (UTC)"; - val hostname = the_default "hostname not set" (OS.Process.getEnv "HOSTNAME"); -in - if (!log_file) = "" - then () - else - File.write (Path.explode (!log_file)) - ( "# This file was generated automatically \n" - ^"# by HOL-TestGen "^testgen_version^"\n" - ^"# on "^today^"\n" - ^"# Host: "^hostname^"\n" - ^"# \n" - ^"# theory, test case name, type, num. of tests cases/data, time in seconds\n") -end - -fun append s = if (!log_file) = "" then () else File.append (Path.explode (!log_file)) s - - - -fun reset_log_file ctxt = set_log_file ctxt "" - -fun log_thy ctxt thy = -let - val _ = set_log_file ctxt (thy^".csv") - val _ = use_thy thy - val _ = reset_log_file ctxt -in () end; - -end - -val log_thy = LogThy.log_thy - -*} -end diff --git a/src/test-gen/src/main/new_smt_patch/smt_config_patch.ML b/src/test-gen/src/main/new_smt_patch/smt_config_patch.ML deleted file mode 100644 index f994e2f..0000000 --- a/src/test-gen/src/main/new_smt_patch/smt_config_patch.ML +++ /dev/null @@ -1,265 +0,0 @@ -(* Title: HOL/Tools/SMT/smt_config.ML - Author: Sascha Boehme, TU Muenchen - -Configuration options and diagnostic tools for SMT. 
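(* Illustrative use of the LogThy interface defined in log.thy above; the file and
   test names are invented.  Each appended row follows the header written by
   set_log_file: theory, test case name, type, number of test cases/data, seconds. *)
val _ = LogThy.set_log_file @{context} "Bank_test.csv";
val _ = LogThy.append "Bank_test, deposit_test, test case, 12, 0.734\n";
val _ = LogThy.reset_log_file @{context};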
-*) - -signature SMT_CONFIG = -sig - (*solver*) - type solver_info = { - name: string, - class: Proof.context -> SMT_Util.class, - avail: unit -> bool, - options: Proof.context -> string list } - val add_solver: solver_info -> Context.generic -> Context.generic - val set_solver_options: string * string -> Context.generic -> Context.generic - val is_available: Proof.context -> string -> bool - val available_solvers_of: Proof.context -> string list - val select_solver: string -> Context.generic -> Context.generic - val solver_of: Proof.context -> string - val solver_class_of: Proof.context -> SMT_Util.class - val solver_options_of: Proof.context -> string list - - (*options*) - val oracle: bool Config.T - val timeout: real Config.T - val reconstruction_step_timeout: real Config.T - val random_seed: int Config.T - val read_only_certificates: bool Config.T - val verbose: bool Config.T - val trace: bool Config.T - val statistics: bool Config.T - val monomorph_limit: int Config.T - val monomorph_instances: int Config.T - val infer_triggers: bool Config.T - val debug_files: string Config.T - val sat_solver: string Config.T - - (*tools*) - val with_time_limit: Proof.context -> real Config.T -> ('a -> 'b) -> 'a -> 'b - val with_timeout: Proof.context -> ('a -> 'b) -> 'a -> 'b - - (*diagnostics*) - val trace_msg: Proof.context -> ('a -> string) -> 'a -> unit - val verbose_msg: Proof.context -> ('a -> string) -> 'a -> unit - val statistics_msg: Proof.context -> ('a -> string) -> 'a -> unit - - (*certificates*) - val select_certificates: string -> Context.generic -> Context.generic - val certificates_of: Proof.context -> Cache_IO.cache option - - (*setup*) - val print_setup: Proof.context -> unit -end; - -structure SMT_Config: SMT_CONFIG = -struct - -(* solver *) - -type solver_info = { - name: string, - class: Proof.context -> SMT_Util.class, - avail: unit -> bool, - options: Proof.context -> string list} - -type data = { - solvers: (solver_info * string list) Symtab.table, - solver: string option, - certs: Cache_IO.cache option} - -fun mk_data solvers solver certs: data = {solvers=solvers, solver=solver, certs=certs} - -val empty_data = mk_data Symtab.empty NONE NONE - -fun solvers_of ({solvers, ...}: data) = solvers -fun solver_of ({solver, ...}: data) = solver -fun certs_of ({certs, ...}: data) = certs - -fun map_solvers f ({solvers, solver, certs}: data) = mk_data (f solvers) solver certs -fun map_solver f ({solvers, solver, certs}: data) = mk_data solvers (f solver) certs -fun put_certs c ({solvers, solver, ...}: data) = mk_data solvers solver c - -fun merge_data ({solvers=ss1,solver=s1,certs=c1}: data, {solvers=ss2,solver=s2,certs=c2}: data) = - mk_data (Symtab.merge (K true) (ss1, ss2)) (merge_options (s1, s2)) (merge_options (c1, c2)) - -structure Data = Generic_Data -( - type T = data - val empty = empty_data - val extend = I - val merge = merge_data -) - -fun set_solver_options (name, options) = - let val opts = String.tokens (Symbol.is_ascii_blank o str) options - in Data.map (map_solvers (Symtab.map_entry name (apsnd (K opts)))) end - -fun add_solver (info as {name, ...} : solver_info) context = - if Symtab.defined (solvers_of (Data.get context)) name then - error ("Solver already registered: " ^ quote name) - else - context - |> Data.map (map_solvers (Symtab.update (name, (info, [])))) - |> Context.map_theory (Attrib.setup (Binding.name (name ^ "_options")) - (Scan.lift (@{keyword "="} |-- Args.name) >> - (Thm.declaration_attribute o K o set_solver_options o pair name)) - ("additional 
command line options for SMT solver " ^ quote name)) - -fun all_solvers_of ctxt = Symtab.keys (solvers_of (Data.get (Context.Proof ctxt))) - -fun solver_name_of ctxt = solver_of (Data.get (Context.Proof ctxt)) - -fun is_available ctxt name = - (case Symtab.lookup (solvers_of (Data.get (Context.Proof ctxt))) name of - SOME ({avail, ...}, _) => avail () - | NONE => false) - -fun available_solvers_of ctxt = - filter (is_available ctxt) (all_solvers_of ctxt) - -fun warn_solver (Context.Proof ctxt) name = - if Context_Position.is_visible ctxt then - warning ("The SMT solver " ^ quote name ^ " is not installed") - else () - | warn_solver _ _ = () - -fun select_solver name context = - let - val ctxt = Context.proof_of context - val upd = Data.map (map_solver (K (SOME name))) - in - if not (member (op =) (all_solvers_of ctxt) name) then - error ("Trying to select unknown solver: " ^ quote name) - else if not (is_available ctxt name) then - (warn_solver context name; upd context) - else upd context - end - -fun no_solver_err () = error "No SMT solver selected" - -fun solver_of ctxt = - (case solver_name_of ctxt of - SOME name => name - | NONE => no_solver_err ()) - -fun solver_info_of default select ctxt = - (case solver_name_of ctxt of - NONE => default () - | SOME name => select (Symtab.lookup (solvers_of (Data.get (Context.Proof ctxt))) name)) - -fun solver_class_of ctxt = - let fun class_of ({class, ...}: solver_info, _) = class ctxt - in solver_info_of no_solver_err (class_of o the) ctxt end - -fun solver_options_of ctxt = - let - fun all_options NONE = [] - | all_options (SOME ({options, ...} : solver_info, opts)) = - opts @ options ctxt - in solver_info_of (K []) all_options ctxt end - -val setup_solver = - Attrib.setup @{binding smt_solver} - (Scan.lift (@{keyword "="} |-- Args.name) >> - (Thm.declaration_attribute o K o select_solver)) - "SMT solver configuration" - - -(* options *) - -val oracle = Attrib.setup_config_bool @{binding smt_oracle} (K true) -val timeout = Attrib.setup_config_real @{binding smt_timeout} (K 30.0) -val reconstruction_step_timeout = Attrib.setup_config_real @{binding smt_reconstruction_step_timeout} (K 10.0) -val random_seed = Attrib.setup_config_int @{binding smt_random_seed} (K 1) -val read_only_certificates = Attrib.setup_config_bool @{binding smt_read_only_certificates} (K false) -val verbose = Attrib.setup_config_bool @{binding smt_verbose} (K true) -val trace = Attrib.setup_config_bool @{binding smt_trace} (K false) -val statistics = Attrib.setup_config_bool @{binding smt_statistics} (K false) -val monomorph_limit = Attrib.setup_config_int @{binding smt_monomorph_limit} (K 10) -val monomorph_instances = Attrib.setup_config_int @{binding smt_monomorph_instances} (K 500) -val infer_triggers = Attrib.setup_config_bool @{binding smt_infer_triggers} (K false) -val debug_files = Attrib.setup_config_string @{binding smt_debug_files} (K "") -val sat_solver = Attrib.setup_config_string @{binding smt_sat_solver} (K "cdclite") - - -(* diagnostics *) - -fun cond_trace flag f x = if flag then tracing ("SMT: " ^ f x) else () - -fun verbose_msg ctxt = cond_trace (Config.get ctxt verbose) -fun trace_msg ctxt = cond_trace (Config.get ctxt trace) -fun statistics_msg ctxt = cond_trace (Config.get ctxt statistics) - - -(* tools *) - -fun with_time_limit ctxt timeout_config f x = - Timeout.apply (seconds (Config.get ctxt timeout_config)) f x - handle Timeout.TIMEOUT t => raise SMT_Failure.SMT SMT_Failure.Time_Out - -fun with_timeout ctxt = with_time_limit ctxt timeout - - -(* 
certificates *) - -val certificates_of = certs_of o Data.get o Context.Proof - -val get_certificates_path = Option.map (Cache_IO.cache_path_of) o certificates_of - -fun select_certificates name context = context |> Data.map (put_certs ( - if name = "" then NONE - else - Path.explode name - |> Path.append (Resources.master_directory (Context.theory_of context)) - |> SOME o Cache_IO.unsynchronized_init)) - -val setup_certificates = - Attrib.setup @{binding smt_certificates} - (Scan.lift (@{keyword "="} |-- Args.name) >> - (Thm.declaration_attribute o K o select_certificates)) - "SMT certificates configuration" - - -(* setup *) - -val _ = Theory.setup ( - setup_solver #> - setup_certificates) - -fun print_setup ctxt = - let - fun string_of_bool b = if b then "true" else "false" - - val names = available_solvers_of ctxt - val ns = if null names then ["(none)"] else sort_strings names - val n = the_default "(none)" (solver_name_of ctxt) - val opts = solver_options_of ctxt - - val t = string_of_real (Config.get ctxt timeout) - - val certs_filename = - (case get_certificates_path ctxt of - SOME path => Path.print path - | NONE => "(disabled)") - in - Pretty.writeln (Pretty.big_list "SMT setup:" [ - Pretty.str ("Current SMT solver: " ^ n), - Pretty.str ("Current SMT solver options: " ^ space_implode " " opts), - Pretty.str_list "Available SMT solvers: " "" ns, - Pretty.str ("Current timeout: " ^ t ^ " seconds"), - Pretty.str ("With proofs: " ^ - string_of_bool (not (Config.get ctxt oracle))), - Pretty.str ("Certificates cache: " ^ certs_filename), - Pretty.str ("Fixed certificates: " ^ - string_of_bool (Config.get ctxt read_only_certificates))]) - end - -val _ = - Outer_Syntax.command @{command_keyword smt_status_patch} - "show the available SMT solvers, the currently selected SMT solver, \ - \and the values of SMT configuration options" - (Scan.succeed (Toplevel.keep (print_setup o Toplevel.context_of))) - -end; diff --git a/src/test-gen/src/main/new_smt_patch/smt_normalize_patch.ML b/src/test-gen/src/main/new_smt_patch/smt_normalize_patch.ML deleted file mode 100644 index d0e27fc..0000000 --- a/src/test-gen/src/main/new_smt_patch/smt_normalize_patch.ML +++ /dev/null @@ -1,556 +0,0 @@ -(* Title: HOL/Tools/SMT/smt_normalize.ML - Author: Sascha Boehme, TU Muenchen - -Normalization steps on theorems required by SMT solvers. 
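(* with_time_limit in smt_config_patch.ML above wraps every solver run in
   Timeout.apply; the same pattern on a plain function, where the 2.0-second limit
   and slow_sum are invented for illustration. *)
fun slow_sum n = if n <= 0 then 0 else n + slow_sum (n - 1);
val result =
  Timeout.apply (seconds 2.0) slow_sum 100000
  handle Timeout.TIMEOUT _ => ~1;   (* analogue of raising SMT_Failure.Time_Out *)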
-*) - -signature SMT_NORMALIZE = -sig - val drop_fact_warning: Proof.context -> thm -> unit - val atomize_conv: Proof.context -> conv - - val special_quant_table: (string * thm) list - val case_bool_entry: string * thm - val abs_min_max_table: (string * thm) list - - type extra_norm = Proof.context -> thm list * thm list -> thm list * thm list - val add_extra_norm: SMT_Util.class * extra_norm -> Context.generic -> Context.generic - val normalize: Proof.context -> thm list -> (int * thm) list -end; - -structure SMT_Normalize: SMT_NORMALIZE = -struct - -fun drop_fact_warning ctxt = - SMT_Config.verbose_msg ctxt (prefix "Warning: dropping assumption: " o - Thm.string_of_thm ctxt) - - -(* general theorem normalizations *) - -(** instantiate elimination rules **) - -local - val (cpfalse, cfalse) = `SMT_Util.mk_cprop (Thm.cterm_of @{context} @{const False}) - - fun inst f ct thm = - let val cv = f (Drule.strip_imp_concl (Thm.cprop_of thm)) - in Thm.instantiate ([], [(dest_Var (Thm.term_of cv), ct)]) thm end -in - -fun instantiate_elim thm = - (case Thm.concl_of thm of - @{const Trueprop} $ Var (_, @{typ bool}) => inst Thm.dest_arg cfalse thm - | Var _ => inst I cpfalse thm - | _ => thm) - -end - - -(** normalize definitions **) - -fun norm_def thm = - (case Thm.prop_of thm of - @{const Trueprop} $ (Const (@{const_name HOL.eq}, _) $ _ $ Abs _) => - norm_def (thm RS @{thm fun_cong}) - | Const (@{const_name Pure.eq}, _) $ _ $ Abs _ => norm_def (thm RS @{thm meta_eq_to_obj_eq}) - | _ => thm) - - -(** atomization **) - -fun atomize_conv ctxt ct = - (case Thm.term_of ct of - @{const Pure.imp} $ _ $ _ => - Conv.binop_conv (atomize_conv ctxt) then_conv Conv.rewr_conv @{thm atomize_imp} - | Const (@{const_name Pure.eq}, _) $ _ $ _ => - Conv.binop_conv (atomize_conv ctxt) then_conv Conv.rewr_conv @{thm atomize_eq} - | Const (@{const_name Pure.all}, _) $ Abs _ => - Conv.binder_conv (atomize_conv o snd) ctxt then_conv Conv.rewr_conv @{thm atomize_all} - | _ => Conv.all_conv) ct - handle CTERM _ => Conv.all_conv ct - -val setup_atomize = - fold SMT_Builtin.add_builtin_fun_ext'' [@{const_name Pure.imp}, @{const_name Pure.eq}, - @{const_name Pure.all}, @{const_name Trueprop}] - - -(** unfold special quantifiers **) - -val special_quant_table = [ - (@{const_name Ex1}, @{thm Ex1_def_raw}), - (@{const_name Ball}, @{thm Ball_def_raw}), - (@{const_name Bex}, @{thm Bex_def_raw})] - -local - fun special_quant (Const (n, _)) = AList.lookup (op =) special_quant_table n - | special_quant _ = NONE - - fun special_quant_conv _ ct = - (case special_quant (Thm.term_of ct) of - SOME thm => Conv.rewr_conv thm - | NONE => Conv.all_conv) ct -in - -fun unfold_special_quants_conv ctxt = - SMT_Util.if_exists_conv (is_some o special_quant) (Conv.top_conv special_quant_conv ctxt) - -val setup_unfolded_quants = fold (SMT_Builtin.add_builtin_fun_ext'' o fst) special_quant_table - -end - - -(** trigger inference **) - -local - (*** check trigger syntax ***) - - fun dest_trigger (Const (@{const_name pat}, _) $ _) = SOME true - | dest_trigger (Const (@{const_name nopat}, _) $ _) = SOME false - | dest_trigger _ = NONE - - fun eq_list [] = false - | eq_list (b :: bs) = forall (equal b) bs - - fun proper_trigger t = - t - |> these o try SMT_Util.dest_symb_list - |> map (map_filter dest_trigger o these o try SMT_Util.dest_symb_list) - |> (fn [] => false | bss => forall eq_list bss) - - fun proper_quant inside f t = - (case t of - Const (@{const_name All}, _) $ Abs (_, _, u) => proper_quant true f u - | Const (@{const_name Ex}, _) $ Abs (_, 
_, u) => proper_quant true f u - | @{const trigger} $ p $ u => - (if inside then f p else false) andalso proper_quant false f u - | Abs (_, _, u) => proper_quant false f u - | u1 $ u2 => proper_quant false f u1 andalso proper_quant false f u2 - | _ => true) - - fun check_trigger_error ctxt t = - error ("SMT triggers must only occur under quantifier and multipatterns " ^ - "must have the same kind: " ^ Syntax.string_of_term ctxt t) - - fun check_trigger_conv ctxt ct = - if proper_quant false proper_trigger (SMT_Util.term_of ct) then Conv.all_conv ct - else check_trigger_error ctxt (Thm.term_of ct) - - - (*** infer simple triggers ***) - - fun dest_cond_eq ct = - (case Thm.term_of ct of - Const (@{const_name HOL.eq}, _) $ _ $ _ => Thm.dest_binop ct - | @{const HOL.implies} $ _ $ _ => dest_cond_eq (Thm.dest_arg ct) - | _ => raise CTERM ("no equation", [ct])) - - fun get_constrs thy (Type (n, _)) = these (BNF_LFP_Compat.get_constrs thy n) - | get_constrs _ _ = [] - - fun is_constr thy (n, T) = - let fun match (m, U) = m = n andalso Sign.typ_instance thy (T, U) - in can (the o find_first match o get_constrs thy o Term.body_type) T end - - fun is_constr_pat thy t = - (case Term.strip_comb t of - (Free _, []) => true - | (Const c, ts) => is_constr thy c andalso forall (is_constr_pat thy) ts - | _ => false) - - fun is_simp_lhs ctxt t = - (case Term.strip_comb t of - (Const c, ts as _ :: _) => - not (SMT_Builtin.is_builtin_fun_ext ctxt c ts) andalso - forall (is_constr_pat (Proof_Context.theory_of ctxt)) ts - | _ => false) - - fun has_all_vars vs t = - subset (op aconv) (vs, map Free (Term.add_frees t [])) - - fun minimal_pats vs ct = - if has_all_vars vs (Thm.term_of ct) then - (case Thm.term_of ct of - _ $ _ => - (case apply2 (minimal_pats vs) (Thm.dest_comb ct) of - ([], []) => [[ct]] - | (ctss, ctss') => union (eq_set (op aconvc)) ctss ctss') - | _ => []) - else [] - - fun proper_mpat _ _ _ [] = false - | proper_mpat thy gen u cts = - let - val tps = (op ~~) (`gen (map Thm.term_of cts)) - fun some_match u = tps |> exists (fn (t', t) => - Pattern.matches thy (t', u) andalso not (t aconv u)) - in not (Term.exists_subterm some_match u) end - - val pat = SMT_Util.mk_const_pat @{theory} @{const_name pat} SMT_Util.destT1 - fun mk_pat ct = Thm.apply (SMT_Util.instT' ct pat) ct - - fun mk_clist T = - apply2 (Thm.cterm_of @{context}) (SMT_Util.symb_cons_const T, SMT_Util.symb_nil_const T) - fun mk_list (ccons, cnil) f cts = fold_rev (Thm.mk_binop ccons o f) cts cnil - val mk_pat_list = mk_list (mk_clist @{typ pattern}) - val mk_mpat_list = mk_list (mk_clist @{typ "pattern symb_list"}) - fun mk_trigger ctss = mk_mpat_list (mk_pat_list mk_pat) ctss - - val trigger_eq = mk_meta_eq @{lemma "p = trigger t p" by (simp add: trigger_def)} - - fun insert_trigger_conv [] ct = Conv.all_conv ct - | insert_trigger_conv ctss ct = - let - val (ctr, cp) = Thm.dest_binop (Thm.rhs_of trigger_eq) ||> rpair ct - val inst = map (apfst (dest_Var o Thm.term_of)) [cp, (ctr, mk_trigger ctss)] - in Thm.instantiate ([], inst) trigger_eq end - - fun infer_trigger_eq_conv outer_ctxt (ctxt, cvs) ct = - let - val (lhs, rhs) = dest_cond_eq ct - - val vs = map Thm.term_of cvs - val thy = Proof_Context.theory_of ctxt - - fun get_mpats ct = - if is_simp_lhs ctxt (Thm.term_of ct) then minimal_pats vs ct - else [] - val gen = Variable.export_terms ctxt outer_ctxt - val filter_mpats = filter (proper_mpat thy gen (Thm.term_of rhs)) - - in insert_trigger_conv (filter_mpats (get_mpats lhs)) ct end - - fun has_trigger (@{const trigger} $ _ $ _) 
= true - | has_trigger _ = false - - fun try_trigger_conv cv ct = - if SMT_Util.under_quant has_trigger (SMT_Util.term_of ct) then Conv.all_conv ct - else Conv.try_conv cv ct - - fun infer_trigger_conv ctxt = - if Config.get ctxt SMT_Config.infer_triggers then - try_trigger_conv (SMT_Util.under_quant_conv (infer_trigger_eq_conv ctxt) ctxt) - else Conv.all_conv -in - -fun trigger_conv ctxt = - SMT_Util.prop_conv (check_trigger_conv ctxt then_conv infer_trigger_conv ctxt) - -val setup_trigger = - fold SMT_Builtin.add_builtin_fun_ext'' - [@{const_name pat}, @{const_name nopat}, @{const_name trigger}] - -end - - -(** combined general normalizations **) - -fun gen_normalize1_conv ctxt = - atomize_conv ctxt then_conv - unfold_special_quants_conv ctxt then_conv - Thm.beta_conversion true then_conv - trigger_conv ctxt - -fun gen_normalize1 ctxt = - instantiate_elim #> - norm_def #> - Conv.fconv_rule (Thm.beta_conversion true then_conv Thm.eta_conversion) #> - Drule.forall_intr_vars #> - Conv.fconv_rule (gen_normalize1_conv ctxt) #> - (* Z3 4.3.1 silently normalizes "P --> Q --> R" to "P & Q --> R" *) - Raw_Simplifier.rewrite_rule ctxt @{thms HOL.imp_conjL[symmetric, THEN eq_reflection]} - -fun gen_norm1_safe ctxt (i, thm) = - (case try (gen_normalize1 ctxt) thm of - SOME thm' => SOME (i, thm') - | NONE => (drop_fact_warning ctxt thm; NONE)) - -fun gen_normalize ctxt iwthms = map_filter (gen_norm1_safe ctxt) iwthms - - -(* unfolding of definitions and theory-specific rewritings *) - -fun expand_head_conv cv ct = - (case Thm.term_of ct of - _ $ _ => - Conv.fun_conv (expand_head_conv cv) then_conv - Conv.try_conv (Thm.beta_conversion false) - | _ => cv) ct - - -(** rewrite bool case expressions as if expressions **) - -val case_bool_entry = (@{const_name "bool.case_bool"}, @{thm case_bool_if}) - -local - fun is_case_bool (Const (@{const_name "bool.case_bool"}, _)) = true - | is_case_bool _ = false - - fun unfold_conv _ = - SMT_Util.if_true_conv (is_case_bool o Term.head_of) - (expand_head_conv (Conv.rewr_conv @{thm case_bool_if})) -in - -fun rewrite_case_bool_conv ctxt = - SMT_Util.if_exists_conv is_case_bool (Conv.top_conv unfold_conv ctxt) - -val setup_case_bool = SMT_Builtin.add_builtin_fun_ext'' @{const_name "bool.case_bool"} - -end - - -(** unfold abs, min and max **) - -val abs_min_max_table = [ - (@{const_name min}, @{thm min_def_raw}), - (@{const_name max}, @{thm max_def_raw}), - (@{const_name abs}, @{thm abs_if_raw})] - -local - fun abs_min_max ctxt (Const (n, Type (@{type_name fun}, [T, _]))) = - (case AList.lookup (op =) abs_min_max_table n of - NONE => NONE - | SOME thm => if SMT_Builtin.is_builtin_typ_ext ctxt T then SOME thm else NONE) - | abs_min_max _ _ = NONE - - fun unfold_amm_conv ctxt ct = - (case abs_min_max ctxt (Term.head_of (Thm.term_of ct)) of - SOME thm => expand_head_conv (Conv.rewr_conv thm) - | NONE => Conv.all_conv) ct -in - -fun unfold_abs_min_max_conv ctxt = - SMT_Util.if_exists_conv (is_some o abs_min_max ctxt) (Conv.top_conv unfold_amm_conv ctxt) - -val setup_abs_min_max = fold (SMT_Builtin.add_builtin_fun_ext'' o fst) abs_min_max_table - -end - - -(** embedding of standard natural number operations into integer operations **) - -local - val nat_embedding = @{lemma - "ALL n. nat (int n) = n" - "ALL i. i >= 0 --> int (nat i) = i" - "ALL i. 
i < 0 --> int (nat i) = 0" - by simp_all} - - val simple_nat_ops = [ - @{const less (nat)}, @{const less_eq (nat)}, - @{const Suc}, @{const plus (nat)}, @{const minus (nat)}] - - val mult_nat_ops = - [@{const times (nat)}, @{const divide (nat)}, @{const modulo (nat)}] - - val nat_ops = simple_nat_ops @ mult_nat_ops - - val nat_consts = nat_ops @ [@{const numeral (nat)}, - @{const zero_class.zero (nat)}, @{const one_class.one (nat)}] - - val nat_int_coercions = [@{const of_nat (int)}, @{const nat}] - - val builtin_nat_ops = nat_int_coercions @ simple_nat_ops - - val is_nat_const = member (op aconv) nat_consts - - fun is_nat_const' @{const of_nat (int)} = true - | is_nat_const' t = is_nat_const t - - val expands = map mk_meta_eq @{lemma - "0 = nat 0" - "1 = nat 1" - "(numeral :: num => nat) = (%i. nat (numeral i))" - "op < = (%a b. int a < int b)" - "op <= = (%a b. int a <= int b)" - "Suc = (%a. nat (int a + 1))" - "op + = (%a b. nat (int a + int b))" - "op - = (%a b. nat (int a - int b))" - "op * = (%a b. nat (int a * int b))" - "op div = (%a b. nat (int a div int b))" - "op mod = (%a b. nat (int a mod int b))" - by (fastforce simp add: nat_mult_distrib nat_div_distrib nat_mod_distrib)+} - - val ints = map mk_meta_eq @{lemma - "int 0 = 0" - "int 1 = 1" - "int (Suc n) = int n + 1" - "int (n + m) = int n + int m" - "int (n - m) = int (nat (int n - int m))" - "int (n * m) = int n * int m" - "int (n div m) = int n div int m" - "int (n mod m) = int n mod int m" - by (auto simp add: zdiv_int zmod_int)} - - val int_if = mk_meta_eq @{lemma - "int (if P then n else m) = (if P then int n else int m)" - by simp} - - fun mk_number_eq ctxt i lhs = - let - val eq = SMT_Util.mk_cequals lhs (Numeral.mk_cnumber @{ctyp int} i) - val tac = - Simplifier.simp_tac (put_simpset HOL_ss ctxt addsimps [@{thm of_nat_numeral [where 'a=int]}]) 1 - in Goal.norm_result ctxt (Goal.prove_internal ctxt [] eq (K tac)) end - - fun ite_conv cv1 cv2 = - Conv.combination_conv (Conv.combination_conv (Conv.arg_conv cv1) cv2) cv2 - - fun int_conv ctxt ct = - (case Thm.term_of ct of - @{const of_nat (int)} $ (n as (@{const numeral (nat)} $ _)) => - Conv.rewr_conv (mk_number_eq ctxt (snd (HOLogic.dest_number n)) ct) - | @{const of_nat (int)} $ _ => - (Conv.rewrs_conv ints then_conv Conv.sub_conv ints_conv ctxt) else_conv - (Conv.rewr_conv int_if then_conv - ite_conv (nat_conv ctxt) (int_conv ctxt)) else_conv - Conv.sub_conv (Conv.top_sweep_conv nat_conv) ctxt - | _ => Conv.no_conv) ct - - and ints_conv ctxt = Conv.top_sweep_conv int_conv ctxt - - and expand_conv ctxt = - SMT_Util.if_conv (is_nat_const o Term.head_of) - (expand_head_conv (Conv.rewrs_conv expands) then_conv ints_conv ctxt) - (int_conv ctxt) - - and nat_conv ctxt = SMT_Util.if_exists_conv is_nat_const' - (Conv.top_sweep_conv expand_conv ctxt) - - val uses_nat_int = Term.exists_subterm (member (op aconv) nat_int_coercions) -in - -val nat_as_int_conv = nat_conv - -fun add_nat_embedding thms = - if exists (uses_nat_int o Thm.prop_of) thms then (thms, nat_embedding) - else (thms, []) - -val setup_nat_as_int = - SMT_Builtin.add_builtin_typ_ext (@{typ nat}, K true) #> - fold (SMT_Builtin.add_builtin_fun_ext' o Term.dest_Const) builtin_nat_ops - -end - - - -(** normalize numerals **) - -local - (* - rewrite Numeral1 into 1 - rewrite - 0 into 0 - *) - - fun is_irregular_number (Const (@{const_name numeral}, _) $ Const (@{const_name num.One}, _)) = - true - | is_irregular_number (Const (@{const_name uminus}, _) $ Const (@{const_name Groups.zero}, _)) = - true - | 
is_irregular_number _ = false - - fun is_strange_number ctxt t = is_irregular_number t andalso SMT_Builtin.is_builtin_num ctxt t - - val proper_num_ss = - simpset_of (put_simpset HOL_ss @{context} addsimps @{thms Num.numeral_One minus_zero}) - - fun norm_num_conv ctxt = - SMT_Util.if_conv (is_strange_number ctxt) (Simplifier.rewrite (put_simpset proper_num_ss ctxt)) - Conv.no_conv -in - -fun normalize_numerals_conv ctxt = - SMT_Util.if_exists_conv (is_strange_number ctxt) (Conv.top_sweep_conv norm_num_conv ctxt) - -end - - -(** combined unfoldings and rewritings **) - -fun unfold_conv ctxt = - rewrite_case_bool_conv ctxt then_conv - unfold_abs_min_max_conv ctxt then_conv - nat_as_int_conv ctxt then_conv - Thm.beta_conversion true - -fun burrow_ids f ithms = - let - val (is, thms) = split_list ithms - val (thms', extra_thms) = f thms - in (is ~~ thms') @ map (pair ~1) extra_thms end - -fun unfold_polymorph ctxt = map (apsnd (Conv.fconv_rule (unfold_conv ctxt))) -fun unfold_monomorph ctxt ithms = - ithms - |> map (apsnd (Conv.fconv_rule (normalize_numerals_conv ctxt))) - |> burrow_ids add_nat_embedding - - -(* overall normalization *) - -type extra_norm = Proof.context -> thm list * thm list -> thm list * thm list - -structure Extra_Norms = Generic_Data -( - type T = extra_norm SMT_Util.dict - val empty = [] - val extend = I - fun merge data = SMT_Util.dict_merge fst data -) - -fun add_extra_norm (cs, norm) = Extra_Norms.map (SMT_Util.dict_update (cs, norm)) - -fun apply_extra_norms ctxt ithms = - let - val cs = SMT_Config.solver_class_of ctxt - val es = SMT_Util.dict_lookup (Extra_Norms.get (Context.Proof ctxt)) cs - in burrow_ids (fold (fn e => e ctxt) es o rpair []) ithms end - -local - val ignored = member (op =) [@{const_name All}, @{const_name Ex}, - @{const_name Let}, @{const_name If}, @{const_name HOL.eq}] - - val schematic_consts_of = - let - fun collect (@{const trigger} $ p $ t) = collect_trigger p #> collect t - | collect (t $ u) = collect t #> collect u - | collect (Abs (_, _, t)) = collect t - | collect (t as Const (n, _)) = - if not (ignored n) then Monomorph.add_schematic_consts_of t else I - | collect _ = I - and collect_trigger t = - let val dest = these o try SMT_Util.dest_symb_list - in fold (fold collect_pat o dest) (dest t) end - and collect_pat (Const (@{const_name pat}, _) $ t) = collect t - | collect_pat (Const (@{const_name nopat}, _) $ t) = collect t - | collect_pat _ = I - in (fn t => collect t Symtab.empty) end -in - -fun monomorph ctxt xthms = - let val (xs, thms) = split_list xthms - in - map (pair 1) thms - |> Monomorph.monomorph schematic_consts_of ctxt - |> maps (uncurry (map o pair)) o map2 pair xs o map (map snd) - end - -end - -fun normalize ctxt wthms = - wthms - |> map_index I - |> gen_normalize ctxt - |> unfold_polymorph ctxt - |> monomorph ctxt - |> unfold_monomorph ctxt - |> apply_extra_norms ctxt - -val _ = Theory.setup (Context.theory_map ( - setup_atomize #> - setup_unfolded_quants #> - setup_trigger #> - setup_case_bool #> - setup_abs_min_max #> - setup_nat_as_int)) - -end; diff --git a/src/test-gen/src/main/new_smt_patch/smt_solver_patch.ML b/src/test-gen/src/main/new_smt_patch/smt_solver_patch.ML deleted file mode 100644 index e0e4811..0000000 --- a/src/test-gen/src/main/new_smt_patch/smt_solver_patch.ML +++ /dev/null @@ -1,346 +0,0 @@ -(* Title: HOL/Tools/SMT/smt_solver.ML - Author: Sascha Boehme, TU Muenchen - -SMT solvers registry and SMT tactic. 
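(* burrow_ids from smt_normalize_patch.ML above, exercised on strings instead of
   theorems so the ~1 index given to extra facts (e.g. the nat_embedding lemmas)
   is visible; the demo data is invented. *)
fun burrow_ids f ithms =
  let
    val (is, xs) = split_list ithms;
    val (xs', extra) = f xs;
  in (is ~~ xs') @ map (pair ~1) extra end;

val demo = burrow_ids (fn xs => (xs, ["nat_embedding"])) [(0, "t0"), (1, "t1")];
(* demo = [(0, "t0"), (1, "t1"), (~1, "nat_embedding")] *)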
-*) - -signature SMT_SOLVER = -sig - (*configuration*) - datatype outcome = Unsat | Sat | Unknown - - type parsed_proof = - {outcome: SMT_Failure.failure option, - fact_ids: (int * ((string * ATP_Problem_Generate.stature) * thm)) list option, - atp_proof: unit -> (term, string) ATP_Proof.atp_step list} - - type parsed_model = - {const_defs: (term * term) list} - - type solver_config = - {name: string, - class: Proof.context -> SMT_Util.class, - avail: unit -> bool, - command: unit -> string list, - options: Proof.context -> string list, - smt_options: (string * string) list, - default_max_relevant: int, - outcome: string -> string list -> outcome * string list, - parse_proof: (Proof.context -> SMT_Translate.replay_data -> - ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> - parsed_proof) option, - replay: (Proof.context -> SMT_Translate.replay_data -> string list -> thm) option, - parse_model: (SMT_Translate.replay_data -> string list -> parsed_model) option} - - (*registry*) - val add_solver: solver_config -> theory -> theory - val default_max_relevant: Proof.context -> string -> int - - (*filter*) - val smt_filter: Proof.context -> thm -> ((string * ATP_Problem_Generate.stature) * thm) list -> - int -> Time.time -> parsed_proof - - (*tactic*) - val smt_tac: Proof.context -> thm list -> int -> tactic - val smt_tac': Proof.context -> thm list -> int -> tactic - - exception SMT_Model of parsed_model - val smt_get_model_tac: Proof.context -> thm list -> int -> tactic -end; - -structure SMT_patch_Solver: SMT_SOLVER = -struct - -(* interface to external solvers *) - -local - -val shell_quote = enclose "'" "'"; -val shell_path = shell_quote o File.standard_path; - -fun make_command command options problem_path proof_path = - "(exec 2>&1;" :: map shell_quote (command () @ options) @ - [shell_path problem_path, ")", ">", shell_path proof_path] - |> space_implode " " - -fun with_trace ctxt msg f x = - let val _ = SMT_Config.trace_msg ctxt (fn () => msg) () - in f x end - -fun run ctxt name mk_cmd input = - (case SMT_Config.certificates_of ctxt of - NONE => - if not (SMT_Config.is_available ctxt name) then - error ("The SMT solver " ^ quote name ^ " is not installed") - else if Config.get ctxt SMT_Config.debug_files = "" then - with_trace ctxt ("Invoking SMT solver " ^ quote name ^ " ...") (Cache_IO.run mk_cmd) input - else - let - val base_path = Path.explode (Config.get ctxt SMT_Config.debug_files) - val in_path = Path.ext "smt_in" base_path - val out_path = Path.ext "smt_out" base_path - in Cache_IO.raw_run mk_cmd input in_path out_path end - | SOME certs => - (case Cache_IO.lookup certs input of - (NONE, key) => - if Config.get ctxt SMT_Config.read_only_certificates then - error ("Bad certificate cache: missing certificate") - else - Cache_IO.run_and_cache certs key mk_cmd input - | (SOME output, _) => - with_trace ctxt ("Using cached certificate from " ^ - shell_path (Cache_IO.cache_path_of certs) ^ " ...") I output)) - -(* Z3 returns 1 if "get-model" or "get-model" fails *) -val normal_return_codes = [0, 1] - -fun run_solver ctxt name mk_cmd input = - let - fun pretty tag lines = Pretty.string_of (Pretty.big_list tag (map Pretty.str lines)) - - val _ = SMT_Config.trace_msg ctxt (pretty "Problem:" o split_lines) input - - val {redirected_output = res, output = err, return_code} = - SMT_Config.with_timeout ctxt (run ctxt name mk_cmd) input - val _ = SMT_Config.trace_msg ctxt (pretty "Solver:") err - - val output = fst (take_suffix (equal "") res) - val 
_ = SMT_Config.trace_msg ctxt (pretty "Result:") output - - val _ = member (op =) normal_return_codes return_code orelse - raise SMT_Failure.SMT (SMT_Failure.Abnormal_Termination return_code) - in output end - -fun trace_assms ctxt = - SMT_Config.trace_msg ctxt (Pretty.string_of o - Pretty.big_list "Assertions:" o map (Thm.pretty_thm ctxt o snd)) - -fun trace_replay_data ({context = ctxt, typs, terms, ...} : SMT_Translate.replay_data) = - let - fun pretty_eq n p = Pretty.block [Pretty.str n, Pretty.str " = ", p] - fun p_typ (n, T) = pretty_eq n (Syntax.pretty_typ ctxt T) - fun p_term (n, t) = pretty_eq n (Syntax.pretty_term ctxt t) - in - SMT_Config.trace_msg ctxt (fn () => - Pretty.string_of (Pretty.big_list "Names:" [ - Pretty.big_list "sorts:" (map p_typ (Symtab.dest typs)), - Pretty.big_list "functions:" (map p_term (Symtab.dest terms))])) () - end - -in - -fun invoke name command smt_options ithms ctxt = - let - val options = SMT_Config.solver_options_of ctxt - val comments = [space_implode " " options] - - val (str, replay_data as {context = ctxt', ...}) = - ithms - |> tap (trace_assms ctxt) - |> SMT_Translate.translate ctxt smt_options comments - ||> tap trace_replay_data - in (run_solver ctxt' name (make_command command options) str, replay_data) end - -end - - -(* configuration *) - -datatype outcome = Unsat | Sat | Unknown - -type parsed_proof = - {outcome: SMT_Failure.failure option, - fact_ids: (int * ((string * ATP_Problem_Generate.stature) * thm)) list option, - atp_proof: unit -> (term, string) ATP_Proof.atp_step list} - -type parsed_model = - {const_defs: (term * term) list} - -type solver_config = - {name: string, - class: Proof.context -> SMT_Util.class, - avail: unit -> bool, - command: unit -> string list, - options: Proof.context -> string list, - smt_options: (string * string) list, - default_max_relevant: int, - outcome: string -> string list -> outcome * string list, - parse_proof: (Proof.context -> SMT_Translate.replay_data -> - ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> - parsed_proof) option, - replay: (Proof.context -> SMT_Translate.replay_data -> string list -> thm) option, - parse_model: (SMT_Translate.replay_data -> string list -> parsed_model) option} - - -(* check well-sortedness *) - -val has_topsort = Term.exists_type (Term.exists_subtype (fn - TFree (_, []) => true - | TVar (_, []) => true - | _ => false)) - -(* top sorts cause problems with atomization *) -fun check_topsort ctxt thm = - if has_topsort (Thm.prop_of thm) then (SMT_Normalize.drop_fact_warning ctxt thm; TrueI) else thm - - -(* registry *) - -type solver_info = { - command: unit -> string list, - smt_options: (string * string) list, - default_max_relevant: int, - parse_proof: Proof.context -> SMT_Translate.replay_data -> - ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> - parsed_proof, - replay: Proof.context -> SMT_Translate.replay_data -> string list -> thm, - parse_model: SMT_Translate.replay_data -> string list -> parsed_model} - -structure Solvers = Generic_Data -( - type T = solver_info Symtab.table - val empty = Symtab.empty - val extend = I - fun merge data = Symtab.merge (K true) data -) - -exception SMT_Model of parsed_model - -local - fun parse_proof outcome parse_proof0 outer_ctxt replay_data xfacts prems concl output = - (case outcome output of - (Unsat, lines) => - (case parse_proof0 of - SOME pp => pp outer_ctxt replay_data xfacts prems concl lines - | NONE => {outcome = NONE, fact_ids 
= NONE, atp_proof = K []}) - | (result, _) => raise SMT_Failure.SMT (SMT_Failure.Counterexample (result = Sat))) - - fun replay outcome replay0 oracle outer_ctxt - (replay_data as {context = ctxt, ...} : SMT_Translate.replay_data) output = - (case outcome output of - (Unsat, lines) => - if not (Config.get ctxt SMT_Config.oracle) andalso is_some replay0 - then the replay0 outer_ctxt replay_data lines - else oracle () - | (result, _) => raise SMT_Failure.SMT (SMT_Failure.Counterexample (result = Sat))) - - (* TODO: get back models *) - fun parse_model outcome parse_model0 - (replay_data as {context = ctxt, ...} : SMT_Translate.replay_data) output = - (case outcome output of - (Unsat, _) => {const_defs = []} - | (result, ls) => - if ((result = Sat) orelse (result = Unknown)) then - (case parse_model0 of SOME f => f replay_data ls | _ => {const_defs = []}) - else - {const_defs = []}) - - val cfalse = Thm.cterm_of @{context} @{prop False} -in - -fun add_solver ({name, class, avail, command, options, smt_options, default_max_relevant, outcome, - parse_proof = parse_proof0, replay = replay0, parse_model = parse_model0} : solver_config) = - let - fun solver oracle = { - command = command, - smt_options = smt_options, - default_max_relevant = default_max_relevant, - parse_proof = parse_proof (outcome name) parse_proof0, - replay = replay (outcome name) replay0 oracle, - parse_model = parse_model (outcome name) parse_model0} - - val info = {name = name, class = class, avail = avail, options = options} - in - Thm.add_oracle (Binding.name name, K cfalse) #-> (fn (_, oracle) => - Context.theory_map (Solvers.map (Symtab.update_new (name, solver oracle)))) #> - Context.theory_map (SMT_Config.add_solver info) - end - -end - -fun get_info ctxt name = the (Symtab.lookup (Solvers.get (Context.Proof ctxt)) name) - -fun name_and_info_of ctxt = - let val name = SMT_Config.solver_of ctxt - in (name, get_info ctxt name) end - -val default_max_relevant = #default_max_relevant oo get_info - -fun apply_solver_and_replay ctxt thms0 = - let - val thms = map (check_topsort ctxt) thms0 - val (name, {command, smt_options, replay, ...}) = name_and_info_of ctxt - val (output, replay_data) = - invoke name command smt_options (SMT_Normalize.normalize ctxt thms) ctxt - in replay ctxt replay_data output end - -fun apply_solver_and_get_model ctxt thms0 = - let - val thms = map (check_topsort ctxt) thms0 - val (name, {command, smt_options, parse_model, ...}) = name_and_info_of ctxt - val (output, replay_data) = - invoke name command smt_options (SMT_Normalize.normalize ctxt thms) ctxt - (* This is just a current artifact in order to use parse_model inside a tactic. We may clean this up next. 
*) - in raise SMT_Model (parse_model replay_data output) end - - -(* filter *) - -fun smt_filter ctxt0 goal xfacts i time_limit = - let - val ctxt = ctxt0 |> Config.put SMT_Config.timeout (Time.toReal time_limit) - - val ({context = ctxt, prems, concl, ...}, _) = Subgoal.focus ctxt i NONE goal - fun negate ct = Thm.dest_comb ct ||> Thm.apply @{cterm Not} |-> Thm.apply - val cprop = - (case try negate (Thm.rhs_of (SMT_Normalize.atomize_conv ctxt concl)) of - SOME ct => ct - | NONE => raise SMT_Failure.SMT (SMT_Failure.Other_Failure "cannot atomize goal")) - - val conjecture = Thm.assume cprop - val facts = map snd xfacts - val thms = conjecture :: prems @ facts - val thms' = map (check_topsort ctxt) thms - - val (name, {command, smt_options, parse_proof, ...}) = name_and_info_of ctxt - val (output, replay_data) = - invoke name command smt_options (SMT_Normalize.normalize ctxt thms') ctxt - in - parse_proof ctxt replay_data xfacts (map Thm.prop_of prems) (Thm.term_of concl) output - end - handle SMT_Failure.SMT fail => {outcome = SOME fail, fact_ids = NONE, atp_proof = K []} - - -(* SMT tactic *) - -local - fun str_of ctxt fail = - "Solver " ^ SMT_Config.solver_of ctxt ^ ": " ^ SMT_Failure.string_of_failure fail - - fun safe_solve ctxt facts = SOME (apply_solver_and_replay ctxt facts) - handle - SMT_Failure.SMT (fail as SMT_Failure.Counterexample _) => - (SMT_Config.verbose_msg ctxt (str_of ctxt) fail; NONE) - | SMT_Failure.SMT (fail as SMT_Failure.Time_Out) => - error ("SMT: Solver " ^ quote (SMT_Config.solver_of ctxt) ^ ": " ^ - SMT_Failure.string_of_failure fail ^ " (setting the " ^ - "configuration option " ^ quote (Config.name_of SMT_Config.timeout) ^ " might help)") - | SMT_Failure.SMT fail => error (str_of ctxt fail) - - fun resolve ctxt (SOME thm) = resolve_tac ctxt [thm] 1 - | resolve _ NONE = no_tac - - fun tac prove ctxt rules = - CONVERSION (SMT_Normalize.atomize_conv ctxt) - THEN' resolve_tac ctxt @{thms ccontr} - THEN' SUBPROOF (fn {context = ctxt', prems, ...} => - resolve ctxt' (prove ctxt' (rules @ prems))) ctxt -in - -val smt_tac = tac safe_solve -val smt_tac' = tac (SOME oo apply_solver_and_replay) -val smt_get_model_tac = tac (SOME oo apply_solver_and_get_model) - -end - -end; diff --git a/src/test-gen/src/main/new_smt_patch/smt_systems_patch.ML b/src/test-gen/src/main/new_smt_patch/smt_systems_patch.ML deleted file mode 100644 index 22f2ca4..0000000 --- a/src/test-gen/src/main/new_smt_patch/smt_systems_patch.ML +++ /dev/null @@ -1,159 +0,0 @@ -(* Title: HOL/Tools/SMT/smt_systems.ML - Author: Sascha Boehme, TU Muenchen - -Setup SMT solvers. 
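Each solver registered below is described by an SMT_patch_Solver.solver_config record; its textual verdict is classified by prefix-matching the first meaningful output line. A minimal, self-contained sketch of that classification step (plain Standard ML, names illustrative, mirroring outcome_of below):

    datatype verdict = Unsat | Sat | Unknown

    (* classify the first relevant line a solver prints *)
    fun classify line =
      if String.isPrefix "unsat" line then Unsat
      else if String.isPrefix "sat" line then Sat
      else if String.isPrefix "unknown" line then Unknown
      else raise Fail ("unexpected solver output: " ^ line)

Note that the z3 configuration in this file passes the smt-option (:model true), so a satisfiable problem yields a model rather than a proof.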
-*) - -signature SMT_SYSTEMS = -sig - val cvc4_extensions: bool Config.T - val z3_extensions: bool Config.T -end; - -structure SMT_Systems: SMT_SYSTEMS = -struct - -(* helper functions *) - -fun make_avail name () = getenv (name ^ "_SOLVER") <> "" - -fun make_command name () = [getenv (name ^ "_SOLVER")] - -fun outcome_of unsat sat unknown solver_name line = - if String.isPrefix unsat line then SMT_patch_Solver.Unsat - else if String.isPrefix sat line then SMT_patch_Solver.Sat - else if String.isPrefix unknown line then SMT_patch_Solver.Unknown - else raise SMT_Failure.SMT (SMT_Failure.Other_Failure ("Solver " ^ quote solver_name ^ - " failed -- enable tracing using the " ^ quote (Config.name_of SMT_Config.trace) ^ - " option for details")) - -fun is_blank_or_error_line "" = true - | is_blank_or_error_line s = String.isPrefix "(error " s - -fun on_first_line test_outcome solver_name lines = - let - val split_first = (fn [] => ("", []) | l :: ls => (l, ls)) - val (l, ls) = split_first (snd (take_prefix is_blank_or_error_line lines)) - in (test_outcome solver_name l, ls) end - -fun on_first_non_unsupported_line test_outcome solver_name lines = - on_first_line test_outcome solver_name (filter (curry (op <>) "unsupported") lines) - -(* CVC3 *) - -local - fun cvc3_options ctxt = [ - "-seed", string_of_int (Config.get ctxt SMT_Config.random_seed), - "-lang", "smt2", - "-timeout", string_of_int (Real.ceil (Config.get ctxt SMT_Config.timeout))] -in - -val cvc3: SMT_patch_Solver.solver_config = { - name = "cvc3", - class = K SMTLIB_Interface.smtlibC, - avail = make_avail "CVC3", - command = make_command "CVC3", - options = cvc3_options, - smt_options = [], - default_max_relevant = 400 (* FUDGE *), - outcome = on_first_line (outcome_of "unsat" "sat" "unknown"), - parse_proof = NONE, - replay = NONE, - parse_model = NONE} - -end - -(* CVC4 *) - -val cvc4_extensions = Attrib.setup_config_bool @{binding cvc4_extensions} (K false) - -local - fun cvc4_options ctxt = [ - "--random-seed=" ^ string_of_int (Config.get ctxt SMT_Config.random_seed), - "--lang=smt2", - "--continued-execution", - "--tlimit", string_of_int (Real.ceil (1000.0 * Config.get ctxt SMT_Config.timeout))] - - fun select_class ctxt = - (* We currently do not support CVC4 *) - (* if Config.get ctxt cvc4_extensions then CVC4_Interface.smtlib_cvc4C - else *) SMTLIB_Interface.smtlibC -in - -val cvc4: SMT_patch_Solver.solver_config = { - name = "cvc4", - class = select_class, - avail = make_avail "CVC4", - command = make_command "CVC4", - options = cvc4_options, - smt_options = [(":produce-unsat-cores", "true")], - default_max_relevant = 400 (* FUDGE *), - outcome = on_first_line (outcome_of "unsat" "sat" "unknown"), - parse_proof = NONE, (* SOME (K CVC4_Proof_Parse.parse_proof), *) (* We currently do not support CVC4 *) - replay = NONE, - parse_model = NONE} - -end - -(* veriT *) - -val veriT: SMT_patch_Solver.solver_config = { - name = "verit", - class = K SMTLIB_Interface.smtlibC, - avail = make_avail "VERIT", - command = make_command "VERIT", - options = (fn ctxt => [ - "--proof-version=1", - "--proof-prune", - "--proof-merge", - "--disable-print-success", - "--disable-banner", - "--max-time=" ^ string_of_int (Real.ceil (Config.get ctxt SMT_Config.timeout))]), - smt_options = [(":produce-proofs", "true")], - default_max_relevant = 200 (* FUDGE *), - outcome = on_first_non_unsupported_line (outcome_of "unsat" "sat" - "(error \"status is not unsat.\")"), - parse_proof = NONE, (* SOME (K VeriT_Proof_Parse.parse_proof), *) (* Do not support 
veriT *) - replay = NONE, - parse_model = NONE} - -(* Z3 *) - -val z3_extensions = Attrib.setup_config_bool @{binding z3_extensions} (K false) - -local - fun z3_options ctxt = - ["smt.random_seed=" ^ string_of_int (Config.get ctxt SMT_Config.random_seed), - "smt.refine_inj_axioms=false", - "-T:" ^ string_of_int (Real.ceil (Config.get ctxt SMT_Config.timeout)), - "-smt2"] - - fun select_class ctxt = - if Config.get ctxt z3_extensions then Z3_Interface.smtlib_z3C else SMTLIB_Interface.smtlibC -in - -val z3: SMT_patch_Solver.solver_config = { - name = "z3", - class = select_class, - avail = make_avail "Z3", - command = make_command "Z3", - options = z3_options, - smt_options = [(":model", "true")], (* Produce model (instead of proof) *) - default_max_relevant = 350 (* FUDGE *), - outcome = on_first_line (outcome_of "unsat" "sat" "unknown"), - parse_proof = SOME Z3_Replay.parse_proof, - replay = SOME Z3_Replay.replay, - parse_model = SOME Z3_Model.parse_model } - -end - - -(* overall setup *) - -val _ = Theory.setup ( - SMT_patch_Solver.add_solver cvc3 #> - SMT_patch_Solver.add_solver cvc4 #> - SMT_patch_Solver.add_solver veriT #> - SMT_patch_Solver.add_solver z3) - -end; diff --git a/src/test-gen/src/main/new_smt_patch/smtlib_interface_patch.ML b/src/test-gen/src/main/new_smt_patch/smtlib_interface_patch.ML deleted file mode 100644 index 9507929..0000000 --- a/src/test-gen/src/main/new_smt_patch/smtlib_interface_patch.ML +++ /dev/null @@ -1,172 +0,0 @@ -(* Title: HOL/Tools/SMT/smtlib_interface.ML - Author: Sascha Boehme, TU Muenchen - Author: Jasmin Blanchette, TU Muenchen - -Interface to SMT solvers based on the SMT-LIB 2 format. -*) - -signature SMTLIB_INTERFACE = -sig - val smtlibC: SMT_Util.class - val add_logic: int * (term list -> string option) -> Context.generic -> Context.generic - val translate_config: Proof.context -> SMT_Translate.config - val assert_name_of_index: int -> string - val assert_index_of_name: string -> int - val assert_prefix : string -end; - -structure SMTLIB_Interface: SMTLIB_INTERFACE = -struct - -val smtlibC = ["smtlib"] - - -(* builtins *) - -local - fun int_num _ i = SOME (string_of_int i) - - fun is_linear [t] = SMT_Util.is_number t - | is_linear [t, u] = SMT_Util.is_number t orelse SMT_Util.is_number u - | is_linear _ = false - - fun times _ _ ts = - let val mk = Term.list_comb o pair @{const times (int)} - in if is_linear ts then SOME ("*", 2, ts, mk) else NONE end -in - -val setup_builtins = - fold (SMT_Builtin.add_builtin_typ smtlibC) [ - (@{typ bool}, K (SOME "Bool"), K (K NONE)), - (@{typ int}, K (SOME "Int"), int_num)] #> - fold (SMT_Builtin.add_builtin_fun' smtlibC) [ - (@{const True}, "true"), - (@{const False}, "false"), - (@{const Not}, "not"), - (@{const HOL.conj}, "and"), - (@{const HOL.disj}, "or"), - (@{const HOL.implies}, "=>"), - (@{const HOL.eq ('a)}, "="), - (@{const If ('a)}, "ite"), - (@{const less (int)}, "<"), - (@{const less_eq (int)}, "<="), - (@{const uminus (int)}, "-"), - (@{const plus (int)}, "+"), - (@{const minus (int)}, "-")] #> - SMT_Builtin.add_builtin_fun smtlibC - (Term.dest_Const @{const times (int)}, times) - -end - - -(* serialization *) - -(** logic **) - -fun fst_int_ord ((i1, _), (i2, _)) = int_ord (i1, i2) - -structure Logics = Generic_Data -( - type T = (int * (term list -> string option)) list - val empty = [] - val extend = I - fun merge data = Ord_List.merge fst_int_ord data -) - -fun add_logic pf = Logics.map (Ord_List.insert fst_int_ord pf) - -fun choose_logic ctxt ts = - let - fun choose [] = "AUFLIA" - | 
choose ((_, f) :: fs) = (case f ts of SOME s => s | NONE => choose fs) - in - (case choose (Logics.get (Context.Proof ctxt)) of - "" => "" (* for default Z3 logic, a subset of everything *) - | logic => "(set-logic " ^ logic ^ ")\n") - end - - -(** serialization **) - -fun var i = "?v" ^ string_of_int i - -fun tree_of_sterm l (SMT_Translate.SVar i) = SMTLIB.Sym (var (l - i - 1)) - | tree_of_sterm _ (SMT_Translate.SApp (n, [])) = SMTLIB.Sym n - | tree_of_sterm l (SMT_Translate.SApp (n, ts)) = - SMTLIB.S (SMTLIB.Sym n :: map (tree_of_sterm l) ts) - | tree_of_sterm _ (SMT_Translate.SLet _) = - raise Fail "SMT-LIB: unsupported let expression" - | tree_of_sterm l (SMT_Translate.SQua (q, ss, pats, t)) = - let - val l' = l + length ss - - fun quant_name SMT_Translate.SForall = "forall" - | quant_name SMT_Translate.SExists = "exists" - - fun gen_trees_of_pat keyword ps = - [SMTLIB.Key keyword, SMTLIB.S (map (tree_of_sterm l') ps)] - fun trees_of_pat (SMT_Translate.SPat ps) = gen_trees_of_pat "pattern" ps - | trees_of_pat (SMT_Translate.SNoPat ps) = gen_trees_of_pat "no-pattern" ps - fun tree_of_pats [] t = t - | tree_of_pats pats t = SMTLIB.S (SMTLIB.Sym "!" :: t :: maps trees_of_pat pats) - - val vs = map_index (fn (i, ty) => - SMTLIB.S [SMTLIB.Sym (var (l + i)), SMTLIB.Sym ty]) ss - - val body = t - |> tree_of_sterm l' - |> tree_of_pats pats - in - SMTLIB.S [SMTLIB.Sym (quant_name q), SMTLIB.S vs, body] - end - - -fun sctrarg (sel, typ) = "(" ^ sel ^ " " ^ typ ^ ")" -fun sctr (name, args) = enclose "(" ")" (space_implode " " (name :: map sctrarg args)) -fun sdatatype (name, ctrs) = enclose "(" ")" (space_implode " " (name :: map sctr ctrs)) - -fun string_of_fun (f, (ss, s)) = f ^ " (" ^ space_implode " " ss ^ ") " ^ s - -fun named_sterm s t = SMTLIB.S [SMTLIB.Sym "!", t, SMTLIB.Key "named", SMTLIB.Sym s] - -val assert_prefix = "a" - -fun assert_name_of_index i = assert_prefix ^ string_of_int i -fun assert_index_of_name s = the_default ~1 (Int.fromString (unprefix assert_prefix s)) - -fun sdtyp (fp, dtyps) = - Buffer.add (enclose ("(declare-" ^ BNF_FP_Util.co_prefix fp ^ "datatypes () (") "))\n" - (space_implode "\n " (map sdatatype dtyps))) - -fun serialize smt_options comments {logic, sorts, dtyps, funcs} ts = - let - val unsat_core = member (op =) smt_options (":produce-unsat-cores", "true") - val model = member (op =) smt_options (":model", "true") - in - Buffer.empty - |> fold (Buffer.add o enclose "; " "\n") comments - |> fold (fn (k, v) => Buffer.add ("(set-option " ^ k ^ " " ^ v ^ ")\n")) smt_options - |> Buffer.add logic - |> fold (Buffer.add o enclose "(declare-sort " " 0)\n") (sort fast_string_ord sorts) - |> fold sdtyp (AList.coalesce (op =) dtyps) - |> fold (Buffer.add o enclose "(declare-fun " ")\n" o string_of_fun) - (sort (fast_string_ord o apply2 fst) funcs) - |> fold (fn (i, t) => Buffer.add (enclose "(assert " ")\n" - (SMTLIB.str_of (named_sterm (assert_name_of_index i) (tree_of_sterm 0 t))))) (map_index I ts) - |> Buffer.add "(push)\n(check-sat)\n" (* Workaround to get models, see *) - |> Buffer.add (if unsat_core then "(get-unsat-core)\n" else if model then "(get-model)\n" else "(get-proof)\n") - |> Buffer.content - end - -(* interface *) - -fun translate_config ctxt = { - logic = choose_logic ctxt, - fp_kinds = [], - serialize = serialize} - -val _ = Theory.setup (Context.theory_map - (setup_builtins #> - SMT_Translate.add_config (smtlibC, translate_config))) - -end; diff --git a/src/test-gen/src/main/new_smt_patch/smtlib_patch.ML 
b/src/test-gen/src/main/new_smt_patch/smtlib_patch.ML deleted file mode 100644 index edc0c05..0000000 --- a/src/test-gen/src/main/new_smt_patch/smtlib_patch.ML +++ /dev/null @@ -1,202 +0,0 @@ -(* Title: HOL/Tools/SMT/smtlib.ML - Author: Sascha Boehme, TU Muenchen - -Parsing and generating SMT-LIB 2. -*) - -signature SMTLIB = -sig - exception PARSE of int * string - datatype tree = - Num of int | - Dec of int * int | - Str of string | - Sym of string | - Key of string | - S of tree list - val parse: string list -> tree - val pretty_tree: tree -> Pretty.T - val str_of: tree -> string -end; - -structure SMTLIB: SMTLIB = -struct - -(* data structures *) - -exception PARSE of int * string - -datatype tree = - Num of int | - Dec of int * int | - Str of string | - Sym of string | - Key of string | - S of tree list - -datatype unfinished = None | String of string | Symbol of string - - -(* utilities *) - -fun read_raw pred l cs = - (case take_prefix pred cs of - ([], []) => raise PARSE (l, "empty token") - | ([], c :: _) => raise PARSE (l, "unexpected character " ^ quote c) - | x => x) - - -(* numerals and decimals *) - -fun int_of cs = fst (read_int cs) - -fun read_num l cs = - (case read_raw Symbol.is_ascii_digit l cs of - (cs1, "." :: cs') => - let val (cs2, cs'') = read_raw Symbol.is_ascii_digit l cs' - in (Dec (int_of cs1, int_of cs2), cs'') end - | (cs1, cs2) => (Num (int_of cs1), cs2)) - - -(* binary numbers *) - -fun is_bin c = (c = "0" orelse c = "1") - -fun read_bin l cs = read_raw is_bin l cs |>> Num o fst o read_radix_int 2 - - -(* hex numbers *) - -val is_hex = member (op =) (raw_explode "0123456789abcdefABCDEF") - -fun within c1 c2 c = (ord c1 <= ord c andalso ord c <= ord c2) - -fun unhex i [] = i - | unhex i (c :: cs) = - if within "0" "9" c then unhex (i * 16 + (ord c - ord "0")) cs - else if within "a" "f" c then unhex (i * 16 + (ord c - ord "a" + 10)) cs - else if within "A" "F" c then unhex (i * 16 + (ord c - ord "A" + 10)) cs - else raise Fail ("bad hex character " ^ quote c) - -fun read_hex l cs = read_raw is_hex l cs |>> Num o unhex 0 - - -(* symbols *) - -val symbol_chars = raw_explode "~!@$%^&*_+=<>.?/-" - -fun is_sym c = - Symbol.is_ascii_letter c orelse - Symbol.is_ascii_digit c orelse - member (op =) symbol_chars c - -fun read_sym f l cs = read_raw is_sym l cs |>> f o implode - - -(* quoted tokens *) - -fun read_quoted stop (escape, replacement) cs = - let - fun read _ [] = NONE - | read rs (cs as (c :: cs')) = - if is_prefix (op =) stop cs then - SOME (implode (rev rs), drop (length stop) cs) - else if not (null escape) andalso is_prefix (op =) escape cs then - read (replacement :: rs) (drop (length escape) cs) - else read (c :: rs) cs' - in read [] cs end - -fun read_string cs = read_quoted ["\\", "\""] (["\\", "\\"], "\\") cs -fun read_symbol cs = read_quoted ["|"] ([], "") cs - - -(* core parser *) - -fun read _ [] rest tss = (rest, tss) - | read l ("(" :: cs) None tss = read l cs None ([] :: tss) - | read l (")" :: cs) None (ts1 :: ts2 :: tss) = - read l cs None ((S (rev ts1) :: ts2) :: tss) - | read l ("#" :: "x" :: cs) None (ts :: tss) = - token read_hex l cs ts tss - | read l ("#" :: "b" :: cs) None (ts :: tss) = - token read_bin l cs ts tss - | read l (":" :: cs) None (ts :: tss) = - token (read_sym Key) l cs ts tss - | read l ("\"" :: cs) None (ts :: tss) = - quoted read_string String Str l "" cs ts tss - | read l ("|" :: cs) None (ts :: tss) = - quoted read_symbol Symbol Sym l "" cs ts tss - | read l ((c as "!") :: cs) None (ts :: tss) = - token (fn _ => pair 
(Sym c)) l cs ts tss - | read l (c :: cs) None (ts :: tss) = - if Symbol.is_ascii_blank c then read l cs None (ts :: tss) - else if Symbol.is_digit c then token read_num l (c :: cs) ts tss - else token (read_sym Sym) l (c :: cs) ts tss - | read l cs (String s) (ts :: tss) = - quoted read_string String Str l s cs ts tss - | read l cs (Symbol s) (ts :: tss) = - quoted read_symbol Symbol Sym l s cs ts tss - | read l _ _ [] = raise PARSE (l, "bad parser state") - -and token f l cs ts tss = - let val (t, cs') = f l cs - in read l cs' None ((t :: ts) :: tss) end - -and quoted r f g l s cs ts tss = - (case r cs of - NONE => (f (s ^ implode cs), ts :: tss) - | SOME (s', cs') => read l cs' None ((g (s ^ s') :: ts) :: tss)) - - - -(* overall parser *) - -fun read_line l line = read l (raw_explode line) - -fun first_line line = - let - fun first i = - if size line <= i then "\n" else - let val c = nth_string line i in - if c = " " then first (i+1) else c - end - in - first 0 - end - -fun add_line line (l, (None, tss)) = - if size line = 0 orelse first_line line = ";" then (l + 1, (None, tss)) - else (l + 1, read_line l line None tss) - | add_line line (l, (unfinished, tss)) = - (l + 1, read_line l line unfinished tss) - -fun finish (_, (None, [[t]])) = t - | finish (l, _) = raise PARSE (l, "bad nesting") - -fun parse lines = finish (fold add_line lines (1, (None, [[]]))) - - -(* pretty printer *) - -fun pretty_tree (Num i) = Pretty.str (string_of_int i) - | pretty_tree (Dec (i, j)) = - Pretty.str (string_of_int i ^ "." ^ string_of_int j) - | pretty_tree (Str s) = - raw_explode s - |> maps (fn "\"" => ["\\", "\""] | "\\" => ["\\", "\\"] | c => [c]) - |> implode - |> enclose "\"" "\"" - |> Pretty.str - | pretty_tree (Sym s) = - if String.isPrefix "(" s (* for bit vector functions *) orelse - forall is_sym (raw_explode s) then - Pretty.str s - else - Pretty.str ("|" ^ s ^ "|") - | pretty_tree (Key s) = Pretty.str (":" ^ s) - | pretty_tree (S trees) = - Pretty.enclose "(" ")" (Pretty.separate "" (map pretty_tree trees)) - -val str_of = Pretty.unformatted_string_of o pretty_tree - -end; diff --git a/src/test-gen/src/main/new_smt_patch/z3_model.ML b/src/test-gen/src/main/new_smt_patch/z3_model.ML deleted file mode 100644 index 5c2b0f8..0000000 --- a/src/test-gen/src/main/new_smt_patch/z3_model.ML +++ /dev/null @@ -1,113 +0,0 @@ -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * z3_model.ML --- a parser for models generated by Z3. - * This file is part of HOL-TestGen. - * - * Copyright (c) 2005-2010 ETH Zurich, Switzerland - * 2008-2013 Achim D. Brucker, Germany - * 2009-2013 Universite Paris-Sud, France - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ******************************************************************************) -(* $Id:$ *) - -signature Z3_MODEL = -sig - val parse_model : SMT_Translate.replay_data -> string list -> SMT_patch_Solver.parsed_model -end - - - -structure Z3_Model : Z3_MODEL = -struct - -open SMTLIB_Proof - - -(* Extracting definitions of a variable or a function symbol *) - -(* Non-functional variables: the codomain is empty *) -fun get_def ctxt smt_ctxt (SMTLIB.S [SMTLIB.Sym "define-fun", n, SMTLIB.S [], _, v]) = - let - (* Parsing the variable *) - val (x, smt_ctxt2) = term_of n smt_ctxt - (* For debugging *) - (* val _ = tracing ("Variable: "^(Pretty.string_of (Syntax.pretty_term ctxt x))) *) - (* Parsing its value *) - val (t, smt_ctxt3) = term_of v smt_ctxt2 - (* For debugging *) - (* val _ = tracing ("Value: "^(Pretty.string_of (Syntax.pretty_term ctxt t))) *) - in - SOME (x, t, smt_ctxt3) - end - - (* UNDER PROGRESS *) - (* Functional variables: the codomain is nonempty *) - | get_def ctxt smt_ctxt (SMTLIB.S [SMTLIB.Sym "define-fun", n, SMTLIB.S (absname::args), _, v]) = NONE (* UNDER PROGRESS *) - - (* Other definitions: dismissed for the moment *) - | get_def _ _ v = NONE - - -(* Extracting all the definitions *) - -fun get_defs ctxt smt_ctxt smtlib_defs = - snd ( - List.foldl (fn (def, (smt_ctxt', acc)) => - case get_def ctxt smt_ctxt' def of - NONE => (smt_ctxt', acc) - | SOME (a, b, smt_ctxt'') => (smt_ctxt'', (a,b)::acc)) - (smt_ctxt, []) smtlib_defs - ) - - -(* Top-level function to interpret Z3 models *) - -fun parse_model_main ctxt typs funs lines = - let - (* val _ = tracing "Lines:\n" - val _ = List.app (fn l => tracing (l^"")) lines *) - val res = - case SMTLIB.parse lines of - SMTLIB.S ((SMTLIB.Sym "model") :: vs) => get_defs ctxt (empty_context ctxt typs funs) vs - | ts => raise SMTLIB_PARSE ("bad Z3 model declaration", ts) - in - {const_defs = res} - end - -fun parse_model - ({context = ctxt, typs, terms, ...} : SMT_Translate.replay_data) - output = - parse_model_main ctxt typs terms output - -end diff --git a/src/test-gen/src/main/new_smt_patch/z3_replay_patch.ML b/src/test-gen/src/main/new_smt_patch/z3_replay_patch.ML deleted file mode 100644 index f8d49d8..0000000 --- a/src/test-gen/src/main/new_smt_patch/z3_replay_patch.ML +++ /dev/null @@ -1,262 +0,0 @@ -(* Title: HOL/Tools/SMT/z3_replay.ML - Author: Sascha Boehme, TU Muenchen - Author: Jasmin Blanchette, TU Muenchen - -Z3 proof parsing and replay. 
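For orientation: the serializer in smtlib_interface_patch.ML above ends every problem with (push) (check-sat) followed by (get-model) when the (:model true) option is set, and z3_model.ML above parses Z3's answer to that command. With illustrative constants x and b, the answer has roughly the shape

    (model
      (define-fun x () Int 3)
      (define-fun b () Bool true))

get_def converts each argument-free define-fun into a pair of HOL terms, so parse_model collects the pairs (x, 3) and (b, True) in const_defs for this answer; define-funs with a non-empty argument list, i.e. function-valued constants, are currently skipped by the branch marked UNDER PROGRESS.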
-*) - -signature Z3_REPLAY = -sig - val parse_proof: Proof.context -> SMT_Translate.replay_data -> - ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> - SMT_patch_Solver.parsed_proof - val replay: Proof.context -> SMT_Translate.replay_data -> string list -> thm -end; - -structure Z3_Replay: Z3_REPLAY = -struct - -fun params_of t = Term.strip_qnt_vars @{const_name Pure.all} t - -fun varify ctxt thm = - let - val maxidx = Thm.maxidx_of thm + 1 - val vs = params_of (Thm.prop_of thm) - val vars = map_index (fn (i, (n, T)) => Var ((n, i + maxidx), T)) vs - in Drule.forall_elim_list (map (Thm.cterm_of ctxt) vars) thm end - -fun add_paramTs names t = - fold2 (fn n => fn (_, T) => AList.update (op =) (n, T)) names (params_of t) - -fun new_fixes ctxt nTs = - let - val (ns, ctxt') = Variable.variant_fixes (replicate (length nTs) "") ctxt - fun mk (n, T) n' = (n, Thm.cterm_of ctxt' (Free (n', T))) - in (ctxt', Symtab.make (map2 mk nTs ns)) end - -fun forall_elim_term ct (Const (@{const_name Pure.all}, _) $ (a as Abs _)) = - Term.betapply (a, Thm.term_of ct) - | forall_elim_term _ qt = raise TERM ("forall_elim'", [qt]) - -fun apply_fixes elim env = fold (elim o the o Symtab.lookup env) - -val apply_fixes_prem = uncurry o apply_fixes Thm.forall_elim -val apply_fixes_concl = apply_fixes forall_elim_term - -fun export_fixes env names = Drule.forall_intr_list (map (the o Symtab.lookup env) names) - -fun under_fixes f ctxt (prems, nthms) names concl = - let - val thms1 = map (varify ctxt) prems - val (ctxt', env) = - add_paramTs names concl [] - |> fold (uncurry add_paramTs o apsnd Thm.prop_of) nthms - |> new_fixes ctxt - val thms2 = map (apply_fixes_prem env) nthms - val t = apply_fixes_concl env names concl - in export_fixes env names (f ctxt' (thms1 @ thms2) t) end - -fun replay_thm ctxt assumed nthms (Z3_Proof.Z3_Step {id, rule, concl, fixes, is_fix_step, ...}) = - if Z3_Proof.is_assumption rule then - (case Inttab.lookup assumed id of - SOME (_, thm) => thm - | NONE => Thm.assume (Thm.cterm_of ctxt concl)) - else - under_fixes (Z3_Replay_Methods.method_for rule) ctxt - (if is_fix_step then (map snd nthms, []) else ([], nthms)) fixes concl - -fun replay_step ctxt assumed (step as Z3_Proof.Z3_Step {id, rule, prems, fixes, ...}) state = - let - val (proofs, stats) = state - val nthms = map (the o Inttab.lookup proofs) prems - val replay = Timing.timing (replay_thm ctxt assumed nthms) - val ({elapsed, ...}, thm) = - SMT_Config.with_time_limit ctxt SMT_Config.reconstruction_step_timeout replay step - handle Timeout.TIMEOUT t => raise SMT_Failure.SMT SMT_Failure.Time_Out - val stats' = Symtab.cons_list (Z3_Proof.string_of_rule rule, Time.toMilliseconds elapsed) stats - in (Inttab.update (id, (fixes, thm)) proofs, stats') end - -local - val remove_trigger = mk_meta_eq @{thm trigger_def} - val remove_fun_app = mk_meta_eq @{thm fun_app_def} - - fun rewrite_conv _ [] = Conv.all_conv - | rewrite_conv ctxt eqs = Simplifier.full_rewrite (empty_simpset ctxt addsimps eqs) - - val rewrite_true_rule = @{lemma "True == ~ False" by simp} - val prep_rules = [@{thm Let_def}, remove_trigger, remove_fun_app, rewrite_true_rule] - - fun rewrite _ [] = I - | rewrite ctxt eqs = Conv.fconv_rule (rewrite_conv ctxt eqs) - - fun lookup_assm assms_net ct = - Z3_Replay_Util.net_instances assms_net ct - |> map (fn ithm as (_, thm) => (ithm, Thm.cprop_of thm aconvc ct)) -in - -fun add_asserted outer_ctxt rewrite_rules assms steps ctxt = - let - val eqs = map (rewrite ctxt [rewrite_true_rule]) 
rewrite_rules - val eqs' = union Thm.eq_thm eqs prep_rules - - val assms_net = - assms - |> map (apsnd (rewrite ctxt eqs')) - |> map (apsnd (Conv.fconv_rule Thm.eta_conversion)) - |> Z3_Replay_Util.thm_net_of snd - - fun revert_conv ctxt = rewrite_conv ctxt eqs' then_conv Thm.eta_conversion - - fun assume thm ctxt = - let - val ct = Thm.cprem_of thm 1 - val (thm', ctxt') = yield_singleton Assumption.add_assumes ct ctxt - in (thm' RS thm, ctxt') end - - fun add1 id fixes thm1 ((i, th), exact) ((iidths, thms), (ctxt, ptab)) = - let - val (thm, ctxt') = if exact then (Thm.implies_elim thm1 th, ctxt) else assume thm1 ctxt - val thms' = if exact then thms else th :: thms - in (((i, (id, th)) :: iidths, thms'), (ctxt', Inttab.update (id, (fixes, thm)) ptab)) end - - fun add (Z3_Proof.Z3_Step {id, rule, concl, fixes, ...}) - (cx as ((iidths, thms), (ctxt, ptab))) = - if Z3_Proof.is_assumption rule andalso rule <> Z3_Proof.Hypothesis then - let - val ct = Thm.cterm_of ctxt concl - val thm1 = Thm.trivial ct |> Conv.fconv_rule (Conv.arg1_conv (revert_conv outer_ctxt)) - val thm2 = singleton (Variable.export ctxt outer_ctxt) thm1 - in - (case lookup_assm assms_net (Thm.cprem_of thm2 1) of - [] => - let val (thm, ctxt') = assume thm1 ctxt - in ((iidths, thms), (ctxt', Inttab.update (id, (fixes, thm)) ptab)) end - | ithms => fold (add1 id fixes thm1) ithms cx) - end - else - cx - in fold add steps (([], []), (ctxt, Inttab.empty)) end - -end - -(* |- (EX x. P x) = P c |- ~ (ALL x. P x) = ~ P c *) -local - val sk_rules = @{lemma - "c = (SOME x. P x) ==> (EX x. P x) = P c" - "c = (SOME x. ~ P x) ==> (~ (ALL x. P x)) = (~ P c)" - by (metis someI_ex)+} -in - -fun discharge_sk_tac ctxt i st = - (resolve_tac ctxt @{thms trans} i - THEN resolve_tac ctxt sk_rules i - THEN (resolve_tac ctxt @{thms refl} ORELSE' discharge_sk_tac ctxt) (i+1) - THEN resolve_tac ctxt @{thms refl} i) st - -end - -val true_thm = @{lemma "~False" by simp} -fun make_discharge_rules rules = rules @ [@{thm allI}, @{thm refl}, @{thm reflexive}, true_thm] - -val intro_def_rules = @{lemma - "(~ P | P) & (P | ~ P)" - "(P | ~ P) & (~ P | P)" - by fast+} - -fun discharge_assms_tac ctxt rules = - REPEAT - (HEADGOAL (resolve_tac ctxt (intro_def_rules @ rules) ORELSE' - SOLVED' (discharge_sk_tac ctxt))) - -fun discharge_assms ctxt rules thm = - (if Thm.nprems_of thm = 0 then - thm - else - (case Seq.pull (discharge_assms_tac ctxt rules thm) of - SOME (thm', _) => thm' - | NONE => raise THM ("failed to discharge premise", 1, [thm]))) - |> Goal.norm_result ctxt - -fun discharge rules outer_ctxt inner_ctxt = - singleton (Proof_Context.export inner_ctxt outer_ctxt) - #> discharge_assms outer_ctxt (make_discharge_rules rules) - -fun parse_proof outer_ctxt - ({context = ctxt, typs, terms, ll_defs, rewrite_rules, assms} : SMT_Translate.replay_data) - xfacts prems concl output = - let - val (steps, ctxt2) = Z3_Proof.parse typs terms output ctxt - val ((iidths, _), _) = add_asserted outer_ctxt rewrite_rules assms steps ctxt2 - - fun id_of_index i = the_default ~1 (Option.map fst (AList.lookup (op =) iidths i)) - - val conjecture_i = 0 - val prems_i = 1 - val facts_i = prems_i + length prems - - val conjecture_id = id_of_index conjecture_i - val prem_ids = map id_of_index (prems_i upto facts_i - 1) - val fact_ids' = - map_filter (fn (i, (id, _)) => try (apsnd (nth xfacts)) (id, i - facts_i)) iidths - val helper_ids' = map_filter (try (fn (~1, idth) => idth)) iidths - - val fact_helper_ts = - map (fn (_, th) => (ATP_Util.short_thm_name ctxt th, Thm.prop_of 
th)) helper_ids' @ - map (fn (_, ((s, _), th)) => (s, Thm.prop_of th)) fact_ids' - val fact_helper_ids' = - map (apsnd (ATP_Util.short_thm_name ctxt)) helper_ids' @ map (apsnd (fst o fst)) fact_ids' - in - {outcome = NONE, fact_ids = SOME fact_ids', - atp_proof = fn () => Z3_Isar.atp_proof_of_z3_proof ctxt ll_defs rewrite_rules prems concl - fact_helper_ts prem_ids conjecture_id fact_helper_ids' steps} - end - -fun intermediate_statistics ctxt start total = - SMT_Config.statistics_msg ctxt (fn current => - "Reconstructed " ^ string_of_int current ^ " of " ^ string_of_int total ^ " steps in " ^ - string_of_int (Time.toMilliseconds (#elapsed (Timing.result start))) ^ " ms") - -fun pretty_statistics total stats = - let - fun mean_of is = - let - val len = length is - val mid = len div 2 - in if len mod 2 = 0 then (nth is (mid - 1) + nth is mid) div 2 else nth is mid end - fun pretty_item name p = Pretty.item (Pretty.separate ":" [Pretty.str name, p]) - fun pretty (name, milliseconds) = pretty_item name (Pretty.block (Pretty.separate "," [ - Pretty.str (string_of_int (length milliseconds) ^ " occurrences") , - Pretty.str (string_of_int (mean_of milliseconds) ^ " ms mean time"), - Pretty.str (string_of_int (fold Integer.max milliseconds 0) ^ " ms maximum time"), - Pretty.str (string_of_int (fold Integer.add milliseconds 0) ^ " ms total time")])) - in - Pretty.big_list "Z3 proof reconstruction statistics:" ( - pretty_item "total time" (Pretty.str (string_of_int total ^ " ms")) :: - map pretty (Symtab.dest stats)) - end - -fun replay outer_ctxt - ({context = ctxt, typs, terms, rewrite_rules, assms, ...} : SMT_Translate.replay_data) output = - let - val (steps, ctxt2) = Z3_Proof.parse typs terms output ctxt - val ((_, rules), (ctxt3, assumed)) = add_asserted outer_ctxt rewrite_rules assms steps ctxt2 - val ctxt4 = - ctxt3 - |> put_simpset (Z3_Replay_Util.make_simpset ctxt3 []) - |> Config.put SAT.solver (Config.get ctxt3 SMT_Config.sat_solver) - val len = length steps - val start = Timing.start () - val print_runtime_statistics = intermediate_statistics ctxt4 start len - fun blockwise f (i, x) y = - (if i > 0 andalso i mod 100 = 0 then print_runtime_statistics i else (); f x y) - val (proofs, stats) = - fold_index (blockwise (replay_step ctxt4 assumed)) steps (assumed, Symtab.empty) - val _ = print_runtime_statistics len - val total = Time.toMilliseconds (#elapsed (Timing.result start)) - val (_, Z3_Proof.Z3_Step {id, ...}) = split_last steps - val _ = SMT_Config.statistics_msg ctxt4 (Pretty.string_of o pretty_statistics total) stats - in - Inttab.lookup proofs id |> the |> snd |> discharge rules outer_ctxt ctxt4 - end - -end; diff --git a/src/test-gen/src/main/smt_patch/old_smt_config_patch.ML b/src/test-gen/src/main/smt_patch/old_smt_config_patch.ML deleted file mode 100644 index d498f9b..0000000 --- a/src/test-gen/src/main/smt_patch/old_smt_config_patch.ML +++ /dev/null @@ -1,254 +0,0 @@ -(* Title: HOL/Library/Old_SMT/old_smt_config.ML - Author: Sascha Boehme, TU Muenchen - -Configuration options and diagnostic tools for SMT. 
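The options declared below are ordinary Isabelle configuration options and attributes, so a theory can select and tune this (old) SMT setup roughly like

    declare [[old_smt_solver = z3, old_smt_timeout = 60, old_smt_trace = true]]

and inspect the current state with the diagnostic command old_smt_status; the attribute and option names are the ones defined in this file, while the concrete values above are illustrative only.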
-*) - -signature OLD_SMT_CONFIG = -sig - (*solver*) - type solver_info = { - name: string, - class: Proof.context -> Old_SMT_Utils.class, - avail: unit -> bool, - options: Proof.context -> string list } - val add_solver: solver_info -> Context.generic -> Context.generic - val set_solver_options: string * string -> Context.generic -> Context.generic - val is_available: Proof.context -> string -> bool - val available_solvers_of: Proof.context -> string list - val select_solver: string -> Context.generic -> Context.generic - val solver_of: Proof.context -> string - val solver_class_of: Proof.context -> Old_SMT_Utils.class - val solver_options_of: Proof.context -> string list - - (*options*) - val oracle: bool Config.T - val datatypes: bool Config.T - val timeout: real Config.T - val random_seed: int Config.T - val read_only_certificates: bool Config.T - val verbose: bool Config.T - val trace: bool Config.T - val trace_used_facts: bool Config.T - val monomorph_limit: int Config.T - val monomorph_instances: int Config.T - val infer_triggers: bool Config.T - val filter_only_facts: bool Config.T - val debug_files: string Config.T - - (*tools*) - val with_timeout: Proof.context -> ('a -> 'b) -> 'a -> 'b - - (*diagnostics*) - val trace_msg: Proof.context -> ('a -> string) -> 'a -> unit - val verbose_msg: Proof.context -> ('a -> string) -> 'a -> unit - - (*certificates*) - val select_certificates: string -> Context.generic -> Context.generic - val certificates_of: Proof.context -> Cache_IO.cache option - - (*setup*) - val setup: theory -> theory - val print_setup: Proof.context -> unit -end - -structure Old_SMT_Config: OLD_SMT_CONFIG = -struct - -(* solver *) - -type solver_info = { - name: string, - class: Proof.context -> Old_SMT_Utils.class, - avail: unit -> bool, - options: Proof.context -> string list } - -(* FIXME just one data slot (record) per program unit *) -structure Solvers = Generic_Data -( - type T = (solver_info * string list) Symtab.table * string option - val empty = (Symtab.empty, NONE) - val extend = I - fun merge ((ss1, s1), (ss2, s2)) = - (Symtab.merge (K true) (ss1, ss2), merge_options (s1, s2)) -) - -fun set_solver_options (name, options) = - let val opts = String.tokens (Symbol.is_ascii_blank o str) options - in Solvers.map (apfst (Symtab.map_entry name (apsnd (K opts)))) end - -fun add_solver (info as {name, ...} : solver_info) context = - if Symtab.defined (fst (Solvers.get context)) name then - error ("Solver already registered: " ^ quote name) - else - context - |> Solvers.map (apfst (Symtab.update (name, (info, [])))) - |> Context.map_theory (Attrib.setup (Binding.name ("old_" ^ name ^ "_options")) - (Scan.lift (@{keyword "="} |-- Args.name) >> - (Thm.declaration_attribute o K o set_solver_options o pair name)) - ("additional command line options for SMT solver " ^ quote name)) - -fun all_solvers_of ctxt = Symtab.keys (fst (Solvers.get (Context.Proof ctxt))) - -fun solver_name_of ctxt = snd (Solvers.get (Context.Proof ctxt)) - -fun is_available ctxt name = - (case Symtab.lookup (fst (Solvers.get (Context.Proof ctxt))) name of - SOME ({avail, ...}, _) => avail () - | NONE => false) - -fun available_solvers_of ctxt = - filter (is_available ctxt) (all_solvers_of ctxt) - -fun warn_solver (Context.Proof ctxt) name = - if Context_Position.is_visible ctxt then - warning ("The SMT solver " ^ quote name ^ " is not installed.") - else () - | warn_solver _ _ = (); - -fun select_solver name context = - let - val ctxt = Context.proof_of context - val upd = Solvers.map (apsnd (K (SOME 
name))) - in - if not (member (op =) (all_solvers_of ctxt) name) then - error ("Trying to select unknown solver: " ^ quote name) - else if not (is_available ctxt name) then - (warn_solver context name; upd context) - else upd context - end - -fun no_solver_err () = error "No SMT solver selected" - -fun solver_of ctxt = - (case solver_name_of ctxt of - SOME name => name - | NONE => no_solver_err ()) - -fun solver_info_of default select ctxt = - (case Solvers.get (Context.Proof ctxt) of - (solvers, SOME name) => select (Symtab.lookup solvers name) - | (_, NONE) => default ()) - -fun solver_class_of ctxt = - let fun class_of ({class, ...}: solver_info, _) = class ctxt - in solver_info_of no_solver_err (class_of o the) ctxt end - -fun solver_options_of ctxt = - let - fun all_options NONE = [] - | all_options (SOME ({options, ...} : solver_info, opts)) = - opts @ options ctxt - in solver_info_of (K []) all_options ctxt end - -val setup_solver = - Attrib.setup @{binding old_smt_solver} - (Scan.lift (@{keyword "="} |-- Args.name) >> - (Thm.declaration_attribute o K o select_solver)) - "SMT solver configuration" - - -(* options *) - -val oracle = Attrib.setup_config_bool @{binding old_smt_oracle} (K true) -val datatypes = Attrib.setup_config_bool @{binding old_smt_datatypes} (K false) -val timeout = Attrib.setup_config_real @{binding old_smt_timeout} (K 30.0) -val random_seed = Attrib.setup_config_int @{binding old_smt_random_seed} (K 1) -val read_only_certificates = Attrib.setup_config_bool @{binding old_smt_read_only_certificates} (K false) -val verbose = Attrib.setup_config_bool @{binding old_smt_verbose} (K true) -val trace = Attrib.setup_config_bool @{binding old_smt_trace} (K false) -val trace_used_facts = Attrib.setup_config_bool @{binding old_smt_trace_used_facts} (K false) -val monomorph_limit = Attrib.setup_config_int @{binding old_smt_monomorph_limit} (K 10) -val monomorph_instances = Attrib.setup_config_int @{binding old_smt_monomorph_instances} (K 500) -val infer_triggers = Attrib.setup_config_bool @{binding old_smt_infer_triggers} (K false) -val filter_only_facts = Attrib.setup_config_bool @{binding old_smt_filter_only_facts} (K false) -val debug_files = Attrib.setup_config_string @{binding old_smt_debug_files} (K "") - - -(* diagnostics *) - -fun cond_trace flag f x = if flag then tracing ("SMT: " ^ f x) else () - -fun verbose_msg ctxt = cond_trace (Config.get ctxt verbose) - -fun trace_msg ctxt = cond_trace (Config.get ctxt trace) - - -(* tools *) - -fun with_timeout ctxt f x = - Timeout.apply (seconds (Config.get ctxt timeout)) f x - handle Timeout.TIMEOUT t => raise Old_SMT_patch_Failure.SMT Old_SMT_patch_Failure.Time_Out - - -(* certificates *) - -(* FIXME just one data slot (record) per program unit *) -structure Certificates = Generic_Data -( - type T = Cache_IO.cache option - val empty = NONE - val extend = I - fun merge (s, _) = s (* FIXME merge options!? 
*) -) - -val get_certificates_path = - Option.map (Cache_IO.cache_path_of) o Certificates.get o Context.Proof - -fun select_certificates name context = context |> Certificates.put ( - if name = "" then NONE - else - Path.explode name - |> Path.append (Resources.master_directory (Context.theory_of context)) - |> SOME o Cache_IO.unsynchronized_init) - -val certificates_of = Certificates.get o Context.Proof - -val setup_certificates = - Attrib.setup @{binding old_smt_certificates} - (Scan.lift (@{keyword "="} |-- Args.name) >> - (Thm.declaration_attribute o K o select_certificates)) - "SMT certificates configuration" - - -(* setup *) - -val setup = - setup_solver #> - setup_certificates - -fun print_setup ctxt = - let - fun string_of_bool b = if b then "true" else "false" - - val names = available_solvers_of ctxt - val ns = if null names then ["(none)"] else sort_strings names - val n = the_default "(none)" (solver_name_of ctxt) - val opts = solver_options_of ctxt - - val t = string_of_real (Config.get ctxt timeout) - - val certs_filename = - (case get_certificates_path ctxt of - SOME path => Path.print path - | NONE => "(disabled)") - in - Pretty.writeln (Pretty.big_list "SMT setup:" [ - Pretty.str ("Current SMT solver: " ^ n), - Pretty.str ("Current SMT solver options: " ^ space_implode " " opts), - Pretty.str_list "Available SMT solvers: " "" ns, - Pretty.str ("Current timeout: " ^ t ^ " seconds"), - Pretty.str ("With proofs: " ^ - string_of_bool (not (Config.get ctxt oracle))), - Pretty.str ("Certificates cache: " ^ certs_filename), - Pretty.str ("Fixed certificates: " ^ - string_of_bool (Config.get ctxt read_only_certificates))]) - end - -val _ = - Outer_Syntax.command @{command_keyword old_smt_status} - "show the available SMT solvers, the currently selected SMT solver, \ - \and the values of SMT configuration options" - (Scan.succeed (Toplevel.keep (print_setup o Toplevel.context_of))) - -end diff --git a/src/test-gen/src/main/smt_patch/old_smt_failure_patch.ML b/src/test-gen/src/main/smt_patch/old_smt_failure_patch.ML deleted file mode 100644 index 2910062..0000000 --- a/src/test-gen/src/main/smt_patch/old_smt_failure_patch.ML +++ /dev/null @@ -1,61 +0,0 @@ -(* Title: HOL/Library/Old_SMT/old_smt_failure.ML - Author: Sascha Boehme, TU Muenchen - -Failures and exception of SMT. 
-*) - -signature OLD_SMT_FAILURE = -sig - type counterexample = { - is_real_cex: bool, - free_constraints: term list, - const_defs: term list} - datatype failure = - Counterexample of counterexample | - Time_Out | - Out_Of_Memory | - Abnormal_Termination of int | - Other_Failure of string - val pretty_counterexample: Proof.context -> counterexample -> Pretty.T - val string_of_failure: Proof.context -> failure -> string - exception SMT of failure -end - -structure Old_SMT_patch_Failure: OLD_SMT_FAILURE = -struct - -type counterexample = { - is_real_cex: bool, - free_constraints: term list, - const_defs: term list} - -datatype failure = - Counterexample of counterexample | - Time_Out | - Out_Of_Memory | - Abnormal_Termination of int | - Other_Failure of string - -fun pretty_counterexample ctxt {is_real_cex, free_constraints, const_defs} = - let - val msg = - if is_real_cex then "Counterexample found (possibly spurious)" - else "Potential counterexample found" - in - if null free_constraints andalso null const_defs then Pretty.str msg - else - Pretty.big_list (msg ^ ":") - (map (Syntax.pretty_term ctxt) (free_constraints @ const_defs)) - end - -fun string_of_failure ctxt (Counterexample cex) = - Pretty.string_of (pretty_counterexample ctxt cex) - | string_of_failure _ Time_Out = "Timed out" - | string_of_failure _ Out_Of_Memory = "Ran out of memory" - | string_of_failure _ (Abnormal_Termination err) = - "Solver terminated abnormally with error code " ^ string_of_int err - | string_of_failure _ (Other_Failure msg) = msg - -exception SMT of failure - -end diff --git a/src/test-gen/src/main/smt_patch/old_smt_normalize_patch.ML b/src/test-gen/src/main/smt_patch/old_smt_normalize_patch.ML deleted file mode 100644 index fbc3b50..0000000 --- a/src/test-gen/src/main/smt_patch/old_smt_normalize_patch.ML +++ /dev/null @@ -1,652 +0,0 @@ -(* Title: HOL/Library/Old_SMT/old_smt_normalize.ML - Author: Sascha Boehme, TU Muenchen - -Normalization steps on theorems required by SMT solvers. 
-*) - -signature OLD_SMT_NORMALIZE = -sig - val drop_fact_warning: Proof.context -> thm -> unit - val atomize_conv: Proof.context -> conv - type extra_norm = Proof.context -> thm list * thm list -> thm list * thm list - val add_extra_norm: Old_SMT_Utils.class * extra_norm -> Context.generic -> - Context.generic - val normalize: (int * (int option * thm)) list -> Proof.context -> - (int * thm) list * Proof.context - val setup: theory -> theory -end - -structure Old_SMT_Normalize: OLD_SMT_NORMALIZE = -struct - -fun drop_fact_warning ctxt = - Old_SMT_Config.verbose_msg ctxt (prefix "Warning: dropping assumption: " o - Thm.string_of_thm ctxt) - - -(* general theorem normalizations *) - -(** instantiate elimination rules **) - -local - val (cpfalse, cfalse) = - `Old_SMT_Utils.mk_cprop (Thm.cterm_of @{context} @{const False}) - - fun inst f ct thm = - let val cv = f (Drule.strip_imp_concl (Thm.cprop_of thm)) - in Thm.instantiate ([], [(dest_Var (Thm.term_of cv), ct)]) thm end -in - -fun instantiate_elim thm = - (case Thm.concl_of thm of - @{const Trueprop} $ Var (_, @{typ bool}) => inst Thm.dest_arg cfalse thm - | Var _ => inst I cpfalse thm - | _ => thm) - -end - - -(** normalize definitions **) - -fun norm_def thm = - (case Thm.prop_of thm of - @{const Trueprop} $ (Const (@{const_name HOL.eq}, _) $ _ $ Abs _) => - norm_def (thm RS @{thm fun_cong}) - | Const (@{const_name Pure.eq}, _) $ _ $ Abs _ => - norm_def (thm RS @{thm meta_eq_to_obj_eq}) - | _ => thm) - - -(** atomization **) - -fun atomize_conv ctxt ct = - (case Thm.term_of ct of - @{const Pure.imp} $ _ $ _ => - Conv.binop_conv (atomize_conv ctxt) then_conv - Conv.rewr_conv @{thm atomize_imp} - | Const (@{const_name Pure.eq}, _) $ _ $ _ => - Conv.binop_conv (atomize_conv ctxt) then_conv - Conv.rewr_conv @{thm atomize_eq} - | Const (@{const_name Pure.all}, _) $ Abs _ => - Conv.binder_conv (atomize_conv o snd) ctxt then_conv - Conv.rewr_conv @{thm atomize_all} - | _ => Conv.all_conv) ct - -val setup_atomize = - fold Old_SMT_Builtin.add_builtin_fun_ext'' [@{const_name Pure.imp}, - @{const_name Pure.eq}, @{const_name Pure.all}, @{const_name Trueprop}] - - -(** unfold special quantifiers **) - -local - val ex1_def = mk_meta_eq @{lemma - "Ex1 = (%P. EX x. P x & (ALL y. P y --> y = x))" - by (rule ext) (simp only: Ex1_def)} - - val ball_def = mk_meta_eq @{lemma "Ball = (%A P. ALL x. x : A --> P x)" - by (rule ext)+ (rule Ball_def)} - - val bex_def = mk_meta_eq @{lemma "Bex = (%A P. EX x. 
x : A & P x)" - by (rule ext)+ (rule Bex_def)} - - val special_quants = [(@{const_name Ex1}, ex1_def), - (@{const_name Ball}, ball_def), (@{const_name Bex}, bex_def)] - - fun special_quant (Const (n, _)) = AList.lookup (op =) special_quants n - | special_quant _ = NONE - - fun special_quant_conv _ ct = - (case special_quant (Thm.term_of ct) of - SOME thm => Conv.rewr_conv thm - | NONE => Conv.all_conv) ct -in - -fun unfold_special_quants_conv ctxt = - Old_SMT_Utils.if_exists_conv (is_some o special_quant) - (Conv.top_conv special_quant_conv ctxt) - -val setup_unfolded_quants = - fold (Old_SMT_Builtin.add_builtin_fun_ext'' o fst) special_quants - -end - - -(** trigger inference **) - -local - (*** check trigger syntax ***) - - fun dest_trigger (Const (@{const_name pat}, _) $ _) = SOME true - | dest_trigger (Const (@{const_name nopat}, _) $ _) = SOME false - | dest_trigger _ = NONE - - fun eq_list [] = false - | eq_list (b :: bs) = forall (equal b) bs - - fun proper_trigger t = - t - |> these o try HOLogic.dest_list - |> map (map_filter dest_trigger o these o try HOLogic.dest_list) - |> (fn [] => false | bss => forall eq_list bss) - - fun proper_quant inside f t = - (case t of - Const (@{const_name All}, _) $ Abs (_, _, u) => proper_quant true f u - | Const (@{const_name Ex}, _) $ Abs (_, _, u) => proper_quant true f u - | @{const trigger} $ p $ u => - (if inside then f p else false) andalso proper_quant false f u - | Abs (_, _, u) => proper_quant false f u - | u1 $ u2 => proper_quant false f u1 andalso proper_quant false f u2 - | _ => true) - - fun check_trigger_error ctxt t = - error ("SMT triggers must only occur under quantifier and multipatterns " ^ - "must have the same kind: " ^ Syntax.string_of_term ctxt t) - - fun check_trigger_conv ctxt ct = - if proper_quant false proper_trigger (Old_SMT_Utils.term_of ct) then - Conv.all_conv ct - else check_trigger_error ctxt (Thm.term_of ct) - - - (*** infer simple triggers ***) - - fun dest_cond_eq ct = - (case Thm.term_of ct of - Const (@{const_name HOL.eq}, _) $ _ $ _ => Thm.dest_binop ct - | @{const HOL.implies} $ _ $ _ => dest_cond_eq (Thm.dest_arg ct) - | _ => raise CTERM ("no equation", [ct])) - - fun get_constrs thy (Type (n, _)) = these (Old_Datatype_Data.get_constrs thy n) - | get_constrs _ _ = [] - - fun is_constr thy (n, T) = - let fun match (m, U) = m = n andalso Sign.typ_instance thy (T, U) - in can (the o find_first match o get_constrs thy o Term.body_type) T end - - fun is_constr_pat thy t = - (case Term.strip_comb t of - (Free _, []) => true - | (Const c, ts) => is_constr thy c andalso forall (is_constr_pat thy) ts - | _ => false) - - fun is_simp_lhs ctxt t = - (case Term.strip_comb t of - (Const c, ts as _ :: _) => - not (Old_SMT_Builtin.is_builtin_fun_ext ctxt c ts) andalso - forall (is_constr_pat (Proof_Context.theory_of ctxt)) ts - | _ => false) - - fun has_all_vars vs t = - subset (op aconv) (vs, map Free (Term.add_frees t [])) - - fun minimal_pats vs ct = - if has_all_vars vs (Thm.term_of ct) then - (case Thm.term_of ct of - _ $ _ => - (case apply2 (minimal_pats vs) (Thm.dest_comb ct) of - ([], []) => [[ct]] - | (ctss, ctss') => union (eq_set (op aconvc)) ctss ctss') - | _ => []) - else [] - - fun proper_mpat _ _ _ [] = false - | proper_mpat thy gen u cts = - let - val tps = (op ~~) (`gen (map Thm.term_of cts)) - fun some_match u = tps |> exists (fn (t', t) => - Pattern.matches thy (t', u) andalso not (t aconv u)) - in not (Term.exists_subterm some_match u) end - - val pat = - Old_SMT_Utils.mk_const_pat @{theory} 
@{const_name Old_SMT_patch.pat} Old_SMT_Utils.destT1 - fun mk_pat ct = Thm.apply (Old_SMT_Utils.instT' ct pat) ct - - fun mk_clist T = apply2 (Thm.cterm_of @{context}) (HOLogic.cons_const T, HOLogic.nil_const T) - fun mk_list (ccons, cnil) f cts = fold_rev (Thm.mk_binop ccons o f) cts cnil - val mk_pat_list = mk_list (mk_clist @{typ Old_SMT_patch.pattern}) - val mk_mpat_list = mk_list (mk_clist @{typ "Old_SMT_patch.pattern list"}) - fun mk_trigger ctss = mk_mpat_list (mk_pat_list mk_pat) ctss - - val trigger_eq = - mk_meta_eq @{lemma "p = Old_SMT_patch.trigger t p" by (simp add: trigger_def)} - - fun insert_trigger_conv [] ct = Conv.all_conv ct - | insert_trigger_conv ctss ct = - let val (ctr, cp) = Thm.dest_binop (Thm.rhs_of trigger_eq) ||> rpair ct - in Thm.instantiate ([], map (apfst (dest_Var o Thm.term_of)) [cp, (ctr, mk_trigger ctss)]) trigger_eq end - - fun infer_trigger_eq_conv outer_ctxt (ctxt, cvs) ct = - let - val (lhs, rhs) = dest_cond_eq ct - - val vs = map Thm.term_of cvs - val thy = Proof_Context.theory_of ctxt - - fun get_mpats ct = - if is_simp_lhs ctxt (Thm.term_of ct) then minimal_pats vs ct - else [] - val gen = Variable.export_terms ctxt outer_ctxt - val filter_mpats = filter (proper_mpat thy gen (Thm.term_of rhs)) - - in insert_trigger_conv (filter_mpats (get_mpats lhs)) ct end - - fun has_trigger (@{const Old_SMT_patch.trigger} $ _ $ _) = true - | has_trigger _ = false - - fun try_trigger_conv cv ct = - if Old_SMT_Utils.under_quant has_trigger (Old_SMT_Utils.term_of ct) then - Conv.all_conv ct - else Conv.try_conv cv ct - - fun infer_trigger_conv ctxt = - if Config.get ctxt Old_SMT_Config.infer_triggers then - try_trigger_conv - (Old_SMT_Utils.under_quant_conv (infer_trigger_eq_conv ctxt) ctxt) - else Conv.all_conv -in - -fun trigger_conv ctxt = - Old_SMT_Utils.prop_conv - (check_trigger_conv ctxt then_conv infer_trigger_conv ctxt) - -val setup_trigger = - fold Old_SMT_Builtin.add_builtin_fun_ext'' - [@{const_name Old_SMT_patch.pat}, @{const_name Old_SMT_patch.nopat}, @{const_name Old_SMT_patch.trigger}] - -end - - -(** adding quantifier weights **) - -local - (*** check weight syntax ***) - - val has_no_weight = - not o Term.exists_subterm (fn @{const Old_SMT_patch.weight} => true | _ => false) - - fun is_weight (@{const Old_SMT_patch.weight} $ w $ t) = - (case try HOLogic.dest_number w of - SOME (_, i) => i >= 0 andalso has_no_weight t - | _ => false) - | is_weight t = has_no_weight t - - fun proper_trigger (@{const Old_SMT_patch.trigger} $ _ $ t) = is_weight t - | proper_trigger t = is_weight t - - fun check_weight_error ctxt t = - error ("SMT weight must be a non-negative number and must only occur " ^ - "under the top-most quantifier and an optional trigger: " ^ - Syntax.string_of_term ctxt t) - - fun check_weight_conv ctxt ct = - if Old_SMT_Utils.under_quant proper_trigger (Old_SMT_Utils.term_of ct) then - Conv.all_conv ct - else check_weight_error ctxt (Thm.term_of ct) - - - (*** insertion of weights ***) - - fun under_trigger_conv cv ct = - (case Thm.term_of ct of - @{const Old_SMT_patch.trigger} $ _ $ _ => Conv.arg_conv cv - | _ => cv) ct - - val weight_eq = - mk_meta_eq @{lemma "p = Old_SMT_patch.weight i p" by (simp add: weight_def)} - fun mk_weight_eq w = - let val cv = Thm.dest_arg1 (Thm.rhs_of weight_eq) - in - Thm.instantiate ([], [(dest_Var (Thm.term_of cv), Numeral.mk_cnumber @{ctyp int} w)]) - weight_eq - end - - fun add_weight_conv NONE _ = Conv.all_conv - | add_weight_conv (SOME weight) ctxt = - let val cv = Conv.rewr_conv (mk_weight_eq weight) - 
in Old_SMT_Utils.under_quant_conv (K (under_trigger_conv cv)) ctxt end -in - -fun weight_conv weight ctxt = - Old_SMT_Utils.prop_conv - (check_weight_conv ctxt then_conv add_weight_conv weight ctxt) - -val setup_weight = Old_SMT_Builtin.add_builtin_fun_ext'' @{const_name Old_SMT_patch.weight} - -end - - -(** combined general normalizations **) - -fun gen_normalize1_conv ctxt weight = - atomize_conv ctxt then_conv - unfold_special_quants_conv ctxt then_conv - Thm.beta_conversion true then_conv - trigger_conv ctxt then_conv - weight_conv weight ctxt - -fun gen_normalize1 ctxt weight thm = - thm - |> instantiate_elim - |> norm_def - |> Conv.fconv_rule (Thm.beta_conversion true then_conv Thm.eta_conversion) - |> Drule.forall_intr_vars - |> Conv.fconv_rule (gen_normalize1_conv ctxt weight) - -fun gen_norm1_safe ctxt (i, (weight, thm)) = - (case try (gen_normalize1 ctxt weight) thm of - SOME thm' => SOME (i, thm') - | NONE => (drop_fact_warning ctxt thm; NONE)) - -fun gen_normalize ctxt iwthms = map_filter (gen_norm1_safe ctxt) iwthms - - - -(* unfolding of definitions and theory-specific rewritings *) - -fun expand_head_conv cv ct = - (case Thm.term_of ct of - _ $ _ => - Conv.fun_conv (expand_head_conv cv) then_conv - Conv.try_conv (Thm.beta_conversion false) - | _ => cv) ct - - -(** rewrite bool case expressions as if expressions **) - -local - fun is_case_bool (Const (@{const_name "bool.case_bool"}, _)) = true - | is_case_bool _ = false - - val thm = mk_meta_eq @{lemma - "case_bool = (%x y P. if P then x else y)" by (rule ext)+ simp} - - fun unfold_conv _ = - Old_SMT_Utils.if_true_conv (is_case_bool o Term.head_of) - (expand_head_conv (Conv.rewr_conv thm)) -in - -fun rewrite_case_bool_conv ctxt = - Old_SMT_Utils.if_exists_conv is_case_bool (Conv.top_conv unfold_conv ctxt) - -val setup_case_bool = - Old_SMT_Builtin.add_builtin_fun_ext'' @{const_name "bool.case_bool"} - -end - - -(** unfold abs, min and max **) - -local - val abs_def = mk_meta_eq @{lemma - "abs = (%a::'a::abs_if. if a < 0 then - a else a)" - by (rule ext) (rule abs_if)} - - val min_def = mk_meta_eq @{lemma "min = (%a b. if a <= b then a else b)" - by (rule ext)+ (rule min_def)} - - val max_def = mk_meta_eq @{lemma "max = (%a b. if a <= b then b else a)" - by (rule ext)+ (rule max_def)} - - val defs = [(@{const_name min}, min_def), (@{const_name max}, max_def), - (@{const_name abs}, abs_def)] - - fun is_builtinT ctxt T = - Old_SMT_Builtin.is_builtin_typ_ext ctxt (Term.domain_type T) - - fun abs_min_max ctxt (Const (n, T)) = - (case AList.lookup (op =) defs n of - NONE => NONE - | SOME thm => if is_builtinT ctxt T then SOME thm else NONE) - | abs_min_max _ _ = NONE - - fun unfold_amm_conv ctxt ct = - (case abs_min_max ctxt (Term.head_of (Thm.term_of ct)) of - SOME thm => expand_head_conv (Conv.rewr_conv thm) - | NONE => Conv.all_conv) ct -in - -fun unfold_abs_min_max_conv ctxt = - Old_SMT_Utils.if_exists_conv (is_some o abs_min_max ctxt) - (Conv.top_conv unfold_amm_conv ctxt) - -val setup_abs_min_max = fold (Old_SMT_Builtin.add_builtin_fun_ext'' o fst) defs - -end - - -(** embedding of standard natural number operations into integer operations **) - -local - val nat_embedding = @{lemma - "ALL n. nat (int n) = n" - "ALL i. i >= 0 --> int (nat i) = i" - "ALL i. 
i < 0 --> int (nat i) = 0" - by simp_all} - - val simple_nat_ops = [ - @{const less (nat)}, @{const less_eq (nat)}, - @{const Suc}, @{const plus (nat)}, @{const minus (nat)}] - - val mult_nat_ops = - [@{const times (nat)}, @{const divide (nat)}, @{const modulo (nat)}] - - val nat_ops = simple_nat_ops @ mult_nat_ops - - val nat_consts = nat_ops @ [@{const numeral (nat)}, - @{const zero_class.zero (nat)}, @{const one_class.one (nat)}] - - val nat_int_coercions = [@{const of_nat (int)}, @{const nat}] - - val builtin_nat_ops = nat_int_coercions @ simple_nat_ops - - val is_nat_const = member (op aconv) nat_consts - - fun is_nat_const' @{const of_nat (int)} = true - | is_nat_const' t = is_nat_const t - - val expands = map mk_meta_eq @{lemma - "0 = nat 0" - "1 = nat 1" - "(numeral :: num => nat) = (%i. nat (numeral i))" - "op < = (%a b. int a < int b)" - "op <= = (%a b. int a <= int b)" - "Suc = (%a. nat (int a + 1))" - "op + = (%a b. nat (int a + int b))" - "op - = (%a b. nat (int a - int b))" - "op * = (%a b. nat (int a * int b))" - "op div = (%a b. nat (int a div int b))" - "op mod = (%a b. nat (int a mod int b))" - by (fastforce simp add: nat_mult_distrib nat_div_distrib nat_mod_distrib)+} - - val ints = map mk_meta_eq @{lemma - "int 0 = 0" - "int 1 = 1" - "int (Suc n) = int n + 1" - "int (n + m) = int n + int m" - "int (n - m) = int (nat (int n - int m))" - "int (n * m) = int n * int m" - "int (n div m) = int n div int m" - "int (n mod m) = int n mod int m" - by (auto simp add: zdiv_int zmod_int)} - - val int_if = mk_meta_eq @{lemma - "int (if P then n else m) = (if P then int n else int m)" - by simp} - - fun mk_number_eq ctxt i lhs = - let - val eq = Old_SMT_Utils.mk_cequals lhs (Numeral.mk_cnumber @{ctyp int} i) - val tac = - Simplifier.simp_tac (put_simpset HOL_ss ctxt addsimps [@{thm of_nat_numeral [where 'a=int]}]) 1 - in Goal.norm_result ctxt (Goal.prove_internal ctxt [] eq (K tac)) end - - fun ite_conv cv1 cv2 = - Conv.combination_conv (Conv.combination_conv (Conv.arg_conv cv1) cv2) cv2 - - fun int_conv ctxt ct = - (case Thm.term_of ct of - @{const of_nat (int)} $ (n as (@{const numeral (nat)} $ _)) => - Conv.rewr_conv (mk_number_eq ctxt (snd (HOLogic.dest_number n)) ct) - | @{const of_nat (int)} $ _ => - (Conv.rewrs_conv ints then_conv Conv.sub_conv ints_conv ctxt) else_conv - (Conv.rewr_conv int_if then_conv - ite_conv (nat_conv ctxt) (int_conv ctxt)) else_conv - Conv.sub_conv (Conv.top_sweep_conv nat_conv) ctxt - | _ => Conv.no_conv) ct - - and ints_conv ctxt = Conv.top_sweep_conv int_conv ctxt - - and expand_conv ctxt = - Old_SMT_Utils.if_conv (is_nat_const o Term.head_of) - (expand_head_conv (Conv.rewrs_conv expands) then_conv ints_conv ctxt) - (int_conv ctxt) - - and nat_conv ctxt = Old_SMT_Utils.if_exists_conv is_nat_const' - (Conv.top_sweep_conv expand_conv ctxt) - - val uses_nat_int = Term.exists_subterm (member (op aconv) nat_int_coercions) -in - -val nat_as_int_conv = nat_conv - -fun add_nat_embedding thms = - if exists (uses_nat_int o Thm.prop_of) thms then (thms, nat_embedding) - else (thms, []) - -val setup_nat_as_int = - Old_SMT_Builtin.add_builtin_typ_ext (@{typ nat}, K true) #> - fold (Old_SMT_Builtin.add_builtin_fun_ext' o Term.dest_Const) builtin_nat_ops - -end - - -(** normalize numerals **) - -local - (* - rewrite Numeral1 into 1 - rewrite - 0 into 0 - *) - - fun is_irregular_number (Const (@{const_name numeral}, _) $ Const (@{const_name num.One}, _)) = - true - | is_irregular_number (Const (@{const_name uminus}, _) $ Const (@{const_name Groups.zero}, _)) = - 
true - | is_irregular_number _ = - false; - - fun is_strange_number ctxt t = is_irregular_number t andalso Old_SMT_Builtin.is_builtin_num ctxt t; - - val proper_num_ss = - simpset_of (put_simpset HOL_ss @{context} - addsimps @{thms Num.numeral_One minus_zero}) - - fun norm_num_conv ctxt = - Old_SMT_Utils.if_conv (is_strange_number ctxt) - (Simplifier.rewrite (put_simpset proper_num_ss ctxt)) Conv.no_conv -in - -fun normalize_numerals_conv ctxt = - Old_SMT_Utils.if_exists_conv (is_strange_number ctxt) - (Conv.top_sweep_conv norm_num_conv ctxt) - -end - - -(** combined unfoldings and rewritings **) - -fun unfold_conv ctxt = - rewrite_case_bool_conv ctxt then_conv - unfold_abs_min_max_conv ctxt then_conv - nat_as_int_conv ctxt then_conv - Thm.beta_conversion true - -fun unfold1 ctxt = map (apsnd (Conv.fconv_rule (unfold_conv ctxt))) - -fun burrow_ids f ithms = - let - val (is, thms) = split_list ithms - val (thms', extra_thms) = f thms - in (is ~~ thms') @ map (pair ~1) extra_thms end - -fun unfold2 ctxt ithms = - ithms - |> map (apsnd (Conv.fconv_rule (normalize_numerals_conv ctxt))) - |> burrow_ids add_nat_embedding - - - -(* overall normalization *) - -type extra_norm = Proof.context -> thm list * thm list -> thm list * thm list - -structure Extra_Norms = Generic_Data -( - type T = extra_norm Old_SMT_Utils.dict - val empty = [] - val extend = I - fun merge data = Old_SMT_Utils.dict_merge fst data -) - -fun add_extra_norm (cs, norm) = - Extra_Norms.map (Old_SMT_Utils.dict_update (cs, norm)) - -fun apply_extra_norms ctxt ithms = - let - val cs = Old_SMT_Config.solver_class_of ctxt - val es = Old_SMT_Utils.dict_lookup (Extra_Norms.get (Context.Proof ctxt)) cs - in burrow_ids (fold (fn e => e ctxt) es o rpair []) ithms end - -local - val ignored = member (op =) [@{const_name All}, @{const_name Ex}, - @{const_name Let}, @{const_name If}, @{const_name HOL.eq}] - - val schematic_consts_of = - let - fun collect (@{const Old_SMT_patch.trigger} $ p $ t) = - collect_trigger p #> collect t - | collect (t $ u) = collect t #> collect u - | collect (Abs (_, _, t)) = collect t - | collect (t as Const (n, _)) = - if not (ignored n) then Monomorph.add_schematic_consts_of t else I - | collect _ = I - and collect_trigger t = - let val dest = these o try HOLogic.dest_list - in fold (fold collect_pat o dest) (dest t) end - and collect_pat (Const (@{const_name Old_SMT_patch.pat}, _) $ t) = collect t - | collect_pat (Const (@{const_name Old_SMT_patch.nopat}, _) $ t) = collect t - | collect_pat _ = I - in (fn t => collect t Symtab.empty) end -in - -fun monomorph ctxt xthms = - let val (xs, thms) = split_list xthms - in - map (pair 1) thms - |> Monomorph.monomorph schematic_consts_of ctxt - |> maps (uncurry (map o pair)) o map2 pair xs o map (map snd) - end - -end - -fun normalize iwthms ctxt = - iwthms - |> gen_normalize ctxt - |> unfold1 ctxt - |> monomorph ctxt - |> unfold2 ctxt - |> apply_extra_norms ctxt - |> rpair ctxt - -val setup = Context.theory_map ( - setup_atomize #> - setup_unfolded_quants #> - setup_trigger #> - setup_weight #> - setup_case_bool #> - setup_abs_min_max #> - setup_nat_as_int) - -end diff --git a/src/test-gen/src/main/smt_patch/old_smt_setup_solvers_patch.ML b/src/test-gen/src/main/smt_patch/old_smt_setup_solvers_patch.ML deleted file mode 100644 index 30617b3..0000000 --- a/src/test-gen/src/main/smt_patch/old_smt_setup_solvers_patch.ML +++ /dev/null @@ -1,189 +0,0 @@ -(* Title: HOL/Library/Old_SMT/old_smt_setup_solvers.ML - Author: Sascha Boehme, TU Muenchen - -Setup SMT solvers. 
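The configurations below locate each prover through an OLD_<NAME>_SOLVER environment
variable, which doubles as the path of the executable to run.  A minimal standalone
sketch of that convention in plain Standard ML (Basis library only, outside any
Isabelle context; the real code uses Isabelle's getenv, which yields "" for unset
variables):

    (* a solver is available iff its variable is set and non-empty;
       the variable's value is taken as the command to invoke *)
    fun solver_env name = "OLD_" ^ name ^ "_SOLVER"
    fun make_avail name () =
      (case OS.Process.getEnv (solver_env name) of SOME s => s <> "" | NONE => false)
    fun make_command name () =
      [Option.getOpt (OS.Process.getEnv (solver_env name), "")]
    (* e.g. make_avail "Z3" () consults OLD_Z3_SOLVER *)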
-*) - -signature OLD_SMT_SETUP_SOLVERS = -sig - datatype z3_non_commercial = - Z3_Non_Commercial_Unknown | - Z3_Non_Commercial_Accepted | - Z3_Non_Commercial_Declined - val z3_non_commercial: unit -> z3_non_commercial - val z3_with_extensions: bool Config.T - val setup: theory -> theory -end - -structure Old_SMT_Setup_Solvers: OLD_SMT_SETUP_SOLVERS = -struct - -(* helper functions *) - -fun make_avail name () = getenv ("OLD_" ^ name ^ "_SOLVER") <> "" -fun make_command name () = [getenv ("OLD_" ^ name ^ "_SOLVER")] - -fun outcome_of unsat sat unknown solver_name line = - if String.isPrefix unsat line then Old_SMT_patch_Solver.Unsat - else if String.isPrefix sat line then Old_SMT_patch_Solver.Sat - else if String.isPrefix unknown line then Old_SMT_patch_Solver.Unknown - else raise Old_SMT_patch_Failure.SMT (Old_SMT_patch_Failure.Other_Failure ("Solver " ^ - quote solver_name ^ " failed. Enable SMT tracing by setting the " ^ - "configuration option " ^ quote (Config.name_of Old_SMT_Config.trace) ^ " and " ^ - "see the trace for details.")) - -fun on_first_line test_outcome solver_name lines = - let - val empty_line = (fn "" => true | _ => false) - val split_first = (fn [] => ("", []) | l :: ls => (l, ls)) - val (l, ls) = split_first (snd (take_prefix empty_line lines)) - in (test_outcome solver_name l, ls) end - - -(* CVC3 *) - -local - fun cvc3_options ctxt = [ - "-seed", string_of_int (Config.get ctxt Old_SMT_Config.random_seed), - "-lang", "smtlib", "-output-lang", "presentation", - "-timeout", string_of_int (Real.ceil (Config.get ctxt Old_SMT_Config.timeout))] -in - -val cvc3: Old_SMT_patch_Solver.solver_config = { - name = "cvc3", - class = K Old_SMTLIB_Interface.smtlibC, - avail = make_avail "CVC3", - command = make_command "CVC3", - options = cvc3_options, - default_max_relevant = 400 (* FUDGE *), - supports_filter = false, - outcome = - on_first_line (outcome_of "Unsatisfiable." "Satisfiable." "Unknown."), - cex_parser = NONE, - reconstruct = NONE } - -end - - -(* Yices *) - -val yices: Old_SMT_patch_Solver.solver_config = { - name = "yices", - class = K Old_SMTLIB_Interface.smtlibC, - avail = make_avail "YICES", - command = make_command "YICES", - options = (fn ctxt => [ - "--rand-seed=" ^ string_of_int (Config.get ctxt Old_SMT_Config.random_seed), - "--timeout=" ^ - string_of_int (Real.ceil (Config.get ctxt Old_SMT_Config.timeout)), - "--smtlib"]), - default_max_relevant = 350 (* FUDGE *), - supports_filter = false, - outcome = on_first_line (outcome_of "unsat" "sat" "unknown"), - cex_parser = NONE, - reconstruct = NONE } - - -(* Z3 *) - -datatype z3_non_commercial = - Z3_Non_Commercial_Unknown | - Z3_Non_Commercial_Accepted | - Z3_Non_Commercial_Declined - - -local - val accepted = member (op =) ["yes", "Yes", "YES"] - val declined = member (op =) ["no", "No", "NO"] -in - -fun z3_non_commercial () = - let - val flag2 = getenv "OLD_Z3_NON_COMMERCIAL" - in - if accepted flag2 then Z3_Non_Commercial_Accepted - else if declined flag2 then Z3_Non_Commercial_Declined - else Z3_Non_Commercial_Unknown - end - -fun if_z3_non_commercial f = - (case z3_non_commercial () of - Z3_Non_Commercial_Accepted => f () - | Z3_Non_Commercial_Declined => - error (Pretty.string_of (Pretty.para - "The SMT solver Z3 may only be used for non-commercial applications.")) - | Z3_Non_Commercial_Unknown => - error - (Pretty.string_of - (Pretty.para - ("The SMT solver Z3 is not activated. To activate it, set the Isabelle \ - \system option \"z3_non_commercial\" to \"yes\" (e.g. 
via \ - \the Isabelle/jEdit menu Plugin Options / Isabelle / General).")) ^ - "\n\nSee also " ^ Url.print (Url.explode "http://z3.codeplex.com/license"))) - -end - - -val z3_with_extensions = - Attrib.setup_config_bool @{binding old_z3_with_extensions} (K false) - -local - fun z3_make_command name () = if_z3_non_commercial (make_command name) - - fun z3_options ctxt = - ["-rs:" ^ string_of_int (Config.get ctxt Old_SMT_Config.random_seed), - "MODEL=true", - "SOFT_TIMEOUT=" ^ - string_of_int (Real.ceil (1000.0 * Config.get ctxt Old_SMT_Config.timeout)), - "-smt"] - |> not (Config.get ctxt Old_SMT_Config.oracle) ? - append ["DISPLAY_PROOF=true", "PROOF_MODE=2"] - - fun z3_on_first_or_last_line solver_name lines = - let - fun junk l = - if String.isPrefix "WARNING: Out of allocated virtual memory" l - then raise Old_SMT_patch_Failure.SMT Old_SMT_patch_Failure.Out_Of_Memory - else - String.isPrefix "WARNING" l orelse - String.isPrefix "ERROR" l orelse - forall Symbol.is_ascii_blank (Symbol.explode l) - val lines = filter_out junk lines - fun outcome split = - the_default ("", []) (try split lines) - |>> outcome_of "unsat" "sat" "unknown" solver_name - in - (* Starting with version 4.0, Z3 puts the outcome on the first line of the - output rather than on the last line. *) - outcome (fn lines => (hd lines, tl lines)) - handle Old_SMT_patch_Failure.SMT _ => outcome (swap o split_last) - end - - fun select_class ctxt = - if Config.get ctxt z3_with_extensions then Old_Z3_Interface.smtlib_z3C - else Old_SMTLIB_Interface.smtlibC -in - -val z3: Old_SMT_patch_Solver.solver_config = { - name = "z3", - class = select_class, - avail = make_avail "Z3", - command = z3_make_command "Z3", - options = z3_options, - default_max_relevant = 350 (* FUDGE *), - supports_filter = true, - outcome = z3_on_first_or_last_line, - cex_parser = SOME Old_Z3_Model.parse_counterex, - reconstruct = SOME Old_Z3_Proof_Reconstruction.reconstruct } - -end - - -(* overall setup *) - -val setup = - Old_SMT_patch_Solver.add_solver cvc3 #> - Old_SMT_patch_Solver.add_solver yices #> - Old_SMT_patch_Solver.add_solver z3 - -end diff --git a/src/test-gen/src/main/smt_patch/old_smt_solver_patch.ML b/src/test-gen/src/main/smt_patch/old_smt_solver_patch.ML deleted file mode 100644 index 944f67e..0000000 --- a/src/test-gen/src/main/smt_patch/old_smt_solver_patch.ML +++ /dev/null @@ -1,378 +0,0 @@ -(* Title: HOL/Library/Old_SMT/old_smt_solver.ML - Author: Sascha Boehme, TU Muenchen - -SMT solvers registry and SMT tactic. 
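The interface to an external prover below reduces to assembling one shell command per
problem, with stderr folded into stdout and the answer redirected to a proof file.  A
rough standalone illustration of the command shape built by make_cmd (plain Standard
ML; "z3", "-smt" and the file names are made-up placeholders, and quote' skips the
path normalisation that shell_path performs in the real code):

    fun quote' s = "'" ^ s ^ "'"
    fun make_cmd cmd opts problem proof =
      String.concatWith " "
        ("(exec 2>&1;" :: map quote' (cmd @ opts) @ [quote' problem, ")", ">", quote' proof])
    val example = make_cmd ["z3"] ["-smt"] "problem.smt" "proof.out"
    (* example = "(exec 2>&1; 'z3' '-smt' 'problem.smt' ) > 'proof.out'" *)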
-*) - -signature OLD_SMT_SOLVER = -sig - (*configuration*) - datatype outcome = Unsat | Sat | Unknown - type solver_config = { - name: string, - class: Proof.context -> Old_SMT_Utils.class, - avail: unit -> bool, - command: unit -> string list, - options: Proof.context -> string list, - default_max_relevant: int, - supports_filter: bool, - outcome: string -> string list -> outcome * string list, - cex_parser: (Proof.context -> Old_SMT_Translate.recon -> string list -> - term list * term list) option, - reconstruct: (Proof.context -> Old_SMT_Translate.recon -> string list -> - int list * thm) option } - - (*registry*) - val add_solver: solver_config -> theory -> theory - val solver_name_of: Proof.context -> string - val available_solvers_of: Proof.context -> string list - val apply_solver: Proof.context -> (int * (int option * thm)) list -> - int list * thm - val default_max_relevant: Proof.context -> string -> int - - (*filter*) - type 'a smt_filter_data = - ('a * thm) list * ((int * thm) list * Proof.context) - val smt_filter_preprocess: Proof.context -> thm list -> thm -> - ('a * (int option * thm)) list -> int -> 'a smt_filter_data - val smt_filter_apply: Time.time -> 'a smt_filter_data -> - {outcome: Old_SMT_patch_Failure.failure option, used_facts: ('a * thm) list} - - (*tactic*) - val smt_tac: Proof.context -> thm list -> int -> tactic - val smt_tac': Proof.context -> thm list -> int -> tactic -end - -structure Old_SMT_patch_Solver: OLD_SMT_SOLVER = -struct - - -(* interface to external solvers *) - -local - -val shell_quote = enclose "'" "'"; -val shell_path = shell_quote o File.standard_path; - -fun make_cmd command options problem_path proof_path = space_implode " " ( - "(exec 2>&1;" :: map shell_quote (command () @ options) @ - [shell_path problem_path, ")", ">", shell_path proof_path]) - -fun trace_and ctxt msg f x = - let val _ = Old_SMT_Config.trace_msg ctxt (fn () => msg) () - in f x end - -fun run ctxt name mk_cmd input = - (case Old_SMT_Config.certificates_of ctxt of - NONE => - if not (Old_SMT_Config.is_available ctxt name) then - error ("The SMT solver " ^ quote name ^ " is not installed.") - else if Config.get ctxt Old_SMT_Config.debug_files = "" then - trace_and ctxt ("Invoking SMT solver " ^ quote name ^ " ...") - (Cache_IO.run mk_cmd) input - else - let - val base_path = Path.explode (Config.get ctxt Old_SMT_Config.debug_files) - val in_path = Path.ext "smt_in" base_path - val out_path = Path.ext "smt_out" base_path - in Cache_IO.raw_run mk_cmd input in_path out_path end - | SOME certs => - (case Cache_IO.lookup certs input of - (NONE, key) => - if Config.get ctxt Old_SMT_Config.read_only_certificates then - error ("Bad certificate cache: missing certificate") - else - Cache_IO.run_and_cache certs key mk_cmd input - | (SOME output, _) => - trace_and ctxt ("Using cached certificate from " ^ - shell_path (Cache_IO.cache_path_of certs) ^ " ...") - I output)) - -fun run_solver ctxt name mk_cmd input = - let - fun pretty tag ls = Pretty.string_of (Pretty.big_list tag - (map Pretty.str ls)) - - val _ = Old_SMT_Config.trace_msg ctxt (pretty "Problem:" o split_lines) input - - val {redirected_output=res, output=err, return_code} = - Old_SMT_Config.with_timeout ctxt (run ctxt name mk_cmd) input - val _ = Old_SMT_Config.trace_msg ctxt (pretty "Solver:") err - - val ls = fst (take_suffix (equal "") res) - val _ = Old_SMT_Config.trace_msg ctxt (pretty "Result:") ls - - (* Patch compared to Isabelle2016 Old_SMT *) - - (* val _ = return_code <> 0 andalso - raise Old_SMT_Failure.SMT 
(Old_SMT_Failure.Abnormal_Termination return_code) *) - in ls end - -fun trace_assms ctxt = - Old_SMT_Config.trace_msg ctxt (Pretty.string_of o - Pretty.big_list "Assertions:" o map (Thm.pretty_thm ctxt o snd)) - -fun trace_recon_data ({context=ctxt, typs, terms, ...} : Old_SMT_Translate.recon) = - let - fun pretty_eq n p = Pretty.block [Pretty.str n, Pretty.str " = ", p] - fun p_typ (n, T) = pretty_eq n (Syntax.pretty_typ ctxt T) - fun p_term (n, t) = pretty_eq n (Syntax.pretty_term ctxt t) - in - Old_SMT_Config.trace_msg ctxt (fn () => - Pretty.string_of (Pretty.big_list "Names:" [ - Pretty.big_list "sorts:" (map p_typ (Symtab.dest typs)), - Pretty.big_list "functions:" (map p_term (Symtab.dest terms))])) () - end - -in - -fun invoke name command ithms ctxt = - let - val options = Old_SMT_Config.solver_options_of ctxt - val comments = ("solver: " ^ name) :: - ("timeout: " ^ string_of_real (Config.get ctxt Old_SMT_Config.timeout)) :: - ("random seed: " ^ - string_of_int (Config.get ctxt Old_SMT_Config.random_seed)) :: - "arguments:" :: options - - val (str, recon as {context=ctxt', ...}) = - ithms - |> tap (trace_assms ctxt) - |> Old_SMT_Translate.translate ctxt comments - ||> tap trace_recon_data - in (run_solver ctxt' name (make_cmd command options) str, recon) end - -end - - -(* configuration *) - -datatype outcome = Unsat | Sat | Unknown - -type solver_config = { - name: string, - class: Proof.context -> Old_SMT_Utils.class, - avail: unit -> bool, - command: unit -> string list, - options: Proof.context -> string list, - default_max_relevant: int, - supports_filter: bool, - outcome: string -> string list -> outcome * string list, - cex_parser: (Proof.context -> Old_SMT_Translate.recon -> string list -> - term list * term list) option, - reconstruct: (Proof.context -> Old_SMT_Translate.recon -> string list -> - int list * thm) option } - - -(* registry *) - -type solver_info = { - command: unit -> string list, - default_max_relevant: int, - supports_filter: bool, - reconstruct: Proof.context -> string list * Old_SMT_Translate.recon -> - int list * thm } - -structure Solvers = Generic_Data -( - type T = solver_info Symtab.table - val empty = Symtab.empty - val extend = I - fun merge data = Symtab.merge (K true) data -) - -local - fun finish outcome cex_parser reconstruct ocl outer_ctxt - (output, (recon as {context=ctxt, ...} : Old_SMT_Translate.recon)) = - (case outcome output of - (Unsat, ls) => - if not (Config.get ctxt Old_SMT_Config.oracle) andalso is_some reconstruct - then the reconstruct outer_ctxt recon ls - else ([], ocl ()) - | (result, ls) => - let - val (ts, us) = - (case cex_parser of SOME f => f ctxt recon ls | _ => ([], [])) - in - raise Old_SMT_patch_Failure.SMT (Old_SMT_patch_Failure.Counterexample { - is_real_cex = (result = Sat), - free_constraints = ts, - const_defs = us}) - end) - - val cfalse = Thm.cterm_of @{context} (@{const Trueprop} $ @{const False}) -in - -fun add_solver cfg = - let - val {name, class, avail, command, options, default_max_relevant, - supports_filter, outcome, cex_parser, reconstruct} = cfg - - fun core_oracle () = cfalse - - fun solver ocl = { - command = command, - default_max_relevant = default_max_relevant, - supports_filter = supports_filter, - reconstruct = finish (outcome name) cex_parser reconstruct ocl } - - val info = {name=name, class=class, avail=avail, options=options} - in - Thm.add_oracle (Binding.name name, core_oracle) #-> (fn (_, ocl) => - Context.theory_map (Solvers.map (Symtab.update_new (name, solver ocl)))) #> - 
Context.theory_map (Old_SMT_Config.add_solver info) - end - -end - -fun get_info ctxt name = - the (Symtab.lookup (Solvers.get (Context.Proof ctxt)) name) - -val solver_name_of = Old_SMT_Config.solver_of - -val available_solvers_of = Old_SMT_Config.available_solvers_of - -fun name_and_info_of ctxt = - let val name = solver_name_of ctxt - in (name, get_info ctxt name) end - -fun gen_preprocess ctxt iwthms = Old_SMT_Normalize.normalize iwthms ctxt - -fun gen_apply (ithms, ctxt) = - let val (name, {command, reconstruct, ...}) = name_and_info_of ctxt - in - (ithms, ctxt) - |-> invoke name command - |> reconstruct ctxt - |>> distinct (op =) - end - -fun apply_solver ctxt = gen_apply o gen_preprocess ctxt - -val default_max_relevant = #default_max_relevant oo get_info - -val supports_filter = #supports_filter o snd o name_and_info_of - - -(* check well-sortedness *) - -val has_topsort = Term.exists_type (Term.exists_subtype (fn - TFree (_, []) => true - | TVar (_, []) => true - | _ => false)) - -(* without this test, we would run into problems when atomizing the rules: *) -fun check_topsort ctxt thm = - if has_topsort (Thm.prop_of thm) then - (Old_SMT_Normalize.drop_fact_warning ctxt thm; TrueI) - else - thm - -fun check_topsorts ctxt iwthms = map (apsnd (apsnd (check_topsort ctxt))) iwthms - - -(* filter *) - -val cnot = Thm.cterm_of @{context} @{const Not} - -fun mk_result outcome xrules = { outcome = outcome, used_facts = xrules } - -type 'a smt_filter_data = ('a * thm) list * ((int * thm) list * Proof.context) - -fun smt_filter_preprocess ctxt facts goal xwthms i = - let - val ctxt = - ctxt - |> Config.put Old_SMT_Config.oracle false - |> Config.put Old_SMT_Config.filter_only_facts true - - val ({context=ctxt', prems, concl, ...}, _) = Subgoal.focus ctxt i NONE goal - fun negate ct = Thm.dest_comb ct ||> Thm.apply cnot |-> Thm.apply - val cprop = - (case try negate (Thm.rhs_of (Old_SMT_Normalize.atomize_conv ctxt' concl)) of - SOME ct => ct - | NONE => raise Old_SMT_patch_Failure.SMT (Old_SMT_patch_Failure.Other_Failure ( - "goal is not a HOL term"))) - in - map snd xwthms - |> map_index I - |> append (map (pair ~1 o pair NONE) (Thm.assume cprop :: prems @ facts)) - |> check_topsorts ctxt' - |> gen_preprocess ctxt' - |> pair (map (apsnd snd) xwthms) - end - -fun smt_filter_apply time_limit (xthms, (ithms, ctxt)) = - let - val ctxt' = - ctxt - |> Config.put Old_SMT_Config.timeout (Time.toReal time_limit) - - fun filter_thms false = K xthms - | filter_thms true = map_filter (try (nth xthms)) o fst - in - (ithms, ctxt') - |> gen_apply - |> filter_thms (supports_filter ctxt') - |> mk_result NONE - end - handle Old_SMT_patch_Failure.SMT fail => mk_result (SOME fail) [] - - -(* SMT tactic *) - -local - fun trace_assumptions ctxt iwthms idxs = - let - val wthms = - idxs - |> filter (fn i => i >= 0) - |> map_filter (AList.lookup (op =) iwthms) - in - if Config.get ctxt Old_SMT_Config.trace_used_facts andalso length wthms > 0 - then - tracing (Pretty.string_of (Pretty.big_list "SMT used facts:" - (map (Thm.pretty_thm ctxt o snd) wthms))) - else () - end - - fun solve ctxt iwthms = - iwthms - |> check_topsorts ctxt - |> apply_solver ctxt - |>> trace_assumptions ctxt iwthms - |> snd - - fun str_of ctxt fail = - Old_SMT_patch_Failure.string_of_failure ctxt fail - |> prefix ("Solver " ^ Old_SMT_Config.solver_of ctxt ^ ": ") - - fun safe_solve ctxt iwthms = SOME (solve ctxt iwthms) - handle - Old_SMT_patch_Failure.SMT (fail as Old_SMT_patch_Failure.Counterexample _) => - (Old_SMT_Config.verbose_msg ctxt 
(str_of ctxt) fail; NONE) - | Old_SMT_patch_Failure.SMT (fail as Old_SMT_patch_Failure.Time_Out) => - error ("SMT: Solver " ^ quote (Old_SMT_Config.solver_of ctxt) ^ ": " ^ - Old_SMT_patch_Failure.string_of_failure ctxt fail ^ " (setting the " ^ - "configuration option " ^ quote (Config.name_of Old_SMT_Config.timeout) ^ " might help)") - | Old_SMT_patch_Failure.SMT fail => error (str_of ctxt fail) - - fun tag_rules thms = map_index (apsnd (pair NONE)) thms - fun tag_prems thms = map (pair ~1 o pair NONE) thms - - fun resolve ctxt (SOME thm) = resolve_tac ctxt [thm] 1 - | resolve _ NONE = no_tac - - fun tac prove ctxt rules = - CONVERSION (Old_SMT_Normalize.atomize_conv ctxt) - THEN' resolve_tac ctxt @{thms ccontr} - THEN' SUBPROOF (fn {context = ctxt', prems, ...} => - resolve ctxt' (prove ctxt' (tag_rules rules @ tag_prems prems))) ctxt -in - -val smt_tac = tac safe_solve -val smt_tac' = tac (SOME oo solve) - -end - -end diff --git a/src/test-gen/src/main/smt_patch/old_smt_translate_patch.ML b/src/test-gen/src/main/smt_patch/old_smt_translate_patch.ML deleted file mode 100644 index 1eb4b4c..0000000 --- a/src/test-gen/src/main/smt_patch/old_smt_translate_patch.ML +++ /dev/null @@ -1,589 +0,0 @@ -(* Title: HOL/Library/Old_SMT/old_smt_translate.ML - Author: Sascha Boehme, TU Muenchen - -Translate theorems into an SMT intermediate format and serialize them. -*) - -signature OLD_SMT_TRANSLATE = -sig - (*intermediate term structure*) - datatype squant = SForall | SExists - datatype 'a spattern = SPat of 'a list | SNoPat of 'a list - datatype sterm = - SVar of int | - SApp of string * sterm list | - SLet of string * sterm * sterm | - SQua of squant * string list * sterm spattern list * int option * sterm - - (*translation configuration*) - type prefixes = {sort_prefix: string, func_prefix: string} - type sign = { - header: string list, - sorts: string list, - dtyps: (string * (string * (string * string) list) list) list list, - funcs: (string * (string list * string)) list } - type config = { - prefixes: prefixes, - header: term list -> string list, - is_fol: bool, - has_datatypes: bool, - serialize: string list -> sign -> sterm list -> string } - type recon = { - context: Proof.context, - typs: typ Symtab.table, - terms: term Symtab.table, - rewrite_rules: thm list, - assms: (int * thm) list } - - (*translation*) - val add_config: Old_SMT_Utils.class * (Proof.context -> config) -> - Context.generic -> Context.generic - val translate: Proof.context -> string list -> (int * thm) list -> - string * recon -end - -structure Old_SMT_Translate: OLD_SMT_TRANSLATE = -struct - - -(* intermediate term structure *) - -datatype squant = SForall | SExists - -datatype 'a spattern = SPat of 'a list | SNoPat of 'a list - -datatype sterm = - SVar of int | - SApp of string * sterm list | - SLet of string * sterm * sterm | - SQua of squant * string list * sterm spattern list * int option * sterm - - - -(* translation configuration *) - -type prefixes = {sort_prefix: string, func_prefix: string} - -type sign = { - header: string list, - sorts: string list, - dtyps: (string * (string * (string * string) list) list) list list, - funcs: (string * (string list * string)) list } - -type config = { - prefixes: prefixes, - header: term list -> string list, - is_fol: bool, - has_datatypes: bool, - serialize: string list -> sign -> sterm list -> string } - -type recon = { - context: Proof.context, - typs: typ Symtab.table, - terms: term Symtab.table, - rewrite_rules: thm list, - assms: (int * thm) list } - - - -(* 
translation context *) - -fun make_tr_context {sort_prefix, func_prefix} = - (sort_prefix, 1, Typtab.empty, func_prefix, 1, Termtab.empty) - -fun string_of_index pre i = pre ^ string_of_int i - -fun add_typ T proper (cx as (sp, Tidx, typs, fp, idx, terms)) = - (case Typtab.lookup typs T of - SOME (n, _) => (n, cx) - | NONE => - let - val n = string_of_index sp Tidx - val typs' = Typtab.update (T, (n, proper)) typs - in (n, (sp, Tidx+1, typs', fp, idx, terms)) end) - -fun add_fun t sort (cx as (sp, Tidx, typs, fp, idx, terms)) = - (case Termtab.lookup terms t of - SOME (n, _) => (n, cx) - | NONE => - let - val n = string_of_index fp idx - val terms' = Termtab.update (t, (n, sort)) terms - in (n, (sp, Tidx, typs, fp, idx+1, terms')) end) - -fun sign_of header dtyps (_, _, typs, _, _, terms) = { - header = header, - sorts = Typtab.fold (fn (_, (n, true)) => cons n | _ => I) typs [], - dtyps = dtyps, - funcs = Termtab.fold (fn (_, (n, SOME ss)) => cons (n,ss) | _ => I) terms []} - -fun recon_of ctxt rules thms ithms (_, _, typs, _, _, terms) = - let - fun add_typ (T, (n, _)) = Symtab.update (n, T) - val typs' = Typtab.fold add_typ typs Symtab.empty - - fun add_fun (t, (n, _)) = Symtab.update (n, t) - val terms' = Termtab.fold add_fun terms Symtab.empty - - val assms = map (pair ~1) thms @ ithms - in - {context=ctxt, typs=typs', terms=terms', rewrite_rules=rules, assms=assms} - end - - - -(* preprocessing *) - -(** datatype declarations **) - -fun collect_datatypes_and_records (tr_context, ctxt) ts = - let - val (declss, ctxt') = - fold (Term.fold_types Old_SMT_Datatypes.add_decls) ts ([], ctxt) - - fun is_decl_typ T = exists (exists (equal T o fst)) declss - - fun add_typ' T proper = - (case Old_SMT_Builtin.dest_builtin_typ ctxt' T of - SOME n => pair n - | NONE => add_typ T proper) - - fun tr_select sel = - let val T = Term.range_type (Term.fastype_of sel) - in add_fun sel NONE ##>> add_typ' T (not (is_decl_typ T)) end - fun tr_constr (constr, selects) = - add_fun constr NONE ##>> fold_map tr_select selects - fun tr_typ (T, cases) = add_typ' T false ##>> fold_map tr_constr cases - val (declss', tr_context') = fold_map (fold_map tr_typ) declss tr_context - - fun add (constr, selects) = - Termtab.update (constr, length selects) #> - fold (Termtab.update o rpair 1) selects - val funcs = fold (fold (fold add o snd)) declss Termtab.empty - in ((funcs, declss', tr_context', ctxt'), ts) end - (* FIXME: also return necessary datatype and record theorems *) - - -(** eta-expand quantifiers, let expressions and built-ins *) - -local - fun eta f T t = Abs (Name.uu, T, f (Term.incr_boundvars 1 t $ Bound 0)) - - fun exp f T = eta f (Term.domain_type (Term.domain_type T)) - - fun exp2 T q = - let val U = Term.domain_type T - in Abs (Name.uu, U, q $ eta I (Term.domain_type U) (Bound 0)) end - - fun exp2' T l = - let val (U1, U2) = Term.dest_funT T ||> Term.domain_type - in Abs (Name.uu, U1, eta I U2 (l $ Bound 0)) end - - fun expf k i T t = - let val Ts = drop i (fst (Old_SMT_Utils.dest_funT k T)) - in - Term.incr_boundvars (length Ts) t - |> fold_rev (fn i => fn u => u $ Bound i) (0 upto length Ts - 1) - |> fold_rev (fn T => fn u => Abs (Name.uu, T, u)) Ts - end -in - -fun eta_expand ctxt is_fol funcs = - let - fun exp_func t T ts = - (case Termtab.lookup funcs t of - SOME k => - Term.list_comb (t, ts) - |> k <> length ts ? 
expf k (length ts) T - | NONE => Term.list_comb (t, ts)) - - fun expand ((q as Const (@{const_name All}, _)) $ Abs a) = q $ abs_expand a - | expand ((q as Const (@{const_name All}, T)) $ t) = q $ exp expand T t - | expand (q as Const (@{const_name All}, T)) = exp2 T q - | expand ((q as Const (@{const_name Ex}, _)) $ Abs a) = q $ abs_expand a - | expand ((q as Const (@{const_name Ex}, T)) $ t) = q $ exp expand T t - | expand (q as Const (@{const_name Ex}, T)) = exp2 T q - | expand ((l as Const (@{const_name Let}, _)) $ t $ Abs a) = - if is_fol then expand (Term.betapply (Abs a, t)) - else l $ expand t $ abs_expand a - | expand ((l as Const (@{const_name Let}, T)) $ t $ u) = - if is_fol then expand (u $ t) - else l $ expand t $ exp expand (Term.range_type T) u - | expand ((l as Const (@{const_name Let}, T)) $ t) = - if is_fol then - let val U = Term.domain_type (Term.range_type T) - in Abs (Name.uu, U, Bound 0 $ Term.incr_boundvars 1 t) end - else exp2 T (l $ expand t) - | expand (l as Const (@{const_name Let}, T)) = - if is_fol then - let val U = Term.domain_type (Term.range_type T) - in - Abs (Name.uu, Term.domain_type T, Abs (Name.uu, U, - Bound 0 $ Bound 1)) - end - else exp2' T l - | expand t = - (case Term.strip_comb t of - (u as Const (c as (_, T)), ts) => - (case Old_SMT_Builtin.dest_builtin ctxt c ts of - SOME (_, k, us, mk) => - if k = length us then mk (map expand us) - else if k < length us then - chop k (map expand us) |>> mk |> Term.list_comb - else expf k (length ts) T (mk (map expand us)) - | NONE => exp_func u T (map expand ts)) - | (u as Free (_, T), ts) => exp_func u T (map expand ts) - | (Abs a, ts) => Term.list_comb (abs_expand a, map expand ts) - | (u, ts) => Term.list_comb (u, map expand ts)) - - and abs_expand (n, T, t) = Abs (n, T, expand t) - - in map expand end - -end - - -(** introduce explicit applications **) - -local - (* - Make application explicit for functions with varying number of arguments. 
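As a hypothetical illustration (not taken from the original sources): when a constant
c occurs both as c t u and, elsewhere, only as c t, its minimal arity is 1, so the
pass below rewrites the saturated occurrence to Old_SMT_patch.fun_app (c t) u while
c t is kept as it is; a constant that always receives both arguments keeps its
ordinary applications.  The additional arity minimisation over the types collected
from lambda-binders can lower the cut-off further, but it does not change this scheme.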
- *) - - fun add t i = apfst (Termtab.map_default (t, i) (Integer.min i)) - fun add_type T = apsnd (Typtab.update (T, ())) - - fun min_arities t = - (case Term.strip_comb t of - (u as Const _, ts) => add u (length ts) #> fold min_arities ts - | (u as Free _, ts) => add u (length ts) #> fold min_arities ts - | (Abs (_, T, u), ts) => add_type T #> min_arities u #> fold min_arities ts - | (_, ts) => fold min_arities ts) - - fun minimize types t i = - let - fun find_min j [] _ = j - | find_min j (U :: Us) T = - if Typtab.defined types T then j - else find_min (j + 1) Us (U --> T) - - val (Ts, T) = Term.strip_type (Term.type_of t) - in find_min 0 (take i (rev Ts)) T end - - fun app u (t, T) = - (Const (@{const_name Old_SMT_patch.fun_app}, T --> T) $ t $ u, Term.range_type T) - - fun apply i t T ts = - let - val (ts1, ts2) = chop i ts - val (_, U) = Old_SMT_Utils.dest_funT i T - in fst (fold app ts2 (Term.list_comb (t, ts1), U)) end -in - -fun intro_explicit_application ctxt funcs ts = - let - val (arities, types) = fold min_arities ts (Termtab.empty, Typtab.empty) - val arities' = Termtab.map (minimize types) arities - - fun app_func t T ts = - if is_some (Termtab.lookup funcs t) then Term.list_comb (t, ts) - else apply (the (Termtab.lookup arities' t)) t T ts - - fun in_list T f t = HOLogic.mk_list T (map f (HOLogic.dest_list t)) - - fun traverse Ts t = - (case Term.strip_comb t of - (q as Const (@{const_name All}, _), [Abs (x, T, u)]) => - q $ Abs (x, T, in_trigger (T :: Ts) u) - | (q as Const (@{const_name Ex}, _), [Abs (x, T, u)]) => - q $ Abs (x, T, in_trigger (T :: Ts) u) - | (q as Const (@{const_name Let}, _), [u1, u2 as Abs _]) => - q $ traverse Ts u1 $ traverse Ts u2 - | (u as Const (c as (_, T)), ts) => - (case Old_SMT_Builtin.dest_builtin ctxt c ts of - SOME (_, k, us, mk) => - let - val (ts1, ts2) = chop k (map (traverse Ts) us) - val U = Term.strip_type T |>> snd o chop k |> (op --->) - in apply 0 (mk ts1) U ts2 end - | NONE => app_func u T (map (traverse Ts) ts)) - | (u as Free (_, T), ts) => app_func u T (map (traverse Ts) ts) - | (u as Bound i, ts) => apply 0 u (nth Ts i) (map (traverse Ts) ts) - | (Abs (n, T, u), ts) => traverses Ts (Abs (n, T, traverse (T::Ts) u)) ts - | (u, ts) => traverses Ts u ts) - and in_trigger Ts ((c as @{const Old_SMT_patch.trigger}) $ p $ t) = - c $ in_pats Ts p $ in_weight Ts t - | in_trigger Ts t = in_weight Ts t - and in_pats Ts ps = - in_list @{typ "Old_SMT_patch.pattern list"} - (in_list @{typ Old_SMT_patch.pattern} (in_pat Ts)) ps - and in_pat Ts ((p as Const (@{const_name Old_SMT_patch.pat}, _)) $ t) = - p $ traverse Ts t - | in_pat Ts ((p as Const (@{const_name Old_SMT_patch.nopat}, _)) $ t) = - p $ traverse Ts t - | in_pat _ t = raise TERM ("bad pattern", [t]) - and in_weight Ts ((c as @{const Old_SMT_patch.weight}) $ w $ t) = - c $ w $ traverse Ts t - | in_weight Ts t = traverse Ts t - and traverses Ts t ts = Term.list_comb (t, map (traverse Ts) ts) - in map (traverse []) ts end - -val fun_app_eq = mk_meta_eq @{thm Old_SMT_patch.fun_app_def} - -end - - -(** map HOL formulas to FOL formulas (i.e., separate formulas froms terms) **) - -local - val term_bool = @{lemma "Old_SMT_patch.term_true ~= Old_SMT_patch.term_false" - by (simp add: Old_SMT_patch.term_true_def Old_SMT_patch.term_false_def)} - - val is_quant = member (op =) [@{const_name All}, @{const_name Ex}] - - val fol_rules = [ - Let_def, - mk_meta_eq @{thm Old_SMT_patch.term_true_def}, - mk_meta_eq @{thm Old_SMT_patch.term_false_def}, - @{lemma "P = True == P" by (rule eq_reflection) 
simp}, - @{lemma "if P then True else False == P" by (rule eq_reflection) simp}] - - fun as_term t = @{const HOL.eq (bool)} $ t $ @{const Old_SMT_patch.term_true} - - exception BAD_PATTERN of unit - - fun wrap_in_if pat t = - if pat then - raise BAD_PATTERN () - else - @{const If (bool)} $ t $ @{const Old_SMT_patch.term_true} $ @{const Old_SMT_patch.term_false} - - fun is_builtin_conn_or_pred ctxt c ts = - is_some (Old_SMT_Builtin.dest_builtin_conn ctxt c ts) orelse - is_some (Old_SMT_Builtin.dest_builtin_pred ctxt c ts) - - fun builtin b ctxt c ts = - (case (Const c, ts) of - (@{const HOL.eq (bool)}, [t, u]) => - if t = @{const Old_SMT_patch.term_true} orelse u = @{const Old_SMT_patch.term_true} then - Old_SMT_Builtin.dest_builtin_eq ctxt t u - else b ctxt c ts - | _ => b ctxt c ts) -in - -fun folify ctxt = - let - fun in_list T f t = HOLogic.mk_list T (map_filter f (HOLogic.dest_list t)) - - fun in_term pat t = - (case Term.strip_comb t of - (@{const True}, []) => @{const Old_SMT_patch.term_true} - | (@{const False}, []) => @{const Old_SMT_patch.term_false} - | (u as Const (@{const_name If}, _), [t1, t2, t3]) => - if pat then raise BAD_PATTERN () - else u $ in_form t1 $ in_term pat t2 $ in_term pat t3 - | (Const (c as (n, _)), ts) => - if is_builtin_conn_or_pred ctxt c ts then wrap_in_if pat (in_form t) - else if is_quant n then wrap_in_if pat (in_form t) - else Term.list_comb (Const c, map (in_term pat) ts) - | (Free c, ts) => Term.list_comb (Free c, map (in_term pat) ts) - | _ => t) - - and in_weight ((c as @{const Old_SMT_patch.weight}) $ w $ t) = c $ w $ in_form t - | in_weight t = in_form t - - and in_pat ((p as Const (@{const_name Old_SMT_patch.pat}, _)) $ t) = - p $ in_term true t - | in_pat ((p as Const (@{const_name Old_SMT_patch.nopat}, _)) $ t) = - p $ in_term true t - | in_pat t = raise TERM ("bad pattern", [t]) - - and in_pats ps = - in_list @{typ "Old_SMT_patch.pattern list"} - (SOME o in_list @{typ Old_SMT_patch.pattern} (try in_pat)) ps - - and in_trigger ((c as @{const Old_SMT_patch.trigger}) $ p $ t) = - c $ in_pats p $ in_weight t - | in_trigger t = in_weight t - - and in_form t = - (case Term.strip_comb t of - (q as Const (qn, _), [Abs (n, T, u)]) => - if is_quant qn then q $ Abs (n, T, in_trigger u) - else as_term (in_term false t) - | (Const c, ts) => - (case Old_SMT_Builtin.dest_builtin_conn ctxt c ts of - SOME (_, _, us, mk) => mk (map in_form us) - | NONE => - (case Old_SMT_Builtin.dest_builtin_pred ctxt c ts of - SOME (_, _, us, mk) => mk (map (in_term false) us) - | NONE => as_term (in_term false t))) - | _ => as_term (in_term false t)) - in - map in_form #> - cons (Old_SMT_Utils.prop_of term_bool) #> - pair (fol_rules, [term_bool], builtin) - end - -end - - -(* translation into intermediate format *) - -(** utility functions **) - -val quantifier = (fn - @{const_name All} => SOME SForall - | @{const_name Ex} => SOME SExists - | _ => NONE) - -fun group_quant qname Ts (t as Const (q, _) $ Abs (_, T, u)) = - if q = qname then group_quant qname (T :: Ts) u else (Ts, t) - | group_quant _ Ts t = (Ts, t) - -fun dest_weight (@{const Old_SMT_patch.weight} $ w $ t) = - (SOME (snd (HOLogic.dest_number w)), t) - | dest_weight t = (NONE, t) - -fun dest_pat (Const (@{const_name Old_SMT_patch.pat}, _) $ t) = (t, true) - | dest_pat (Const (@{const_name Old_SMT_patch.nopat}, _) $ t) = (t, false) - | dest_pat t = raise TERM ("bad pattern", [t]) - -fun dest_pats [] = I - | dest_pats ts = - (case map dest_pat ts |> split_list ||> distinct (op =) of - (ps, [true]) => cons (SPat 
ps) - | (ps, [false]) => cons (SNoPat ps) - | _ => raise TERM ("bad multi-pattern", ts)) - -fun dest_trigger (@{const Old_SMT_patch.trigger} $ tl $ t) = - (rev (fold (dest_pats o HOLogic.dest_list) (HOLogic.dest_list tl) []), t) - | dest_trigger t = ([], t) - -fun dest_quant qn T t = quantifier qn |> Option.map (fn q => - let - val (Ts, u) = group_quant qn [T] t - val (ps, p) = dest_trigger u - val (w, b) = dest_weight p - in (q, rev Ts, ps, w, b) end) - -fun fold_map_pat f (SPat ts) = fold_map f ts #>> SPat - | fold_map_pat f (SNoPat ts) = fold_map f ts #>> SNoPat - - -(** translation from Isabelle terms into SMT intermediate terms **) - -fun intermediate header dtyps builtin ctxt ts trx = - let - fun transT (T as TFree _) = add_typ T true - | transT (T as TVar _) = (fn _ => raise TYPE ("bad SMT type", [T], [])) - | transT (T as Type _) = - (case Old_SMT_Builtin.dest_builtin_typ ctxt T of - SOME n => pair n - | NONE => add_typ T true) - - fun app n ts = SApp (n, ts) - - fun trans t = - (case Term.strip_comb t of - (Const (qn, _), [Abs (_, T, t1)]) => - (case dest_quant qn T t1 of - SOME (q, Ts, ps, w, b) => - fold_map transT Ts ##>> fold_map (fold_map_pat trans) ps ##>> - trans b #>> (fn ((Ts', ps'), b') => SQua (q, Ts', ps', w, b')) - | NONE => raise TERM ("unsupported quantifier", [t])) - | (Const (@{const_name Let}, _), [t1, Abs (_, T, t2)]) => - transT T ##>> trans t1 ##>> trans t2 #>> - (fn ((U, u1), u2) => SLet (U, u1, u2)) - | (u as Const (c as (_, T)), ts) => - (case builtin ctxt c ts of - SOME (n, _, us, _) => fold_map trans us #>> app n - | NONE => transs u T ts) - | (u as Free (_, T), ts) => transs u T ts - | (Bound i, []) => pair (SVar i) - | _ => raise TERM ("bad SMT term", [t])) - - and transs t T ts = - let val (Us, U) = Old_SMT_Utils.dest_funT (length ts) T - in - fold_map transT Us ##>> transT U #-> (fn Up => - add_fun t (SOME Up) ##>> fold_map trans ts #>> SApp) - end - - val (us, trx') = fold_map trans ts trx - in ((sign_of (header ts) dtyps trx', us), trx') end - - - -(* translation *) - -structure Configs = Generic_Data -( - type T = (Proof.context -> config) Old_SMT_Utils.dict - val empty = [] - val extend = I - fun merge data = Old_SMT_Utils.dict_merge fst data -) - -fun add_config (cs, cfg) = Configs.map (Old_SMT_Utils.dict_update (cs, cfg)) - -fun get_config ctxt = - let val cs = Old_SMT_Config.solver_class_of ctxt - in - (case Old_SMT_Utils.dict_get (Configs.get (Context.Proof ctxt)) cs of - SOME cfg => cfg ctxt - | NONE => error ("SMT: no translation configuration found " ^ - "for solver class " ^ quote (Old_SMT_Utils.string_of_class cs))) - end - -fun translate ctxt comments ithms = - let - val {prefixes, is_fol, header, has_datatypes, serialize} = get_config ctxt - - val with_datatypes = - has_datatypes andalso Config.get ctxt Old_SMT_Config.datatypes - - fun no_dtyps (tr_context, ctxt) ts = - ((Termtab.empty, [], tr_context, ctxt), ts) - - val ts1 = map (Envir.beta_eta_contract o Old_SMT_Utils.prop_of o snd) ithms - - val ((funcs, dtyps, tr_context, ctxt1), ts2) = - ((make_tr_context prefixes, ctxt), ts1) - |-> (if with_datatypes then collect_datatypes_and_records else no_dtyps) - - fun is_binder (Const (@{const_name Let}, _) $ _) = true - | is_binder t = Lambda_Lifting.is_quantifier t - - fun mk_trigger ((q as Const (@{const_name All}, _)) $ Abs (n, T, t)) = - q $ Abs (n, T, mk_trigger t) - | mk_trigger (eq as (Const (@{const_name HOL.eq}, T) $ lhs $ _)) = - Term.domain_type T --> @{typ Old_SMT_patch.pattern} - |> (fn T => Const (@{const_name 
Old_SMT_patch.pat}, T) $ lhs) - |> HOLogic.mk_list @{typ Old_SMT_patch.pattern} o single - |> HOLogic.mk_list @{typ "Old_SMT_patch.pattern list"} o single - |> (fn t => @{const Old_SMT_patch.trigger} $ t $ eq) - | mk_trigger t = t - - val (ctxt2, ts3) = - ts2 - |> eta_expand ctxt1 is_fol funcs - |> rpair ctxt1 - |-> Lambda_Lifting.lift_lambdas NONE is_binder - |-> (fn (ts', defs) => fn ctxt' => - map mk_trigger defs @ ts' - |> intro_explicit_application ctxt' funcs - |> pair ctxt') - - val ((rewrite_rules, extra_thms, builtin), ts4) = - (if is_fol then folify ctxt2 else pair ([], [], I)) ts3 - - val rewrite_rules' = fun_app_eq :: rewrite_rules - in - (ts4, tr_context) - |-> intermediate header dtyps (builtin Old_SMT_Builtin.dest_builtin) ctxt2 - |>> uncurry (serialize comments) - ||> recon_of ctxt2 rewrite_rules' extra_thms ithms - end - -end diff --git a/src/test-gen/src/main/smt_patch/old_z3_interface_patch.ML b/src/test-gen/src/main/smt_patch/old_z3_interface_patch.ML deleted file mode 100644 index 31b0837..0000000 --- a/src/test-gen/src/main/smt_patch/old_z3_interface_patch.ML +++ /dev/null @@ -1,239 +0,0 @@ -(* Title: HOL/Library/Old_SMT/old_z3_interface.ML - Author: Sascha Boehme, TU Muenchen - -Interface to Z3 based on a relaxed version of SMT-LIB. -*) - -signature OLD_Z3_INTERFACE = -sig - val smtlib_z3C: Old_SMT_Utils.class - val setup: theory -> theory - - datatype sym = Sym of string * sym list - type mk_builtins = { - mk_builtin_typ: sym -> typ option, - mk_builtin_num: theory -> int -> typ -> cterm option, - mk_builtin_fun: theory -> sym -> cterm list -> cterm option } - val add_mk_builtins: mk_builtins -> Context.generic -> Context.generic - val mk_builtin_typ: Proof.context -> sym -> typ option - val mk_builtin_num: Proof.context -> int -> typ -> cterm option - val mk_builtin_fun: Proof.context -> sym -> cterm list -> cterm option - - val is_builtin_theory_term: Proof.context -> term -> bool -end - -structure Old_Z3_Interface: OLD_Z3_INTERFACE = -struct - -val smtlib_z3C = Old_SMTLIB_Interface.smtlibC @ ["z3"] - - - -(* interface *) - -local - fun translate_config ctxt = - let - val {prefixes, header, is_fol, serialize, ...} = - Old_SMTLIB_Interface.translate_config ctxt - in - {prefixes=prefixes, header=header, is_fol=is_fol, serialize=serialize, - has_datatypes=true} - end - - fun is_div_mod @{const divide (int)} = true - | is_div_mod @{const modulo (int)} = true - | is_div_mod _ = false - - val div_by_z3div = @{lemma - "ALL k l. k div l = ( - if k = 0 | l = 0 then 0 - else if (0 < k & 0 < l) | (k < 0 & 0 < l) then z3div k l - else z3div (-k) (-l))" - by (simp add: Old_SMT_patch.z3div_def)} - - val mod_by_z3mod = @{lemma - "ALL k l. 
k mod l = ( - if l = 0 then k - else if k = 0 then 0 - else if (0 < k & 0 < l) | (k < 0 & 0 < l) then z3mod k l - else - z3mod (-k) (-l))" - by (simp add: z3mod_def)} - - val have_int_div_mod = - exists (Term.exists_subterm is_div_mod o Thm.prop_of) - - fun add_div_mod _ (thms, extra_thms) = - if have_int_div_mod thms orelse have_int_div_mod extra_thms then - (thms, div_by_z3div :: mod_by_z3mod :: extra_thms) - else (thms, extra_thms) - - val setup_builtins = - Old_SMT_Builtin.add_builtin_fun' smtlib_z3C (@{const times (int)}, "*") #> - Old_SMT_Builtin.add_builtin_fun' smtlib_z3C (@{const z3div}, "div") #> - Old_SMT_Builtin.add_builtin_fun' smtlib_z3C (@{const z3mod}, "mod") -in - -val setup = Context.theory_map ( - setup_builtins #> - Old_SMT_Normalize.add_extra_norm (smtlib_z3C, add_div_mod) #> - Old_SMT_Translate.add_config (smtlib_z3C, translate_config)) - -end - - - -(* constructors *) - -datatype sym = Sym of string * sym list - - -(** additional constructors **) - -type mk_builtins = { - mk_builtin_typ: sym -> typ option, - mk_builtin_num: theory -> int -> typ -> cterm option, - mk_builtin_fun: theory -> sym -> cterm list -> cterm option } - -fun chained _ [] = NONE - | chained f (b :: bs) = (case f b of SOME y => SOME y | NONE => chained f bs) - -fun chained_mk_builtin_typ bs sym = - chained (fn {mk_builtin_typ=mk, ...} : mk_builtins => mk sym) bs - -fun chained_mk_builtin_num ctxt bs i T = - let val thy = Proof_Context.theory_of ctxt - in chained (fn {mk_builtin_num=mk, ...} : mk_builtins => mk thy i T) bs end - -fun chained_mk_builtin_fun ctxt bs s cts = - let val thy = Proof_Context.theory_of ctxt - in chained (fn {mk_builtin_fun=mk, ...} : mk_builtins => mk thy s cts) bs end - -fun fst_int_ord ((i1, _), (i2, _)) = int_ord (i1, i2) - -structure Mk_Builtins = Generic_Data -( - type T = (int * mk_builtins) list - val empty = [] - val extend = I - fun merge data = Ord_List.merge fst_int_ord data -) - -fun add_mk_builtins mk = - Mk_Builtins.map (Ord_List.insert fst_int_ord (serial (), mk)) - -fun get_mk_builtins ctxt = map snd (Mk_Builtins.get (Context.Proof ctxt)) - - -(** basic and additional constructors **) - -fun mk_builtin_typ _ (Sym ("Bool", _)) = SOME @{typ bool} - | mk_builtin_typ _ (Sym ("Int", _)) = SOME @{typ int} - | mk_builtin_typ _ (Sym ("bool", _)) = SOME @{typ bool} (*FIXME: legacy*) - | mk_builtin_typ _ (Sym ("int", _)) = SOME @{typ int} (*FIXME: legacy*) - | mk_builtin_typ ctxt sym = chained_mk_builtin_typ (get_mk_builtins ctxt) sym - -fun mk_builtin_num _ i @{typ int} = SOME (Numeral.mk_cnumber @{ctyp int} i) - | mk_builtin_num ctxt i T = - chained_mk_builtin_num ctxt (get_mk_builtins ctxt) i T - -val mk_true = Thm.cterm_of @{context} (@{const Not} $ @{const False}) -val mk_false = Thm.cterm_of @{context} @{const False} -val mk_not = Thm.apply (Thm.cterm_of @{context} @{const Not}) -val mk_implies = Thm.mk_binop (Thm.cterm_of @{context} @{const HOL.implies}) -val mk_iff = Thm.mk_binop (Thm.cterm_of @{context} @{const HOL.eq (bool)}) -val conj = Thm.cterm_of @{context} @{const HOL.conj} -val disj = Thm.cterm_of @{context} @{const HOL.disj} - -fun mk_nary _ cu [] = cu - | mk_nary ct _ cts = uncurry (fold_rev (Thm.mk_binop ct)) (split_last cts) - -val eq = Old_SMT_Utils.mk_const_pat @{theory} @{const_name HOL.eq} Old_SMT_Utils.destT1 -fun mk_eq ct cu = Thm.mk_binop (Old_SMT_Utils.instT' ct eq) ct cu - -val if_term = - Old_SMT_Utils.mk_const_pat @{theory} @{const_name If} - (Old_SMT_Utils.destT1 o Old_SMT_Utils.destT2) -fun mk_if cc ct cu = - Thm.mk_binop (Thm.apply 
(Old_SMT_Utils.instT' ct if_term) cc) ct cu - -val nil_term = - Old_SMT_Utils.mk_const_pat @{theory} @{const_name Nil} Old_SMT_Utils.destT1 -val cons_term = - Old_SMT_Utils.mk_const_pat @{theory} @{const_name Cons} Old_SMT_Utils.destT1 -fun mk_list cT cts = - fold_rev (Thm.mk_binop (Old_SMT_Utils.instT cT cons_term)) cts - (Old_SMT_Utils.instT cT nil_term) - -val distinct = Old_SMT_Utils.mk_const_pat @{theory} @{const_name distinct} - (Old_SMT_Utils.destT1 o Old_SMT_Utils.destT1) -fun mk_distinct [] = mk_true - | mk_distinct (cts as (ct :: _)) = - Thm.apply (Old_SMT_Utils.instT' ct distinct) - (mk_list (Thm.ctyp_of_cterm ct) cts) - -val access = - Old_SMT_Utils.mk_const_pat @{theory} @{const_name fun_app} Old_SMT_Utils.destT1 -fun mk_access array = Thm.apply (Old_SMT_Utils.instT' array access) array - -val update = Old_SMT_Utils.mk_const_pat @{theory} @{const_name fun_upd} - (Thm.dest_ctyp o Old_SMT_Utils.destT1) -fun mk_update array index value = - let val cTs = Thm.dest_ctyp (Thm.ctyp_of_cterm array) - in - Thm.apply (Thm.mk_binop (Old_SMT_Utils.instTs cTs update) array index) value - end - -val mk_uminus = Thm.apply (Thm.cterm_of @{context} @{const uminus (int)}) -val add = Thm.cterm_of @{context} @{const plus (int)} -val int0 = Numeral.mk_cnumber @{ctyp int} 0 -val mk_sub = Thm.mk_binop (Thm.cterm_of @{context} @{const minus (int)}) -val mk_mul = Thm.mk_binop (Thm.cterm_of @{context} @{const times (int)}) -val mk_div = Thm.mk_binop (Thm.cterm_of @{context} @{const z3div}) -val mk_mod = Thm.mk_binop (Thm.cterm_of @{context} @{const z3mod}) -val mk_lt = Thm.mk_binop (Thm.cterm_of @{context} @{const less (int)}) -val mk_le = Thm.mk_binop (Thm.cterm_of @{context} @{const less_eq (int)}) - -fun mk_builtin_fun ctxt sym cts = - (case (sym, cts) of - (Sym ("true", _), []) => SOME mk_true - | (Sym ("false", _), []) => SOME mk_false - | (Sym ("not", _), [ct]) => SOME (mk_not ct) - | (Sym ("and", _), _) => SOME (mk_nary conj mk_true cts) - | (Sym ("or", _), _) => SOME (mk_nary disj mk_false cts) - | (Sym ("implies", _), [ct, cu]) => SOME (mk_implies ct cu) - | (Sym ("iff", _), [ct, cu]) => SOME (mk_iff ct cu) - | (Sym ("~", _), [ct, cu]) => SOME (mk_iff ct cu) - | (Sym ("xor", _), [ct, cu]) => SOME (mk_not (mk_iff ct cu)) - | (Sym ("if", _), [ct1, ct2, ct3]) => SOME (mk_if ct1 ct2 ct3) - | (Sym ("ite", _), [ct1, ct2, ct3]) => SOME (mk_if ct1 ct2 ct3) (* FIXME: remove *) - | (Sym ("=", _), [ct, cu]) => SOME (mk_eq ct cu) - | (Sym ("distinct", _), _) => SOME (mk_distinct cts) - | (Sym ("select", _), [ca, ck]) => SOME (Thm.apply (mk_access ca) ck) - | (Sym ("store", _), [ca, ck, cv]) => SOME (mk_update ca ck cv) - | _ => - (case (sym, try (Thm.typ_of_cterm o hd) cts, cts) of - (Sym ("+", _), SOME @{typ int}, _) => SOME (mk_nary add int0 cts) - | (Sym ("-", _), SOME @{typ int}, [ct]) => SOME (mk_uminus ct) - | (Sym ("-", _), SOME @{typ int}, [ct, cu]) => SOME (mk_sub ct cu) - | (Sym ("*", _), SOME @{typ int}, [ct, cu]) => SOME (mk_mul ct cu) - | (Sym ("div", _), SOME @{typ int}, [ct, cu]) => SOME (mk_div ct cu) - | (Sym ("mod", _), SOME @{typ int}, [ct, cu]) => SOME (mk_mod ct cu) - | (Sym ("<", _), SOME @{typ int}, [ct, cu]) => SOME (mk_lt ct cu) - | (Sym ("<=", _), SOME @{typ int}, [ct, cu]) => SOME (mk_le ct cu) - | (Sym (">", _), SOME @{typ int}, [ct, cu]) => SOME (mk_lt cu ct) - | (Sym (">=", _), SOME @{typ int}, [ct, cu]) => SOME (mk_le cu ct) - | _ => chained_mk_builtin_fun ctxt (get_mk_builtins ctxt) sym cts)) - - - -(* abstraction *) - -fun is_builtin_theory_term ctxt t = - if 
Old_SMT_Builtin.is_builtin_num ctxt t then true - else - (case Term.strip_comb t of - (Const c, ts) => Old_SMT_Builtin.is_builtin_fun ctxt c ts - | _ => false) - -end diff --git a/src/test-gen/src/main/smt_patch/old_z3_model_patch.ML b/src/test-gen/src/main/smt_patch/old_z3_model_patch.ML deleted file mode 100644 index 8ea560c..0000000 --- a/src/test-gen/src/main/smt_patch/old_z3_model_patch.ML +++ /dev/null @@ -1,337 +0,0 @@ -(* Title: HOL/Library/Old_SMT/old_z3_model.ML - Author: Sascha Boehme and Philipp Meyer, TU Muenchen - -Parser for counterexamples generated by Z3. -*) - -signature OLD_Z3_MODEL = -sig - val parse_counterex: Proof.context -> Old_SMT_Translate.recon -> string list -> - term list * term list -end - -structure Old_Z3_Model: OLD_Z3_MODEL = -struct - - -(* counterexample expressions *) - -datatype expr = True | False | Number of int * int option | Value of int | - Array of array | App of string * expr list -and array = Fresh of expr | Store of (array * expr) * expr - - -(* parsing *) - -val space = Scan.many Symbol.is_ascii_blank -fun spaced p = p --| space -fun in_parens p = spaced (Scan.$$ "(") |-- p --| spaced (Scan.$$ ")") -fun in_braces p = spaced (Scan.$$ "{") |-- p --| spaced (Scan.$$ "}") - -val digit = (fn - "0" => SOME 0 | "1" => SOME 1 | "2" => SOME 2 | "3" => SOME 3 | - "4" => SOME 4 | "5" => SOME 5 | "6" => SOME 6 | "7" => SOME 7 | - "8" => SOME 8 | "9" => SOME 9 | _ => NONE) - -val nat_num = spaced (Scan.repeat1 (Scan.some digit) >> - (fn ds => fold (fn d => fn i => i * 10 + d) ds 0)) -val int_num = spaced (Scan.optional ($$ "-" >> K (fn i => ~i)) I :|-- - (fn sign => nat_num >> sign)) - -val is_char = Symbol.is_ascii_letter orf Symbol.is_ascii_digit orf - member (op =) (raw_explode "_+*-/%~=<>$&|?!.@^#") -val name = spaced (Scan.many1 is_char >> implode) - -fun $$$ s = spaced (Scan.this_string s) - -fun array_expr st = st |> in_parens ( - $$$ "const" |-- expr >> Fresh || - $$$ "store" |-- array_expr -- expr -- expr >> Store) - -and expr st = st |> ( - $$$ "true" >> K True || - $$$ "false" >> K False || - int_num -- Scan.option ($$$ "/" |-- int_num) >> Number || - $$$ "val!" 
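(* nat_num above folds a scanned list of decimal digits into a number with
   fold (fn d => fn i => i * 10 + d) ds 0.  A standalone SML sketch of that
   step over an ordinary string; nat_of_digits is an illustrative name. *)
fun nat_of_digits (s : string) : int option =
  let
    val cs = String.explode s
    fun value c = Char.ord c - Char.ord #"0"
  in
    if not (null cs) andalso List.all Char.isDigit cs
    then SOME (List.foldl (fn (c, i) => i * 10 + value c) 0 cs)
    else NONE
  end

val demo_num = nat_of_digits "345"   (* SOME 345 *)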
|-- nat_num >> Value || - name >> (App o rpair []) || - array_expr >> Array || - in_parens (name -- Scan.repeat1 expr) >> App) - -fun args st = ($$$ "->" >> K [] || expr ::: args) st -val args_case = args -- expr -val else_case = $$$ "else" -- $$$ "->" |-- expr >> pair ([] : expr list) - -val func = - let fun cases st = (else_case >> single || args_case ::: cases) st - in in_braces cases end - -val cex = space |-- - Scan.repeat (name --| $$$ "->" -- (func || expr >> (single o pair []))) - -fun resolve terms ((n, k), cases) = - (case Symtab.lookup terms n of - NONE => NONE - | SOME t => SOME ((t, k), cases)) - -fun annotate _ (_, []) = NONE - | annotate terms (n, [([], c)]) = resolve terms ((n, 0), (c, [])) - | annotate _ (_, [_]) = NONE - | annotate terms (n, cases as (args, _) :: _) = - let val (cases', (_, else_case)) = split_last cases - in resolve terms ((n, length args), (else_case, cases')) end - -fun read_cex terms ls = - maps (cons "\n" o raw_explode) ls - |> try (fst o Scan.finite Symbol.stopper cex) - |> the_default [] - |> map_filter (annotate terms) - - -(* translation into terms *) - -fun max_value vs = - let - fun max_val_expr (Value i) = Integer.max i - | max_val_expr (App (_, es)) = fold max_val_expr es - | max_val_expr (Array a) = max_val_array a - | max_val_expr _ = I - - and max_val_array (Fresh e) = max_val_expr e - | max_val_array (Store ((a, e1), e2)) = - max_val_array a #> max_val_expr e1 #> max_val_expr e2 - - fun max_val (_, (ec, cs)) = - max_val_expr ec #> fold (fn (es, e) => fold max_val_expr (e :: es)) cs - - in fold max_val vs ~1 end - -fun with_context terms f vs = fst (fold_map f vs (terms, max_value vs + 1)) - -fun get_term n T es (cx as (terms, next_val)) = - (case Symtab.lookup terms n of - SOME t => ((t, es), cx) - | NONE => - let val t = Var (("skolem", next_val), T) - in ((t, []), (Symtab.update (n, t) terms, next_val + 1)) end) - -fun trans_expr _ True = pair @{const True} - | trans_expr _ False = pair @{const False} - | trans_expr T (Number (i, NONE)) = pair (HOLogic.mk_number T i) - | trans_expr T (Number (i, SOME j)) = - pair (Const (@{const_name divide}, [T, T] ---> T) $ - HOLogic.mk_number T i $ HOLogic.mk_number T j) - | trans_expr T (Value i) = pair (Var (("value", i), T)) - | trans_expr T (Array a) = trans_array T a - | trans_expr T (App (n, es)) = get_term n T es #-> (fn (t, es') => - let val Ts = fst (Old_SMT_Utils.dest_funT (length es') (Term.fastype_of t)) - in - fold_map (uncurry trans_expr) (Ts ~~ es') #>> Term.list_comb o pair t - end) - -and trans_array T a = - let val (dT, rT) = Term.dest_funT T - in - (case a of - Fresh e => trans_expr rT e #>> (fn t => Abs ("x", dT, t)) - | Store ((a', e1), e2) => - trans_array T a' ##>> trans_expr dT e1 ##>> trans_expr rT e2 #>> - (fn ((m, k), v) => - Const (@{const_name fun_upd}, [T, dT, rT] ---> T) $ m $ k $ v)) - end - -fun trans_pattern T ([], e) = trans_expr T e #>> pair [] - | trans_pattern T (arg :: args, e) = - trans_expr (Term.domain_type T) arg ##>> - trans_pattern (Term.range_type T) (args, e) #>> - (fn (arg', (args', e')) => (arg' :: args', e')) - -fun mk_fun_upd T U = Const (@{const_name fun_upd}, [T --> U, T, U, T] ---> U) - -fun mk_update ([], u) _ = u - | mk_update ([t], u) f = - uncurry mk_fun_upd (Term.dest_funT (Term.fastype_of f)) $ f $ t $ u - | mk_update (t :: ts, u) f = - let - val (dT, rT) = Term.dest_funT (Term.fastype_of f) - val (dT', rT') = Term.dest_funT rT - in - mk_fun_upd dT rT $ f $ t $ - mk_update (ts, u) (absdummy dT' (Const ("_", rT'))) - end - -fun mk_lambda Ts 
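(* get_term and trans_expr above thread a context (a table of known terms plus
   a counter for fresh value variables) through the translation by means of
   Isabelle/ML's fold_map, which plain SML lacks.  A minimal standalone version
   and a small use; the numbering example is purely illustrative. *)
fun fold_map _ [] acc = ([], acc)
  | fold_map f (x :: xs) acc =
      let
        val (y, acc') = f x acc
        val (ys, acc'') = fold_map f xs acc'
      in (y :: ys, acc'') end

(* number the elements of a list while passing the next unused index along *)
val (numbered, next_idx) =
  fold_map (fn x => fn n => ((n, x), n + 1)) ["skolem_a", "skolem_b"] 0
(* numbered = [(0, "skolem_a"), (1, "skolem_b")], next_idx = 2 *)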
(t, pats) = - fold_rev absdummy Ts t |> fold mk_update pats - -fun translate ((t, k), (e, cs)) = - let - val T = Term.fastype_of t - val (Us, U) = Old_SMT_Utils.dest_funT k (Term.fastype_of t) - - fun mk_full_def u' pats = - pats - |> filter_out (fn (_, u) => u aconv u') - |> HOLogic.mk_eq o pair t o mk_lambda Us o pair u' - - fun mk_eq (us, u) = HOLogic.mk_eq (Term.list_comb (t, us), u) - fun mk_eqs u' [] = [HOLogic.mk_eq (t, u')] - | mk_eqs _ pats = map mk_eq pats - in - trans_expr U e ##>> - (if k = 0 then pair [] else fold_map (trans_pattern T) cs) #>> - (fn (u', pats) => (mk_eqs u' pats, mk_full_def u' pats)) - end - - -(* normalization *) - -fun partition_eqs f = - let - fun part t (xs, ts) = - (case try HOLogic.dest_eq t of - SOME (l, r) => (case f l r of SOME x => (x::xs, ts) | _ => (xs, t::ts)) - | NONE => (xs, t :: ts)) - in (fn ts => fold part ts ([], [])) end - -fun first_eq pred = - let - fun part _ [] = NONE - | part us (t :: ts) = - (case try (pred o HOLogic.dest_eq) t of - SOME (SOME lr) => SOME (lr, fold cons us ts) - | _ => part (t :: us) ts) - in (fn ts => part [] ts) end - -fun replace_vars tab = - let - fun repl v = the_default v (AList.lookup (op aconv) tab v) - fun replace (v as Var _) = repl v - | replace (v as Free _) = repl v - | replace t = t - in map (Term.map_aterms replace) end - -fun remove_int_nat_coercions (eqs, defs) = - let - fun mk_nat_num t i = - (case try HOLogic.dest_number i of - SOME (_, n) => SOME (t, HOLogic.mk_number @{typ nat} n) - | NONE => NONE) - fun nat_of (@{const of_nat (int)} $ (t as Var _)) i = mk_nat_num t i - | nat_of (@{const nat} $ i) (t as Var _) = mk_nat_num t i - | nat_of _ _ = NONE - val (nats, eqs') = partition_eqs nat_of eqs - - fun is_coercion t = - (case try HOLogic.dest_eq t of - SOME (@{const of_nat (int)}, _) => true - | SOME (@{const nat}, _) => true - | _ => false) - in apply2 (replace_vars nats) (eqs', filter_out is_coercion defs) end - -fun unfold_funapp (eqs, defs) = - let - fun unfold_app (Const (@{const_name Old_SMT_patch.fun_app}, _) $ f $ t) = f $ t - | unfold_app t = t - fun unfold_eq ((eq as Const (@{const_name HOL.eq}, _)) $ t $ u) = - eq $ unfold_app t $ u - | unfold_eq t = t - - fun is_fun_app t = - (case try HOLogic.dest_eq t of - SOME (Const (@{const_name Old_SMT_patch.fun_app}, _), _) => true - | _ => false) - - in (map unfold_eq eqs, filter_out is_fun_app defs) end - -val unfold_eqs = - let - val is_ground = not o Term.exists_subterm Term.is_Var - fun is_non_rec (v, t) = not (Term.exists_subterm (equal v) t) - - fun rewr_var (l as Var _, r) = if is_ground r then SOME (l, r) else NONE - | rewr_var (r, l as Var _) = if is_ground r then SOME (l, r) else NONE - | rewr_var _ = NONE - - fun rewr_free' e = if is_non_rec e then SOME e else NONE - fun rewr_free (e as (Free _, _)) = rewr_free' e - | rewr_free (e as (_, Free _)) = rewr_free' (swap e) - | rewr_free _ = NONE - - fun is_trivial (Const (@{const_name HOL.eq}, _) $ t $ u) = t aconv u - | is_trivial _ = false - - fun replace r = replace_vars [r] #> filter_out is_trivial - - fun unfold_vars (es, ds) = - (case first_eq rewr_var es of - SOME (lr, es') => unfold_vars (apply2 (replace lr) (es', ds)) - | NONE => (es, ds)) - - fun unfold_frees ues (es, ds) = - (case first_eq rewr_free es of - SOME (lr, es') => - apply2 (replace lr) (es', ds) - |> unfold_frees (HOLogic.mk_eq lr :: replace lr ues) - | NONE => (ues @ es, ds)) - - in unfold_vars #> unfold_frees [] end - -fun swap_free ((eq as Const (@{const_name HOL.eq}, _)) $ t $ (u as Free _)) = - eq $ u $ t - | 
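(* unfold_eqs above repeatedly picks an equation that binds a variable to a
   ground term, rewrites the remaining equations with it and drops the ones
   that become trivial.  A toy standalone SML version over whole-string
   "equations" in which variables carry a leading "?"; is_var, apply_binding
   and unfold are names local to this sketch. *)
fun is_var s = String.isPrefix "?" s

fun subst (v, t) x = if x = v then t else x

fun apply_binding b eqs =
  List.filter (fn (l, r) => l <> r)
    (List.map (fn (l, r) => (subst b l, subst b r)) eqs)

fun unfold eqs =
  (case List.find (fn (l, r) => is_var l andalso not (is_var r)) eqs of
     NONE => eqs
   | SOME (l, r) =>
       unfold (apply_binding (l, r) (List.filter (fn e => e <> (l, r)) eqs)))

val demo_eqs = unfold [("?x", "3"), ("?y", "?x")]   (* [], both bindings used up *)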
swap_free t = t - -fun frees_for_vars ctxt (eqs, defs) = - let - fun fresh_free i T (cx as (frees, ctxt)) = - (case Inttab.lookup frees i of - SOME t => (t, cx) - | NONE => - let - val (n, ctxt') = yield_singleton Variable.variant_fixes "" ctxt - val t = Free (n, T) - in (t, (Inttab.update (i, t) frees, ctxt')) end) - - fun repl_var (Var ((_, i), T)) = fresh_free i T - | repl_var (t $ u) = repl_var t ##>> repl_var u #>> op $ - | repl_var (Abs (n, T, t)) = repl_var t #>> (fn t' => Abs (n, T, t')) - | repl_var t = pair t - in - (Inttab.empty, ctxt) - |> fold_map repl_var eqs - ||>> fold_map repl_var defs - |> fst - end - - -(* overall procedure *) - -val is_free_constraint = Term.exists_subterm (fn Free _ => true | _ => false) - -fun is_free_def (Const (@{const_name HOL.eq}, _) $ Free _ $ _) = true - | is_free_def _ = false - -fun defined tp = - try (apply2 (fst o HOLogic.dest_eq)) tp - |> the_default false o Option.map (op aconv) - -fun add_free_defs free_cs defs = - let val (free_defs, defs') = List.partition is_free_def defs - in (free_cs @ filter_out (member defined free_cs) free_defs, defs') end - -fun is_const_def (Const (@{const_name HOL.eq}, _) $ Const _ $ _) = true - | is_const_def _ = false - -fun parse_counterex ctxt ({terms, ...} : Old_SMT_Translate.recon) ls = - read_cex terms ls - |> with_context terms translate - |> apfst flat o split_list - |> remove_int_nat_coercions - |> unfold_funapp - |> unfold_eqs - |>> map swap_free - |>> filter is_free_constraint - |-> add_free_defs - |> frees_for_vars ctxt - ||> filter is_const_def - -end - diff --git a/src/test-gen/src/main/smt_patch/old_z3_proof_parser_patch.ML b/src/test-gen/src/main/smt_patch/old_z3_proof_parser_patch.ML deleted file mode 100644 index 0cf3aa5..0000000 --- a/src/test-gen/src/main/smt_patch/old_z3_proof_parser_patch.ML +++ /dev/null @@ -1,446 +0,0 @@ -(* Title: HOL/Library/Old_SMT/old_z3_proof_parser.ML - Author: Sascha Boehme, TU Muenchen - -Parser for Z3 proofs. 
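(* parse_counterex above chains its normalisation passes with Isabelle/ML's
   reverse-application operator |>, which standalone SML does not predefine.
   A self-contained definition and a tiny pipeline in the same style; the
   list example is illustrative only. *)
infix |>
fun x |> f = f x

val demo_pipe =
  [3, 1, 2]
  |> List.filter (fn n => n > 1)
  |> List.map Int.toString
  |> String.concatWith ","          (* "3,2" *)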
-*) - -signature OLD_Z3_PROOF_PARSER = -sig - (*proof rules*) - datatype rule = True_Axiom | Asserted | Goal | Modus_Ponens | Reflexivity | - Symmetry | Transitivity | Transitivity_Star | Monotonicity | Quant_Intro | - Distributivity | And_Elim | Not_Or_Elim | Rewrite | Rewrite_Star | - Pull_Quant | Pull_Quant_Star | Push_Quant | Elim_Unused_Vars | - Dest_Eq_Res | Quant_Inst | Hypothesis | Lemma | Unit_Resolution | - Iff_True | Iff_False | Commutativity | Def_Axiom | Intro_Def | Apply_Def | - Iff_Oeq | Nnf_Pos | Nnf_Neg | Nnf_Star | Cnf_Star | Skolemize | - Modus_Ponens_Oeq | Th_Lemma of string list - val string_of_rule: rule -> string - - (*proof parser*) - datatype proof_step = Proof_Step of { - rule: rule, - args: cterm list, - prems: int list, - prop: cterm } - val parse: Proof.context -> typ Symtab.table -> term Symtab.table -> - string list -> - (int * cterm) list * (int * proof_step) list * string list * Proof.context -end - -structure Old_Z3_Proof_Parser: OLD_Z3_PROOF_PARSER = -struct - - -(* proof rules *) - -datatype rule = True_Axiom | Asserted | Goal | Modus_Ponens | Reflexivity | - Symmetry | Transitivity | Transitivity_Star | Monotonicity | Quant_Intro | - Distributivity | And_Elim | Not_Or_Elim | Rewrite | Rewrite_Star | - Pull_Quant | Pull_Quant_Star | Push_Quant | Elim_Unused_Vars | Dest_Eq_Res | - Quant_Inst | Hypothesis | Lemma | Unit_Resolution | Iff_True | Iff_False | - Commutativity | Def_Axiom | Intro_Def | Apply_Def | Iff_Oeq | Nnf_Pos | - Nnf_Neg | Nnf_Star | Cnf_Star | Skolemize | Modus_Ponens_Oeq | - Th_Lemma of string list - -val rule_names = Symtab.make [ - ("true-axiom", True_Axiom), - ("asserted", Asserted), - ("goal", Goal), - ("mp", Modus_Ponens), - ("refl", Reflexivity), - ("symm", Symmetry), - ("trans", Transitivity), - ("trans*", Transitivity_Star), - ("monotonicity", Monotonicity), - ("quant-intro", Quant_Intro), - ("distributivity", Distributivity), - ("and-elim", And_Elim), - ("not-or-elim", Not_Or_Elim), - ("rewrite", Rewrite), - ("rewrite*", Rewrite_Star), - ("pull-quant", Pull_Quant), - ("pull-quant*", Pull_Quant_Star), - ("push-quant", Push_Quant), - ("elim-unused", Elim_Unused_Vars), - ("der", Dest_Eq_Res), - ("quant-inst", Quant_Inst), - ("hypothesis", Hypothesis), - ("lemma", Lemma), - ("unit-resolution", Unit_Resolution), - ("iff-true", Iff_True), - ("iff-false", Iff_False), - ("commutativity", Commutativity), - ("def-axiom", Def_Axiom), - ("intro-def", Intro_Def), - ("apply-def", Apply_Def), - ("iff~", Iff_Oeq), - ("nnf-pos", Nnf_Pos), - ("nnf-neg", Nnf_Neg), - ("nnf*", Nnf_Star), - ("cnf*", Cnf_Star), - ("sk", Skolemize), - ("mp~", Modus_Ponens_Oeq), - ("th-lemma", Th_Lemma [])] - -fun string_of_rule (Th_Lemma args) = space_implode " " ("th-lemma" :: args) - | string_of_rule r = - let fun eq_rule (s, r') = if r = r' then SOME s else NONE - in the (Symtab.get_first eq_rule rule_names) end - - - -(* certified terms and variables *) - -val (var_prefix, decl_prefix) = ("v", "sk") -(* - "decl_prefix" is for skolem constants (represented by free variables), - "var_prefix" is for pseudo-schematic variables (schematic with respect - to the Z3 proof, but represented by free variables). - - Both prefixes must be distinct to avoid name interferences. - More precisely, the naming of pseudo-schematic variables must be - context-independent modulo the current proof context to be able to - use fast inference kernel rules during proof reconstruction. 
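(* rule_names above maps the rule labels found in a Z3 proof text to the
   constructors of the rule datatype, and string_of_rule inverts that mapping
   (with the th-lemma arguments re-attached).  A cut-down standalone SML
   sketch with only a handful of rules; all names here are local to the sketch. *)
datatype rule = Asserted | Modus_Ponens | Rewrite | Skolemize | Th_Lemma of string list

val rule_names =
  [("asserted", Asserted), ("mp", Modus_Ponens), ("rewrite", Rewrite), ("sk", Skolemize)]

fun rule_of_string n =
  Option.map (fn (_, r) => r) (List.find (fn (m, _) => m = n) rule_names)

fun string_of_rule (Th_Lemma args) = String.concatWith " " ("th-lemma" :: args)
  | string_of_rule r =
      (case List.find (fn (_, r') => r' = r) rule_names of
         SOME (s, _) => s
       | NONE => raise Fail "unknown rule")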
-*) - -fun mk_inst ctxt vars = - let - val max = fold (Integer.max o fst) vars 0 - val ns = fst (Variable.variant_fixes (replicate (max + 1) var_prefix) ctxt) - fun mk (i, v) = - (dest_Var (Thm.term_of v), Thm.cterm_of ctxt (Free (nth ns i, Thm.typ_of_cterm v))) - in map mk vars end - -fun close ctxt (ct, vars) = - let - val inst = mk_inst ctxt vars - val names = fold (Term.add_free_names o Thm.term_of o snd) inst [] - in (Thm.instantiate_cterm ([], inst) ct, names) end - - -fun mk_bound ctxt (i, T) = - let val ct = Thm.cterm_of ctxt (Var ((Name.uu, 0), T)) - in (ct, [(i, ct)]) end - -local - fun mk_quant1 ctxt q T (ct, vars) = - let - val cv = - (case AList.lookup (op =) vars 0 of - SOME cv => cv - | _ => Thm.cterm_of ctxt (Var ((Name.uu, Thm.maxidx_of_cterm ct + 1), T))) - fun dec (i, v) = if i = 0 then NONE else SOME (i-1, v) - val vars' = map_filter dec vars - in (Thm.apply (Old_SMT_Utils.instT' cv q) (Thm.lambda cv ct), vars') end - - fun quant name = - Old_SMT_Utils.mk_const_pat @{theory} name (Old_SMT_Utils.destT1 o Old_SMT_Utils.destT1) - val forall = quant @{const_name All} - val exists = quant @{const_name Ex} -in - -fun mk_quant is_forall ctxt = - fold_rev (mk_quant1 ctxt (if is_forall then forall else exists)) - -end - -local - fun prep (ct, vars) (maxidx, all_vars) = - let - val maxidx' = maxidx + Thm.maxidx_of_cterm ct + 1 - - fun part (i, cv) = - (case AList.lookup (op =) all_vars i of - SOME cu => apfst (if cu aconvc cv then I else cons (cv, cu)) - | NONE => - let val cv' = Thm.incr_indexes_cterm maxidx cv - in apfst (cons (cv, cv')) #> apsnd (cons (i, cv')) end) - - val (inst, vars') = - if null vars then ([], vars) - else fold part vars ([], []) - - in - (Thm.instantiate_cterm ([], map (apfst (dest_Var o Thm.term_of)) inst) ct, - (maxidx', vars' @ all_vars)) - end -in -fun mk_fun f ts = - let val (cts, (_, vars)) = fold_map prep ts (0, []) - in f cts |> Option.map (rpair vars) end -end - - - -(* proof parser *) - -datatype proof_step = Proof_Step of { - rule: rule, - args: cterm list, - prems: int list, - prop: cterm } - - -(** parser context **) - -val not_false = Thm.cterm_of @{context} (@{const Not} $ @{const False}) - -fun make_context ctxt typs terms = - let - val ctxt' = - ctxt - |> Symtab.fold (Variable.declare_typ o snd) typs - |> Symtab.fold (Variable.declare_term o snd) terms - - fun cert @{const True} = not_false - | cert t = Thm.cterm_of ctxt' t - - in (typs, Symtab.map (K cert) terms, Inttab.empty, [], [], ctxt') end - -fun fresh_name n (typs, terms, exprs, steps, vars, ctxt) = - let val (n', ctxt') = yield_singleton Variable.variant_fixes n ctxt - in (n', (typs, terms, exprs, steps, vars, ctxt')) end - -fun context_of (_, _, _, _, _, ctxt) = ctxt - -fun add_decl (n, T) (cx as (_, terms, _, _, _, _)) = - (case Symtab.lookup terms n of - SOME _ => cx - | NONE => cx |> fresh_name (decl_prefix ^ n) - |> (fn (m, (typs, terms, exprs, steps, vars, ctxt)) => - let - val upd = Symtab.update (n, Thm.cterm_of ctxt (Free (m, T))) - in (typs, upd terms, exprs, steps, vars, ctxt) end)) - -fun mk_typ (typs, _, _, _, _, ctxt) (s as Old_Z3_Interface.Sym (n, _)) = - (case Old_Z3_Interface.mk_builtin_typ ctxt s of - SOME T => SOME T - | NONE => Symtab.lookup typs n) - -fun mk_num (_, _, _, _, _, ctxt) (i, T) = - mk_fun (K (Old_Z3_Interface.mk_builtin_num ctxt i T)) [] - -fun mk_app (_, terms, _, _, _, ctxt) (s as Old_Z3_Interface.Sym (n, _), es) = - mk_fun (fn cts => - (case Old_Z3_Interface.mk_builtin_fun ctxt s cts of - SOME ct => SOME ct - | NONE => - Symtab.lookup terms n 
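(* The parser context above hands out fresh names (fresh_name together with the
   "sk" and "v" prefixes) so that skolem constants and pseudo-schematic
   variables never clash with names that are already in use.  A standalone SML
   sketch of such a name supply that threads the set of used names; fresh and
   the "sk" example are illustrative. *)
fun fresh base used =
  let
    fun candidate i = base ^ Int.toString i
    fun find i =
      if List.exists (fn n => n = candidate i) used then find (i + 1) else candidate i
    val name = find 0
  in (name, name :: used) end

val (sk_a, used1) = fresh "sk" []       (* ("sk0", ["sk0"]) *)
val (sk_b, used2) = fresh "sk" used1    (* ("sk1", ["sk1", "sk0"]) *)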
|> Option.map (Drule.list_comb o rpair cts))) es - -fun add_expr k t (typs, terms, exprs, steps, vars, ctxt) = - (typs, terms, Inttab.update (k, t) exprs, steps, vars, ctxt) - -fun lookup_expr (_, _, exprs, _, _, _) = Inttab.lookup exprs - -fun add_proof_step k ((r, args), prop) cx = - let - val (typs, terms, exprs, steps, vars, ctxt) = cx - val (ct, vs) = close ctxt prop - fun part (SOME e, _) (cts, ps) = (close ctxt e :: cts, ps) - | part (NONE, i) (cts, ps) = (cts, i :: ps) - val (args', prems) = fold (part o `(lookup_expr cx)) args ([], []) - val (cts, vss) = split_list args' - val step = Proof_Step {rule=r, args=rev cts, prems=rev prems, - prop = Old_SMT_Utils.mk_cprop ct} - val vars' = fold (union (op =)) (vs :: vss) vars - in (typs, terms, exprs, (k, step) :: steps, vars', ctxt) end - -fun finish (_, _, _, steps, vars, ctxt) = - let - fun coll (p as (k, Proof_Step {prems, rule, prop, ...})) (ars, ps, ids) = - (case rule of - Asserted => ((k, prop) :: ars, ps, ids) - | Goal => ((k, prop) :: ars, ps, ids) - | _ => - if Inttab.defined ids k then - (ars, p :: ps, fold (Inttab.update o rpair ()) prems ids) - else (ars, ps, ids)) - - val (ars, steps', _) = fold coll steps ([], [], Inttab.make [(~1, ())]) - in (ars, steps', vars, ctxt) end - - -(** core parser **) - -fun parse_exn line_no msg = raise Old_SMT_patch_Failure.SMT (Old_SMT_patch_Failure.Other_Failure - ("Z3 proof parser (line " ^ string_of_int line_no ^ "): " ^ msg)) - -fun scan_exn msg ((line_no, _), _) = parse_exn line_no msg - -fun with_info f cx = - (case f ((NONE, 1), cx) of - ((SOME _, _), cx') => cx' - | ((_, line_no), _) => parse_exn line_no "bad proof") - -fun parse_line _ _ (st as ((SOME _, _), _)) = st - | parse_line scan line ((_, line_no), cx) = - let val st = ((line_no, cx), raw_explode line) - in - (case Scan.catch (Scan.finite' Symbol.stopper (Scan.option scan)) st of - (SOME r, ((_, cx'), _)) => ((r, line_no+1), cx') - | (NONE, _) => parse_exn line_no ("bad proof line: " ^ quote line)) - end - -fun with_context f x ((line_no, cx), st) = - let val (y, cx') = f x cx - in (y, ((line_no, cx'), st)) end - - -fun lookup_context f x (st as ((_, cx), _)) = (f cx x, st) - - -(** parser combinators and parsers for basic entities **) - -fun $$ s = Scan.lift (Scan.$$ s) -fun this s = Scan.lift (Scan.this_string s) -val is_blank = Symbol.is_ascii_blank -fun blank st = Scan.lift (Scan.many1 is_blank) st -fun sep scan = blank |-- scan -fun seps scan = Scan.repeat (sep scan) -fun seps1 scan = Scan.repeat1 (sep scan) -fun seps_by scan_sep scan = scan ::: Scan.repeat (scan_sep |-- scan) - -val lpar = "(" and rpar = ")" -val lbra = "[" and rbra = "]" -fun par scan = $$ lpar |-- scan --| $$ rpar -fun bra scan = $$ lbra |-- scan --| $$ rbra - -val digit = (fn - "0" => SOME 0 | "1" => SOME 1 | "2" => SOME 2 | "3" => SOME 3 | - "4" => SOME 4 | "5" => SOME 5 | "6" => SOME 6 | "7" => SOME 7 | - "8" => SOME 8 | "9" => SOME 9 | _ => NONE) - -fun digits st = (Scan.lift (Scan.many1 Symbol.is_ascii_digit) >> implode) st - -fun nat_num st = (Scan.lift (Scan.repeat1 (Scan.some digit)) >> (fn ds => - fold (fn d => fn i => i * 10 + d) ds 0)) st - -fun int_num st = (Scan.optional ($$ "-" >> K (fn i => ~i)) I :|-- - (fn sign => nat_num >> sign)) st - -val is_char = Symbol.is_ascii_letter orf Symbol.is_ascii_digit orf - member (op =) (raw_explode "_+*-/%~=<>$&|?!.@^#") - -fun name st = (Scan.lift (Scan.many1 is_char) >> implode) st - -fun sym st = (name -- - Scan.optional (bra (seps_by ($$ ":") sym)) [] >> Old_Z3_Interface.Sym) st - -fun id st = 
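(* The proof-text parser above is built from scanner combinators: a parser
   consumes a prefix of the input and yields a result together with the rest,
   "--" sequences two parsers and ">>" post-processes a result.  A
   self-contained SML sketch of that style over character lists; lit plays the
   role of $$, and lit, digit and node_id are names local to this sketch. *)
type 'a parser = char list -> ('a * char list) option

infix -- >>

fun lit c [] = NONE
  | lit c (x :: xs) = if x = c then SOME (c, xs) else NONE

fun (p -- q) s =
  (case p s of
     NONE => NONE
   | SOME (a, s') =>
       (case q s' of
          NONE => NONE
        | SOME (b, s'') => SOME ((a, b), s'')))

fun (p >> f) s =
  (case p s of
     NONE => NONE
   | SOME (a, s') => SOME (f a, s'))

fun digit [] = NONE
  | digit (x :: xs) =
      if Char.isDigit x then SOME (Char.ord x - Char.ord #"0", xs) else NONE

(* a single-digit rendering of the id parser, $$ "#" |-- nat_num *)
val node_id : int parser = (lit #"#" -- digit) >> (fn (_, n) => n)
val demo_id = node_id (String.explode "#7:rest")   (* SOME (7, [#":", ...]) *)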
($$ "#" |-- nat_num) st - - -(** parsers for various parts of Z3 proofs **) - -fun sort st = Scan.first [ - this "array" |-- bra (sort --| $$ ":" -- sort) >> (op -->), - par (this "->" |-- seps1 sort) >> ((op --->) o split_last), - sym :|-- (fn s as Old_Z3_Interface.Sym (n, _) => lookup_context mk_typ s :|-- (fn - SOME T => Scan.succeed T - | NONE => scan_exn ("unknown sort: " ^ quote n)))] st - -fun bound st = (par (this ":var" |-- sep nat_num -- sep sort) :|-- - lookup_context (mk_bound o context_of)) st - -fun numb (n as (i, _)) = lookup_context mk_num n :|-- (fn - SOME n' => Scan.succeed n' - | NONE => scan_exn ("unknown number: " ^ quote (string_of_int i))) - -fun appl (app as (Old_Z3_Interface.Sym (n, _), _)) = - lookup_context mk_app app :|-- (fn - SOME app' => Scan.succeed app' - | NONE => scan_exn ("unknown function symbol: " ^ quote n)) - -fun bv_size st = (digits >> (fn sz => - Old_Z3_Interface.Sym ("bv", [Old_Z3_Interface.Sym (sz, [])]))) st - -fun bv_number_sort st = (bv_size :|-- lookup_context mk_typ :|-- (fn - SOME cT => Scan.succeed cT - | NONE => scan_exn ("unknown sort: " ^ quote "bv"))) st - -fun bv_number st = - (this "bv" |-- bra (nat_num --| $$ ":" -- bv_number_sort) :|-- numb) st - -fun frac_number st = ( - int_num --| $$ "/" -- int_num --| this "::" -- sort :|-- (fn ((i, j), T) => - numb (i, T) -- numb (j, T) :|-- (fn (n, m) => - appl (Old_Z3_Interface.Sym ("/", []), [n, m])))) st - -fun plain_number st = (int_num --| this "::" -- sort :|-- numb) st - -fun number st = Scan.first [bv_number, frac_number, plain_number] st - -fun constant st = ((sym >> rpair []) :|-- appl) st - -fun expr_id st = (id :|-- (fn i => lookup_context lookup_expr i :|-- (fn - SOME e => Scan.succeed e - | NONE => scan_exn ("unknown term id: " ^ quote (string_of_int i))))) st - -fun arg st = Scan.first [expr_id, number, constant] st - -fun application st = par ((sym -- Scan.repeat1 (sep arg)) :|-- appl) st - -fun variables st = par (this "vars" |-- seps1 (par (name |-- sep sort))) st - -fun pats st = seps (par ((this ":pat" || this ":nopat") |-- seps1 id)) st - -val ctrue = Thm.cterm_of @{context} @{const True} - -fun pattern st = par (this "pattern" |-- Scan.repeat1 (sep arg) >> - (the o mk_fun (K (SOME ctrue)))) st - -fun quant_kind st = st |> ( - this "forall" >> K (mk_quant true o context_of) || - this "exists" >> K (mk_quant false o context_of)) - -fun quantifier st = - (par (quant_kind -- sep variables --| pats -- sep arg) :|-- - lookup_context (fn cx => fn ((mk_q, Ts), body) => mk_q cx Ts body)) st - -fun expr k = - Scan.first [bound, quantifier, pattern, application, number, constant] :|-- - with_context (pair NONE oo add_expr k) - -val rule_arg = id - (* if this is modified, then 'th_lemma_arg' needs reviewing *) - -fun th_lemma_arg st = Scan.unless (sep rule_arg >> K "" || $$ rbra) (sep name) st - -fun rule_name st = ((name >> `(Symtab.lookup rule_names)) :|-- (fn - (SOME (Th_Lemma _), _) => Scan.repeat th_lemma_arg >> Th_Lemma - | (SOME r, _) => Scan.succeed r - | (NONE, n) => scan_exn ("unknown proof rule: " ^ quote n))) st - -fun rule f k = - bra (rule_name -- seps id) --| $$ ":" -- sep arg #-> - with_context (pair (f k) oo add_proof_step k) - -fun decl st = (this "decl" |-- sep name --| sep (this "::") -- sep sort :|-- - with_context (pair NONE oo add_decl)) st - -fun def st = (id --| sep (this ":=")) st - -fun node st = st |> ( - decl || - def :|-- (fn k => sep (expr k) || sep (rule (K NONE) k)) || - rule SOME ~1) - - -(** overall parser **) - -(* - Currently, terms are parsed 
bottom-up (i.e., along with parsing the proof - text line by line), but proofs are reconstructed top-down (i.e. by an - in-order top-down traversal of the proof tree/graph). The latter approach - was taken because some proof texts comprise irrelevant proof steps which - will thus not be reconstructed. This approach might also be beneficial - for constructing terms, but it would also increase the complexity of the - (otherwise rather modular) code. -*) - -fun parse ctxt typs terms proof_text = - make_context ctxt typs terms - |> with_info (fold (parse_line node) proof_text) - |> finish - -end diff --git a/src/test-gen/src/main/smt_patch/old_z3_proof_reconstruction_patch.ML b/src/test-gen/src/main/smt_patch/old_z3_proof_reconstruction_patch.ML deleted file mode 100644 index 4f9a81f..0000000 --- a/src/test-gen/src/main/smt_patch/old_z3_proof_reconstruction_patch.ML +++ /dev/null @@ -1,891 +0,0 @@ -(* Title: HOL/Library/Old_SMT/old_z3_proof_reconstruction.ML - Author: Sascha Boehme, TU Muenchen - -Proof reconstruction for proofs found by Z3. -*) - -signature OLD_Z3_PROOF_RECONSTRUCTION = -sig - val add_z3_rule: thm -> Context.generic -> Context.generic - val reconstruct: Proof.context -> Old_SMT_Translate.recon -> string list -> int list * thm -end - -structure Old_Z3_Proof_Reconstruction: OLD_Z3_PROOF_RECONSTRUCTION = -struct - - -fun z3_exn msg = raise Old_SMT_patch_Failure.SMT (Old_SMT_patch_Failure.Other_Failure - ("Z3 proof reconstruction: " ^ msg)) - - - -(* net of schematic rules *) - -local - val description = "declaration of Z3 proof rules" - - val eq = Thm.eq_thm - - structure Old_Z3_Rules = Generic_Data - ( - type T = thm Net.net - val empty = Net.empty - val extend = I - val merge = Net.merge eq - ) - - fun prep context = - `Thm.prop_of o rewrite_rule (Context.proof_of context) [Old_Z3_Proof_Literals.rewrite_true] - - fun ins thm context = - context |> Old_Z3_Rules.map (fn net => Net.insert_term eq (prep context thm) net handle Net.INSERT => net) - fun rem thm context = - context |> Old_Z3_Rules.map (fn net => Net.delete_term eq (prep context thm) net handle Net.DELETE => net) - - val add = Thm.declaration_attribute ins - val del = Thm.declaration_attribute rem -in - -val add_z3_rule = ins - -fun by_schematic_rule ctxt ct = - the (Old_Z3_Proof_Tools.net_instance (Old_Z3_Rules.get (Context.Proof ctxt)) ct) - -val _ = Theory.setup - (Attrib.setup @{binding old_z3_rule} (Attrib.add_del add del) description #> - Global_Theory.add_thms_dynamic (@{binding old_z3_rule}, Net.content o Old_Z3_Rules.get)) - -end - - - -(* proof tools *) - -fun named ctxt name prover ct = - let val _ = Old_SMT_Config.trace_msg ctxt I ("Z3: trying " ^ name ^ " ...") - in prover ct end - -fun NAMED ctxt name tac i st = - let val _ = Old_SMT_Config.trace_msg ctxt I ("Z3: trying " ^ name ^ " ...") - in tac i st end - -fun pretty_goal ctxt thms t = - [Pretty.block [Pretty.str "proposition: ", Syntax.pretty_term ctxt t]] - |> not (null thms) ? 
cons (Pretty.big_list "assumptions:" - (map (Thm.pretty_thm ctxt) thms)) - -fun try_apply ctxt thms = - let - fun try_apply_err ct = Pretty.string_of (Pretty.chunks [ - Pretty.big_list ("Z3 found a proof," ^ - " but proof reconstruction failed at the following subgoal:") - (pretty_goal ctxt thms (Thm.term_of ct)), - Pretty.str ("Declaring a rule as [old_z3_rule] might solve this problem.")]) - - fun apply [] ct = error (try_apply_err ct) - | apply (prover :: provers) ct = - (case try prover ct of - SOME thm => (Old_SMT_Config.trace_msg ctxt I "Z3: succeeded"; thm) - | NONE => apply provers ct) - - fun schematic_label full = "schematic rules" |> full ? suffix " (full)" - fun schematic ctxt full ct = - ct - |> full ? fold_rev (curry Drule.mk_implies o Thm.cprop_of) thms - |> named ctxt (schematic_label full) (by_schematic_rule ctxt) - |> fold Thm.elim_implies thms - - in apply o cons (schematic ctxt false) o cons (schematic ctxt true) end - -local - val rewr_if = - @{lemma "(if P then Q1 else Q2) = ((P --> Q1) & (~P --> Q2))" by simp} -in - -fun HOL_fast_tac ctxt = - Classical.fast_tac (put_claset HOL_cs ctxt) - -fun simp_fast_tac ctxt = - Simplifier.simp_tac (put_simpset HOL_ss ctxt addsimps [rewr_if]) - THEN_ALL_NEW HOL_fast_tac ctxt - -end - - - -(* theorems and proofs *) - -(** theorem incarnations **) - -datatype theorem = - Thm of thm | (* theorem without special features *) - MetaEq of thm | (* meta equality "t == s" *) - Literals of thm * Old_Z3_Proof_Literals.littab - (* "P1 & ... & Pn" and table of all literals P1, ..., Pn *) - -fun thm_of (Thm thm) = thm - | thm_of (MetaEq thm) = thm COMP @{thm meta_eq_to_obj_eq} - | thm_of (Literals (thm, _)) = thm - -fun meta_eq_of (MetaEq thm) = thm - | meta_eq_of p = mk_meta_eq (thm_of p) - -fun literals_of (Literals (_, lits)) = lits - | literals_of p = Old_Z3_Proof_Literals.make_littab [thm_of p] - - - -(** core proof rules **) - -(* assumption *) - -local - val remove_trigger = mk_meta_eq @{thm trigger_def} - val remove_weight = mk_meta_eq @{thm weight_def} - val remove_fun_app = mk_meta_eq @{thm fun_app_def} - - fun rewrite_conv _ [] = Conv.all_conv - | rewrite_conv ctxt eqs = Simplifier.full_rewrite (empty_simpset ctxt addsimps eqs) - - val prep_rules = [@{thm Let_def}, remove_trigger, remove_weight, - remove_fun_app, Old_Z3_Proof_Literals.rewrite_true] - - fun rewrite _ [] = I - | rewrite ctxt eqs = Conv.fconv_rule (rewrite_conv ctxt eqs) - - fun lookup_assm assms_net ct = - Old_Z3_Proof_Tools.net_instances assms_net ct - |> map (fn ithm as (_, thm) => (ithm, Thm.cprop_of thm aconvc ct)) -in - -fun add_asserted outer_ctxt rewrite_rules assms asserted ctxt = - let - val eqs = map (rewrite ctxt [Old_Z3_Proof_Literals.rewrite_true]) rewrite_rules - val eqs' = union Thm.eq_thm eqs prep_rules - - val assms_net = - assms - |> map (apsnd (rewrite ctxt eqs')) - |> map (apsnd (Conv.fconv_rule Thm.eta_conversion)) - |> Old_Z3_Proof_Tools.thm_net_of snd - - fun revert_conv ctxt = rewrite_conv ctxt eqs' then_conv Thm.eta_conversion - - fun assume thm ctxt = - let - val ct = Thm.cprem_of thm 1 - val (thm', ctxt') = yield_singleton Assumption.add_assumes ct ctxt - in (Thm.implies_elim thm thm', ctxt') end - - fun add1 idx thm1 ((i, th), exact) ((is, thms), (ctxt, ptab)) = - let - val (thm, ctxt') = - if exact then (Thm.implies_elim thm1 th, ctxt) - else assume thm1 ctxt - val thms' = if exact then thms else th :: thms - in - ((insert (op =) i is, thms'), - (ctxt', Inttab.update (idx, Thm thm) ptab)) - end - - fun add (idx, ct) (cx as ((is, thms), 
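(* try_apply above runs a list of candidate provers on a goal and keeps the
   first one that succeeds, where Isabelle/ML's "try" turns an exception into
   NONE.  A standalone SML sketch of that first-success strategy; try_first
   and the two example "provers" are illustrative. *)
fun try f x = SOME (f x) handle _ => NONE

fun try_first [] _ = raise Fail "no prover succeeded"
  | try_first (prover :: provers) x =
      (case try prover x of
         SOME result => result
       | NONE => try_first provers x)

val demo_try =
  try_first [fn s => valOf (Int.fromString s), fn _ => 0] "not a number"
(* the first candidate raises Option, so the fallback yields 0 *)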
(ctxt, ptab))) = - let - val thm1 = - Thm.trivial ct - |> Conv.fconv_rule (Conv.arg1_conv (revert_conv outer_ctxt)) - val thm2 = singleton (Variable.export ctxt outer_ctxt) thm1 - in - (case lookup_assm assms_net (Thm.cprem_of thm2 1) of - [] => - let val (thm, ctxt') = assume thm1 ctxt - in ((is, thms), (ctxt', Inttab.update (idx, Thm thm) ptab)) end - | ithms => fold (add1 idx thm1) ithms cx) - end - in fold add asserted (([], []), (ctxt, Inttab.empty)) end - -end - - -(* P = Q ==> P ==> Q or P --> Q ==> P ==> Q *) -local - val precomp = Old_Z3_Proof_Tools.precompose2 - val comp = Old_Z3_Proof_Tools.compose - - val meta_iffD1 = @{lemma "P == Q ==> P ==> (Q::bool)" by simp} - val meta_iffD1_c = precomp Thm.dest_binop meta_iffD1 - - val iffD1_c = precomp (Thm.dest_binop o Thm.dest_arg) @{thm iffD1} - val mp_c = precomp (Thm.dest_binop o Thm.dest_arg) @{thm mp} -in -fun mp (MetaEq thm) p = Thm (Thm.implies_elim (comp meta_iffD1_c thm) p) - | mp p_q p = - let - val pq = thm_of p_q - val thm = comp iffD1_c pq handle THM _ => comp mp_c pq - in Thm (Thm.implies_elim thm p) end -end - - -(* and_elim: P1 & ... & Pn ==> Pi *) -(* not_or_elim: ~(P1 | ... | Pn) ==> ~Pi *) -local - fun is_sublit conj t = Old_Z3_Proof_Literals.exists_lit conj (fn u => u aconv t) - - fun derive conj t lits idx ptab = - let - val lit = the (Old_Z3_Proof_Literals.get_first_lit (is_sublit conj t) lits) - val ls = Old_Z3_Proof_Literals.explode conj false false [t] lit - val lits' = fold Old_Z3_Proof_Literals.insert_lit ls - (Old_Z3_Proof_Literals.delete_lit lit lits) - - fun upd thm = Literals (thm_of thm, lits') - val ptab' = Inttab.map_entry idx upd ptab - in (the (Old_Z3_Proof_Literals.lookup_lit lits' t), ptab') end - - fun lit_elim conj (p, idx) ct ptab = - let val lits = literals_of p - in - (case Old_Z3_Proof_Literals.lookup_lit lits (Old_SMT_Utils.term_of ct) of - SOME lit => (Thm lit, ptab) - | NONE => apfst Thm (derive conj (Old_SMT_Utils.term_of ct) lits idx ptab)) - end -in -val and_elim = lit_elim true -val not_or_elim = lit_elim false -end - - -(* P1, ..., Pn |- False ==> |- ~P1 | ... 
| ~Pn *) -local - fun step lit thm = - Thm.implies_elim (Thm.implies_intr (Thm.cprop_of lit) thm) lit - val explode_disj = Old_Z3_Proof_Literals.explode false false false - fun intro hyps thm th = fold step (explode_disj hyps th) thm - - fun dest_ccontr ct = [Thm.dest_arg (Thm.dest_arg (Thm.dest_arg1 ct))] - val ccontr = Old_Z3_Proof_Tools.precompose dest_ccontr @{thm ccontr} -in -fun lemma thm ct = - let - val cu = Old_Z3_Proof_Literals.negate (Thm.dest_arg ct) - val hyps = map_filter (try HOLogic.dest_Trueprop) (Thm.hyps_of thm) - val th = Old_Z3_Proof_Tools.under_assumption (intro hyps thm) cu - in Thm (Old_Z3_Proof_Tools.compose ccontr th) end -end - - -(* \/{P1, ..., Pn, Q1, ..., Qn}, ~P1, ..., ~Pn ==> \/{Q1, ..., Qn} *) -local - val explode_disj = Old_Z3_Proof_Literals.explode false true false - val join_disj = Old_Z3_Proof_Literals.join false - fun unit thm thms th = - let - val t = @{const Not} $ Old_SMT_Utils.prop_of thm - val ts = map Old_SMT_Utils.prop_of thms - in - join_disj (Old_Z3_Proof_Literals.make_littab (thms @ explode_disj ts th)) t - end - - fun dest_arg2 ct = Thm.dest_arg (Thm.dest_arg ct) - fun dest ct = apply2 dest_arg2 (Thm.dest_binop ct) - val contrapos = - Old_Z3_Proof_Tools.precompose2 dest @{lemma "(~P ==> ~Q) ==> Q ==> P" by fast} -in -fun unit_resolution thm thms ct = - Old_Z3_Proof_Literals.negate (Thm.dest_arg ct) - |> Old_Z3_Proof_Tools.under_assumption (unit thm thms) - |> Thm o Old_Z3_Proof_Tools.discharge thm o Old_Z3_Proof_Tools.compose contrapos -end - - -(* P ==> P == True or P ==> P == False *) -local - val iff1 = @{lemma "P ==> P == (~ False)" by simp} - val iff2 = @{lemma "~P ==> P == False" by simp} -in -fun iff_true thm = MetaEq (thm COMP iff1) -fun iff_false thm = MetaEq (thm COMP iff2) -end - - -(* distributivity of | over & *) -fun distributivity ctxt = Thm o try_apply ctxt [] [ - named ctxt "fast" (Old_Z3_Proof_Tools.by_tac ctxt (HOL_fast_tac ctxt))] - (* FIXME: not very well tested *) - - -(* Tseitin-like axioms *) -local - val disjI1 = @{lemma "(P ==> Q) ==> ~P | Q" by fast} - val disjI2 = @{lemma "(~P ==> Q) ==> P | Q" by fast} - val disjI3 = @{lemma "(~Q ==> P) ==> P | Q" by fast} - val disjI4 = @{lemma "(Q ==> P) ==> P | ~Q" by fast} - - fun prove' conj1 conj2 ct2 thm = - let - val littab = - Old_Z3_Proof_Literals.explode conj1 true (conj1 <> conj2) [] thm - |> cons Old_Z3_Proof_Literals.true_thm - |> Old_Z3_Proof_Literals.make_littab - in Old_Z3_Proof_Literals.join conj2 littab (Thm.term_of ct2) end - - fun prove rule (ct1, conj1) (ct2, conj2) = - Old_Z3_Proof_Tools.under_assumption (prove' conj1 conj2 ct2) ct1 COMP rule - - fun prove_def_axiom ct = - let val (ct1, ct2) = Thm.dest_binop (Thm.dest_arg ct) - in - (case Thm.term_of ct1 of - @{const Not} $ (@{const HOL.conj} $ _ $ _) => - prove disjI1 (Thm.dest_arg ct1, true) (ct2, true) - | @{const HOL.conj} $ _ $ _ => - prove disjI3 (Old_Z3_Proof_Literals.negate ct2, false) (ct1, true) - | @{const Not} $ (@{const HOL.disj} $ _ $ _) => - prove disjI3 (Old_Z3_Proof_Literals.negate ct2, false) (ct1, false) - | @{const HOL.disj} $ _ $ _ => - prove disjI2 (Old_Z3_Proof_Literals.negate ct1, false) (ct2, true) - | Const (@{const_name distinct}, _) $ _ => - let - fun dis_conv cv = Conv.arg_conv (Conv.arg1_conv cv) - val unfold_dis_conv = dis_conv Old_Z3_Proof_Tools.unfold_distinct_conv - fun prv cu = - let val (cu1, cu2) = Thm.dest_binop (Thm.dest_arg cu) - in prove disjI4 (Thm.dest_arg cu2, true) (cu1, true) end - in Old_Z3_Proof_Tools.with_conv unfold_dis_conv prv ct end - | @{const Not} $ 
(Const (@{const_name distinct}, _) $ _) => - let - fun dis_conv cv = Conv.arg_conv (Conv.arg1_conv (Conv.arg_conv cv)) - val unfold_dis_conv = dis_conv Old_Z3_Proof_Tools.unfold_distinct_conv - fun prv cu = - let val (cu1, cu2) = Thm.dest_binop (Thm.dest_arg cu) - in prove disjI1 (Thm.dest_arg cu1, true) (cu2, true) end - in Old_Z3_Proof_Tools.with_conv unfold_dis_conv prv ct end - | _ => raise CTERM ("prove_def_axiom", [ct])) - end -in -fun def_axiom ctxt = Thm o try_apply ctxt [] [ - named ctxt "conj/disj/distinct" prove_def_axiom, - Old_Z3_Proof_Tools.by_abstraction 0 (true, false) ctxt [] (fn ctxt' => - named ctxt' "simp+fast" (Old_Z3_Proof_Tools.by_tac ctxt (simp_fast_tac ctxt')))] -end - - -(* local definitions *) -local - val intro_rules = [ - @{lemma "n == P ==> (~n | P) & (n | ~P)" by simp}, - @{lemma "n == (if P then s else t) ==> (~P | n = s) & (P | n = t)" - by simp}, - @{lemma "n == P ==> n = P" by (rule meta_eq_to_obj_eq)} ] - - val apply_rules = [ - @{lemma "(~n | P) & (n | ~P) ==> P == n" by (atomize(full)) fast}, - @{lemma "(~P | n = s) & (P | n = t) ==> (if P then s else t) == n" - by (atomize(full)) fastforce} ] - - val inst_rule = Old_Z3_Proof_Tools.match_instantiate Thm.dest_arg - - fun apply_rule ct = - (case get_first (try (inst_rule ct)) intro_rules of - SOME thm => thm - | NONE => raise CTERM ("intro_def", [ct])) -in -fun intro_def ct = Old_Z3_Proof_Tools.make_hyp_def (apply_rule ct) #>> Thm - -fun apply_def thm = - get_first (try (fn rule => MetaEq (thm COMP rule))) apply_rules - |> the_default (Thm thm) -end - - -(* negation normal form *) -local - val quant_rules1 = ([ - @{lemma "(!!x. P x == Q) ==> ALL x. P x == Q" by simp}, - @{lemma "(!!x. P x == Q) ==> EX x. P x == Q" by simp}], [ - @{lemma "(!!x. P x == Q x) ==> ALL x. P x == ALL x. Q x" by simp}, - @{lemma "(!!x. P x == Q x) ==> EX x. P x == EX x. Q x" by simp}]) - - val quant_rules2 = ([ - @{lemma "(!!x. ~P x == Q) ==> ~(ALL x. P x) == Q" by simp}, - @{lemma "(!!x. ~P x == Q) ==> ~(EX x. P x) == Q" by simp}], [ - @{lemma "(!!x. ~P x == Q x) ==> ~(ALL x. P x) == EX x. Q x" by simp}, - @{lemma "(!!x. ~P x == Q x) ==> ~(EX x. P x) == ALL x. 
Q x" by simp}]) - - fun nnf_quant_tac ctxt thm (qs as (qs1, qs2)) i st = ( - resolve_tac ctxt [thm] ORELSE' - (match_tac ctxt qs1 THEN' nnf_quant_tac ctxt thm qs) ORELSE' - (match_tac ctxt qs2 THEN' nnf_quant_tac ctxt thm qs)) i st - - fun nnf_quant_tac_varified ctxt vars eq = - nnf_quant_tac ctxt (Old_Z3_Proof_Tools.varify vars eq) - - fun nnf_quant ctxt vars qs p ct = - Old_Z3_Proof_Tools.as_meta_eq ct - |> Old_Z3_Proof_Tools.by_tac ctxt (nnf_quant_tac_varified ctxt vars (meta_eq_of p) qs) - - fun prove_nnf ctxt = try_apply ctxt [] [ - named ctxt "conj/disj" Old_Z3_Proof_Literals.prove_conj_disj_eq, - Old_Z3_Proof_Tools.by_abstraction 0 (true, false) ctxt [] (fn ctxt' => - named ctxt' "simp+fast" (Old_Z3_Proof_Tools.by_tac ctxt' (simp_fast_tac ctxt')))] -in -fun nnf ctxt vars ps ct = - (case Old_SMT_Utils.term_of ct of - _ $ (l as Const _ $ Abs _) $ (r as Const _ $ Abs _) => - if l aconv r - then MetaEq (Thm.reflexive (Thm.dest_arg (Thm.dest_arg ct))) - else MetaEq (nnf_quant ctxt vars quant_rules1 (hd ps) ct) - | _ $ (@{const Not} $ (Const _ $ Abs _)) $ (Const _ $ Abs _) => - MetaEq (nnf_quant ctxt vars quant_rules2 (hd ps) ct) - | _ => - let - val nnf_rewr_conv = Conv.arg_conv (Conv.arg_conv - (Old_Z3_Proof_Tools.unfold_eqs ctxt - (map (Thm.symmetric o meta_eq_of) ps))) - in Thm (Old_Z3_Proof_Tools.with_conv nnf_rewr_conv (prove_nnf ctxt) ct) end) -end - - - -(** equality proof rules **) - -(* |- t = t *) -fun refl ct = MetaEq (Thm.reflexive (Thm.dest_arg (Thm.dest_arg ct))) - - -(* s = t ==> t = s *) -local - val symm_rule = @{lemma "s = t ==> t == s" by simp} -in -fun symm (MetaEq thm) = MetaEq (Thm.symmetric thm) - | symm p = MetaEq (thm_of p COMP symm_rule) -end - - -(* s = t ==> t = u ==> s = u *) -local - val trans1 = @{lemma "s == t ==> t = u ==> s == u" by simp} - val trans2 = @{lemma "s = t ==> t == u ==> s == u" by simp} - val trans3 = @{lemma "s = t ==> t = u ==> s == u" by simp} -in -fun trans (MetaEq thm1) (MetaEq thm2) = MetaEq (Thm.transitive thm1 thm2) - | trans (MetaEq thm) q = MetaEq (thm_of q COMP (thm COMP trans1)) - | trans p (MetaEq thm) = MetaEq (thm COMP (thm_of p COMP trans2)) - | trans p q = MetaEq (thm_of q COMP (thm_of p COMP trans3)) -end - - -(* t1 = s1 ==> ... ==> tn = sn ==> f t1 ... tn = f s1 .. 
sn - (reflexive antecendents are droppped) *) -local - exception MONO - - fun prove_refl (ct, _) = Thm.reflexive ct - fun prove_comb f g cp = - let val ((ct1, ct2), (cu1, cu2)) = apply2 Thm.dest_comb cp - in Thm.combination (f (ct1, cu1)) (g (ct2, cu2)) end - fun prove_arg f = prove_comb prove_refl f - - fun prove f cp = prove_comb (prove f) f cp handle CTERM _ => prove_refl cp - - fun prove_nary is_comb f = - let - fun prove (cp as (ct, _)) = f cp handle MONO => - if is_comb (Thm.term_of ct) - then prove_comb (prove_arg prove) prove cp - else prove_refl cp - in prove end - - fun prove_list f n cp = - if n = 0 then prove_refl cp - else prove_comb (prove_arg f) (prove_list f (n-1)) cp - - fun with_length f (cp as (cl, _)) = - f (length (HOLogic.dest_list (Thm.term_of cl))) cp - - fun prove_distinct f = prove_arg (with_length (prove_list f)) - - fun prove_eq exn lookup cp = - (case lookup (Logic.mk_equals (apply2 Thm.term_of cp)) of - SOME eq => eq - | NONE => if exn then raise MONO else prove_refl cp) - - val prove_exn = prove_eq true - and prove_safe = prove_eq false - - fun mono f (cp as (cl, _)) = - (case Term.head_of (Thm.term_of cl) of - @{const HOL.conj} => prove_nary Old_Z3_Proof_Literals.is_conj (prove_exn f) - | @{const HOL.disj} => prove_nary Old_Z3_Proof_Literals.is_disj (prove_exn f) - | Const (@{const_name distinct}, _) => prove_distinct (prove_safe f) - | _ => prove (prove_safe f)) cp -in -fun monotonicity eqs ct = - let - fun and_symmetric (t, thm) = [(t, thm), (t, Thm.symmetric thm)] - val teqs = maps (and_symmetric o `Thm.prop_of o meta_eq_of) eqs - val lookup = AList.lookup (op aconv) teqs - val cp = Thm.dest_binop (Thm.dest_arg ct) - in MetaEq (prove_exn lookup cp handle MONO => mono lookup cp) end -end - - -(* |- f a b = f b a (where f is equality) *) -local - val rule = @{lemma "a = b == b = a" by (atomize(full)) (rule eq_commute)} -in -fun commutativity ct = - MetaEq (Old_Z3_Proof_Tools.match_instantiate I - (Old_Z3_Proof_Tools.as_meta_eq ct) rule) -end - - - -(** quantifier proof rules **) - -(* P ?x = Q ?x ==> (ALL x. P x) = (ALL x. Q x) - P ?x = Q ?x ==> (EX x. P x) = (EX x. Q x) *) -local - val rules = [ - @{lemma "(!!x. P x == Q x) ==> (ALL x. P x) == (ALL x. Q x)" by simp}, - @{lemma "(!!x. P x == Q x) ==> (EX x. P x) == (EX x. Q x)" by simp}] -in -fun quant_intro ctxt vars p ct = - let - val thm = meta_eq_of p - val rules' = Old_Z3_Proof_Tools.varify vars thm :: rules - val cu = Old_Z3_Proof_Tools.as_meta_eq ct - val tac = REPEAT_ALL_NEW (match_tac ctxt rules') - in MetaEq (Old_Z3_Proof_Tools.by_tac ctxt tac cu) end -end - - -(* |- ((ALL x. P x) | Q) = (ALL x. P x | Q) *) -fun pull_quant ctxt = Thm o try_apply ctxt [] [ - named ctxt "fast" (Old_Z3_Proof_Tools.by_tac ctxt (HOL_fast_tac ctxt))] - (* FIXME: not very well tested *) - - -(* |- (ALL x. P x & Q x) = ((ALL x. P x) & (ALL x. Q x)) *) -fun push_quant ctxt = Thm o try_apply ctxt [] [ - named ctxt "fast" (Old_Z3_Proof_Tools.by_tac ctxt (HOL_fast_tac ctxt))] - (* FIXME: not very well tested *) - - -(* |- (ALL x1 ... xn y1 ... yn. P x1 ... xn) = (ALL x1 ... xn. P x1 ... xn) *) -local - val elim_all = @{lemma "P = Q ==> (ALL x. P) = Q" by fast} - val elim_ex = @{lemma "P = Q ==> (EX x. 
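(* The monotonicity rule above derives f t1 ... tn = f s1 ... sn from already
   proved equations ti = si by descending through the applications and falling
   back on reflexivity where no equation applies.  A standalone SML sketch of
   that bottom-up congruence rewriting over a toy term type; the term datatype
   and congruence are local to this sketch. *)
datatype term = V of string | A of term * term   (* variables and applications *)

fun congruence lookup t =
  (case lookup t of
     SOME u => u                                 (* a proved equation rewrites t *)
   | NONE =>
       (case t of
          V _ => t                               (* reflexivity *)
        | A (f, x) => A (congruence lookup f, congruence lookup x)))

val demo_cong =
  congruence (fn V "a" => SOME (V "b") | _ => NONE) (A (V "f", A (V "g", V "a")))
(* A (V "f", A (V "g", V "b")) *)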
P) = Q" by fast} - - fun elim_unused_tac ctxt i st = ( - match_tac ctxt [@{thm refl}] - ORELSE' (match_tac ctxt [elim_all, elim_ex] THEN' elim_unused_tac ctxt) - ORELSE' ( - match_tac ctxt [@{thm iff_allI}, @{thm iff_exI}] - THEN' elim_unused_tac ctxt)) i st -in - -fun elim_unused_vars ctxt = Thm o Old_Z3_Proof_Tools.by_tac ctxt (elim_unused_tac ctxt) - -end - - -(* |- (ALL x1 ... xn. ~(x1 = t1 & ... xn = tn) | P x1 ... xn) = P t1 ... tn *) -fun dest_eq_res ctxt = Thm o try_apply ctxt [] [ - named ctxt "fast" (Old_Z3_Proof_Tools.by_tac ctxt (HOL_fast_tac ctxt))] - (* FIXME: not very well tested *) - - -(* |- ~(ALL x1...xn. P x1...xn) | P a1...an *) -local - val rule = @{lemma "~ P x | Q ==> ~(ALL x. P x) | Q" by fast} -in -fun quant_inst ctxt = Thm o Old_Z3_Proof_Tools.by_tac ctxt ( - REPEAT_ALL_NEW (match_tac ctxt [rule]) - THEN' resolve_tac ctxt @{thms excluded_middle}) -end - - -(* |- (EX x. P x) = P c |- ~(ALL x. P x) = ~ P c *) -local - val forall = - Old_SMT_Utils.mk_const_pat @{theory} @{const_name Pure.all} - (Old_SMT_Utils.destT1 o Old_SMT_Utils.destT1) - fun mk_forall cv ct = - Thm.apply (Old_SMT_Utils.instT' cv forall) (Thm.lambda cv ct) - - fun get_vars f mk pred ctxt t = - Term.fold_aterms f t [] - |> map_filter (fn v => - if pred v then SOME (Thm.cterm_of ctxt (mk v)) else NONE) - - fun close vars f ct ctxt = - let - val frees_of = get_vars Term.add_frees Free (member (op =) vars o fst) - val vs = frees_of ctxt (Thm.term_of ct) - val (thm, ctxt') = f (fold_rev mk_forall vs ct) ctxt - val vars_of = get_vars Term.add_vars Var (K true) ctxt' - in - (Thm.instantiate ([], map (dest_Var o Thm.term_of) (vars_of (Thm.prop_of thm)) ~~ vs) thm, - ctxt') - end - - val sk_rules = @{lemma - "c = (SOME x. P x) ==> (EX x. P x) = P c" - "c = (SOME x. ~P x) ==> (~(ALL x. P x)) = (~P c)" - by (metis someI_ex)+} -in - -fun skolemize vars = - apfst Thm oo close vars (yield_singleton Assumption.add_assumes) - -fun discharge_sk_tac ctxt i st = ( - resolve_tac ctxt @{thms trans} i - THEN resolve_tac ctxt sk_rules i - THEN (resolve_tac ctxt @{thms refl} ORELSE' discharge_sk_tac ctxt) (i+1) - THEN resolve_tac ctxt @{thms refl} i) st - -end - - - -(** theory proof rules **) - -(* theory lemmas: linear arithmetic, arrays *) -fun th_lemma ctxt simpset thms = Thm o try_apply ctxt thms [ - Old_Z3_Proof_Tools.by_abstraction 0 (false, true) ctxt thms (fn ctxt' => - Old_Z3_Proof_Tools.by_tac ctxt' ( - NAMED ctxt' "arith" (Arith_Data.arith_tac ctxt') - ORELSE' NAMED ctxt' "simp+arith" ( - Simplifier.asm_full_simp_tac (put_simpset simpset ctxt') - THEN_ALL_NEW Arith_Data.arith_tac ctxt')))] - - -(* rewriting: prove equalities: - * ACI of conjunction/disjunction - * contradiction, excluded middle - * logical rewriting rules (for negation, implication, equivalence, - distinct) - * normal forms for polynoms (integer/real arithmetic) - * quantifier elimination over linear arithmetic - * ... ? 
**) -local - fun spec_meta_eq_of thm = - (case try (fn th => th RS @{thm spec}) thm of - SOME thm' => spec_meta_eq_of thm' - | NONE => mk_meta_eq thm) - - fun prep (Thm thm) = spec_meta_eq_of thm - | prep (MetaEq thm) = thm - | prep (Literals (thm, _)) = spec_meta_eq_of thm - - fun unfold_conv ctxt ths = - Conv.arg_conv (Conv.binop_conv (Old_Z3_Proof_Tools.unfold_eqs ctxt - (map prep ths))) - - fun with_conv _ [] prv = prv - | with_conv ctxt ths prv = - Old_Z3_Proof_Tools.with_conv (unfold_conv ctxt ths) prv - - val unfold_conv = - Conv.arg_conv (Conv.binop_conv - (Conv.try_conv Old_Z3_Proof_Tools.unfold_distinct_conv)) - val prove_conj_disj_eq = - Old_Z3_Proof_Tools.with_conv unfold_conv Old_Z3_Proof_Literals.prove_conj_disj_eq - - fun declare_hyps ctxt thm = - (thm, snd (Assumption.add_assumes (Thm.chyps_of thm) ctxt)) -in - -val abstraction_depth = 3 - (* - This value was chosen large enough to potentially catch exceptions, - yet small enough to not cause too much harm. The value might be - increased in the future, if reconstructing 'rewrite' fails on problems - that get too much abstracted to be reconstructable. - *) - -fun rewrite simpset ths ct ctxt = - apfst Thm (declare_hyps ctxt (with_conv ctxt ths (try_apply ctxt [] [ - named ctxt "conj/disj/distinct" prove_conj_disj_eq, - named ctxt "pull-ite" Old_Z3_Proof_Methods.prove_ite ctxt, - Old_Z3_Proof_Tools.by_abstraction 0 (true, false) ctxt [] (fn ctxt' => - Old_Z3_Proof_Tools.by_tac ctxt' ( - NAMED ctxt' "simp (logic)" (Simplifier.simp_tac (put_simpset simpset ctxt')) - THEN_ALL_NEW NAMED ctxt' "fast (logic)" (fast_tac ctxt'))), - Old_Z3_Proof_Tools.by_abstraction 0 (false, true) ctxt [] (fn ctxt' => - Old_Z3_Proof_Tools.by_tac ctxt' ( - (resolve_tac ctxt' @{thms iff_allI} ORELSE' K all_tac) - THEN' NAMED ctxt' "simp (theory)" (Simplifier.simp_tac (put_simpset simpset ctxt')) - THEN_ALL_NEW ( - NAMED ctxt' "fast (theory)" (HOL_fast_tac ctxt') - ORELSE' NAMED ctxt' "arith (theory)" (Arith_Data.arith_tac ctxt')))), - Old_Z3_Proof_Tools.by_abstraction 0 (true, true) ctxt [] (fn ctxt' => - Old_Z3_Proof_Tools.by_tac ctxt' ( - (resolve_tac ctxt' @{thms iff_allI} ORELSE' K all_tac) - THEN' NAMED ctxt' "simp (full)" (Simplifier.simp_tac (put_simpset simpset ctxt')) - THEN_ALL_NEW ( - NAMED ctxt' "fast (full)" (HOL_fast_tac ctxt') - ORELSE' NAMED ctxt' "arith (full)" (Arith_Data.arith_tac ctxt')))), - named ctxt "injectivity" (Old_Z3_Proof_Methods.prove_injectivity ctxt), - Old_Z3_Proof_Tools.by_abstraction abstraction_depth (true, true) ctxt [] - (fn ctxt' => - Old_Z3_Proof_Tools.by_tac ctxt' ( - (resolve_tac ctxt' @{thms iff_allI} ORELSE' K all_tac) - THEN' NAMED ctxt' "simp (deepen)" (Simplifier.simp_tac (put_simpset simpset ctxt')) - THEN_ALL_NEW ( - NAMED ctxt' "fast (deepen)" (HOL_fast_tac ctxt') - ORELSE' NAMED ctxt' "arith (deepen)" (Arith_Data.arith_tac - ctxt'))))]) ct)) - -end - - - -(* proof reconstruction *) - -(** tracing and checking **) - -fun trace_before ctxt idx = Old_SMT_Config.trace_msg ctxt (fn r => - "Z3: #" ^ string_of_int idx ^ ": " ^ Old_Z3_Proof_Parser.string_of_rule r) - -fun check_after idx r ps ct (p, (ctxt, _)) = - if not (Config.get ctxt Old_SMT_Config.trace) then () - else - let val thm = thm_of p |> tap (Thm.join_proofs o single) - in - if (Thm.cprop_of thm) aconvc ct then () - else - z3_exn (Pretty.string_of (Pretty.big_list - ("proof step failed: " ^ quote (Old_Z3_Proof_Parser.string_of_rule r) ^ - " (#" ^ string_of_int idx ^ ")") - (pretty_goal ctxt (map (thm_of o fst) ps) (Thm.prop_of thm) @ - 
[Pretty.block [Pretty.str "expected: ", - Syntax.pretty_term ctxt (Thm.term_of ct)]]))) - end - - -(** overall reconstruction procedure **) - -local - fun not_supported r = raise Fail ("Z3: proof rule not implemented: " ^ - quote (Old_Z3_Proof_Parser.string_of_rule r)) - - fun prove_step simpset vars r ps ct (cxp as (cx, ptab)) = - (case (r, ps) of - (* core rules *) - (Old_Z3_Proof_Parser.True_Axiom, _) => (Thm Old_Z3_Proof_Literals.true_thm, cxp) - | (Old_Z3_Proof_Parser.Asserted, _) => raise Fail "bad assertion" - | (Old_Z3_Proof_Parser.Goal, _) => raise Fail "bad assertion" - | (Old_Z3_Proof_Parser.Modus_Ponens, [(p, _), (q, _)]) => - (mp q (thm_of p), cxp) - | (Old_Z3_Proof_Parser.Modus_Ponens_Oeq, [(p, _), (q, _)]) => - (mp q (thm_of p), cxp) - | (Old_Z3_Proof_Parser.And_Elim, [(p, i)]) => - and_elim (p, i) ct ptab ||> pair cx - | (Old_Z3_Proof_Parser.Not_Or_Elim, [(p, i)]) => - not_or_elim (p, i) ct ptab ||> pair cx - | (Old_Z3_Proof_Parser.Hypothesis, _) => (Thm (Thm.assume ct), cxp) - | (Old_Z3_Proof_Parser.Lemma, [(p, _)]) => (lemma (thm_of p) ct, cxp) - | (Old_Z3_Proof_Parser.Unit_Resolution, (p, _) :: ps) => - (unit_resolution (thm_of p) (map (thm_of o fst) ps) ct, cxp) - | (Old_Z3_Proof_Parser.Iff_True, [(p, _)]) => (iff_true (thm_of p), cxp) - | (Old_Z3_Proof_Parser.Iff_False, [(p, _)]) => (iff_false (thm_of p), cxp) - | (Old_Z3_Proof_Parser.Distributivity, _) => (distributivity cx ct, cxp) - | (Old_Z3_Proof_Parser.Def_Axiom, _) => (def_axiom cx ct, cxp) - | (Old_Z3_Proof_Parser.Intro_Def, _) => intro_def ct cx ||> rpair ptab - | (Old_Z3_Proof_Parser.Apply_Def, [(p, _)]) => (apply_def (thm_of p), cxp) - | (Old_Z3_Proof_Parser.Iff_Oeq, [(p, _)]) => (p, cxp) - | (Old_Z3_Proof_Parser.Nnf_Pos, _) => (nnf cx vars (map fst ps) ct, cxp) - | (Old_Z3_Proof_Parser.Nnf_Neg, _) => (nnf cx vars (map fst ps) ct, cxp) - - (* equality rules *) - | (Old_Z3_Proof_Parser.Reflexivity, _) => (refl ct, cxp) - | (Old_Z3_Proof_Parser.Symmetry, [(p, _)]) => (symm p, cxp) - | (Old_Z3_Proof_Parser.Transitivity, [(p, _), (q, _)]) => (trans p q, cxp) - | (Old_Z3_Proof_Parser.Monotonicity, _) => (monotonicity (map fst ps) ct, cxp) - | (Old_Z3_Proof_Parser.Commutativity, _) => (commutativity ct, cxp) - - (* quantifier rules *) - | (Old_Z3_Proof_Parser.Quant_Intro, [(p, _)]) => (quant_intro cx vars p ct, cxp) - | (Old_Z3_Proof_Parser.Pull_Quant, _) => (pull_quant cx ct, cxp) - | (Old_Z3_Proof_Parser.Push_Quant, _) => (push_quant cx ct, cxp) - | (Old_Z3_Proof_Parser.Elim_Unused_Vars, _) => (elim_unused_vars cx ct, cxp) - | (Old_Z3_Proof_Parser.Dest_Eq_Res, _) => (dest_eq_res cx ct, cxp) - | (Old_Z3_Proof_Parser.Quant_Inst, _) => (quant_inst cx ct, cxp) - | (Old_Z3_Proof_Parser.Skolemize, _) => skolemize vars ct cx ||> rpair ptab - - (* theory rules *) - | (Old_Z3_Proof_Parser.Th_Lemma _, _) => (* FIXME: use arguments *) - (th_lemma cx simpset (map (thm_of o fst) ps) ct, cxp) - | (Old_Z3_Proof_Parser.Rewrite, _) => rewrite simpset [] ct cx ||> rpair ptab - | (Old_Z3_Proof_Parser.Rewrite_Star, ps) => - rewrite simpset (map fst ps) ct cx ||> rpair ptab - - | (Old_Z3_Proof_Parser.Nnf_Star, _) => not_supported r - | (Old_Z3_Proof_Parser.Cnf_Star, _) => not_supported r - | (Old_Z3_Proof_Parser.Transitivity_Star, _) => not_supported r - | (Old_Z3_Proof_Parser.Pull_Quant_Star, _) => not_supported r - - | _ => raise Fail ("Z3: proof rule " ^ - quote (Old_Z3_Proof_Parser.string_of_rule r) ^ - " has an unexpected number of arguments.")) - - fun lookup_proof ptab idx = - (case Inttab.lookup ptab idx of - SOME p => (p, 
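(* The reconstruction driver defined just below (prove and reconstruct) folds
   over the numbered proof steps, looks the premises of each step up by id in a
   table of already replayed results and records the new result there.  A
   standalone SML sketch of that shape, with string "judgments" instead of
   theorems and an association list instead of Inttab; replay and its record
   fields are illustrative. *)
type step = {id: int, prems: int list, rule: string}

fun replay (steps : step list) =
  let
    fun one ({id, prems, rule}, tab) =
      let
        fun premise i =
          (case List.find (fn (j, _) => j = i) tab of
             SOME (_, concl) => concl
           | NONE => raise Fail ("unknown proof id: " ^ Int.toString i))
        val concl = rule ^ "(" ^ String.concatWith ", " (map premise prems) ^ ")"
      in (id, concl) :: tab end
  in List.foldl one [] steps end

val demo_replay =
  replay [{id = 1, prems = [], rule = "asserted"},
          {id = 2, prems = [], rule = "asserted"},
          {id = 3, prems = [1, 2], rule = "mp"}]
(* the entry for id 3 is "mp(asserted(), asserted())" *)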
idx) - | NONE => z3_exn ("unknown proof id: " ^ quote (string_of_int idx))) - - fun prove simpset vars (idx, step) (_, cxp as (ctxt, ptab)) = - let - val Old_Z3_Proof_Parser.Proof_Step {rule=r, prems, prop, ...} = step - val ps = map (lookup_proof ptab) prems - val _ = trace_before ctxt idx r - val (thm, (ctxt', ptab')) = - cxp - |> prove_step simpset vars r ps prop - |> tap (check_after idx r ps prop) - in (thm, (ctxt', Inttab.update (idx, thm) ptab')) end - - fun make_discharge_rules rules = rules @ [@{thm allI}, @{thm refl}, - @{thm reflexive}, Old_Z3_Proof_Literals.true_thm] - - fun discharge_assms_tac ctxt rules = - REPEAT (HEADGOAL (resolve_tac ctxt rules ORELSE' SOLVED' (discharge_sk_tac ctxt))) - - fun discharge_assms ctxt rules thm = - if Thm.nprems_of thm = 0 then Goal.norm_result ctxt thm - else - (case Seq.pull (discharge_assms_tac ctxt rules thm) of - SOME (thm', _) => Goal.norm_result ctxt thm' - | NONE => raise THM ("failed to discharge premise", 1, [thm])) - - fun discharge rules outer_ctxt (p, (inner_ctxt, _)) = - thm_of p - |> singleton (Proof_Context.export inner_ctxt outer_ctxt) - |> discharge_assms outer_ctxt (make_discharge_rules rules) -in - -fun reconstruct outer_ctxt recon output = - let - val {context=ctxt, typs, terms, rewrite_rules, assms} = recon - val (asserted, steps, vars, ctxt1) = - Old_Z3_Proof_Parser.parse ctxt typs terms output - - val simpset = - Old_Z3_Proof_Tools.make_simpset ctxt1 (Named_Theorems.get ctxt1 @{named_theorems old_z3_simp}) - - val ((is, rules), cxp as (ctxt2, _)) = - add_asserted outer_ctxt rewrite_rules assms asserted ctxt1 - in - if Config.get ctxt2 Old_SMT_Config.filter_only_facts then (is, @{thm TrueI}) - else - (Thm @{thm TrueI}, cxp) - |> fold (prove simpset vars) steps - |> discharge rules outer_ctxt - |> pair [] - end - -end - -end diff --git a/src/test-gen/src/test/Automata.thy b/src/test-gen/src/test/Automata.thy deleted file mode 100644 index f3018b8..0000000 --- a/src/test-gen/src/test/Automata.thy +++ /dev/null @@ -1,391 +0,0 @@ -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * Automata.thy --- the base testing theory for automatas in sequence testing. - * This file is part of HOL-TestGen. - * - * Copyright (c) 2005-2007 ETH Zurich, Switzerland - * 2009-2017 Univ. Paris-Sud, France - * 2009-2015 Achim D. Brucker, Germany - * 2015-2017 The University of Sheffield, UK - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ******************************************************************************) -(* $Id:$ *) - -chapter {* Basic Automaton Theory *} - -theory Automata imports Main -begin - -text{* Re-Definition of the following type synonyms from Monad-Theory - -apart from that, these theories are independent. *} -type_synonym ('o, '\) MON\<^sub>S\<^sub>E = "'\ \ ('o \ '\)" (* = '\ \ ('o \ '\)option *) -type_synonym ('o, '\) MON\<^sub>S\<^sub>B = "'\ \ ('o \ '\) set" -type_synonym ('o, '\) MON\<^sub>S\<^sub>B\<^sub>E = "'\ \ (('o \ '\) set) option" - - -subsection{* Deterministic I/O Automaton *} - - -class fin = - assumes finite: "finite(UNIV::'a set)" - - -record ('\, '\) DA = - init :: "'\" - step :: "'\ \ '\ \ '\" - accept :: "'\ set" - - -typedef ('\::fin, '\::fin) DFA = "{x::('\, '\) DA. True}" -proof - - show "\x. x \ {x. True}" by auto -qed - -record ('\, '\) pDA = - init :: "'\" - step :: "'\ \ '\ \ '\" - accept :: "'\ set" - -typ "'a option" -record ('\, '\) NDA = - init :: "'\" - step :: "('\ \ '\ \ '\)set" - accept :: "'\ set" - - -type_synonym ('\, 'o, '\) ioNDA = "('\ \ 'o, '\) NDA" - -record ('\, '\) EFSM = - init :: "'\" - step :: "('\ \ '\ \ '\)set" - accept :: "'\ set" - - -record ('\, 'o, '\) det_io_atm = - init :: "'\" - step :: "'\ \ ('o, '\) MON\<^sub>S\<^sub>E" - - -subsection{* Non-deterministic I/O automaton *} - -text{* - We will use two styles of non-deterministic automaton: Labelled Transition - Systems (LTS), which are intensively used in the literature, but tend to anihilate - the difference between input and output, and non-deterministic automaton, which - make this difference explicit and which have a closer connection to monads used - for the operational aspects of testing. -*} - -text{* - There we are: labelled transition systems. -*} - -record ('\, 'o, '\) lts = - init :: "'\ set" - step :: "('\ \ ('\ \ 'o) \ '\) set" (* false : this must be "('\ \ ('\ + 'o) \ '\) set" *) - -text{* And, equivalently; non-deterministic io automata. *} - -record ('\, 'o, '\) ndet_io_atm = - init :: "'\ set" - step :: "'\ \ ('o, '\) MON\<^sub>S\<^sub>B" - - -text{* - First, we will prove the fundamental equivalence of these two notions. - - We refrain from a formal definition of explicit conversion functions - and leave this internally in this proof (i.e. the existential witnesses). -*} - -definition det2ndet :: "('\, 'o, '\) det_io_atm \ ('\, 'o, '\) ndet_io_atm" -where "det2ndet A = \ndet_io_atm.init = {det_io_atm.init A}, - ndet_io_atm.step = - \ \ \. if \ \ dom(det_io_atm.step A \) - then {the(det_io_atm.step A \ \)} - else {} \" - -text{* - The following theorem establishes the fact that deterministic - automaton can be injectively embedded in non-deterministic ones. 
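
As a purely illustrative aside (not part of the theory sources above), the embedding det2ndet can be read in plain functional-programming terms. The following Haskell sketch uses invented names, DetIOAtm, NdetIOAtm and det2ndetHs, that merely mirror det_io_atm, ndet_io_atm and det2ndet; the partial step function is modelled by Maybe and the set-valued step by Data.Set:

import qualified Data.Set as Set

-- deterministic i/o automaton: one initial state, a partial step function
data DetIOAtm i o s = DetIOAtm
  { dInit :: s
  , dStep :: i -> s -> Maybe (o, s) }

-- non-deterministic i/o automaton: a set of initial states, a set-valued step
data NdetIOAtm i o s = NdetIOAtm
  { nInit :: Set.Set s
  , nStep :: i -> s -> Set.Set (o, s) }

-- the embedding: a defined deterministic step becomes a singleton set,
-- an undefined step becomes the empty set of successors
det2ndetHs :: DetIOAtm i o s -> NdetIOAtm i o s
det2ndetHs a = NdetIOAtm
  { nInit = Set.singleton (dInit a)
  , nStep = \i s -> maybe Set.empty Set.singleton (dStep a i s) }

Injectivity of the embedding then amounts to the observation that the singleton/empty encoding of the step function can be inverted, which is what the lemma det2ndet_injective establishes for the Isabelle definition.
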
-*} -lemma det2ndet_injective : "inj det2ndet" - apply(auto simp: inj_on_def det2ndet_def) - apply(tactic {* Record.split_simp_tac @{context} [] (K ~1) 1*}, simp) - apply(simp (no_asm_simp) add: fun_eq_iff, auto) - apply(drule_tac x=x in fun_cong, drule_tac x=xa in fun_cong) - apply(case_tac "xa \ dom (step x)", simp_all) - apply(case_tac "xa \ dom (stepa x)", - simp_all add: fun_eq_iff[symmetric], auto) - apply(case_tac "xa \ dom (stepa x)", auto simp: fun_eq_iff[symmetric]) - apply(erule contrapos_np, simp) - apply(drule Product_Type.split_paired_All[THEN iffD2])+ - apply(simp only: Option.not_Some_eq) -done - - -text{* - We distinguish two forms of determinism - global determinism, where for each - state and input \emph{at most} one output-successor state - is assigned. -*} -definition deterministic :: "('\, 'o, '\) ndet_io_atm \ bool" -where "deterministic atm = (((\ x. ndet_io_atm.init atm = {x}) \ - (\ \ out. \ p1 \ step atm \ out. - \ p2 \ step atm \ out. - p1 = p2)))" - -text{* In contrast, transition relations *} -definition \deterministic :: "('\, 'o, '\) ndet_io_atm \ bool" -where "\deterministic atm = (\ x. ndet_io_atm.init atm = {x} \ - (\ \ out. - \ p1 \ step atm \ out. - \ p2 \ step atm \ out. - fst p1 = fst p2 \ snd p1 = snd p2))" - - -lemma det2ndet_deterministic: "deterministic (det2ndet atm)" - by(auto simp:deterministic_def det2ndet_def) - -lemma det2ndet_\deterministic: "\deterministic (det2ndet atm)" - by(auto simp: \deterministic_def det2ndet_def) - - -text{* - The following theorem establishes the isomorphism of the two concepts - IO-automata and LTS. We will therefore concentrate in the sequel on IO-Automata, - which have a slightly more realistic operational behaviour: you give the program - under test an input and get a possible set of responses rather than "agreeing - with the program under test" on a set of input-output-pairs. -*} - -definition ndet2lts :: "('\, 'o, '\) ndet_io_atm \ ('\, 'o, '\) lts" -where "ndet2lts A = \lts.init = init A, - lts.step = {(s,io,s').(snd io,s') \ step A (fst io) s}\" - -definition lts2ndet :: " ('\,'o,'\) lts \ ('\, 'o, '\) ndet_io_atm" -where "lts2ndet A = \init = lts.init A, - step = \ i s. {(out,s'). (s, (i,out), s') - \ lts.step A}\" - -lemma ndet_io_atm_isomorph_lts : "bij ndet2lts" - apply (auto simp: bij_def inj_on_def surj_def ndet2lts_def) - apply (tactic {* Record.split_simp_tac @{context} [] (K ~1) 1*}, simp) - apply (rule ext, rule ext, simp add: set_eq_iff) - apply (rule_tac x = "lts2ndet y" in exI) - apply (simp add: lts2ndet_def) - done - - -(* Lemma missing: for deterministic ndet_atm's, det2ndet is even bijective, -i.e the definition above is indeed a characterization. *) - -text{* - The following well-formedness property is important: for every state, - there is a valid transition. Otherwise, some states may never be part of an - (infinite) trace. -*} - -definition is_enabled :: "['\ \ ('o, '\) MON\<^sub>S\<^sub>B, '\ ] \ bool" -where "is_enabled rel \ = (\ \. rel \ \ \ {})" - -definition is_enabled' :: "['\ \ ('o, '\) MON\<^sub>S\<^sub>E, '\ ] \ bool" -where "is_enabled' rel \ = (\ \. \ \ dom(rel \))" - - -definition live_wff:: "('\, 'o, '\) ndet_io_atm \ bool" -where "live_wff atm = (\ \. \ \. step atm \ \ \ {})" - -lemma life_wff_charn: "live_wff atm = (\ \. 
is_enabled (step atm) \)" - by(auto simp: live_wff_def is_enabled_def) - -text{* - There are essentially two approaches: either we disallow non-enabled transition - systems---via life\_wff\_charn---or we restrict our machinery for traces and prefixed closed - sets of runs over them -*} - -section{* Rich Traces and its Derivatives *} -text{* - The easiest way to define the concept of traces is on LTS. Via the injections described - above, we can define notions like deterministic automata rich trace, and i/o automata rich - trace. Moreover, we can easily project event traces or state traces from rich traces. *} - -type_synonym ('\, 'o, '\) trace = "nat \ ('\ \ ('\ \ 'o) \ '\)" -type_synonym ('\, 'o) etrace = "nat \ ('\ \ 'o)" (* event traces *) -type_synonym '\ \trace = "nat \ '\" -type_synonym '\ in_trace = "nat \ '\" -type_synonym 'o out_trace = "nat \ 'o" -type_synonym ('\, 'o, '\) run = "('\ \ ('\ \ 'o) \ '\) list" -type_synonym ('\, 'o) erun = "('\ \ 'o) list" -type_synonym '\ \run = "'\ list" -type_synonym '\ in_run = "'\ list" -type_synonym 'o out_run = "'o list" - -definition rtraces ::"('\, 'o, '\) ndet_io_atm \ ('\, 'o, '\) trace set" -where "rtraces atm = { t. fst(t 0) \ init atm \ - (\ n. fst(t (Suc n)) = snd(snd(t n))) \ - (\ n. if is_enabled (step atm) (fst(t n)) - then t n \ {(s,io,s'). (snd io,s') - \ step atm (fst io) s} - else t n = (fst(t n),undefined,fst(t n)))}" - -lemma init_rtraces[elim!]: "t \ rtraces atm \ fst(t 0) \ init atm" -by(auto simp: rtraces_def) - -lemma post_is_pre_state[elim!]: "t \ rtraces atm \ fst(t (Suc n)) = snd(snd(t n))" -by(auto simp: rtraces_def) - - -lemma enabled_transition[elim!]: -"\t \ rtraces atm; is_enabled (step atm) (fst(t n)) \ - \ t n \ {(s,io,s'). (snd io,s') \ step atm (fst io) s}" -apply(simp add: rtraces_def split_def, safe) -apply (erule allE)+ -apply (auto simp add: split: if_split_asm) -done - -lemma nonenabled_transition[elim!]: "\t \ rtraces atm; \ is_enabled (step atm) (fst(t n)) \ - \ t n = (fst(t n),undefined,fst(t n))" -by(simp add: rtraces_def split_def) - - -text{* - The latter definition solves the problem of inherently finite traces, i.e. those that reach - a state in which they are no longer enabled. They are represented by stuttering steps on - the same state. -*} - -definition fin_rtraces :: "('\, 'o, '\) ndet_io_atm \ ('\, 'o, '\) trace set" -where "fin_rtraces atm = { t . t \ rtraces atm \ - (\ n. \ is_enabled (step atm) (fst(t n)))}" - -lemma fin_rtraces_are_rtraces : "fin_rtraces atm \ rtraces atm" -by(auto simp: rtraces_def fin_rtraces_def) - - -definition \traces ::"('\, 'o, '\) ndet_io_atm \ '\ \trace set" -where "\traces atm = {t . \ rt \ rtraces atm. t = fst o rt }" - -definition etraces ::"('\, 'o, '\) ndet_io_atm \ ('\, 'o) etrace set" -where "etraces atm = {t . \ rt \ rtraces atm. t = fst o snd o rt }" - -definition in_trace :: "('\, 'o) etrace \ '\ in_trace" -where "in_trace rt = fst o rt" - -definition out_trace :: "('\, 'o) etrace \ 'o out_trace" -where "out_trace rt = snd o rt" - -definition prefixes :: "(nat \ '\) set \ '\ list set" -where "prefixes ts = {l. \ t \ ts. \ (n::int). l = map (t o nat) [0..n]}" - -definition rprefixes :: "['\ \ ('o, '\) MON\<^sub>S\<^sub>B, - ('\, 'o, '\) trace set] \ ('\, 'o, '\) run set" -where "rprefixes rel ts = {l. \ t \ ts. \ n. 
(is_enabled rel (fst(t (nat n))) \ - l = map (t o nat) [0..n])}" -definition eprefixes :: "['\ \ ('o, '\) MON\<^sub>S\<^sub>B, - ('\, 'o, '\) trace set] \ ('\, 'o) erun set" -where "eprefixes rel ts = (map (fst o snd)) ` (rprefixes rel ts)" - -definition \prefixes :: "['\ \ ('o, '\) MON\<^sub>S\<^sub>B, - ('\, 'o, '\) trace set] \ '\ \run set" -where "\prefixes rel ts = (map fst) ` (rprefixes rel ts)" - - - -section{* Extensions: Automata with Explicit Final States *} - - -text{* - We model a few widely used variants of automata as record extensions. In particular, we - define automata with final states and internal (output) actions. -*} - -record ('\, 'o, '\) det_io_atm' = "('\, 'o, '\) det_io_atm" + - final :: "'\ set" - -text{* - A natural well-formedness property to be required from this type of atm is as follows: whenever - an atm' is in a final state, the transition operation is undefined. -*} - -definition final_wff:: "('\, 'o, '\) det_io_atm' \ bool" -where "final_wff atm' = - (\\ \ final atm'. \\. \ \ dom (det_io_atm.step atm' \))" - -text{* - Another extension provides the concept of internal actions, which are considered as part of - the output alphabet here. If internal actions are also used for synchronization, further - extensions admitting internal input actions will be necessary, too, which we do not model here. -*} - -record ('\, 'o, '\) det_io_atm'' = "('\, 'o, '\) det_io_atm'" + - internal :: "'o set" - -text{* - A natural well-formedness property to be required from this type of atm is as follows: whenever - an atm' is in a final state, the transition operation is required to provide a state that is - again final and an output that is considered internal. -*} - -definition final_wff2:: "('\, 'o, '\) det_io_atm'' \ bool" -where "final_wff2 atm'' = (\\ \ final atm''. - \ \. \ \ dom (det_io_atm.step atm'' \) \ - (let (out, \') = the(det_io_atm.step atm'' \ \) - in out \ internal atm'' \ \' \ final atm''))" - -text{* - Of course, for this type of extended automata, it is also possible to impose the additional - requirement that the step function is total---undefined steps would then be represented as - steps leading to final states. - - The standard extensions on deterministic automata are also redefined for the non-deterministic - (specification) case. -*} - -record ('\, 'o, '\) ndet_io_atm' = "('\, 'o, '\) ndet_io_atm" + - final :: "'\ set" - -definition final_wff_ndet_io_atm2 :: "('\, 'o, '\) ndet_io_atm' \ bool" - where "final_wff_ndet_io_atm2 atm' = (\\ \ final atm'. \\. ndet_io_atm.step atm' \ \ = {})" - -record ('\, 'o, '\) ndet_io_atm'' = "('\, 'o, '\) ndet_io_atm'" + - internal :: "'o set" - -definition final_wff2_ndet_io_atm2 :: "('\, 'o, '\) ndet_io_atm'' \ bool" - where "final_wff2_ndet_io_atm2 atm'' = - (\\ \ final atm''. - \\. step atm'' \ \ \ {} \ step atm'' \ \ \ internal atm'' \ final atm'')" - -end diff --git a/src/test-gen/src/test/EFSM_Toolkit.thy b/src/test-gen/src/test/EFSM_Toolkit.thy deleted file mode 100644 index 67205c8..0000000 --- a/src/test-gen/src/test/EFSM_Toolkit.thy +++ /dev/null @@ -1,167 +0,0 @@ -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * TestRefinements.thy --- for sequence testing. - * This file is part of HOL-TestGen. - * - * Copyright (c) 2005-2007, ETH Zurich, Switzerland - * 2009 B. Wolff, Univ. Paris-Sud, France - * 2009 Achim D. Brucker, Germany - * - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ******************************************************************************) -(* $Id:$ *) - -chapter {* Test-Refinements (IOCO and Friends) *} - -theory EFSM_Toolkit imports Monads Automata -begin - - -definition impl :: "['\\'\\bool, '\ \ ('o,'\)MON\<^sub>S\<^sub>B] \ '\ \ ('o,'\)MON\<^sub>S\<^sub>E" -where "impl pre post \ = (\ \. if pre \ \ - then Some(SOME(out,\'). (out,\') \ post \ \) - else undefined)" - -definition strong_impl :: "['\\'\\bool, '\\('o,'\)MON\<^sub>S\<^sub>B] \ '\\('o, '\)MON\<^sub>S\<^sub>E" -where "strong_impl pre post \ = - (\ \. if pre \ \ - then Some(SOME(out,\'). (out,\') \ post \ \) - else None)" - -definition strong_impl' :: "['\\'\\bool, '\\('o,'\)MON\<^sub>S\<^sub>B] \ '\\('o, '\)MON\<^sub>S\<^sub>E" -where "strong_impl' pre post \ = - (\ \. if pre \ \ - then Some(SOME(out,\'). (out,\') \ post \ \) - else - (if post \ \ \ {} - then Some(SOME(out,\'). (out,\') \ post \ \) - else None))" - - -definition implementable :: "['\ \ '\ \ bool,'\ \ ('o,'\)MON\<^sub>S\<^sub>B] \ bool" -where "implementable pre post =(\ \ \. pre \ \ \(\ out \'. (out,\') \ post \ \ ))" - -definition is_strong_impl :: "['\ \ '\ \ bool, - '\ \ ('o,'\)MON\<^sub>S\<^sub>B, - '\ \ ('o, '\)MON\<^sub>S\<^sub>E] \ bool" -where "is_strong_impl pre post ioprog = - (\ \ \. (\pre \ \ \ ioprog \ \ = None) \ - (pre \ \ \ (\ x. 
ioprog \ \ = Some x)))" - -lemma is_strong_impl : - "is_strong_impl pre post (strong_impl pre post)" -by(simp add: is_strong_impl_def strong_impl_def) - - -text{* This following characterization of implementable -specifications has actually a quite complicated form due to the fact -that post expects its arguments in curried form - should -be improved \ldots *} -lemma implementable_charn: - "\implementable pre post; pre \ \ \ \ - (the(strong_impl pre post \ \)) \ post \ \" -apply(auto simp: implementable_def strong_impl_def) -apply(erule_tac x=\ in allE) -apply(erule_tac x=\ in allE) -apply(simp add: Eps_case_prod) -apply(rule someI_ex, auto) -done - - -locale efsm_det = - fixes pre :: "'\ \ '\ \ bool" - fixes post :: "'\ \ '\ \ ('o \ '\) set" - fixes efsm :: "'\ \ '\ \ ('o \ '\) option" - fixes in_ev:: "'\" - fixes out_ev:: "'\ \ 'o" - fixes upd :: "'\ \ '\" - fixes E :: "'\ \ bool" - assumes SPEC: "efsm \ (strong_impl pre post)" - assumes pre_red : "\ \. pre \ in_ev = E \" - assumes post_red : "\ \. pre \ in_ev \ (SOME(res,\'). (res,\') \ post in_ev \) = (out_ev \,upd \)" -begin - -lemma impl_def:"efsm in_ev = (\\. if E \ then Some(out_ev \, upd \) else None)" - by(rule ext, auto simp: SPEC strong_impl_def pre_red post_red) - -lemma exec_succes: - "(efsm in_ev \ = Some(b,\')) = (E \ \ (b=out_ev \) \ \'= upd \)" - by(auto simp: impl_def split:if_split_asm) - - -lemma exec_failure: - "(efsm in_ev \ = None) = (\ E \)" - by(auto simp: impl_def split:if_split_asm) - - - -lemma exec_mbindFSave_If[simp]: -"(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (in_ev # S) efsm; return (P s))) = - (if E \ - then ((upd \) \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S efsm; return (P (out_ev \ # s)))) - else (\ \ (return (P []))))" -by(auto simp: exec_mbindFSave impl_def) - -lemma exec_mbindFSave_E: -assumes A:"(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (in_ev # S) efsm; return (P s)))" - and B:"E \ \ ((upd \) \ (s\mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S efsm;return(P(out_ev \ # s)))) \ Q" - and C:"\ E \ \\ \ (return (P [])) \ Q" -shows "Q" -apply(insert A, subst (asm) exec_mbindFSave_If, case_tac "E \", simp_all only: if_True if_False) -by(auto elim: B C) - - - -lemma exec_mbindFStop[simp]: -"(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (in_ev#S) efsm; return (P s))) = - (E \ \ ((upd \) \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S efsm; return (P (out_ev \ # s)))))" -apply(rule iffI) -apply(erule Monads.exec_mbindFStop_E,simp add: exec_succes,auto) -apply(subst exec_bind_SE_success[OF exec_succes[THEN iffD2]], auto) -done - - -lemma exec_mbindFStop_E: -assumes A:"(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (in_ev # S) efsm; return (P s)))" - and B:"E \ \ ((upd \) \ (s\mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S efsm;return(P(out_ev \ # s)))) \ Q" -shows "Q" -by(insert A, rule B, simp_all del: mbind'_bind) - -(* -lemmas impl_1'' = exec_failure -lemmas impl_1' = exec_succes -*) - -end - -end diff --git a/src/test-gen/src/test/Interleaving.thy b/src/test-gen/src/test/Interleaving.thy deleted file mode 100644 index f63c0ea..0000000 --- a/src/test-gen/src/test/Interleaving.thy +++ /dev/null @@ -1,244 +0,0 @@ -theory Interleaving -imports Main -begin - -(* -fun interleave :: "'a list => 'a list => 'a list set" -where "interleave [] [] = {[]}" - 
|"interleave (A) [] = {A}" - |"interleave [] B = {B}" - |"interleave (a#A)(b#B) = (let S = interleave A (b#B); - S' = interleave (a#A) (B) - in (\ x. a # x) ` S \ (\ x. b # x) ` S')" -*) - -fun interleave :: "'a list => 'a list => 'a list set" -where "interleave [] [] = {[]}" - | "interleave (A) [] = {A}" - | "interleave [] B = {B}" - |"interleave (a # A) (b # B) = - (\x. a # x) ` interleave A (b # B) \ - (\x. b # x) ` interleave (a # A) B" - -find_theorems interleave - -value"interleave [1::int,2,3] [11,12]" - - -fun interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t :: "'a list => 'a list => 'a list list" -where "interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t [] [] = [[]]" - |"interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t (A) [] = [A]" - |"interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t [] B = [B]" - |"interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t (a#A)(b#B) = (let S = interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t A (b#B); - S' = interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t (a#A) (B) - in map (\ x. a # x) S @ map (\ x. b # x) S')" - - -value "interleave' [1::int,2,1,2] [2,1]" -value "interleave [1::int,2] [1]" - -(* Junk --- should be associativ application of interleave *) -fun interleave2 :: "'a list => 'a list => 'a list => 'a list set" -where "interleave2 [] [] [] = {[]}" - |"interleave2 (A) [] [] = {A}" - |"interleave2 [] B [] = {B}" - |"interleave2 [] [] C = {C}" - |"interleave2 (A)(B) [] = interleave A B" - |"interleave2 (A)[] (C) = interleave A C" - |"interleave2 [] B (C) = interleave B C" - |"interleave2 (a#A)(b#B) (c#C) = (let S = interleave2 A (b#B) (c#C) ; - S' = interleave2 (a#A) (B) (c#C) ; - S'' = interleave2 (a#A) (b#B) (C) - in (\ x. a # x) ` ( S) \ (\ x. b # x) ` S' \ (\ x. c # x) ` S'')" - -lemma replace : "interleave2 A B C = ((\x\interleave A B. interleave X C))" -oops - -(* -fun interleave1 :: "'a list => 'a list => 'a list => 'a list => 'a list set" -where "interleave1 [] [] [] [] = {[]}" - |"interleave1 (A) [] [] [] = {A}" - |"interleave1 [] B [] []= {B}" - |"interleave1 [] [] C []= {C}" - |"interleave1 [] [] [] D= {D}" - |"interleave1 (A)(B) (C) []= interleave2 A B C" - |"interleave1 (A)(B) [] D = interleave2 A B D" - |"interleave1 (A)[] (C) D= interleave2 A C D" - |"interleave1 [] B (C) D= interleave2 B C D" - |"interleave1 (a#A)(b#B) (c#C) (d#D) = (let S = interleave1 A (b#B) (c#C) (d#D); - S' = interleave1 (a#A) (B) (c#C) (d#D); - S'' = interleave1 (a#A) (b#B) (C) (d#D); - S'''= interleave1 (a#A) (b#B) (c#C) (D) - in (\ x. a # x) ` S \ (\ x. b # x) ` S' \ (\ x. c # x) ` S'' \ (\ x. d # x) ` S''')" - -value "int(card(interleave1 [] [] [2::int,1,2,1] [1,2]))" -*) -fun bus_interleave :: " ('a::order) list \ 'a list \ 'a list set" -where "bus_interleave [] [] = {[]}" - |"bus_interleave (A) [] = {A}" - |"bus_interleave [] B = {B}" - |"bus_interleave (a#A)(b#B) = - (if a \ b then (\ x. a # x) ` bus_interleave A (b#B) - else if b \ a then (\ x. b # x) ` bus_interleave (a#A) (B) - else (let S = bus_interleave A (b#B); - S' = bus_interleave (a#A) (B) - in (\ x. a # x) ` S \ (\ x. b # x) ` S'))" - -fun sync_interleave :: "('a \ bool) \ ('a \ 'a \ bool) \ ('a \ bool) \'a list \ 'a list \ 'a list set" -where "sync_interleave L S R [] [] = {[]}" - |"sync_interleave L S R (A) [] = {A}" - |"sync_interleave L S R [] B = {B}" - |"sync_interleave L S R (a#A)(b#B) = - (if L a \ \ L b then sync_interleave L S R (A) (b#B) - else if \ L a \ L b then sync_interleave L S R (a#A) (B) - else - if L a \ L b \ S a b then (\ x. 
a # x) ` sync_interleave L S R (A) (B) - else (let IL = sync_interleave L S R A (b#B); - IL' = sync_interleave L S R (a#A) (B) - in (\ x. a # x) ` IL \ (\ x. b # x) ` IL'))" - -fun sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t :: "'a \'a list \ 'a list \ 'a list list" -where "sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n [] [] = [[]]" - |"sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n (A) [] = [A]" - |"sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n [] B = [B]" - |"sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n (a#A)(b#B) = - (if a = n \ b \ n then sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n (A) (b#B) - else if a \ n \ b = n then sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n (a#A) (B) - else - if a = n \ b= n then map (\ x. n # x) (sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n (A) (B) ) - else (let S = sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n A (b#B); - S' = sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t n (a#A) (B) - in map (\ x. a # x) S @ map (\ x. b # x) S'))" - -value "((sync_interleave\<^sub>l\<^sub>i\<^sub>s\<^sub>t 2 [1::int,3,2 ] [1,2]))" - - - -value "sync_interleave' 2 [1::int,3,2 ] [1,2]" - -fun sync_interleave1 :: "'a list \'a list \ 'a list \ 'a list set" -where - "sync_interleave1 [] [] [] = {[]}" - |"sync_interleave1 n [] [] = {[]}" - |"sync_interleave1 [] A [] = {A}" - |"sync_interleave1 [] [] B = {B}" - |"sync_interleave1 n (A) [] = {A}" - |"sync_interleave1 n [] B = {B}" - |"sync_interleave1 [] A B = interleave A B" - |"sync_interleave1 (n#N) (a#A)(b#B) = - (if a = n \ b \ n then sync_interleave1 (n#N) (A) (b#B) - else if a \ n \ b = n then sync_interleave1 (n#N) (a#A) (B) - else - if a = n \ b= n then (\ x. n # x) ` sync_interleave1 (n#N) (A) (B) - else - (let S = sync_interleave1 (n#N) A (b#B); - S' = sync_interleave1 (n#N) (a#A) (B) - in (\ x. a # x) ` S \ (\ x. b # x) ` S'))" - -value "((sync_interleave1 [1,3] [1::int,3,2 ] [1,2]))" -value "((sync_interleave1 [3,1] [1::int,3,2 ] [1,2]))" -value "(sync_interleave1 [3,1] [1::int,2 ] [1,3,2])" - -find_theorems " interleave" -value "int (card (interleave [1::int,2,3,4 ] [8,9,10,11]))" - -term" \ X" - - -lemma "x \ set A \ x \ \(set ` interleave A B)" -proof (induct A B rule: interleave.induct) - case 1 - then show ?case by simp -next - case (2 a A) - then show ?case by simp -next - case (3 b B) - then show ?case by simp -next - case (4 a A b B) - from 4 have "x \ \(set ` interleave (a # A) B)" by simp - then show ?case by auto -qed - - -lemma commute1[simp]: "interleave A [] = interleave [] A" by(case_tac A, simp_all) - - -lemma interleave_commute'[rule_format]: "\B. interleave A B = interleave B A" -proof(induct A) - show "\B. interleave [] B = interleave B []" by(simp) -next - show "\a A. \B. interleave A B = interleave B A \ \B. interleave (a # A) B = interleave B (a # A)" - apply(rule allI,induct_tac B, simp_all) - apply(subst Set.Un_commute) - apply (elim allE) - apply (erule ssubst)+ - apply (simp only: triv_forall_equality) - done -qed - - -lemma interleave1 [simp] : "(x \ interleave [] A) = (x = A)" -by(induct A, simp_all only: interleave.simps, simp_all) - - -lemma interleave2 [simp] : "(x \ interleave A []) = (x = A)" -by(induct A, simp_all only: interleave.simps, simp_all) - - - -lemma length_interleave_lemma: - "\n < k . length A + length B \ n - \ (x::'a list) \ (interleave A B) - \ length x = (length A + length B)" -proof(induct k arbitrary: A B x) - case 0 fix A::"'a list" and B::"'a list" and x::"'a list" - have * : "\A B. 
length A + length B \ 0 \ length A = 0" by auto - have ** : "\A B. length A + length B \ 0 \ length B = 0" by auto - show "\n<0. length A + length B \ n - \ x \ interleave A B - \ length x = length A + length B" - by auto - case (Suc k) fix A::"'a list" and B::"'a list" and x::"'a list" - assume H: "\(A::'a list) B x. \n n - \ x \ interleave A B - \ length x = length A + length B" - have H': "\(A::'a list) B x n. x \ interleave A B \ n length A + length B \ n - \ length x = length A + length B" - by(erule H[THEN spec, THEN mp, THEN mp, THEN mp], auto) - - show "\n n - \ x \ interleave A B - \ length x = length A + length B" - apply(case_tac "A", simp_all) - apply(case_tac "B", simp_all) - apply(auto) - apply(case_tac "n", simp_all) - apply(case_tac "list",auto) - apply(rule trans, erule H', auto) - apply(rule trans, erule H', auto) - apply(case_tac "n", simp_all) - apply(case_tac "list",auto) - apply(rule trans, erule H', auto) - apply(rule trans, erule H', auto) - done -qed - - -lemma length_interleave: "x \ (interleave A B) \ length x = (length A + length B)" -apply(rule length_interleave_lemma[rule_format, of "length A + length B" "length A + length B + 1"]) -by auto - - -lemma cardinality: "card(interleave A B) = ( length A * length B + 1)" (* really ? *) -oops - -lemma causality : - "\ k i. i < k \ (\ C \ (interleave A B). (\ l m. l < m \ A!i = C!l \ A!k = C!m))" (* really ? *) -oops - - - -end - diff --git a/src/test-gen/src/test/Monads.thy b/src/test-gen/src/test/Monads.thy deleted file mode 100644 index b03d619..0000000 --- a/src/test-gen/src/test/Monads.thy +++ /dev/null @@ -1,1256 +0,0 @@ -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * Monads.thy --- a base testing theory for sequential computations. - * This file is part of HOL-TestGen. - * - * Copyright (c) 2005-2007 ETH Zurich, Switzerland - * 2009-2017 Univ. Paris-Sud, France - * 2009-2012 Achim D. Brucker, Germany - * 2015-2017 University Sheffield, UK - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ******************************************************************************) -(* $Id:$ *) - -chapter {* Basic Monad Theory for Sequential Computations *} - -theory Monads imports Main -begin - -section{* A Generic (Approximative) Notion of Monad *} - -text {* - A (fake) monad is a structure with a return and a bind satisfying three laws. - Since we cannot use generic type constructors in HOL, - we formulate monads using only one type 'a and its monadic representation 'am. -*} - -locale Monad = -fixes returm :: "'a \ 'am" - and bind :: "'am \ ('a \ 'am) \ 'am" (infixr ">>=" 70) -assumes left_unit : "(returm x) >>= f = f x" - and right_unit: "m >>= returm = m" - and assoc : "(m >>= f) >>= g = m >>= (\ x . f x >>= g)" -begin - -definition pipe :: "'am \ 'am \ 'am" -where "pipe f g \ bind f (\ _. g)" - -lemma pipe_left_unit : "pipe (returm x) f = f" -by(simp add: left_unit pipe_def) - - -lemma pipe_right_unit : "pipe f (returm x) = f" -oops (* Not possible to formulate in this Monad Framework generically ! ! ! - In the concrete SE-Monad, we have the (interesting) special case: - "(m;- (return ())) = m" *) - -lemma pipe_assoc : "pipe f (pipe g h) = pipe (pipe f g) h" -by(simp add: assoc pipe_def) - -primrec Mrun :: "('b \ 'a \ 'am) \ 'b list \ 'a \ 'am" -where "Mrun m [] = returm" - | "Mrun m (b#bs) = (\ a . Mrun m bs a >>= m b)" - -end - -section{*General Framework for Monad-based Sequence-Test *} -text{* As such, Higher-order Logic as a purely functional specification - formalism has no built-in mechanism for state and state-transitions. - Forms of testing involving state require therefore explicit mechanisms - for their treatment inside the logic; a well-known technique to model - states inside purely functional languages are \emph{monads} made popular - by Wadler and Moggi and extensively used in Haskell. \HOL is powerful - enough to represent the most important \emph{instances} of standard monads; - however, it is not possible to represent monads as such due to well-known - limitations of the Hindley-Milner type-system (no type-constructor-classes). *} - -text{* Here is a variant for state-exception monads, that models precisely - transition functions with preconditions. Next, we declare the - state-backtrack-monad. In all of them, our concept of i/o stepping - functions can be formulated; these are functions mapping input - to a given monad. Later on, we will build the usual concepts of: - \begin{enumerate} - \item deterministic i/o automata, - \item non-deterministic i/o automata, and - \item labelled transition systems (LTS) - \end{enumerate} -*} - -subsection{* Definition : Standard State Exception Monads *} - -subsubsection{* Definition : Core Types and Operators *} - -type_synonym ('o, '\) MON\<^sub>S\<^sub>E = "'\ \ ('o \ '\)" (* = '\ \ ('o \ '\)option *) - -definition bind_SE :: "('o,'\)MON\<^sub>S\<^sub>E \ ('o \ ('o','\)MON\<^sub>S\<^sub>E) \ ('o','\)MON\<^sub>S\<^sub>E" -where "bind_SE f g = (\\. 
case f \ of None \ None - | Some (out, \') \ g out \')" - -notation bind_SE ("bind\<^sub>S\<^sub>E") - -syntax (xsymbols) - "_bind_SE" :: "[pttrn,('o,'\)MON\<^sub>S\<^sub>E,('o','\)MON\<^sub>S\<^sub>E] \ ('o','\)MON\<^sub>S\<^sub>E" - ("(2 _ \ _; _)" [5,8,8]8) -translations - "x \ f; g" == "CONST bind_SE f (% x . g)" - -definition unit_SE :: "'o \ ('o, '\)MON\<^sub>S\<^sub>E" ("(return _)" 8) -where "unit_SE e = (\\. Some(e,\))" -notation unit_SE ("unit\<^sub>S\<^sub>E") - -text{* In the following, we prove the required Monad-laws *} - -lemma bind_right_unit[simp]: "(x \ m; return x) = m" - apply (simp add: unit_SE_def bind_SE_def) - apply (rule ext) - apply (case_tac "m \", simp_all) - done - -lemma bind_left_unit [simp]: "(x \ return c; P x) = P c" - by (simp add: unit_SE_def bind_SE_def) - -lemma bind_assoc[simp]: "(y \ (x \ m; k x); h y) = (x \ m; (y \ k x; h y))" - apply (simp add: unit_SE_def bind_SE_def, rule ext) - apply (case_tac "m \", simp_all) - apply (case_tac "a", simp_all) - done - -interpretation SE : Monad unit_SE bind_SE - by unfold_locales (simp_all) - - -subsubsection{* Definition : More Operators and their Properties *} - -definition fail_SE :: "('o, '\)MON\<^sub>S\<^sub>E" -where "fail_SE = (\\. None)" -notation fail_SE ("fail\<^sub>S\<^sub>E") - -definition assert_SE :: "('\ \ bool) \ (bool, '\)MON\<^sub>S\<^sub>E" -where "assert_SE P = (\\. if P \ then Some(True,\) else None)" -notation assert_SE ("assert\<^sub>S\<^sub>E") - -definition assume_SE :: "('\ \ bool) \ (unit, '\)MON\<^sub>S\<^sub>E" -where "assume_SE P = (\\. if \\ . P \ then Some((), SOME \ . P \) else None)" -notation assume_SE ("assume\<^sub>S\<^sub>E") - - -lemma bind_left_fail_SE[simp] : "(x \ fail\<^sub>S\<^sub>E; P x) = fail\<^sub>S\<^sub>E" - by (simp add: fail_SE_def bind_SE_def) - - -text{* We also provide a "Pipe-free" - variant of the bind operator. -Just a "standard" programming sequential operator without output frills. *} -(* TODO: Eliminate/Modify this. Is a consequence of the Monad-Instantiation. *) - - -definition bind_SE' :: "[('\, '\)MON\<^sub>S\<^sub>E, ('\, '\)MON\<^sub>S\<^sub>E] \ ('\, '\)MON\<^sub>S\<^sub>E" (infixr ";-" 60) -where "f ;- g = (_ \ f ; g)" - -lemma bind_assoc'[simp]: "((m;- k);- h ) = (m;- (k;- h))" -by(simp add:bind_SE'_def) - - -lemma bind_left_unit' [simp]: "((return c);- P) = P" - by (simp add: bind_SE'_def) - - -lemma bind_left_fail_SE'[simp]: "(fail\<^sub>S\<^sub>E;- P) = fail\<^sub>S\<^sub>E" - by (simp add: bind_SE'_def) - -lemma bind_right_unit'[simp]: "(m;- (return ())) = m" - by (simp add: bind_SE'_def) - -text{* The bind-operator in the state-exception monad yields already - a semantics for the concept of an input sequence on the meta-level: *} -lemma syntax_test: "(o1 \ f1 ; o2 \ f2; return (post o1 o2)) = X" -oops - -definition if_SE :: "['\ \ bool, ('\, '\)MON\<^sub>S\<^sub>E, ('\, '\)MON\<^sub>S\<^sub>E] \ ('\, '\)MON\<^sub>S\<^sub>E" -where "if_SE c E F = (\\. 
if c \ then E \ else F \)" - -syntax (xsymbols) - "_if_SE" :: "['\ \ bool,('o,'\)MON\<^sub>S\<^sub>E,('o','\)MON\<^sub>S\<^sub>E] \ ('o','\)MON\<^sub>S\<^sub>E" - ("(if\<^sub>S\<^sub>E _ then _ else _fi)" [5,8,8]8) -translations - "(if\<^sub>S\<^sub>E cond then T1 else T2 fi)" == "CONST if_SE cond T1 T2" - - -subsubsection{* Theory of a Monadic While *} - -text{* Prerequisites *} -fun replicator :: "[('a, '\)MON\<^sub>S\<^sub>E, nat] \ (unit, '\)MON\<^sub>S\<^sub>E" (infixr "^^^" 60) -where "f ^^^ 0 = (return ())" - | "f ^^^ (Suc n) = (f ;- f ^^^ n)" - - -fun replicator2 :: "[('a, '\)MON\<^sub>S\<^sub>E, nat, ('b, '\)MON\<^sub>S\<^sub>E] \ ('b, '\)MON\<^sub>S\<^sub>E" (infixr "^:^" 60) -where "(f ^:^ 0) M = (M )" - | "(f ^:^ (Suc n)) M = (f ;- ((f ^:^ n) M))" - - -text{* First Step : Establishing an embedding between partial functions and relations *} -(* plongement *) -definition Mon2Rel :: "(unit, '\)MON\<^sub>S\<^sub>E \ ('\ \ '\) set" -where "Mon2Rel f = {(x, y). (f x = Some((), y))}" -(* ressortir *) -definition Rel2Mon :: " ('\ \ '\) set \ (unit, '\)MON\<^sub>S\<^sub>E " -where "Rel2Mon S = (\ \. if \\'. (\, \') \ S then Some((), SOME \'. (\, \') \ S) else None)" - -lemma Mon2Rel_Rel2Mon_id: assumes det:"single_valued R" shows "(Mon2Rel \ Rel2Mon) R = R" -apply (simp add: comp_def Mon2Rel_def Rel2Mon_def,auto) -apply (case_tac "\\'. (a, \') \ R", auto) -apply (subst (2) some_eq_ex) -using det[simplified single_valued_def] by auto - - -lemma Rel2Mon_Id: "(Rel2Mon \ Mon2Rel) x = x" -apply (rule ext) -apply (auto simp: comp_def Mon2Rel_def Rel2Mon_def) -apply (erule contrapos_pp, drule HOL.not_sym, simp) -done - -lemma single_valued_Mon2Rel: "single_valued (Mon2Rel B)" -by (auto simp: single_valued_def Mon2Rel_def) - -text{* Second Step : Proving an induction principle allowing to establish that lfp remains - deterministic *} - - -(* A little complete partial order theory due to Tobias Nipkow *) -definition chain :: "(nat \ 'a set) \ bool" -where "chain S = (\i. S i \ S(Suc i))" - -lemma chain_total: "chain S ==> S i \ S j \ S j \ S i" -by (metis chain_def le_cases lift_Suc_mono_le) - -definition cont :: "('a set => 'b set) => bool" -where "cont f = (\S. chain S \ f(UN n. S n) = (UN n. f(S n)))" - -lemma mono_if_cont: fixes f :: "'a set \ 'b set" - assumes "cont f" shows "mono f" -proof - fix a b :: "'a set" assume "a \ b" - let ?S = "\n::nat. if n=0 then a else b" - have "chain ?S" using `a \ b` by(auto simp: chain_def) - hence "f(UN n. ?S n) = (UN n. f(?S n))" - using assms by(simp add: cont_def) - moreover have "(UN n. ?S n) = b" using `a \ b` by (auto split: if_splits) - moreover have "(UN n. f(?S n)) = f a \ f b" by (auto split: if_splits) - ultimately show "f a \ f b" by (metis Un_upper1) -qed - -lemma chain_iterates: fixes f :: "'a set \ 'a set" - assumes "mono f" shows "chain(\n. (f^^n) {})" -proof- - { fix n have "(f ^^ n) {} \ (f ^^ Suc n) {}" using assms - by(induction n) (auto simp: mono_def) } - thus ?thesis by(auto simp: chain_def) -qed - -theorem lfp_if_cont: - assumes "cont f" shows "lfp f = (\n. (f ^^ n) {})" (is "_ = ?U") -proof - show "lfp f \ ?U" - proof (rule lfp_lowerbound) - have "f ?U = (UN n. 
(f^^Suc n){})" - using chain_iterates[OF mono_if_cont[OF assms]] assms - by(simp add: cont_def) - also have "\ = (f^^0){} \ \" by simp - also have "\ = ?U" - apply(auto simp del: funpow.simps) - by (metis empty_iff funpow_0 old.nat.exhaust) - finally show "f ?U \ ?U" by simp - qed -next - { fix n p assume "f p \ p" - have "(f^^n){} \ p" - proof(induction n) - case 0 show ?case by simp - next - case Suc - from monoD[OF mono_if_cont[OF assms] Suc] `f p \ p` - show ?case by simp - qed - } - thus "?U \ lfp f" by(auto simp: lfp_def) -qed - - -lemma single_valued_UN_chain: - assumes "chain S" "(!!n. single_valued (S n))" - shows "single_valued(UN n. S n)" -proof(auto simp: single_valued_def) - fix m n x y z assume "(x, y) \ S m" "(x, z) \ S n" - with chain_total[OF assms(1), of m n] assms(2) - show "y = z" by (auto simp: single_valued_def) -qed - -lemma single_valued_lfp: -fixes f :: "('a \ 'a) set \ ('a \ 'a) set" -assumes "cont f" "\r. single_valued r \ single_valued (f r)" -shows "single_valued(lfp f)" -unfolding lfp_if_cont[OF assms(1)] -proof(rule single_valued_UN_chain[OF chain_iterates[OF mono_if_cont[OF assms(1)]]]) - fix n show "single_valued ((f ^^ n) {})" - by(induction n)(auto simp: assms(2)) -qed - - -text{* Third Step: Definition of the Monadic While *} -definition \ :: "['\ \ bool,('\ \ '\) set] \ (('\ \ '\) set \ ('\ \ '\) set)" -where "\ b cd = (\cw. {(s,t). if b s then (s, t) \ cd O cw else s = t})" - - -definition while_SE :: "['\ \ bool, (unit, '\)MON\<^sub>S\<^sub>E] \ (unit, '\)MON\<^sub>S\<^sub>E" -where "while_SE c B \ (Rel2Mon(lfp(\ c (Mon2Rel B))))" - -syntax (xsymbols) - "_while_SE" :: "['\ \ bool, (unit, '\)MON\<^sub>S\<^sub>E] \ (unit, '\)MON\<^sub>S\<^sub>E" - ("(while\<^sub>S\<^sub>E _ do _ od)" [8,8]8) -translations - "while\<^sub>S\<^sub>E c do b od" == "CONST while_SE c b" - -lemma cont_\: "cont (\ c b)" -by (auto simp: cont_def \_def) - -text{* The fixpoint theory now allows us to establish that the lfp constructed over - @{term Mon2Rel} remains deterministic *} - -theorem single_valued_lfp_Mon2Rel: "single_valued (lfp(\ c (Mon2Rel B)))" -apply(rule single_valued_lfp, simp_all add: cont_\) -apply(auto simp: \_def single_valued_def) -apply(metis single_valued_Mon2Rel[of "B"] single_valued_def) -done - - -lemma Rel2Mon_if: - "Rel2Mon {(s, t). if b s then (s, t) \ Mon2Rel c O lfp (\ b (Mon2Rel c)) else s = t} \ = - (if b \ then Rel2Mon (Mon2Rel c O lfp (\ b (Mon2Rel c))) \ else Some ((), \))" -by (simp add: Rel2Mon_def) - -lemma Rel2Mon_homomorphism: - assumes determ_X: "single_valued X" and determ_Y: "single_valued Y" - shows "Rel2Mon (X O Y) = (Rel2Mon X) ;- (Rel2Mon Y)" -proof - - have relational_partial_next_in_O: "\x E F. (\y. (x, y) \ (E O F)) \ (\y. (x, y) \ E)" - by (auto) - have some_eq_intro: "\ X x y . single_valued X \ (x, y) \ X \ (SOME y. (x, y) \ X) = y" - by (auto simp: single_valued_def) - - show ?thesis -apply (simp add: Rel2Mon_def bind_SE'_def bind_SE_def) -apply (rule ext, rename_tac "\") -apply (case_tac " \ \'. 
(\, \') \ X O Y") -apply (simp only: HOL.if_True) -apply (frule relational_partial_next_in_O) -apply (auto simp: single_valued_relcomp some_eq_intro determ_X determ_Y relcomp.relcompI) -by blast -qed - - - -text{* Putting everything together, the theory of embedding and the invariance of - determinism of the while-body, gives us the usual unfold-theorem: *} -theorem while_SE_unfold: -"(while\<^sub>S\<^sub>E b do c od) = (if\<^sub>S\<^sub>E b then (c ;- (while\<^sub>S\<^sub>E b do c od)) else return () fi)" -apply (simp add: if_SE_def bind_SE'_def while_SE_def unit_SE_def) -apply (subst lfp_unfold [OF mono_if_cont, OF cont_\]) -apply (rule ext) -apply (subst \_def) -apply (auto simp: Rel2Mon_if Rel2Mon_homomorphism bind_SE'_def Rel2Mon_Id [simplified comp_def] - single_valued_Mon2Rel single_valued_lfp_Mon2Rel ) -done - - -subsection{* Chaining Executions : Definitions of Multi-bind Operators *} - -text{* In order to express execution sequences inside \HOL --- rather -than arguing over a certain pattern of terms on the meta-level --- and -in order to make our theory amenable to formal reasoning over execution sequences, -we represent them as lists of input and generalize the bind-operator -of the state-exception monad accordingly. The approach is straightforward, -but comes with a price: we have to encapsulate all input and output data -into one type, and restrict ourselves to a uniform step function. -Assume that we have a typed interface to a module with -the operations $op_1$, $op_2$, \ldots, $op_n$ with the inputs -$\iota_1$, $\iota_2$, \ldots, $\iota_n$ (outputs are treated analogously). -Then we can encode for this interface the general input - type: -\begin{displaymath} -\texttt{datatype}\ \texttt{in}\ =\ op_1\ ::\ \iota_1\ |\ ...\ |\ \iota_n -\end{displaymath} -Obviously, we loose some type-safety in this approach; we have to express -that in traces only \emph{corresponding} input and output belonging to the -same operation will occur; this form of side-conditions have to be expressed -inside \HOL. From the user perspective, this will not make much difference, -since junk-data resulting from too weak typing can be ruled out by adopted -front-ends. -*} - -text{* Note that the subsequent notion of a test-sequence allows the io stepping -function (and the special case of a program under test) to stop execution -\emph{within} the sequence; such premature terminations are characterized by an -output list which is shorter than the input list. - -Intuitively, @{text mbind} corresponds to a sequence of operation calls, separated by -";", in Java. The operation calls may fail (raising an exception), which means that -the state is maintained and the exception can still be caught at the end of the -execution sequence. - -*} - -fun mbind :: "'\ list \ ('\ \ ('o,'\) MON\<^sub>S\<^sub>E) \ ('o list,'\) MON\<^sub>S\<^sub>E" -where "mbind [] iostep \ = Some([], \)" - | "mbind (a#S) iostep \ = - (case iostep a \ of - None \ Some([], \) - | Some (out, \') \ (case mbind S iostep \' of - None \ Some([out],\') - | Some(outs,\'') \ Some(out#outs,\'')))" - -notation mbind ("mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e") (* future name: mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e *) - -text{* This definition is fail-safe; in case of an exception, the current state is maintained, - the computation as a whole is marked as success. 
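
As a purely illustrative aside (invented names, not part of the sources above), the following Haskell sketch mirrors the state-exception monad and the fail-save multi-bind just described: MonSE, returnSE and bindSE correspond to MON\<^sub>S\<^sub>E, unit_SE and bind_SE, and mbindFailSave corresponds to mbind. A failing step closes the run early while keeping the current state, and the run as a whole still succeeds with the outputs collected so far.

-- state-exception monad over state s with result o
type MonSE o s = s -> Maybe (o, s)

returnSE :: o -> MonSE o s
returnSE x s = Just (x, s)

bindSE :: MonSE a s -> (a -> MonSE b s) -> MonSE b s
bindSE m f s = case m s of
                 Nothing      -> Nothing
                 Just (a, s') -> f a s'

-- fail-save multi-bind: a failing step ends the run early, the current
-- state is kept, and the run as a whole is still reported as successful
mbindFailSave :: [i] -> (i -> MonSE o s) -> MonSE [o] s
mbindFailSave []     _    s = Just ([], s)
mbindFailSave (a:as) step s =
  case step a s of
    Nothing        -> Just ([], s)
    Just (out, s') ->
      case mbindFailSave as step s' of
        Nothing          -> Just ([out], s')   -- unreachable: mbind never fails
        Just (outs, s'') -> Just (out : outs, s'')

The fail-strict variant mbind' discussed next differs only in the two Nothing branches, which would propagate the failure instead of terminating the run successfully.
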
- Compare to the fail-strict variant @{text "mbind'"}: *} - -lemma mbind_unit [simp]: - "mbind [] f = (return [])" - by(rule ext, simp add: unit_SE_def) - -text{* The characteristic property of @{term mbind} --- which distinguishes it from - @{text mbind} defined in the sequel --- is that it never fails; it ``swallows'' internal - errors occuring during the computation. *} -lemma mbind_nofailure [simp]: - "mbind S f \ \ None" - apply(rule_tac x=\ in spec) - apply(induct S, auto simp:unit_SE_def) - apply(case_tac "f a x", auto) - apply(erule_tac x="b" in allE) - apply(erule exE, erule exE, simp) - done - -text{* In contrast, we define a fail-strict sequential execution operator. -He has more the characteristic to fail globally whenever one of its operation -steps fails. - -Intuitively speaking, @{text mbind'} corresponds to an execution of operations -where a results in a System-Halt. Another interpretation of @{text mbind'} is to -view it as a kind of @{term foldl} foldl over lists via @{term bind\<^sub>S\<^sub>E}.*} - -fun mbind' :: "'\ list \ ('\ \ ('o,'\) MON\<^sub>S\<^sub>E) \ ('o list,'\) MON\<^sub>S\<^sub>E" -where "mbind' [] iostep \ = Some([], \)" | - "mbind' (a#S) iostep \ = - (case iostep a \ of - None \ None - | Some (out, \') \ (case mbind' S iostep \' of - None \ None (* fail-strict *) - | Some(outs,\'') \ Some(out#outs,\'')))" -notation mbind' ("mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p") (* future name: mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p *) - -lemma mbind'_unit [simp]: - "mbind' [] f = (return [])" - by(rule ext, simp add: unit_SE_def) - -lemma mbind'_bind [simp]: - "(x \ mbind' (a#S) F; M x) = (a \ (F a); (x \ mbind' S F; M (a # x)))" - by(rule ext, rename_tac "z",simp add: bind_SE_def split: option.split) - -declare mbind'.simps[simp del] (* use only more abstract definitions *) - -text{* The next @{text mbind} sequential execution operator is called -Fail-Purge. He has more the characteristic to never fail, just "stuttering" -above operation steps that fail. Another alternative in modeling. *} - -fun mbind'' :: "'\ list \ ('\ \ ('o,'\) MON\<^sub>S\<^sub>E) \ ('o list,'\) MON\<^sub>S\<^sub>E" -where "mbind'' [] iostep \ = Some([], \)" | - "mbind'' (a#S) iostep \ = - (case iostep a \ of - None \ mbind'' S iostep \ - | Some (out, \') \ (case mbind'' S iostep \' of - None \ None (* does not occur *) - | Some(outs,\'') \ Some(out#outs,\'')))" - -notation mbind'' ("mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e") (* future name: mbind\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e\<^sub>F\<^sub>a\<^sub>i\<^sub>l *) -declare mbind''.simps[simp del] (* use only more abstract definitions *) - - -text{* mbind' as failure strict operator can be seen as a foldr on bind - - if the types would match \ldots *} - -subsubsection{* Definition : Miscellaneous Operators and their Properties *} - -definition try_SE :: "('o,'\) MON\<^sub>S\<^sub>E \ ('o option,'\) MON\<^sub>S\<^sub>E" ("try\<^sub>S\<^sub>E") -where "try\<^sub>S\<^sub>E ioprog = (\\. case ioprog \ of - None \ Some(None, \) - | Some(outs, \') \ Some(Some outs, \'))" -text{* In contrast, mbind as a failure safe operator can roughly be seen - as a foldr on bind - try: - m1 ; try m2 ; try m3; ... Note, that the rough equivalence only holds for - certain predicates in the sequence - length equivalence modulo None, - for example. 
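
In the same illustrative Haskell reading (same caveats and the same invented MonSE synonym as above), try_SE turns a failing step into a successful one that merely reports the absence of an output; this is the operator behind the "bind - try" reading of mbind and the precise equivalence stated next:

type MonSE o s = s -> Maybe (o, s)   -- same synonym as in the previous sketch

-- try: a failing computation becomes a successful one returning Nothing
trySE :: MonSE o s -> MonSE (Maybe o) s
trySE m s = case m s of
              Nothing        -> Just (Nothing, s)
              Just (out, s') -> Just (Just out, s')
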
However, if a conditional is added, the equivalence - can be made precise: *} - -lemma mbind_try: - "(x \ mbind (a#S) F; M x) = - (a' \ try\<^sub>S\<^sub>E(F a); - if a' = None - then (M []) - else (x \ mbind S F; M (the a' # x)))" -apply(rule ext) -apply(simp add: bind_SE_def try_SE_def) -apply(case_tac "F a x", auto) -apply(simp add: bind_SE_def try_SE_def) -apply(case_tac "mbind S F b", auto) -done - -text{* On this basis, a symbolic evaluation scheme can be established - that reduces mbind-code to try\_SE\_code and ite-cascades. *} - -definition alt_SE :: "[('o, '\)MON\<^sub>S\<^sub>E, ('o, '\)MON\<^sub>S\<^sub>E] \ ('o, '\)MON\<^sub>S\<^sub>E" (infixl "\\<^sub>S\<^sub>E" 10) -where "(f \\<^sub>S\<^sub>E g) = (\ \. case f \ of None \ g \ - | Some H \ Some H)" - -definition malt_SE :: "('o, '\)MON\<^sub>S\<^sub>E list \ ('o, '\)MON\<^sub>S\<^sub>E" -where "malt_SE S = foldr alt_SE S fail\<^sub>S\<^sub>E" -notation malt_SE ("\\<^sub>S\<^sub>E") - -lemma malt_SE_mt [simp]: "\\<^sub>S\<^sub>E [] = fail\<^sub>S\<^sub>E" -by(simp add: malt_SE_def) - -lemma malt_SE_cons [simp]: "\\<^sub>S\<^sub>E (a # S) = (a \\<^sub>S\<^sub>E (\\<^sub>S\<^sub>E S))" -by(simp add: malt_SE_def) - - - - -subsection{* Definition and Properties of Valid Execution Sequences*} - -text{* A key-notion in our framework is the \emph{valid} execution -sequence, \ie{} a sequence that: -\begin{enumerate} -\item terminates (not obvious since while), -\item results in a final @{term True}, -\item does not fail globally (but recall the FailSave and FailPurge - variants of @{term mbind}-operators, that handle local exceptions in - one or another way). -\end{enumerate} -Seen from an automata perspective (where the monad - operations correspond to -the step function), valid execution sequences can be used to model ``feasible paths'' -across an automaton. *} - -definition valid_SE :: "'\ \ (bool,'\) MON\<^sub>S\<^sub>E \ bool" (infix "\" 15) -where "(\ \ m) = (m \ \ None \ fst(the (m \)))" -text{* This notation consideres failures as valid -- a definition -inspired by I/O conformance. *} - -subsubsection{* Valid Execution Sequences and their Symbolic Execution *} -lemma exec_unit_SE [simp]: "(\ \ (return P)) = (P)" -by(auto simp: valid_SE_def unit_SE_def) - -lemma exec_unit_SE' [simp]: "(\\<^sub>0 \ (\\. Some (f \, \))) = (f \\<^sub>0)" -by(simp add: valid_SE_def ) - -lemma exec_fail_SE [simp]: "(\ \ fail\<^sub>S\<^sub>E) = False" -by(auto simp: valid_SE_def fail_SE_def) - -lemma exec_fail_SE'[simp]: "\(\\<^sub>0 \ (\\. None))" -by(simp add: valid_SE_def ) - -text{* The following the rules are in a sense the heart of the entire symbolic execution approach *} -lemma exec_bind_SE_failure: -"A \ = None \ \(\ \ ((s \ A ; M s)))" -by(simp add: valid_SE_def unit_SE_def bind_SE_def) - -lemma exec_bind_SE_success: -"A \ = Some(b,\') \ (\ \ ((s \ A ; M s))) = (\' \ (M b))" -by(simp add: valid_SE_def unit_SE_def bind_SE_def ) - -lemma exec_bind_SE_success': (* atomic boolean Monad "Query Functions" *) -"M \ = Some(f \,\) \ (\ \ M) = f \" -by(simp add: valid_SE_def unit_SE_def bind_SE_def ) - - - - -lemma exec_bind_SE_success'': -"\ \ ((s \ A ; M s)) \ \ v \'. the(A \) = (v,\') \ \' \ (M v)" -apply(auto simp: valid_SE_def unit_SE_def bind_SE_def) -apply(cases "A \", simp_all) -apply(drule_tac x="A \" and f=the in arg_cong, simp) -apply(rule_tac x="fst aa" in exI) -apply(rule_tac x="snd aa" in exI, auto) -done - - -lemma exec_bind_SE_success''': -"\ \ ((s \ A ; M s)) \ \ a. 
(A \) = Some a \ (snd a) \ (M (fst a))" -apply(auto simp: valid_SE_def unit_SE_def bind_SE_def) -apply(cases "A \", simp_all) -apply(drule_tac x="A \" and f=the in arg_cong, simp) -apply(rule_tac x="fst aa" in exI) -apply(rule_tac x="snd aa" in exI, auto) -done - - -lemma exec_bind_SE_success'''' : -"\ \ ((s \ A ; M s)) \ \ v \'. A \ = Some(v,\') \ \' \ (M v)" -apply(auto simp: valid_SE_def unit_SE_def bind_SE_def) -apply(cases "A \", simp_all) -apply(drule_tac x="A \" and f=the in arg_cong, simp) -apply(rule_tac x="fst aa" in exI) -apply(rule_tac x="snd aa" in exI, auto) -done - - - - -text{* Recall \verb+mbind_unit+ for the base case. *} - -lemma valid_mbind_mt : "(\ \ ( s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e [] f; unit\<^sub>S\<^sub>E (P s))) = P [] " by simp -lemma valid_mbind_mtE: "\ \ ( s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e [] f; unit\<^sub>S\<^sub>E (P s)) \ (P [] \ Q) \ Q" -by(auto simp: valid_mbind_mt) - -lemma valid_mbind'_mt : "(\ \ ( s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p [] f; unit\<^sub>S\<^sub>E (P s))) = P [] " by simp -lemma valid_mbind'_mtE: "\ \ ( s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p [] f; unit\<^sub>S\<^sub>E (P s)) \ (P [] \ Q) \ Q" -by(auto simp: valid_mbind'_mt) - -lemma valid_mbind''_mt : "(\ \ ( s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e [] f; unit\<^sub>S\<^sub>E (P s))) = P [] " -by(simp add: mbind''.simps valid_SE_def bind_SE_def unit_SE_def) -lemma valid_mbind''_mtE: "\ \ ( s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e [] f; unit\<^sub>S\<^sub>E (P s)) \ (P [] \ Q) \ Q" -by(auto simp: valid_mbind''_mt) - - -lemma exec_mbindFSave_failure: -"ioprog a \ = None \ - (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (a#S) ioprog ; M s)) = (\ \ (M []))" -by(simp add: valid_SE_def unit_SE_def bind_SE_def) - -lemma exec_mbindFStop_failure: -"ioprog a \ = None \ - (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (a#S) ioprog ; M s)) = (False)" -by(simp add: exec_bind_SE_failure) - -lemma exec_mbindFPurge_failure: -"ioprog a \ = None \ - (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e (a#S) ioprog ; M s)) = (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e (S) ioprog ; M s))" -by(simp add: valid_SE_def unit_SE_def bind_SE_def mbind''.simps) - - -lemma exec_mbindFSave_success : -"ioprog a \ = Some(b,\') \ - (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (a#S) ioprog ; M s)) = - (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S ioprog ; M (b#s)))" -unfolding valid_SE_def unit_SE_def bind_SE_def -by(cases "mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S ioprog \'", auto) - -lemma exec_mbindFStop_success : -"ioprog a \ = Some(b,\') \ - (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (a#S) ioprog ; M s)) = - (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S ioprog ; M (b#s)))" -unfolding valid_SE_def unit_SE_def bind_SE_def -by(cases "mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S ioprog \'", auto simp: mbind'.simps) - -lemma exec_mbindFPurge_success : -"ioprog a \ = Some(b,\') \ - (\ \ (s \ 
mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e (a#S) ioprog ; M s)) = - (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e S ioprog ; M (b#s)))" -unfolding valid_SE_def unit_SE_def bind_SE_def -by(cases "mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e S ioprog \'", auto simp: mbind''.simps) - -lemma exec_mbindFSave: -"(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (a#S) ioprog ; return (P s))) = - (case ioprog a \ of - None \ (\ \ (return (P []))) - | Some(b,\') \ (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S ioprog ; return (P (b#s)))))" -apply(case_tac "ioprog a \") -apply(auto simp: exec_mbindFSave_failure exec_mbindFSave_success split: prod.splits) -done - -lemma mbind_eq_sexec: -assumes * : "\b \'. f a \ = Some(b,\') \ - (os \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S f; P (b#os)) = (os \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S f; P' (b#os))" -shows "( a \ f a; x \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S f; P (a # x)) \ = - ( a \ f a; x \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S f; P'(a # x)) \" - apply(cases "f a \ = None") - apply(subst bind_SE_def, simp) - apply(subst bind_SE_def, simp) - apply auto - apply(subst bind_SE_def, simp) - apply(subst bind_SE_def, simp) -apply(simp add: *) -done - - -lemma mbind_eq_sexec': -assumes * : "\b \'. f a \ = Some(b,\') \ - (P (b))\' = (P' (b))\'" -shows "( a \ f a; P (a)) \ = - ( a \ f a; P'(a)) \" - apply(cases "f a \ = None") - apply(subst bind_SE_def, simp) - apply(subst bind_SE_def, simp) - apply auto - apply(subst bind_SE_def, simp) - apply(subst bind_SE_def, simp) - apply(simp add: *) -done - -lemma mbind'_concat: -"(os \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (S@T) f; P os) = (os \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S f; os' \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p T f; P (os @ os'))" -proof (rule ext, rename_tac "\", induct S arbitrary: \ P) - case Nil show ?case by simp -next - case (Cons a S) show ?case - apply(insert Cons.hyps, simp) - by(rule mbind_eq_sexec',simp) -qed - -lemma assert_suffix_inv : - "\ \ ( _ \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p xs istep; assert\<^sub>S\<^sub>E (P)) - \ \\. P \ \ (\ \ (_ \ istep x; assert\<^sub>S\<^sub>E (P))) - \ \ \ ( _ \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (xs @ [x]) istep; assert\<^sub>S\<^sub>E (P))" -apply(subst mbind'_concat, simp) -unfolding bind_SE_def assert_SE_def valid_SE_def -apply(auto split: option.split option.split_asm) -apply(case_tac "aa",simp_all) -apply(case_tac "P bb",simp_all) -apply (metis option.distinct(1)) -apply(case_tac "aa",simp_all) -apply(case_tac "P bb",simp_all) -by (metis option.distinct(1)) - - - -text{* Universal splitting and symbolic execution rule *} -lemma exec_mbindFSave_E: -assumes seq : "(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (a#S) ioprog ; (P s)))" - and none: "ioprog a \ = None \ (\ \ (P [])) \ Q" - and some: "\ b \'. 
ioprog a \ = Some(b,\') \ (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S ioprog;(P (b#s)))) \ Q " -shows "Q" -using seq -proof(cases "ioprog a \") - case None assume ass:"ioprog a \ = None" show "Q" - apply(rule none[OF ass]) - apply(insert ass, erule_tac ioprog1=ioprog in exec_mbindFSave_failure[THEN iffD1],rule seq) - done -next - case (Some aa) assume ass:"ioprog a \ = Some aa" show "Q" - apply(insert ass,cases "aa",simp, rename_tac "out" "\'") - apply(erule some) - apply(insert ass,simp) - apply(erule_tac ioprog1=ioprog in exec_mbindFSave_success[THEN iffD1],rule seq) - done -qed - -text{* The next rule reveals the particular interest in deduction; - as an elimination rule, it allows for a linear conversion of a validity judgement - @{term "mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p"} over an input list @{term "S"} into a constraint system; without any - branching ... Symbolic execution can even be stopped tactically whenever - @{term "ioprog a \ = Some(b,\')"} comes to a contradiction. *} -lemma exec_mbindFStop_E: -assumes seq : "(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (a#S) ioprog ; (P s)))" - and some: "\b \'. ioprog a \ = Some(b,\') \ (\'\ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S ioprog;(P(b#s)))) \ Q" -shows "Q" -using seq -proof(cases "ioprog a \") - case None assume ass:"ioprog a \ = None" show "Q" - apply(insert ass seq) - apply(drule_tac \=\ and S=S and M=P in exec_mbindFStop_failure, simp) - done -next - case (Some aa) assume ass:"ioprog a \ = Some aa" show "Q" - apply(insert ass,cases "aa",simp, rename_tac "out" "\'") - apply(erule some) - apply(insert ass,simp) - apply(erule_tac ioprog1=ioprog in exec_mbindFStop_success[THEN iffD1],rule seq) - done -qed - - -lemma exec_mbindFPurge_E: -assumes seq : "(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e (a#S) ioprog ; (P s)))" - and none: "ioprog a \ = None \ (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e S ioprog;(P (s)))) \ Q" - and some: "\ b \'. 
ioprog a \ = Some(b,\') \ (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e S ioprog;(P (b#s)))) \ Q " -shows "Q" -using seq -proof(cases "ioprog a \") - case None assume ass:"ioprog a \ = None" show "Q" - apply(rule none[OF ass]) - apply(insert ass, erule_tac ioprog1=ioprog in exec_mbindFPurge_failure[THEN iffD1],rule seq) - done -next - case (Some aa) assume ass:"ioprog a \ = Some aa" show "Q" - apply(insert ass,cases "aa",simp, rename_tac "out" "\'") - apply(erule some) - apply(insert ass,simp) - apply(erule_tac ioprog1=ioprog in exec_mbindFPurge_success[THEN iffD1],rule seq) - done -qed - - -lemma assert_disch1 :" P \ \ (\ \ (x \ assert\<^sub>S\<^sub>E P; M x)) = (\ \ (M True))" -by(auto simp: bind_SE_def assert_SE_def valid_SE_def) - -lemma assert_disch2 :" \ P \ \ \ (\ \ (x \ assert\<^sub>S\<^sub>E P ; M s))" -by(auto simp: bind_SE_def assert_SE_def valid_SE_def) - -lemma assert_disch3 :" \ P \ \ \ (\ \ (assert\<^sub>S\<^sub>E P))" -by(auto simp: bind_SE_def assert_SE_def valid_SE_def) - -lemma assert_disch4 :" P \ \ (\ \ (assert\<^sub>S\<^sub>E P))" -by(auto simp: bind_SE_def assert_SE_def valid_SE_def) - -lemma assert_simp : "(\ \ assert\<^sub>S\<^sub>E P) = P \" -by (meson assert_disch3 assert_disch4) - -lemmas assert_D = assert_simp[THEN iffD1] (* legacy *) - -lemma assert_bind_simp : "(\ \ (x \ assert\<^sub>S\<^sub>E P; M x)) = (P \ \ (\ \ (M True)))" -by(auto simp: bind_SE_def assert_SE_def valid_SE_def split: HOL.if_split_asm) - -lemmas assert_bindD = assert_bind_simp[THEN iffD1] (* legacy *) - - -lemma assume_D : "(\ \ (_ \ assume\<^sub>S\<^sub>E P; M)) \ \ \. (P \ \ (\ \ M) )" -apply(auto simp: bind_SE_def assume_SE_def valid_SE_def split: HOL.if_split_asm) -apply(rule_tac x="Eps P" in exI, auto) -apply(subst Hilbert_Choice.someI,assumption,simp) -done - - -lemma assume_E : -assumes * : "\ \ ( _ \ assume\<^sub>S\<^sub>E P; M) " -and ** : "\ \. P \ \ \ \ M \ Q" -shows "Q" -apply(insert *) -by(insert *[THEN assume_D], auto intro: **) - -lemma assume_E' : -assumes * : "\ \ assume\<^sub>S\<^sub>E P ;- M" -and ** : "\ \. P \ \ \ \ M \ Q" -shows "Q" -by(insert *[simplified "bind_SE'_def", THEN assume_D], auto intro: **) - - -text{* These two rule prove that the SE Monad in connection with the notion of valid sequence -is actually sufficient for a representation of a Boogie-like language. The SBE monad with explicit -sets of states --- to be shown below --- is strictly speaking not necessary (and will therefore -be discontinued in the development). 
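The assert/assume rules above, and the claim that the SE monad together with valid test sequences suffices for a Boogie-like assertion language, can be cross-checked against a small executable model. The following Standard ML sketch is illustrative only (it is not part of the patched sources; all names are chosen here): it directly mirrors unit_SE, bind_SE, valid_SE and assert_SE, while assume_SE, which uses Hilbert choice in HOL, is approximated by passing an explicit witness finder.

    (* ('o,'sigma) MON_SE as an SML type: a stateful, possibly failing computation *)
    type ('o, 's) mon_se = 's -> ('o * 's) option

    fun unit_se x = fn s => SOME (x, s)                    (* unit_SE *)

    fun bind_se m k = fn s =>                              (* bind_SE *)
      (case m s of NONE => NONE | SOME (out, s') => k out s')

    fun valid_se s m =                                     (* valid_SE, for bool-valued results *)
      (case m s of NONE => false | SOME (b, _) => b)

    fun assert_se p = fn s =>                              (* assert_SE: NONE on violation *)
      if p s then SOME (true, s) else NONE

    (* assume_SE relies on Hilbert choice in HOL; executably, a witness has to be
       supplied, e.g. by a constraint solver *)
    fun assume_se choose p = fn _ =>
      (case choose p of NONE => NONE | SOME s' => SOME ((), s'))

For example, valid_se 1 (bind_se (assert_se (fn s => s > 0)) (fn _ => unit_se true)) evaluates to true, while starting in state 0 yields false, which is exactly the content of assert_disch1 and assert_disch2.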
*} - -term "if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi" - -lemma if_SE_D1 : "P \ \ (\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)) = (\ \ B\<^sub>1)" -by(auto simp: if_SE_def valid_SE_def) - -lemma if_SE_D1' : "P \ \ (\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi);-M) = (\ \ (B\<^sub>1;-M))" -by(auto simp: if_SE_def valid_SE_def bind_SE'_def bind_SE_def) - - -lemma if_SE_D2 : "\ P \ \ (\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)) = (\ \ B\<^sub>2)" -by(auto simp: if_SE_def valid_SE_def) - -lemma if_SE_D2' : "\ P \ \ (\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi);-M) = (\ \ B\<^sub>2;-M)" -by(auto simp: if_SE_def valid_SE_def bind_SE'_def bind_SE_def) - - -lemma if_SE_split_asm : -"(\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)) = ((P \ \ (\ \ B\<^sub>1)) \ (\ P \ \ (\ \ B\<^sub>2)))" -by(cases "P \",auto simp: if_SE_D1 if_SE_D2) - -lemma if_SE_split_asm': -"(\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi);-M) = ((P \ \ (\ \ B\<^sub>1;-M)) \ (\ P \ \ (\ \ B\<^sub>2;-M)))" -by(cases "P \",auto simp: if_SE_D1' if_SE_D2') - - -lemma if_SE_split: -"(\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)) = ((P \ \ (\ \ B\<^sub>1)) \ (\ P \ \ (\ \ B\<^sub>2)))" -by(cases "P \", auto simp: if_SE_D1 if_SE_D2) - - -lemma if_SE_split': -"(\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi);-M) = ((P \ \ (\ \ B\<^sub>1;-M)) \ (\ P \ \ (\ \ B\<^sub>2;-M)))" -by(cases "P \", auto simp: if_SE_D1' if_SE_D2') - -lemma if_SE_execE: - assumes A: "\ \ ((if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi))" - and B: "P \ \ \ \ (B\<^sub>1) \ Q" - and C: "\ P \\ \ \ (B\<^sub>2) \ Q" - shows "Q" -by(insert A [simplified if_SE_split],cases "P \", simp_all, auto elim: B C) - - -lemma if_SE_execE': - assumes A: "\ \ ((if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi);-M)" - and B: "P \ \ \ \ (B\<^sub>1;-M) \ Q" - and C: "\ P \\ \ \ (B\<^sub>2;-M) \ Q" - shows "Q" -by(insert A [simplified if_SE_split'],cases "P \", simp_all, auto elim: B C) - - -lemma exec_while : -"(\ \ ((while\<^sub>S\<^sub>E b do c od) ;- M)) = - (\ \ ((if\<^sub>S\<^sub>E b then c ;- (while\<^sub>S\<^sub>E b do c od) else unit\<^sub>S\<^sub>E ()fi) ;- M))" -apply(subst while_SE_unfold) -by(simp add: bind_SE'_def ) - -lemmas exec_whileD = exec_while[THEN iffD1] - -lemma if_SE_execE'': -"\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi) ;- M -\ (P \ \ \ \ B\<^sub>1 ;- M \ Q) -\ (\ P \ \ \ \ B\<^sub>2 ;- M \ Q) -\ Q" -by(auto elim: if_SE_execE') - - -lemma [code]: - "(\ \ m) = (case (m \) of None \ False | (Some (x,y)) \ x)" - apply(simp add: valid_SE_def) - apply(cases "m \ = None", simp_all) - apply(insert not_None_eq, auto) -done - - -text{* Test-Refinements will be stated in terms of the failsave @{term mbind}, opting - more generality. The following lemma allows for an optimization both in - test execution as well as in symbolic execution for an important special case of - the post-codition: Whenever the latter has the constraint that the length of input and - output sequence equal each other (that is to say: no failure occured), failsave mbind - can be reduced to failstop mbind ... 
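The three mbind variants differ only in how a failing step of the i/o program is treated, which is what the exec_mbindFSave/FStop/FPurge rules above capture. An illustrative Standard ML transcription (a sketch, not the patched sources; it reuses the mon_se model introduced earlier):

    (* io : 'iota -> 's -> ('out * 's) option is the step function of the SUT *)
    fun mbind_failsave []       _  s = SOME ([], s)
      | mbind_failsave (a :: S) io s =
          (case io a s of
             NONE => SOME ([], s)                       (* stop, but keep the prefix *)
           | SOME (out, s') =>
               (case mbind_failsave S io s' of
                  NONE => SOME ([out], s')
                | SOME (outs, s'') => SOME (out :: outs, s'')))

    fun mbind_failstop []       _  s = SOME ([], s)
      | mbind_failstop (a :: S) io s =
          (case io a s of
             NONE => NONE                               (* the whole run fails *)
           | SOME (out, s') =>
               (case mbind_failstop S io s' of
                  NONE => NONE
                | SOME (outs, s'') => SOME (out :: outs, s'')))

    fun mbind_failpurge []       _  s = SOME ([], s)
      | mbind_failpurge (a :: S) io s =
          (case io a s of
             NONE => mbind_failpurge S io s             (* drop the failing input *)
           | SOME (out, s') =>
               (case mbind_failpurge S io s' of
                  NONE => NONE
                | SOME (outs, s'') => SOME (out :: outs, s'')))

With a step function that fails on input 0, say fun io x s = if x = 0 then NONE else SOME (x + s, s + 1), the trace [1, 0, 2] started in state 0 yields SOME ([1], 1), NONE and SOME ([1, 3], 2) respectively. In particular, as soon as the postcondition forces the output list to be as long as the input list, the FailSave and FailStop readings coincide, which is the reduction stated by the following lemma.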
*} -lemma mbindFSave_vs_mbindFStop : - "(\ \ (os \ (mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e \s ioprog); return(length \s = length os \ P \s os))) = - (\ \ (os \ (mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p \s ioprog); return(P \s os)))" - apply(rule_tac x=P in spec) - apply(rule_tac x=\ in spec) - proof(induct "\s") - case Nil show ?case by(simp_all add: mbind_try try_SE_def del: Monads.mbind.simps) - case (Cons a \s) show ?case - apply(rule allI, rename_tac "\",rule allI, rename_tac "P") - apply(insert Cons.hyps) - apply(case_tac "ioprog a \") - apply(simp only: exec_mbindFSave_failure exec_mbindFStop_failure, simp) - apply(simp add: split_paired_all del: Monads.mbind.simps ) - apply(rename_tac "\'") - apply(subst exec_mbindFSave_success, assumption) - apply(subst (2) exec_bind_SE_success, assumption) - apply(erule_tac x="\'" in allE) - apply(erule_tac x="\\s s. P (a # \s) (aa # s)" in allE) (* heureka ! *) - apply(simp) - done - qed - - -lemma mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e_vs_mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p: -assumes A: "\ \ \. ioprog \ \ \ None" -shows "(\ \ (os \ (mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e \s ioprog); P os)) = - (\ \ (os \ (mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p \s ioprog); P os))" -proof(induct "\s") - case Nil show ?case by simp -next - case (Cons a \s) - from Cons.hyps - have B:"\ S f \. mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S f \ \ None " by simp - have C:"\\. mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p \s ioprog \ = mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e \s ioprog \" - apply(induct \s, simp) - apply(rule allI,rename_tac "\") - apply(simp add: Monads.mbind'.simps(2)) - apply(insert A, erule_tac x="a" in allE) - apply(erule_tac x="\" and P="\\ . ioprog a \ \ None" in allE) - apply(auto split:option.split) - done - show ?case - apply(insert A,erule_tac x="a" in allE,erule_tac x="\" in allE) - apply(simp, elim exE) - apply(rename_tac "out" "\'") - apply(insert B, erule_tac x=\s in allE, erule_tac x=ioprog in allE, erule_tac x=\' in allE) - apply(subst(asm) not_None_eq, elim exE) - apply(subst Monads.exec_bind_SE_success) - apply(simp split: option.split, auto) - apply(rule_tac s="(\ a b c. a # (fst c)) out \' (aa, b)" in trans, simp,rule refl) - apply(rule_tac s="(\ a b c. (snd c)) out \' (aa, b)" in trans, simp,rule refl) - apply(simp_all) - apply(subst Monads.exec_bind_SE_success, assumption) - apply(subst Monads.exec_bind_SE_success) - apply(rule_tac s="Some (aa, b)" in trans,simp_all add:C) - apply(subst(asm) Monads.exec_bind_SE_success, assumption) - apply(subst(asm) Monads.exec_bind_SE_success) - apply(rule_tac s="Some (aa, b)" in trans,simp_all add:C) - done -qed - - -section{* Valid Test Sequences in the State Exception Backtrack Monad *} -text{* This is still an unstructured merge of executable monad concepts -and specification oriented high-level properties initiating test procedures. *} - - - -section{* Hoare *} -(* STILL VERY EXPERIMENTAL STUFF *) - -definition hoare\<^sub>1 :: "('\ \ bool) \ ('\, '\)MON\<^sub>S\<^sub>E \ ('\ \ '\ \ bool) \ bool" ("\\<^sub>1 ({(1_)}/ (_)/ {(1_)})" 50) -where "(\\<^sub>1{P} M {Q} ) = (\\. 
\ \ (_ \ assume\<^sub>S\<^sub>E P ; x \ M; assert\<^sub>S\<^sub>E (Q x)))" - - -definition hoare\<^sub>2 :: "('\ \ bool) \ ('\, '\)MON\<^sub>S\<^sub>E \ ('\ \ '\ \ bool) \ bool" ("\\<^sub>2 ({(1_)}/ (_)/ {(1_)})" 50) -where "(\\<^sub>2{P} M {Q} ) = (\\. P \ \ (\ \ (x \ M; assert\<^sub>S\<^sub>E (Q x))))" - - - -find_theorems "assert\<^sub>S\<^sub>E" - -find_theorems "assume\<^sub>S\<^sub>E" - -lemma "P \ \ (_ \ assume\<^sub>S\<^sub>E P ; x \ M; assert\<^sub>S\<^sub>E (\\. (x=X) \ Q x \))" -oops - -lemma "\\. \ X. \ \ (_ \ assume\<^sub>S\<^sub>E P ; x \ M; assert\<^sub>S\<^sub>E (\\. x=X \ Q x \))" -oops - - -lemma "\ X. \ \ (_ \ assume\<^sub>S\<^sub>E P ; x \ M; assert\<^sub>S\<^sub>E (\\. x=X \ Q x \)) - \ - \ \ (_ \ assume\<^sub>S\<^sub>E P ; x \ M; assert\<^sub>S\<^sub>E (\\. Q x \))" -unfolding valid_SE_def assume_SE_def assert_SE_def bind_SE_def -by(auto split: if_split HOL.if_split_asm Option.option.split Option.option.split_asm) - - -lemma monadic_sequence_rule: - "\ X \\<^sub>1. (\ \ (_ \ assume\<^sub>S\<^sub>E (\\'. (\=\') \ P \) ; x \ M; assert\<^sub>S\<^sub>E (\\. (x=X) \ (\=\\<^sub>1) \ Q x \))) - \ - (\\<^sub>1 \ (_ \ assume\<^sub>S\<^sub>E (\\. (\=\\<^sub>1) \ Q x \) ; y \ M'; assert\<^sub>S\<^sub>E (\\. R x y \))) - \ - \ \ (_ \ assume\<^sub>S\<^sub>E (\\'. (\=\') \ P \) ; x \ M; y \ M'; assert\<^sub>S\<^sub>E (R x y))" -apply(elim exE impE conjE) -apply(drule Monads.assume_D) -apply(elim exE impE conjE) -unfolding valid_SE_def assume_SE_def assert_SE_def bind_SE_def -apply(auto split: if_split HOL.if_split_asm Option.option.split Option.option.split_asm) -apply (metis (mono_tags, lifting) option.simps(3) someI_ex) -oops - - - -subsection{* Legacy Bindings *} - - -lemma valid_true[simp]: (* legacy: special case *) - "(\ \ (s \ return x ; return (P s))) = P x" by simp - - -(* -lemmas valid_both = exec_mbindFSave (* legacy *) -lemmas valid_success = exec_mbindFSave_success (* legacy *) -lemmas valid_success'' = exec_mbindFSave_success(* legacy *) -lemmas valid_success' = exec_bind_SE_success (* legacy *) -lemmas valid_failure = exec_mbindFSave_failure (* legacy : *) -lemmas valid_failure' = exec_bind_SE_failure (* legacy *) -lemmas valid_failure''=valid_failure (* legacy : *) -lemmas valid_failure''' = exec_mbindFStop_failure (* legacy : *) -lemmas valid_propagate_fail = exec_fail_SE (* legacy *) -lemmas valid_propagate_fail' = exec_fail_SE' (* legacy *) -lemmas valid_propoagate_3' = valid_propagate_fail' (* legacy *) -lemmas valid_propagate_2 = exec_bind_SE_success''(* legacy *) -lemmas valid_propagate_1 = exec_unit_SE (* legacy *) -lemmas valid_successElem = exec_bind_SE_success' (* legacy *) -lemmas valid_propagate_2' = exec_bind_SE_success'''(* legacy *) -lemmas valid_propagate_2'' = exec_bind_SE_success'''' (* legacy *) -lemmas valid_propoagate_3 = exec_unit_SE' (* legacy *) - *) -(* legacy ?: -lemma valid_success'': -"ioprog a \ = Some(b,\') \ - (\ \ (s \ mbind (a#S) ioprog ; return (P s))) = - (\' \ (s \ mbind S ioprog ; return (P (b#s))))" -unfolding valid_SE_def unit_SE_def bind_SE_def -by(cases "mbind S ioprog \'", auto) -*) - -subsection{* State Backtrack Monads *} -text{*This subsection is still rudimentary and as such an interesting -formal analogue to the previous monad definitions. It is doubtful that it is -interesting for testing and as a cmputational stucture at all. -Clearly more relevant is ``sequence'' instead of ``set,'' which would -rephrase Isabelle's internal tactic concept. 
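The backtrack monad introduced next returns, for every state, a collection of possible (result, successor state) pairs. As the remark above suggests, the sequence-based reading is what Isabelle's tactic type actually uses; the following Standard ML sketch (illustrative only, collections modelled by lists) anticipates the definitions of unit_SB and bind_SB given below:

    (* ('o,'sigma) MON_SB modelled with lists; [] represents backtracking failure *)
    type ('o, 's) mon_sb = 's -> ('o * 's) list

    fun unit_sb x = fn s => [(x, s)]

    (* run f, continue with g on every outcome, and collect all results *)
    fun bind_sb f g = fn s =>
      List.concat (map (fn (out, s') => g out s') (f s))

For instance, with val choose = fn s => [(false, s), (true, s)], the computation bind_sb choose (fn b => unit_sb (not b)) applied to () returns [(true, ()), (false, ())], i.e. both branches are explored.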
*} - -type_synonym ('o, '\) MON\<^sub>S\<^sub>B = "'\ \ ('o \ '\) set" - -definition bind_SB :: "('o, '\)MON\<^sub>S\<^sub>B \ ('o \ ('o', '\)MON\<^sub>S\<^sub>B) \ ('o', '\)MON\<^sub>S\<^sub>B" -where "bind_SB f g \ = \ ((\(out, \). (g out \)) ` (f \))" -notation bind_SB ("bind\<^sub>S\<^sub>B") - -definition unit_SB :: "'o \ ('o, '\)MON\<^sub>S\<^sub>B" ("(returns _)" 8) -where "unit_SB e = (\\. {(e,\)})" -notation unit_SB ("unit\<^sub>S\<^sub>B") - -syntax (xsymbols) - "_bind_SB" :: "[pttrn,('o,'\)MON\<^sub>S\<^sub>B,('o','\)MON\<^sub>S\<^sub>B] \ ('o','\)MON\<^sub>S\<^sub>B" - ("(2 _ := _; _)" [5,8,8]8) -translations - "x := f; g" == "CONST bind_SB f (% x . g)" - - - -lemma bind_left_unit_SB : "(x := returns a; m x) = m a" - by (rule ext,simp add: unit_SB_def bind_SB_def) - -lemma bind_right_unit_SB: "(x := m; returns x) = m" - by (rule ext, simp add: unit_SB_def bind_SB_def) - - -lemma bind_assoc_SB: "(y := (x := m; k x); h y) = (x := m; (y := k x; h y))" - by (rule ext, simp add: unit_SB_def bind_SB_def split_def) - - - -subsection{* State Backtrack Exception Monad (vulgo: Boogie-PL) *} -text{* The following combination of the previous two Monad-Constructions -allows for the semantic foundation of a simple generic assertion language -in the style of Schirmers Simpl-Language or Rustan Leino's Boogie-PL language. -The key is to use the exceptional element None for violations of -the assert-statement. *} -type_synonym ('o, '\) MON\<^sub>S\<^sub>B\<^sub>E = "'\ \ (('o \ '\) set) option" - - -definition bind_SBE :: "('o,'\)MON\<^sub>S\<^sub>B\<^sub>E \ ('o \ ('o','\)MON\<^sub>S\<^sub>B\<^sub>E) \ ('o','\)MON\<^sub>S\<^sub>B\<^sub>E" -where "bind_SBE f g = (\\. case f \ of None \ None - | Some S \ (let S' = (\(out, \'). g out \') ` S - in if None \ S' then None - else Some(\ (the ` S'))))" - -syntax (xsymbols) - "_bind_SBE" :: "[pttrn,('o,'\)MON\<^sub>S\<^sub>B\<^sub>E,('o','\)MON\<^sub>S\<^sub>B\<^sub>E] \ ('o','\)MON\<^sub>S\<^sub>B\<^sub>E" - ("(2 _ :\ _; _)" [5,8,8]8) -translations - "x :\ f; g" == "CONST bind_SBE f (% x . g)" - -definition unit_SBE :: "'o \ ('o, '\)MON\<^sub>S\<^sub>B\<^sub>E" ("(returning _)" 8) -where "unit_SBE e = (\\. Some({(e,\)}))" - -definition assert_SBE :: "('\ \ bool) \ (unit, '\)MON\<^sub>S\<^sub>B\<^sub>E" -where "assert_SBE e = (\\. if e \ then Some({((),\)}) - else None)" -notation assert_SBE ("assert\<^sub>S\<^sub>B\<^sub>E") - -definition assume_SBE :: "('\ \ bool) \ (unit, '\)MON\<^sub>S\<^sub>B\<^sub>E" -where "assume_SBE e = (\\. if e \ then Some({((),\)}) - else Some {})" -notation assume_SBE ("assume\<^sub>S\<^sub>B\<^sub>E") - - -definition havoc_SBE :: " (unit, '\)MON\<^sub>S\<^sub>B\<^sub>E" -where "havoc_SBE = (\\. Some({x. 
True}))" -notation havoc_SBE ("havoc\<^sub>S\<^sub>B\<^sub>E") - - -lemma bind_left_unit_SBE : "(x :\ returning a; m x) = m a" - by (rule ext,simp add: unit_SBE_def bind_SBE_def) - -lemma bind_right_unit_SBE: "(x :\ m; returning x) = m" - apply (rule ext, simp add: unit_SBE_def bind_SBE_def) - apply (case_tac "m x", simp_all add:Let_def) - apply (rule HOL.ccontr, simp add: Set.image_iff) - done - - -lemmas aux = trans[OF HOL.neq_commute,OF Option.not_None_eq] - - -lemma bind_assoc_SBE: "(y :\ (x :\ m; k x); h y) = (x :\ m; (y :\ k x; h y))" -proof (rule ext, rename_tac z, simp add: unit_SBE_def bind_SBE_def, - case_tac "m z", simp_all add: Let_def Set.image_iff, safe, goal_cases) print_cases - case (1 z a aa b ab ba aaa baa) then show ?case - using case_prod_conv option.case_eq_if by force -next - case (2 z a aa b ab ba) then show ?case - apply(rule_tac x="(aa,b)" in bexI, simp_all add:split_def) - apply(erule_tac x="(aa,b)" in ballE) - apply(auto simp: aux image_def split_def intro!: rev_bexI) - done -next - case (3 z a aa b) then show ?case - by (metis (no_types, lifting) case_prod_conv option.case_eq_if) -next - case (4 z a aa b) then show ?case - apply(auto simp: aux image_def split_def intro!: rev_bexI) - apply (auto simp add: Let_def split: if_split_asm option.split_asm) - apply fastforce - done -next - case (5 z a aa b ab ba aaa baa) then show ?case - apply simp apply((erule_tac x="(ab,ba)" in ballE)+) - apply(simp_all add: aux Option.not_None_eq, (erule exE)+, simp add:split_def) - apply(erule rev_bexI,case_tac "None\(\p. h (fst p) (snd p))`y",auto simp:split_def) - done - -next - case (6 z a aa b aaa ba) then show ?case - apply simp apply((erule_tac x="(aaa,ba)" in ballE)+) - apply(simp_all add: aux, (erule exE)+, simp add:split_def) - apply(erule rev_bexI, case_tac "None\(\p. h(fst p)(snd p))`y",auto simp:split_def) - done -qed - - -interpretation SBE : Monad unit_SBE bind_SBE - by unfold_locales (simp_all add: bind_left_unit_SBE bind_right_unit_SBE bind_assoc_SBE) - - -definition valid_SBE :: "'\ \ ('a,'\) MON\<^sub>S\<^sub>B\<^sub>E \ bool" (infix "\\<^sub>S\<^sub>B\<^sub>E" 15) -where "\ \\<^sub>S\<^sub>B\<^sub>E m \ (m \ \ None)" -text{* This notation consideres all non-failures as valid. *} - - -lemma assume_assert: "(\ \\<^sub>S\<^sub>B\<^sub>E ( _ :\ assume\<^sub>S\<^sub>B\<^sub>E P ; assert\<^sub>S\<^sub>B\<^sub>E Q)) = (P \ \ Q \)" - by(simp add: valid_SBE_def assume_SBE_def assert_SBE_def bind_SBE_def) - -lemma assert_intro: "Q \ \ \ \\<^sub>S\<^sub>B\<^sub>E (assert\<^sub>S\<^sub>B\<^sub>E Q)" - by(simp add: valid_SBE_def assume_SBE_def assert_SBE_def bind_SBE_def) - -lemma assume_dest: - "\ \ \\<^sub>S\<^sub>B\<^sub>E (x :\ assume\<^sub>S\<^sub>B\<^sub>E Q; M x); Q \' \ \ \ \\<^sub>S\<^sub>B\<^sub>E M ()" - apply(auto simp: valid_SBE_def assume_SBE_def assert_SBE_def bind_SBE_def) - apply(cases "Q \",simp_all) - oops - -text{* This still needs work. What would be needed is a kind - of wp - calculus that comes out of that. So far: nope. *} - -(* TODO: IF THEN ELSE and WHILE + Monadic Laws + Operational Rules. 
*) - - - - - -end diff --git a/src/test-gen/src/test/Observers.thy b/src/test-gen/src/test/Observers.thy deleted file mode 100644 index aa38341..0000000 --- a/src/test-gen/src/test/Observers.thy +++ /dev/null @@ -1,200 +0,0 @@ -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * Observers.thy --- the base testing theory for reactive sequence testing. - * This file is part of HOL-TestGen. - * - * Copyright (c) 2005-2007, ETH Zurich, Switzerland - * 2009 B. Wolff, Univ. Paris-Sud, France - * 2009 Achim D. Brucker, Germany - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ******************************************************************************) -(* $Id:$ *) - -chapter {* Observers managing Abstract State*} - -theory Observers imports Monads -begin - - -section{* IO-stepping Function Transfomers *} - -text{* The following adaption combinator converts an input-output - program under test of type: $\iota \Rightarrow \sigma - \rightharpoonup o \times \sigma$ with program state $\sigma$ into a - state transition program that can be processed by mbind. The key - idea to turn mbind into a test-driver for a reactive system is by - providing an internal state $\sigma'$, managed by the test driver, - and external, problem-specific functions ``rebind'' and - ``substitute'' that operate on this internal state. For example, - this internal state can be instantiated with an environment $var - \rightharpoonup value$. The output (or parts of it) can then be - bound to vars in the environment. In contrast, substitute can then - explicit substitute variables occuring in value representations - into pure values, e.g. is can substitue $c~("X")$ into $c~3$ - provided the environment contained the map with $X \leadsto 3$. 
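Before the formal definitions, the combinator described in this paragraph can be summarised by a direct Standard ML transcription of the observer2 variant defined below (a sketch for orientation, not the patched sources): the observer keeps its own state sigma_obs, for instance an environment of variable bindings, next to the state of the step function under test.

    (* lift  ioprog : 'in_c -> 's -> ('out_c * 's) option
       to a step function over the combined state ('obs * 's) *)
    fun observer2 rebind substitute postcond ioprog =
      fn in_a => fn (obs, s) =>
         let val in_c = substitute obs in_a        (* instantiate explicit variables *)
         in (case ioprog in_c s of
               NONE => NONE                        (* SUT failure, e.g. a timeout *)
             | SOME (out_c, s') =>
                 let val obs' = rebind obs out_c   (* store parts of the output *)
                 in if postcond obs' s' in_c out_c
                    then SOME (out_c, (obs', s'))
                    else NONE                      (* postcondition failure *)
                 end)
         end

Because the result is again a step function in the state-exception monad, the combined observer plus system under test can be run by the very same mbind-based test drivers as before.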
*} - -text{* The state of the test-driver consists of two parts: the state - of the observer (or: adaptor) $\sigma$ and the internal state $\sigma'$ of the - the step-function of the system under test $ioprog$ is allowed to use. *} - -definition observer :: "['\ \ 'o \ '\, '\ \ '\ \ '\, '\\'\' \ '\ \ 'o \ bool] - \ ('\ \ '\' \ 'o \'\') - \ ('\ \ ('\\'\' \ '\\'\'))" - -where "observer rebind substitute postcond ioprog = - (\ input. (\ (\, \'). let input'= substitute \ input in - case ioprog input' \' of - None \ None (* ioprog failure - eg. timeout ... *) - | Some (output, \''') \ let \'' = rebind \ output in - (if postcond (\'',\''') input' output - then Some(\'', \''') - else None (* postcond failure *) )))" - -text{* The subsequent $observer$ version is more powerful: it admits also preconditions -of $ioprog$, which make reference to the observer state $\sigma_{obs}$. The observer-state -may contain an environment binding values to explicit variables. In such a scenario, the -$precond\_solve$ may consist of a \emph{solver} that constructs a solution from -\begin{enumerate} -\item this environment, -\item the observable state of the $ioprog$, -\item the abstract input - (which may be related to a precondition which contains references to explicit - variables) -\end{enumerate} -such that all the explicit variables contained in the preconditions and the -explicit variables in the abstract input are substituted against values -that make the preconditions true. The values must be stored in the -environment and are reported in the observer-state $\sigma_{obs}$. -*} - -definition observer1 :: "['\_obs \ 'o_c \ '\_obs, - '\_obs \ '\ \ '\_a \ ('\_c \ '\_obs), - '\_obs \ '\ \ '\_c \ 'o_c \ bool] - \ ('\_c \ ('o_c, '\)MON\<^sub>S\<^sub>E) - \ ('\_a \ ('o_c, '\_obs \'\)MON\<^sub>S\<^sub>E) " - -where "observer1 rebind precond_solve postcond ioprog = - (\ in_a. (\ (\_obs, \). let (in_c,\_obs') = precond_solve \_obs \ in_a - in case ioprog in_c \ of - None \ None (* ioprog failure - eg. timeout ... *) - | Some (out_c, \') \(let \_obs'' = rebind \_obs' out_c - in if postcond \_obs'' \' in_c out_c - then Some(out_c, (\_obs', \')) - else None (* postcond failure *) )))" - - -definition observer2 :: "['\_obs \ 'o_c \ '\_obs, '\_obs \ '\_a \ '\_c, '\_obs \ '\ \ '\_c \ 'o_c \ bool] - \ ('\_c \ ('o_c, '\)MON\<^sub>S\<^sub>E) - \ ('\_a \ ('o_c, '\_obs \'\)MON\<^sub>S\<^sub>E) " - -where "observer2 rebind substitute postcond ioprog = - (\ in_a. (\ (\_obs, \). let in_c = substitute \_obs in_a - in case ioprog in_c \ of - None \ None (* ioprog failure - eg. timeout ... *) - | Some (out_c, \') \(let \_obs' = rebind \_obs out_c - in if postcond \_obs' \' in_c out_c - then Some(out_c, (\_obs', \')) - else None (* postcond failure *) )))" - -text{* Note that this version of the observer is just a -monad-transformer; it transforms the i/o stepping function $ioprog$ -into another stepping function, which is the combined sub-system -consisting of the observer and, for example, a program under test -$\PUT$. The observer takes the \emph{abstract} input $in_a$, -substitutes explicit variables in it by concrete values stored by its -own state $\sigma_{obs}$ and constructs \emph{concrete} input $in_c$, -runs $ioprog$ in this context, and evaluates the return: the concrete -output $out_c$ and the successor state $\sigma'$ are used to extract -from concrete output concrete values and stores them inside its own -successor state $\sigma_{obs}'$. 
Provided that a post-condition is -passed succesfully, the output and the combined successor-state is -reported as success. - -Note that we made the following testability assumptions: -\begin{enumerate} -\item $ioprog$ behaves wrt. to the reported state and input as a function, i.e. it behaves - deterministically, and -\item it is not necessary to destinguish internal failure and post-condition-failure. - (Modelling Bug? This is superfluous and blind featurism ... One could do this by - introducing an own "weakening"-monad endo-transformer.) -\end{enumerate} - -*} - -text{* observer2 can actually be decomposed into two combinators - one -dealing with the management of explicit variables and one that tackles -post-conditions. *} - -definition observer3 :: "['\_obs \ 'o \ '\_obs, '\_obs \ '\_a \ '\_c] - \ ('\_c \ ('o, '\)MON\<^sub>S\<^sub>E) - \ ('\_a \ ('o, '\_obs \'\)MON\<^sub>S\<^sub>E) " - -where "observer3 rebind substitute ioprog = - (\ in_a. (\ (\_obs, \). - let in_c = substitute \_obs in_a - in case ioprog in_c \ of - None \ None (* ioprog failure - eg. timeout ... *) - | Some (out_c, \') \(let \_obs' = rebind \_obs out_c - in Some(out_c, (\_obs', \')) )))" - - - -definition observer4 :: "['\ \ '\ \ 'o \ bool] - \ ('\ \ ('o, '\)MON\<^sub>S\<^sub>E) - \ ('\ \ ('o, '\)MON\<^sub>S\<^sub>E)" - -where "observer4 postcond ioprog = - (\ input. (\ \. case ioprog input \ of - None \ None (* ioprog failure - eg. timeout ... *) - | Some (output, \') \ (if postcond \' input output - then Some(output, \') - else None (* postcond failure *) )))" - -text{* The following lemma explains the relationsship between $observer2$ and the -decoposed versions $observer3$ and $observer4$. The full equality does not hold - -the reason is that the two kinds of preconditions are different in a subtle way: -the postcondition may make reference to the abstract state. (See our example -\verb+Sequence_test+ based on a symbolic environment in the observer state.) -If the postcondition does not do this, they are equivalent. *} - -find_theorems name:"prod" name:"case" name:"beta" -lemma observer_decompose: - " observer2 r s (\ x. pc) io = (observer3 r s (observer4 pc io))" - apply(rule ext, rule ext) - apply(auto simp: observer2_def observer3_def - observer4_def Let_def case_prod_beta) - apply(case_tac "io (s a x) b", auto) -done - -end diff --git a/src/test-gen/src/test/TestEnv.ML b/src/test-gen/src/test/TestEnv.ML deleted file mode 100644 index ce89494..0000000 --- a/src/test-gen/src/test/TestEnv.ML +++ /dev/null @@ -1,458 +0,0 @@ -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * TestEnv.ML --- environment for configuration parameters - * This file is part of HOL-TestGen. - * - * Copyright (c) 2005-2007 ETH Zurich, Switzerland - * 2008-2013 Achim D. Brucker, Germany - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. 
- * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ******************************************************************************) - -signature TESTENV = -sig - type test_derivation_rule = Proof.context -> int -> thm -> (int * tactic) list - - (* Core data structure for the test environment. - Contains tables, parameters, all sorts of "screws" - to control the test generation process. *) - type testinfo = - {test_thm_tab : thm Symtab.table, (* name to testthm *) - - type_candicates : typ list, (* type grounding list *) - - cod_term_tab : (typ list -> term) Symtab.table, - (* random generators for terms, - tab assigns to each type constuctor - name a generator that gets the - instances of type constructor as args.*) - abstest_data_tab: (thm list) Symtab.table, - (* assigns name to list of - abstract (local) test data statements *) - test_data_tab : (thm list) Symtab.table, - - pre_safe_tac : Proof.context -> tactic, - pre_normalize_TNF_tac : Proof.context -> tactic, - pre_minimize_TNF_tac : Proof.context -> tactic, - test_derivation_rules : test_derivation_rule Unsynchronized.ref list - }; - - val prelude : bool Config.T - val wrapper : bool Config.T - val toString : string Config.T - val setup_code : string Config.T - val dataconv_code : string Config.T - - val depth : int Config.T (* data structure depth *) - val breadth : int Config.T (* no of free's in testspec *) - - val bound : int Config.T (* global bound for data statements *) - val case_breadth : int Config.T (* no of test data per case, - weakening uniformity *) - val iterations : int Config.T (* number of random attempts to solve a case *) - - val type_range_bound : int Config.T (* effectively used elements - type grounding list *) - - val SMT : bool Config.T - val get_smt_facts : Proof.context -> thm list - - type testenv - val rep_testenv : testenv -> testinfo - - val get_data : Proof.context -> testenv - val map_data : (testenv -> testenv) -> Context.generic -> Context.generic - val get_data_global : theory -> testenv - val map_data_global : (testenv -> testenv) -> theory -> theory - val print_data : Proof.context -> unit - - - val add_test_case : string * thm -> testenv -> testenv - (* add test theorem of "name". - The test-theorem is assumed to consist of either clauses in TNF or - of Test Hypotheses. 
*) - - val del_test_case : string -> testenv -> testenv - val add_test_data : string * thm -> testenv -> testenv - (* used to insert abstract test data registrated under - "string" into the test environment; abstract test data - (or "local test theorems") were used in gen_test_data *) - val del_test_data : string -> testenv -> testenv - (* erase abstract test data from test environment *) - - val thm_tab : testenv -> thm Symtab.table - val absdata_tab : testenv -> thm list Symtab.table - val jdmt_tab : testenv -> thm list Symtab.table - - val thm_tab_update : thm Symtab.table -> testenv -> testenv - val jdmt_tab_update : thm list Symtab.table -> testenv -> testenv - - val get_pre_safe_tac : testenv -> Proof.context -> tactic - val get_pre_normalize_TNF_tac : testenv -> Proof.context -> tactic - val get_pre_minimize_TNF_tac : testenv -> Proof.context -> tactic - - val get_test_derivation_rules : testenv -> test_derivation_rule list - val add_test_derivation_rule : test_derivation_rule -> testenv -> testenv - - val type_candidates_update : typ list -> testenv -> testenv - val pre_safe_tac_update : (Proof.context -> tactic) -> testenv -> testenv - val pre_normalize_TNF_tac_update : (Proof.context -> tactic) -> testenv -> testenv - val pre_minimize_TNF_tac_update : (Proof.context -> tactic) -> testenv -> testenv - - val setup: theory -> theory -end - -structure TestEnv : TESTENV = -struct - -open HOLogic; - -type test_derivation_rule = Proof.context -> int -> thm -> (int * tactic) list - -type testinfo ={test_thm_tab : thm Symtab.table, (* name to testthm *) - - type_candicates : typ list, (* type grounding list *) - - cod_term_tab : (typ list -> term) Symtab.table, - (* random generators for terms, - tab assigns to each type constuctor - name a generator that gets the - instances of type constructor as args.*) - abstest_data_tab: (thm list) Symtab.table, - (* assigns name to list of - data statements *) - test_data_tab : (thm list) Symtab.table, - pre_safe_tac : Proof.context -> tactic, - pre_normalize_TNF_tac : Proof.context -> tactic, - pre_minimize_TNF_tac : Proof.context -> tactic, - test_derivation_rules : test_derivation_rule Unsynchronized.ref list - }; - -val (prelude, prelude_setup) = Attrib.config_bool @{binding testgen_prelude} (K true); -val (wrapper, wrapper_setup) = Attrib.config_bool @{binding testgen_wrapper} (K true); -val (toString, toString_setup) = Attrib.config_string @{binding testgen_toString} (K ""); -val (setup_code, setup_code_setup) = Attrib.config_string @{binding testgen_setup_code} (K ""); -val (dataconv_code, dataconv_code_setup) = Attrib.config_string @{binding testgen_dataconv_code} (K ""); -val (depth, depth_setup) = Attrib.config_int @{binding testgen_depth} (K 3); -val (breadth, breadth_setup) = Attrib.config_int @{binding testgen_breadth} (K 1); -val (bound, bound_setup) = Attrib.config_int @{binding testgen_bound} (K 200); -val (case_breadth, case_breadth_setup) = Attrib.config_int @{binding testgen_case_breadth} (K 1); -val (iterations, iterations_setup) = Attrib.config_int @{binding testgen_iterations} (K 25); -val (type_range_bound, type_range_bound_setup) = Attrib.config_int @{binding testgen_type_range_bound} (K 1); -val (SMT, SMT_setup) = Attrib.config_bool @{binding testgen_SMT} (K false); - -structure TestGen_SMT - = Named_Thms (val name = @{binding "testgen_smt_facts"} val description = "Facts for HOL-TestGen SMT solving"); - -val get_smt_facts = TestGen_SMT.get; - -datatype testenv = Testenv of testinfo - -fun rep_testenv (Testenv X) = X; - - 
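For orientation, the following sketch shows a typical use of this interface once the structure is in scope (hypothetical usage, to be evaluated in a separate Isabelle/ML block; the name and the theorem refl are chosen purely for illustration):

    (* register a test theorem under a name ... *)
    val register =
      TestEnv.map_data_global (TestEnv.add_test_case ("my_test_case", @{thm refl}))

    (* ... and retrieve it again through the exported theorem table *)
    fun lookup thy =
      Symtab.lookup (TestEnv.thm_tab (TestEnv.get_data_global thy)) "my_test_case"

Here register has type theory -> theory and is typically applied via Theory.setup; lookup returns NONE if no test theorem of that name has been stored.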
-val initial_testenv = Testenv - {test_thm_tab = Symtab.empty, - - type_candicates = [HOLogic.intT,HOLogic.unitT, - HOLogic.boolT, - HOLogic.mk_setT HOLogic.intT, - HOLogic.listT HOLogic.intT], - cod_term_tab = Symtab.empty, - abstest_data_tab= Symtab.empty, - test_data_tab = Symtab.empty, - pre_safe_tac = fn ctxt => all_tac, - pre_normalize_TNF_tac = fn ctxt => all_tac, - pre_minimize_TNF_tac = fn ctxt => all_tac, - test_derivation_rules = [] -}; - -fun merge_testenv - (Testenv{test_thm_tab=tttab, - type_candicates=tc, - cod_term_tab=ctt, abstest_data_tab=tdt, - test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, - test_derivation_rules = tdr}, - Testenv{test_thm_tab=tttab', - type_candicates=tc', - cod_term_tab=ctt', abstest_data_tab=tdt', - test_data_tab=tjt',pre_safe_tac=pst',pre_normalize_TNF_tac=pnt',pre_minimize_TNF_tac=pmt', - test_derivation_rules = tdr'}) = - - Testenv{test_thm_tab = Symtab.merge (Thm.eq_thm) (tttab,tttab'), - type_candicates = distinct (op=) (tc@tc'), - cod_term_tab = Symtab.empty, - (* don't know what a senseful - definition of override could - be; therefore constraint to the - simplest and most conservative - one *) - abstest_data_tab = Symtab.merge (fn (t1, t2) => List.all Thm.eq_thm (map2 (fn a => fn b => (a, b)) t1 t2)) (tdt,tdt'), - test_data_tab = Symtab.merge (fn (t1, t2) => List.all Thm.eq_thm (map2 (fn a => fn b => (a, b)) t1 t2)) (tjt,tjt'), - pre_safe_tac = pst, - pre_normalize_TNF_tac = pnt, - pre_minimize_TNF_tac = pmt, - test_derivation_rules = distinct (op=) (tdr@tdr')}; - - -(* Context data *) - -structure Data = Generic_Data -( - type T = testenv - val empty = initial_testenv - val extend = I - val merge = merge_testenv -); - -val get_data = Data.get o Context.Proof; -val map_data = Data.map; - -val get_data_global = Data.get o Context.Theory; -val map_data_global = Context.theory_map o map_data; - -fun print_data ctxt = - let val {test_thm_tab, - type_candicates, - cod_term_tab, abstest_data_tab,test_data_tab,pre_safe_tac, - pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules} = - rep_testenv (get_data ctxt) - val depth = Config.get ctxt depth - val breadth = Config.get ctxt breadth - val bound = Config.get ctxt bound - val iterations = Config.get ctxt iterations - fun H (n,thm) = [Pretty.str (n^":"), Display.pretty_thm ctxt thm] - fun H2(n,thms)= - [Pretty.str (n^":"), Pretty.str "======"] @ - map (Display.pretty_thm ctxt) thms @ [Pretty.str "======"] - in [Pretty.str ">>> Testenvironment >>>>>>>>>>>>>>>>>", - Pretty.str "+++ Control Data: +++++++++++++++++++", - Pretty.str ("*** default depth: "^Int.toString(depth)), - Pretty.str ("*** default breadth: "^Int.toString(breadth)), - Pretty.str ("*** bound: "^Int.toString(bound)), - Pretty.str ("*** iterations: "^Int.toString(iterations)), - Pretty.str "+++ Testtheoremtable: +++++++++++++++"] @ - maps H (Symtab.dest test_thm_tab) @ - [Pretty.str "+++ Testjudgements: +++++++++++++++++"] @ - maps H2 (Symtab.dest test_data_tab) @ - [Pretty.str "+++ Testdatatable: ++++++++++++++++++"] @ - maps H2 (Symtab.dest abstest_data_tab) @ - [Pretty.str "<<< Testenvironment <<<<<<<<<<<<<<<<<"] - end |> Pretty.chunks |> Pretty.writeln; - - -fun add_test_case (name,thm) - (Testenv {test_thm_tab, - type_candicates, - cod_term_tab, abstest_data_tab,test_data_tab,pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, - test_derivation_rules}) = - Testenv({test_thm_tab=Symtab.update(name,thm)test_thm_tab, - type_candicates=type_candicates, - 
cod_term_tab=cod_term_tab, - abstest_data_tab=abstest_data_tab, - test_data_tab=test_data_tab, pre_safe_tac=pre_safe_tac,pre_normalize_TNF_tac=pre_normalize_TNF_tac, - pre_minimize_TNF_tac=pre_minimize_TNF_tac, - test_derivation_rules = test_derivation_rules}); - - -fun del_test_case name (Testenv {test_thm_tab, - type_candicates,cod_term_tab, abstest_data_tab, - test_data_tab,pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, - test_derivation_rules}) = - Testenv({test_thm_tab=Symtab.delete_safe name test_thm_tab, - type_candicates=type_candicates, - cod_term_tab=cod_term_tab, abstest_data_tab=abstest_data_tab, - test_data_tab=test_data_tab, pre_safe_tac=pre_safe_tac,pre_normalize_TNF_tac=pre_normalize_TNF_tac, - pre_minimize_TNF_tac=pre_minimize_TNF_tac, - test_derivation_rules = test_derivation_rules}); - - - - -fun add_test_data (name,thm) - (Testenv {test_thm_tab, - type_candicates, - cod_term_tab, abstest_data_tab,test_data_tab,pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, - test_derivation_rules}) = - Testenv({test_thm_tab=test_thm_tab, - type_candicates=type_candicates, - cod_term_tab=cod_term_tab, - abstest_data_tab=Symtab.cons_list (name,thm) abstest_data_tab, - test_data_tab=test_data_tab,pre_safe_tac=pre_safe_tac,pre_normalize_TNF_tac=pre_normalize_TNF_tac, - pre_minimize_TNF_tac=pre_minimize_TNF_tac, - test_derivation_rules = test_derivation_rules}); - - -fun del_test_data name - (Testenv {test_thm_tab, - type_candicates, - cod_term_tab,abstest_data_tab,test_data_tab,pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, - test_derivation_rules}) = - Testenv({test_thm_tab=test_thm_tab, - type_candicates=type_candicates, - cod_term_tab=cod_term_tab, - abstest_data_tab=Symtab.delete_safe name abstest_data_tab, - test_data_tab=test_data_tab,pre_safe_tac=pre_safe_tac,pre_normalize_TNF_tac=pre_normalize_TNF_tac, - pre_minimize_TNF_tac=pre_minimize_TNF_tac, - test_derivation_rules = test_derivation_rules}); - - -fun thm_tab te = #test_thm_tab (rep_testenv te) -fun absdata_tab te = #abstest_data_tab(rep_testenv te) -fun jdmt_tab te = #test_data_tab(rep_testenv te) -(* In the sequel we also use the term "test judgement" synonymously - for "test data statement". 
*) - -fun thm_tab_update thm_tab - (Testenv{test_thm_tab=tttab, - type_candicates=tc, - cod_term_tab=ctt, abstest_data_tab=tdt, - test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, - test_derivation_rules = tdr}) = - (Testenv{test_thm_tab=thm_tab, - type_candicates=tc, - cod_term_tab=ctt, abstest_data_tab=tdt, - test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, - test_derivation_rules = tdr}); - -fun jdmt_tab_update jdmt_tab - (Testenv{test_thm_tab=tttab, - type_candicates=tc, - cod_term_tab=ctt, abstest_data_tab=tdt, - test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, - test_derivation_rules = tdr}) = - (Testenv{test_thm_tab=tttab, - type_candicates=tc, - cod_term_tab=ctt, abstest_data_tab=tdt, - test_data_tab=jdmt_tab,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, - test_derivation_rules = tdr}); - - -fun type_candidates_update (type_candidates) - (Testenv{test_thm_tab=tttab, - type_candicates=tc, - cod_term_tab=ctt, abstest_data_tab=tdt, - test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, - test_derivation_rules = tdr}) - = - (Testenv{test_thm_tab=tttab,type_candicates=type_candidates, - cod_term_tab=ctt, abstest_data_tab=tdt, - test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, - test_derivation_rules = tdr}); - -fun pre_safe_tac_update (pre_safe_tat) - (Testenv{test_thm_tab=tttab, - type_candicates=tc, - cod_term_tab=ctt, abstest_data_tab=tdt, - test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, - test_derivation_rules = tdr}) - = - (Testenv{test_thm_tab=tttab,type_candicates=tc, - cod_term_tab=ctt, abstest_data_tab=tdt, - test_data_tab=tjt,pre_safe_tac=pre_safe_tat,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, - test_derivation_rules = tdr}); - - -fun pre_normalize_TNF_tac_update (pre_normalize_TNF_tac) - (Testenv{test_thm_tab=tttab, - type_candicates=tc, - cod_term_tab=ctt, abstest_data_tab=tdt, - test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, - test_derivation_rules = tdr}) - = - (Testenv{test_thm_tab=tttab, - type_candicates=tc, - cod_term_tab=ctt, abstest_data_tab=tdt, - test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pre_normalize_TNF_tac, - pre_minimize_TNF_tac=pmt, - test_derivation_rules = tdr}); - - -fun pre_minimize_TNF_tac_update (pre_minimize_TNF_tac) - (Testenv{test_thm_tab=tttab, - type_candicates=tc, - cod_term_tab=ctt, abstest_data_tab=tdt, - test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt, - pre_minimize_TNF_tac=pmt, - test_derivation_rules = tdr}) - = - (Testenv{test_thm_tab=tttab, - type_candicates=tc, - cod_term_tab=ctt, abstest_data_tab=tdt, - test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt, - pre_minimize_TNF_tac=pre_minimize_TNF_tac, - test_derivation_rules = tdr}); - - - - -fun get_pre_safe_tac (Testenv{pre_safe_tac=pst, ...}) = pst; -fun get_pre_normalize_TNF_tac (Testenv{pre_normalize_TNF_tac=pnt, ...}) = pnt; -fun get_pre_minimize_TNF_tac (Testenv{pre_minimize_TNF_tac=pmt, ...}) = pmt; - -fun get_test_derivation_rules (Testenv{test_derivation_rules = tdr, ...}) = map (op !) 
tdr; - -fun add_test_derivation_rule (tdr) - (Testenv{test_thm_tab=tttab, - type_candicates=tc, - cod_term_tab=ctt, abstest_data_tab=tdt, - test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt, - pre_minimize_TNF_tac=pmt, - test_derivation_rules = tdrs}) - = - (Testenv{test_thm_tab=tttab, - type_candicates=tc, - cod_term_tab=ctt, abstest_data_tab=tdt, - test_data_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt, - pre_minimize_TNF_tac=pmt, - test_derivation_rules = (Unsynchronized.ref tdr)::tdrs}); - -(* module setup *) - -val setup = - prelude_setup #> - wrapper_setup #> - toString_setup #> - setup_code_setup #> - dataconv_code_setup #> - depth_setup #> - breadth_setup #> - bound_setup #> - case_breadth_setup #> - iterations_setup #> - SMT_setup #> - TestGen_SMT.setup; - -end (*struct *); - - diff --git a/src/test-gen/src/test/TestEnv.thy b/src/test-gen/src/test/TestEnv.thy deleted file mode 100644 index e5ad3c7..0000000 --- a/src/test-gen/src/test/TestEnv.thy +++ /dev/null @@ -1,726 +0,0 @@ -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * TestEnv.ML --- environment for configuration parameters - * This file is part of HOL-TestGen. - * - * Copyright (c) 2005-2007 ETH Zurich, Switzerland - * 2008-2013 Achim D. Brucker, Germany - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ******************************************************************************) -theory TestEnv -imports Main -begin -ML{* - -(* TODO : not correctly implemented : no ABSTRACT testcases. *) - -signature TESTENV = -sig - type test_derivation_rule = Proof.context -> int -> thm -> (int * tactic) list - - (* Core data structure for the test environment. - Contains tables, parameters, all sorts of "screws" - to control the test generation process. 
*) - type testinfo = - {test_thm_tab : thm Symtab.table, (* name to testthm *) - - test_thm_inst_tab : (thm list) Symtab.table, (* instantiated testthm *) - - type_candicates : typ list, (* type grounding list *) - - cod_term_tab : (typ list -> term) Symtab.table, - (* random generators for terms, - tab assigns to each type constuctor - name a generator that gets the - instances of type constructor as args.*) - abstest_data_tab : (thm list) Symtab.table, - (* assigns name to list of - abstract (local) test data statements given - by the user *) - - test_data_tab : (cterm list) Symtab.table, - (* concrete test data table *) - unsolved_PO_tab : (cterm list) Symtab.table, - (* proof obligations left unresolved in selection phase *) - test_thyps_tab : (cterm list) Symtab.table, - (* test-hypothesis *) - - - pre_safe_tac : Proof.context -> tactic, - pre_normalize_TNF_tac : Proof.context -> tactic, - pre_minimize_TNF_tac : Proof.context -> tactic, - test_derivation_rules : test_derivation_rule Unsynchronized.ref list - }; - - val prelude : bool Config.T - val wrapper : bool Config.T - val toString : string Config.T - val setup_code : string Config.T - val dataconv_code : string Config.T - - val depth : int Config.T (* data structure depth *) - val steps : int Config.T (* no of iterations of splittings *) - - val bound : int Config.T (* global bound for data statements *) - val case_steps : int Config.T (* no of test data per case, - weakening uniformity *) - val iterations : int Config.T (* number of random attempts to solve a case *) - - val type_range_bound : int Config.T (* effectively used elements - type grounding list *) - - val SMT : bool Config.T (* use the SMT backend (default: false) *) - val SMT2 : bool Config.T (* use the SMT2 backend (default: false ; false if SMT is set to true) *) - val get_smt_facts : Proof.context -> thm list - - val pon : int Config.T (* test data generation for only the chosen PO; - 0 for all the POs *) - val smt_model : bool Config.T - - val use_metis : bool Config.T (* if true, use metis to check SMT models *) - val use_smt : bool Config.T (* if true, use smt to check SMT models *) - - type testenv - val rep_testenv : testenv -> testinfo - - val get_testenv : Proof.context -> testenv - val map_data : (testenv -> testenv) -> Context.generic -> Context.generic - val get_testenv_global : theory -> testenv - val map_data_global : (testenv -> testenv) -> theory -> theory - val print_testenv : string -> Proof.context -> unit - val get_test_data : Proof.context -> string -> thm list option - - val get_test_data_tab : testenv -> cterm list Symtab.table - val get_unsolved_PO_tab: testenv -> cterm list Symtab.table - val get_test_thyps_tab : testenv -> cterm list Symtab.table - - val add_test_data_tab : string * cterm -> testenv -> testenv - val add_unsolved_PO_tab: string * cterm -> testenv -> testenv - val add_test_thyps_tab : string * cterm -> testenv -> testenv - - val set_test_data_tab : string * (cterm list) -> testenv -> testenv - val set_unsolved_PO_tab: string * (cterm list) -> testenv -> testenv - val set_test_thyps_tab : string * (cterm list) -> testenv -> testenv - - val add_test_case : string * thm -> testenv -> testenv - (* add test theorem of "name". - The test-theorem is assumed to consist of either clauses in TNF or - of Test Hypotheses. 
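All of the Config.T values declared in this signature are ordinary Isabelle configuration options: users set them with the usual attribute syntax, e.g. declare [[testgen_iterations = 50]], and ML code reads them from the current context. A hypothetical helper (not part of the patch, assuming the TestEnv structure defined further below is loaded) illustrating the read side:

    fun report_params ctxt =
      writeln ("testgen: depth = " ^ Int.toString (Config.get ctxt TestEnv.depth) ^
               ", iterations = " ^ Int.toString (Config.get ctxt TestEnv.iterations))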
*) - - val del_test_case : string -> testenv -> testenv - val add_abstest_data : string * thm -> testenv -> testenv - (* used to insert abstract test data registrated under - "string" into the test environment; abstract test data - (or "local test theorems") were used in gen_test_data *) - val del_abstest_data : string -> testenv -> testenv - (* erase abstract test data from test environment *) - - val get_test_thm_tab : testenv -> thm Symtab.table - val get_absdata_tab : testenv -> thm list Symtab.table - val get_test_thm_inst_tab : testenv -> thm list Symtab.table - - val test_thm_tab_update : thm Symtab.table -> testenv -> testenv - val test_thm_inst_tab_update : thm list Symtab.table -> testenv -> testenv - - val get_pre_safe_tac : testenv -> Proof.context -> tactic - val get_pre_normalize_TNF_tac : testenv -> Proof.context -> tactic - val get_pre_minimize_TNF_tac : testenv -> Proof.context -> tactic - - val get_test_derivation_rules : testenv -> test_derivation_rule list - val add_test_derivation_rule : test_derivation_rule -> testenv -> testenv - - val type_candidates_update : typ list -> testenv -> testenv - val pre_safe_tac_update : (Proof.context -> tactic) -> testenv -> testenv - val pre_normalize_TNF_tac_update : (Proof.context -> tactic) -> testenv -> testenv - val pre_minimize_TNF_tac_update : (Proof.context -> tactic) -> testenv -> testenv - - val print_test_case : Proof.context -> string -> int option -> string - val print_test_data : Proof.context -> string -> int option -> string - val print_test_hyps : Proof.context -> string -> int option -> string - val print_unsolved_pos : Proof.context -> string -> int option -> string - - val setup: theory -> theory -end -*} - -ML{* - -structure TestEnv : TESTENV = -struct - -open HOLogic; - -type test_derivation_rule = Proof.context -> int -> thm -> (int * tactic) list - -type testinfo ={test_thm_tab : thm Symtab.table, (* name to testthm *) - - test_thm_inst_tab : (thm list) Symtab.table, - - type_candicates : typ list, (* type grounding list *) - - cod_term_tab : (typ list -> term) Symtab.table, - (* random generators for terms, - tab assigns to each type constuctor - name a generator that gets the - instances of type constructor as args.*) - abstest_data_tab : (thm list) Symtab.table, - (* assigns name to list of data statements *) - test_data_tab : (cterm list) Symtab.table, - unsolved_PO_tab : (cterm list) Symtab.table, - test_thyps_tab : (cterm list) Symtab.table, - pre_safe_tac : Proof.context -> tactic, - pre_normalize_TNF_tac : Proof.context -> tactic, - pre_minimize_TNF_tac : Proof.context -> tactic, - test_derivation_rules : test_derivation_rule Unsynchronized.ref list - }; - -val (prelude, prelude_setup) = Attrib.config_bool @{binding testgen_prelude} (K true); -val (wrapper, wrapper_setup) = Attrib.config_bool @{binding testgen_wrapper} (K true); -val (toString, toString_setup) = Attrib.config_string @{binding testgen_toString} (K ""); -val (setup_code, setup_code_setup) = Attrib.config_string @{binding testgen_setup_code} (K ""); -val (dataconv_code, dataconv_code_setup) = Attrib.config_string @{binding testgen_dataconv_code} (K ""); -val (depth, depth_setup) = Attrib.config_int @{binding testgen_depth} (K 3); -val (steps, steps_setup) = Attrib.config_int @{binding testgen_steps} (K 1); -val (bound, bound_setup) = Attrib.config_int @{binding testgen_bound} (K 200); -val (case_steps, case_steps_setup) = Attrib.config_int @{binding testgen_case_steps} (K 1); -val (iterations, iterations_setup) = Attrib.config_int 
@{binding testgen_iterations} (K 25); -val (type_range_bound, type_range_bound_setup) = Attrib.config_int @{binding testgen_type_range_bound} (K 1); -val (SMT, SMT_setup) = Attrib.config_bool @{binding testgen_SMT} (K false); -val (SMT2, SMT2_setup) = Attrib.config_bool @{binding testgen_SMT2} (K false); -val (pon, pon_setup) = Attrib.config_int @{binding testgen_pon} (K 0); -val (smt_model, smt_model_setup) = Attrib.config_bool @{binding testgen_smt_model} (K false); -val (use_metis, use_metis_setup) = Attrib.config_bool @{binding testgen_use_metis} (K false); -val (use_smt, use_smt_setup) = Attrib.config_bool @{binding testgen_use_smt} (K false); - -structure TestGen_SMT - = Named_Thms (val name = @{binding "testgen_smt_facts"} - val description = "Facts for HOL-TestGen SMT solving"); - -val get_smt_facts = TestGen_SMT.get; - -datatype testenv = Testenv of testinfo - -fun rep_testenv (Testenv X) = X; - - -val initial_testenv = Testenv - {test_thm_tab = Symtab.empty, - type_candicates = [HOLogic.intT,HOLogic.unitT, - HOLogic.boolT, - HOLogic.mk_setT HOLogic.intT, - HOLogic.listT HOLogic.intT], - cod_term_tab = Symtab.empty, - abstest_data_tab= Symtab.empty, - test_thm_inst_tab = Symtab.empty, - test_data_tab = Symtab.empty, - unsolved_PO_tab = Symtab.empty, - test_thyps_tab = Symtab.empty, - - pre_safe_tac = fn ctxt => all_tac, - pre_normalize_TNF_tac = fn ctxt => all_tac, - pre_minimize_TNF_tac = fn ctxt => all_tac, - test_derivation_rules = [] - }; - -fun merge_testenv - (Testenv{test_thm_tab=tttab, - type_candicates=tc, - cod_term_tab=ctt, abstest_data_tab=tdt, - test_data_tab = ttdt, - unsolved_PO_tab = uPOt, - test_thyps_tab = ttht, - test_thm_inst_tab=tjt, pre_safe_tac=pst, - pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, - test_derivation_rules = tdr}, - Testenv{test_thm_tab=tttab', - type_candicates=tc', - cod_term_tab=ctt', abstest_data_tab=tdt', - test_data_tab = ttdt', - unsolved_PO_tab = uPOt', - test_thyps_tab = ttht', - test_thm_inst_tab=tjt',pre_safe_tac=pst', - pre_normalize_TNF_tac=pnt',pre_minimize_TNF_tac=pmt', - test_derivation_rules = tdr'}) = - - Testenv{test_thm_tab = Symtab.merge (Thm.eq_thm) (tttab,tttab'), - type_candicates = distinct (op=) (tc@tc'), - cod_term_tab = Symtab.empty, - (* don't know what a senseful - definition of override could - be; therefore constraint to the - simplest and most conservative - one *) - abstest_data_tab = Symtab.merge (fn (t1, t2) => List.all Thm.eq_thm (map2 (fn a => fn b => (a, b)) t1 t2)) (tdt,tdt'), - test_thm_inst_tab = Symtab.merge (fn (t1, t2) => List.all Thm.eq_thm (map2 (fn a => fn b => (a, b)) t1 t2)) (tjt,tjt'), - test_data_tab = Symtab.merge (fn (t1, t2) => List.all Thm.aconvc (map2 (fn a => fn b => (a, b)) t1 t2)) (ttdt,ttdt'), - unsolved_PO_tab = Symtab.merge (fn (t1, t2) => List.all Thm.aconvc (map2 (fn a => fn b => (a, b)) t1 t2)) (uPOt,uPOt'), - test_thyps_tab = Symtab.merge (fn (t1, t2) => List.all Thm.aconvc (map2 (fn a => fn b => (a, b)) t1 t2)) (ttht,ttht'), - pre_safe_tac = pst, - pre_normalize_TNF_tac = pnt, - pre_minimize_TNF_tac = pmt, - test_derivation_rules = distinct (op=) (tdr@tdr')}; - - -structure Data = Generic_Data -( - type T = testenv - val empty = initial_testenv - val extend = I - val merge = merge_testenv -); - -val get_testenv = Data.get o Context.Proof; -val map_data = Data.map; - -val get_testenv_global = Data.get o Context.Theory; -val map_data_global = Context.theory_map o map_data; - -fun print_testenv envname ctxt = - let val {test_thm_tab, type_candicates, - cod_term_tab, 
abstest_data_tab,test_thm_inst_tab,pre_safe_tac, - test_data_tab, unsolved_PO_tab, test_thyps_tab, - pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules} = - rep_testenv (get_testenv ctxt) - val depth = Config.get ctxt depth - val steps = Config.get ctxt steps - val bound = Config.get ctxt bound - val iterations = Config.get ctxt iterations - fun H (n,thm) = if n = envname orelse envname = "" - then [Pretty.str (n^":"), Thm.pretty_thm ctxt thm] - else [] - fun H2(n,thms)= if n = envname orelse envname = "" - then [Pretty.str (n^":"), Pretty.str "======"] @ - map (Thm.pretty_thm ctxt) thms @ - [Pretty.str "======"] - else [] - in [Pretty.str ">>> Testenvironment >>>>>>>>>>>>>>>>>", - Pretty.str "+++ Control Data: +++++++++++++++++++", - Pretty.str ("*** default depth: "^Int.toString(depth)), - Pretty.str ("*** default steps: "^Int.toString(steps)), - Pretty.str ("*** bound: "^Int.toString(bound)), - Pretty.str ("*** iterations: "^Int.toString(iterations)), - Pretty.str "+++ Testtheoremtable: +++++++++++++++"] @ -(* TODO bu: add cterm construction ... *) - maps H (Symtab.dest test_thm_tab) @ - [Pretty.str "+++ Testjudgements: +++++++++++++++++"] @ - maps H2 (Symtab.dest test_thm_inst_tab) @ - [Pretty.str "+++ Testdatatable: ++++++++++++++++++"] @ - maps H2 (Symtab.dest abstest_data_tab) @ - [Pretty.str "<<< Testenvironment <<<<<<<<<<<<<<<<<"] - end |> Pretty.chunks |> Pretty.writeln; - - -fun add_test_case (name,thm) - (Testenv {test_thm_tab, type_candicates, cod_term_tab, abstest_data_tab,test_thm_inst_tab, - test_data_tab, unsolved_PO_tab, test_thyps_tab, - pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules}) = - Testenv({test_thm_tab=Symtab.update(name,thm)test_thm_tab, - type_candicates=type_candicates, - cod_term_tab=cod_term_tab, - abstest_data_tab=abstest_data_tab, - test_thm_inst_tab=test_thm_inst_tab, - test_data_tab = test_data_tab, - unsolved_PO_tab = unsolved_PO_tab, - test_thyps_tab = test_thyps_tab, - pre_safe_tac=pre_safe_tac, - pre_normalize_TNF_tac=pre_normalize_TNF_tac, - pre_minimize_TNF_tac=pre_minimize_TNF_tac, - test_derivation_rules = test_derivation_rules}); - - -fun del_test_case name (Testenv {test_thm_tab, - type_candicates,cod_term_tab, abstest_data_tab, - test_data_tab, unsolved_PO_tab, test_thyps_tab, - test_thm_inst_tab,pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, - test_derivation_rules}) = - Testenv({test_thm_tab=Symtab.delete_safe name test_thm_tab, type_candicates=type_candicates, - cod_term_tab=cod_term_tab, abstest_data_tab=abstest_data_tab, - test_thm_inst_tab=test_thm_inst_tab, pre_safe_tac=pre_safe_tac, - test_data_tab = test_data_tab, - unsolved_PO_tab = unsolved_PO_tab, - test_thyps_tab = test_thyps_tab, - pre_normalize_TNF_tac=pre_normalize_TNF_tac, - pre_minimize_TNF_tac=pre_minimize_TNF_tac, - test_derivation_rules = test_derivation_rules}); - - - - -fun add_abstest_data (name,thm) - (Testenv {test_thm_tab, type_candicates, - cod_term_tab, abstest_data_tab,test_thm_inst_tab,pre_safe_tac, - test_data_tab, unsolved_PO_tab, test_thyps_tab, - pre_normalize_TNF_tac,pre_minimize_TNF_tac, - test_derivation_rules}) - = Testenv({test_thm_tab=test_thm_tab, - type_candicates=type_candicates, - cod_term_tab=cod_term_tab, - abstest_data_tab=Symtab.cons_list (name,thm) abstest_data_tab, - test_thm_inst_tab=test_thm_inst_tab,pre_safe_tac=pre_safe_tac, - test_data_tab = test_data_tab, - unsolved_PO_tab = unsolved_PO_tab, - test_thyps_tab = test_thyps_tab, - pre_normalize_TNF_tac=pre_normalize_TNF_tac, - 
pre_minimize_TNF_tac=pre_minimize_TNF_tac, - test_derivation_rules = test_derivation_rules}); - - -fun del_abstest_data name - (Testenv {test_thm_tab, - type_candicates, - cod_term_tab,abstest_data_tab, - test_data_tab, unsolved_PO_tab, test_thyps_tab, - test_thm_inst_tab,pre_safe_tac, - pre_normalize_TNF_tac,pre_minimize_TNF_tac, - test_derivation_rules}) - = Testenv({test_thm_tab=test_thm_tab, - type_candicates=type_candicates, - cod_term_tab=cod_term_tab, - abstest_data_tab=Symtab.delete_safe name abstest_data_tab, - test_thm_inst_tab=test_thm_inst_tab,pre_safe_tac=pre_safe_tac, - test_data_tab = test_data_tab, - unsolved_PO_tab = unsolved_PO_tab, - test_thyps_tab = test_thyps_tab, - pre_normalize_TNF_tac=pre_normalize_TNF_tac, - pre_minimize_TNF_tac=pre_minimize_TNF_tac, - test_derivation_rules = test_derivation_rules}); - - -fun get_test_thm_tab te = #test_thm_tab (rep_testenv te) -fun get_absdata_tab te = #abstest_data_tab(rep_testenv te) -fun get_test_thm_inst_tab te = #test_thm_inst_tab(rep_testenv te) - - - -fun get_test_data_tab te = #test_data_tab (rep_testenv te) -fun get_unsolved_PO_tab te = #unsolved_PO_tab (rep_testenv te) -fun get_test_thyps_tab te = #test_thyps_tab(rep_testenv te) - - -fun add_test_data_tab (name,ct) - (Testenv {test_thm_tab, type_candicates, cod_term_tab, abstest_data_tab,test_thm_inst_tab, - test_data_tab, unsolved_PO_tab, test_thyps_tab, - pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules}) = - Testenv({test_thm_tab=test_thm_tab, - type_candicates=type_candicates, - cod_term_tab=cod_term_tab, - abstest_data_tab=abstest_data_tab, - test_thm_inst_tab=test_thm_inst_tab, - test_data_tab = Symtab.cons_list(name,ct) test_data_tab, - unsolved_PO_tab = unsolved_PO_tab, - test_thyps_tab = test_thyps_tab, - pre_safe_tac=pre_safe_tac, - pre_normalize_TNF_tac=pre_normalize_TNF_tac, - pre_minimize_TNF_tac=pre_minimize_TNF_tac, - test_derivation_rules = test_derivation_rules}); - -fun add_unsolved_PO_tab (name,ct) - (Testenv {test_thm_tab, type_candicates, cod_term_tab, abstest_data_tab,test_thm_inst_tab, - test_data_tab, unsolved_PO_tab, test_thyps_tab, - pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules}) = - Testenv({test_thm_tab=test_thm_tab, - type_candicates=type_candicates, - cod_term_tab=cod_term_tab, - abstest_data_tab=abstest_data_tab, - test_thm_inst_tab=test_thm_inst_tab, - test_data_tab = test_data_tab, - unsolved_PO_tab = Symtab.cons_list(name,ct) unsolved_PO_tab, - test_thyps_tab = test_thyps_tab, - pre_safe_tac=pre_safe_tac, - pre_normalize_TNF_tac=pre_normalize_TNF_tac, - pre_minimize_TNF_tac=pre_minimize_TNF_tac, - test_derivation_rules = test_derivation_rules}); - - -fun add_test_thyps_tab (name,ct) - (Testenv {test_thm_tab, type_candicates, cod_term_tab, abstest_data_tab,test_thm_inst_tab, - test_data_tab, unsolved_PO_tab, test_thyps_tab, - pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules}) = - Testenv({test_thm_tab=test_thm_tab, - type_candicates=type_candicates, - cod_term_tab=cod_term_tab, - abstest_data_tab=abstest_data_tab, - test_thm_inst_tab=test_thm_inst_tab, - test_data_tab = test_data_tab, - unsolved_PO_tab = unsolved_PO_tab, - test_thyps_tab = Symtab.cons_list(name,ct) test_thyps_tab, - pre_safe_tac=pre_safe_tac, - pre_normalize_TNF_tac=pre_normalize_TNF_tac, - pre_minimize_TNF_tac=pre_minimize_TNF_tac, - test_derivation_rules = test_derivation_rules}); - - -fun set_test_data_tab (name,ctl) - (Testenv {test_thm_tab, type_candicates, cod_term_tab, 
abstest_data_tab,test_thm_inst_tab, - test_data_tab, unsolved_PO_tab, test_thyps_tab, - pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules}) = - Testenv({test_thm_tab=test_thm_tab, - type_candicates=type_candicates, - cod_term_tab=cod_term_tab, - abstest_data_tab=abstest_data_tab, - test_thm_inst_tab=test_thm_inst_tab, - test_data_tab = Symtab.update(name,ctl) test_data_tab, - unsolved_PO_tab = unsolved_PO_tab, - test_thyps_tab = test_thyps_tab, - pre_safe_tac=pre_safe_tac, - pre_normalize_TNF_tac=pre_normalize_TNF_tac, - pre_minimize_TNF_tac=pre_minimize_TNF_tac, - test_derivation_rules = test_derivation_rules}); - -fun set_unsolved_PO_tab (name,ctl) - (Testenv {test_thm_tab, type_candicates, cod_term_tab, abstest_data_tab,test_thm_inst_tab, - test_data_tab, unsolved_PO_tab, test_thyps_tab, - pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules}) = - Testenv({test_thm_tab=test_thm_tab, - type_candicates=type_candicates, - cod_term_tab=cod_term_tab, - abstest_data_tab=abstest_data_tab, - test_thm_inst_tab=test_thm_inst_tab, - test_data_tab = test_data_tab, - unsolved_PO_tab = Symtab.update(name,ctl) unsolved_PO_tab, - test_thyps_tab = test_thyps_tab, - pre_safe_tac=pre_safe_tac, - pre_normalize_TNF_tac=pre_normalize_TNF_tac, - pre_minimize_TNF_tac=pre_minimize_TNF_tac, - test_derivation_rules = test_derivation_rules}); - - -fun set_test_thyps_tab (name,ctl) - (Testenv {test_thm_tab, type_candicates, cod_term_tab, abstest_data_tab,test_thm_inst_tab, - test_data_tab, unsolved_PO_tab, test_thyps_tab, - pre_safe_tac,pre_normalize_TNF_tac,pre_minimize_TNF_tac, test_derivation_rules}) = - Testenv({test_thm_tab=test_thm_tab, - type_candicates=type_candicates, - cod_term_tab=cod_term_tab, - abstest_data_tab=abstest_data_tab, - test_thm_inst_tab=test_thm_inst_tab, - test_data_tab = test_data_tab, - unsolved_PO_tab = unsolved_PO_tab, - test_thyps_tab = Symtab.update(name,ctl) test_thyps_tab, - pre_safe_tac=pre_safe_tac, - pre_normalize_TNF_tac=pre_normalize_TNF_tac, - pre_minimize_TNF_tac=pre_minimize_TNF_tac, - test_derivation_rules = test_derivation_rules}); - - -(* In the sequel we also use the term "test judgement" synonymously for "test data statement". 
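   For illustration (a sketch, not part of the original sources; the suite name
   "my_suite" and the certified term ct are invented): a concrete test data
   statement is typically recorded from ML via

       Context.>> (TestEnv.map_data (TestEnv.add_test_data_tab ("my_suite", ct)))

   whereas set_test_data_tab above replaces the complete list of entries stored
   under a suite name in one go.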
*) - -fun test_thm_tab_update thm_tab - (Testenv{test_thm_tab=tttab, - type_candicates=tc, - cod_term_tab=ctt, - abstest_data_tab=tdt, - test_data_tab, unsolved_PO_tab, test_thyps_tab, - test_thm_inst_tab=tjt,pre_safe_tac=pst, - pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, - test_derivation_rules = tdr}) = - (Testenv{test_thm_tab=thm_tab, - type_candicates=tc, - cod_term_tab=ctt, - abstest_data_tab=tdt, - test_thm_inst_tab=tjt,pre_safe_tac=pst, - test_data_tab = test_data_tab, - unsolved_PO_tab = unsolved_PO_tab, - test_thyps_tab = test_thyps_tab, - pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, - test_derivation_rules = tdr}); - -fun test_thm_inst_tab_update jdmt_tab - (Testenv{test_thm_tab=tttab, - type_candicates=tc, - cod_term_tab=ctt, - abstest_data_tab=tdt, - test_data_tab, unsolved_PO_tab, test_thyps_tab, - test_thm_inst_tab=tjt,pre_safe_tac=pst, - pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, - test_derivation_rules = tdr}) = - (Testenv{test_thm_tab=tttab, - type_candicates=tc, - cod_term_tab=ctt, - abstest_data_tab=tdt, - test_thm_inst_tab=jdmt_tab, - test_data_tab = test_data_tab, - unsolved_PO_tab = unsolved_PO_tab, - test_thyps_tab = test_thyps_tab, - pre_safe_tac=pst,pre_normalize_TNF_tac=pnt, - pre_minimize_TNF_tac=pmt, - test_derivation_rules = tdr}); - - -fun type_candidates_update (type_candidates) - (Testenv{test_thm_tab=tttab, - type_candicates=tc, - cod_term_tab=ctt, - abstest_data_tab=tdt, - test_data_tab, unsolved_PO_tab, test_thyps_tab, - test_thm_inst_tab=tjt,pre_safe_tac=pst, - pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, - test_derivation_rules = tdr}) - =(Testenv{test_thm_tab=tttab,type_candicates=type_candidates, - cod_term_tab=ctt, abstest_data_tab=tdt, - test_thm_inst_tab=tjt,pre_safe_tac=pst, - test_data_tab = test_data_tab, - unsolved_PO_tab = unsolved_PO_tab, - test_thyps_tab = test_thyps_tab, - pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, - test_derivation_rules = tdr}); - -fun pre_safe_tac_update (pre_safe_tat) - (Testenv{test_thm_tab=tttab, type_candicates=tc, - cod_term_tab=ctt, abstest_data_tab=tdt, - test_thm_inst_tab=tjt,pre_safe_tac=pst, - test_data_tab, unsolved_PO_tab, test_thyps_tab, - pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, - test_derivation_rules = tdr}) - =(Testenv{test_thm_tab=tttab,type_candicates=tc, - cod_term_tab=ctt, abstest_data_tab=tdt, - test_thm_inst_tab=tjt, pre_safe_tac=pre_safe_tat, - test_data_tab = test_data_tab, - unsolved_PO_tab = unsolved_PO_tab, - test_thyps_tab = test_thyps_tab, - pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, - test_derivation_rules = tdr}); - - -fun pre_normalize_TNF_tac_update (pre_normalize_TNF_tac) - (Testenv{test_thm_tab=tttab, type_candicates=tc, - cod_term_tab=ctt, abstest_data_tab=tdt, - test_thm_inst_tab=tjt,pre_safe_tac=pst, - test_data_tab, unsolved_PO_tab, test_thyps_tab, - pre_normalize_TNF_tac=pnt,pre_minimize_TNF_tac=pmt, - test_derivation_rules = tdr}) - = (Testenv{test_thm_tab=tttab, type_candicates=tc, - cod_term_tab=ctt, abstest_data_tab=tdt, - test_thm_inst_tab=tjt, pre_safe_tac=pst, - test_data_tab = test_data_tab, - unsolved_PO_tab = unsolved_PO_tab, - test_thyps_tab = test_thyps_tab, - pre_normalize_TNF_tac=pre_normalize_TNF_tac, - pre_minimize_TNF_tac=pmt, - test_derivation_rules = tdr}); - - -fun pre_minimize_TNF_tac_update (pre_minimize_TNF_tac) - (Testenv{test_thm_tab=tttab, type_candicates=tc, - cod_term_tab=ctt, abstest_data_tab=tdt, - test_thm_inst_tab=tjt, pre_safe_tac=pst, - test_data_tab, unsolved_PO_tab, test_thyps_tab, - 
pre_normalize_TNF_tac=pnt, pre_minimize_TNF_tac=pmt, - test_derivation_rules = tdr}) - =(Testenv{test_thm_tab=tttab, type_candicates=tc, - cod_term_tab=ctt, abstest_data_tab=tdt, - test_thm_inst_tab=tjt, pre_safe_tac=pst,pre_normalize_TNF_tac=pnt, - test_data_tab = test_data_tab, - unsolved_PO_tab = unsolved_PO_tab, - test_thyps_tab = test_thyps_tab, - pre_minimize_TNF_tac=pre_minimize_TNF_tac, - test_derivation_rules = tdr}); - - - - -fun get_pre_safe_tac (Testenv{pre_safe_tac=pst, ...}) = pst; -fun get_pre_normalize_TNF_tac (Testenv{pre_normalize_TNF_tac=pnt, ...}) = pnt; -fun get_pre_minimize_TNF_tac (Testenv{pre_minimize_TNF_tac=pmt, ...}) = pmt; - -fun get_test_derivation_rules (Testenv{test_derivation_rules = tdr, ...}) = map (op !) tdr; - -fun get_test_data ctxt name = let val Testenv {test_thm_inst_tab,...} = (get_testenv ctxt) - in Symtab.lookup test_thm_inst_tab name end - -fun add_test_derivation_rule (tdr) - (Testenv{test_thm_tab=tttab, type_candicates=tc, - cod_term_tab=ctt, abstest_data_tab=tdt, - test_thm_inst_tab=tjt,pre_safe_tac=pst,pre_normalize_TNF_tac=pnt, - test_data_tab, unsolved_PO_tab, test_thyps_tab, - pre_minimize_TNF_tac=pmt, test_derivation_rules = tdrs}) - = (Testenv{test_thm_tab=tttab, type_candicates=tc, - cod_term_tab=ctt, abstest_data_tab=tdt, - test_thm_inst_tab=tjt, pre_safe_tac=pst, pre_normalize_TNF_tac=pnt, - test_data_tab = test_data_tab, - unsolved_PO_tab = unsolved_PO_tab, - test_thyps_tab = test_thyps_tab, - pre_minimize_TNF_tac=pmt, - (* OH MY GOD --- WHAT CRAP IS THIS >>>>> bu *) - test_derivation_rules = (Unsynchronized.ref tdr)::tdrs}); - -(* module setup *) - - - -fun print_test_data_generic f ctxt name index_opt = - let val ctrms = case (Symtab.lookup (f (get_testenv ctxt)) name) of - SOME k => k - | NONE => error "No such test suite" - val print_cterm = Pretty.string_of o (Syntax.pretty_term ctxt) o Thm.term_of - in case index_opt of - NONE => String.concatWith "\n" (map print_cterm ctrms) - |SOME ind => print_cterm (nth ctrms ind) - end - - -val print_test_data = print_test_data_generic get_test_data_tab; -val print_test_case = print_test_data_generic get_test_data_tab; (* temporary hack by bu - - should be own implem with own - table*) - -val print_test_hyps = print_test_data_generic get_test_thyps_tab; - -val print_unsolved_pos = print_test_data_generic get_unsolved_PO_tab; - - -val setup = - prelude_setup #> - wrapper_setup #> - toString_setup #> - setup_code_setup #> - dataconv_code_setup #> - depth_setup #> - steps_setup #> - bound_setup #> - case_steps_setup #> - iterations_setup #> - SMT_setup #> - SMT2_setup #> - pon_setup #> - smt_model_setup #> - use_metis_setup #> - use_smt_setup #> - TestGen_SMT.setup; - -end (*struct *); - -*} - -end diff --git a/src/test-gen/src/test/TestGen.thy b/src/test-gen/src/test/TestGen.thy deleted file mode 100644 index 6f5bd1e..0000000 --- a/src/test-gen/src/test/TestGen.thy +++ /dev/null @@ -1,1704 +0,0 @@ -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * TestGen.thy --- the core of the HOL-TestGen System. - * This file is part of HOL-TestGen. - * - * Copyright (c) 2005-2010 ETH Zurich, Switzerland - * 2008-2015 Achim D. Brucker, Germany - * 2009-2017 Universite Paris-Sud, France - * 2015-2017 The University of Sheffield, UK - * - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ******************************************************************************) -(* $Id:$ *) - -chapter {* The TestGen Core*} - -theory TestGen -imports - Complex_Main - Term_Tactics (*ML*) - clocks (*ML*) - log (* ML *) - TestEnv (* ML *) - BackendUtils - RandomBackend - QuickCheckBackend - SMTBackend - - -keywords "mk_test_suite" :: "qed_global" - and "test_spec" :: "thy_goal" - and "gen_test_data" :: "thy_decl_block" (*"thy_script"*) (* really ? *) - and "print_testenv" :: diag - and "print_conc_tests" :: diag - and "print_abs_tests" :: diag - and "print_thyps" :: diag - and "print_upos" :: diag - and "set_pre_safe_tac" :: diag - and "set_pre_normalize_TNF_tac" :: diag - and "set_pre_minimize_TNF_tac" :: diag -begin - - -section{*General Framework for Unit-Test*} - - -definition PO :: "bool \ bool" - where "PO x = x" - -lemma PO_norm1 [simp]: "PO(PO A) = PO A" by(simp add: PO_def) -lemma PO_norm2 [simp]: "(\ PO A) = (PO(\ A))" by(simp add: PO_def) -lemma PO_norm3 [simp]: "(A \ PO B) = (PO(A \ B))" by(simp add: PO_def) -lemma PO_norm4 [simp]: "(PO A \ B) = (PO(A \ B))" by(simp add: PO_def) -lemma PO_norm5 [simp]: "(A \ PO B) = (PO(A \ B))" by(simp add: PO_def) -lemma PO_norm6 [simp]: "(PO A \ B) = (PO(A \ B))" by(simp add: PO_def) -lemma PO_norm7 [simp]: "(A \ (PO B)) = (PO(A \ B))" by(simp add: PO_def) -lemma PO_norm8 [simp]: "((PO A) \ B) = (PO(A \ B))" by(simp add: PO_def) -lemma PO_norm9 [simp]: "(A = (PO B)) = (PO(A = B))" by(simp add: PO_def) -lemma PO_norm10 [simp]: "((PO A) = B) = (PO(A = B))" by(simp add: PO_def) -lemma PO_norm11 [simp]: "(\x. (PO (A x))) = (PO(\x. A x))" by(simp add: PO_def) -lemma PO_norm12 [simp]: "(\x. (PO (A x))) = (PO(\x. 
A x))" by(simp add: PO_def) - -lemma PO_I : "(PO P) \ Q \ P \ Q" by(simp add: PO_def) (* internal use only *) -lemma PO_rev[elim!]: "\ PO False \ PO False \ False" by auto -lemma PO_grow : "PO(P \ Q) \ R \ (PO P) \ (Q \ R)" by(simp add: PO_def) (* internal use only *) - - -definition THYP :: "bool \ bool" - where "THYP x = x" - -lemma THYP_triv [simp]: "THYP True = True" by(simp add: THYP_def) - -lemma THYP_I : "A \ THYP A" by(simp add: THYP_def) - -lemma THYP_D : "THYP A \ A" by(simp add: THYP_def) - -lemma THYP_E : "\ THYP A; (A \ R) \ \ R" by(simp add: THYP_def) - -lemma THYP_spec : "THYP(\x. P x) \ THYP(P x)" by(simp add: THYP_def) - -lemma THYP_app1 : "\ THYP(A \ B); A \ \ B" by(simp add: THYP_def) - (* to be used with etac in order to apply uniformity hyp *) - -lemma THYP_app1_rev : "\ A; THYP(A \ B) \ \ B" by(simp add: THYP_def) - (* to be used with etac in order to apply uniformity hyp *) - -lemma THYP_app2 : "\ THYP(A); A \ B \ \ B" by(simp add: THYP_def) - (* to be used with RS in order to apply regularity hyp *) - - -lemma THYP_E_reg : "\ size x \ k \ B; THYP((k < size x) \ B)\ \ B" - by(simp add: THYP_def, auto) - (* to be used with rtac in order to introduce regularity hyp *) - - -definition RSF :: "bool" - where "RSF = True" -- {* just to denote random solver failures *} - -lemma RSF_E : "\ RSF \ P \ \ P" - by(auto simp: RSF_def) - - -section {* Tool Setup *} - - -ML{* -infix 1 |$> - -infix 6 DOWNTO - -infix 0 COLLECT_SS - -signature TESTGEN = -sig - val mt_testenv : TestEnv.testenv - - val get_test_thm : theory -> Symtab.key -> thm list - - val trace : bool Config.T - (* switches on debug information *) - - val no_uniformity : bool Config.T - (* switches off uniformity generation *) - - val profiling : bool Config.T - (* switches on timer profiler *) - - val no_finalize : bool Config.T - (* don't generate constraints (POs) *) - - val is_neither_thyp_nor_PO : term -> bool - - val |$> : thm * tactic -> thm - val TRY' : (int -> tactic) -> int -> tactic - val COND' : (int -> thm -> bool) -> (int -> tactic) -> (int -> tactic) -> int -> tactic - val DOWNTO : int * int -> (int -> tactic) -> tactic - val DEPTH_SPLIT : (int -> tactic) -> int -> tactic - val REPEAT' : ('a -> tactic) -> 'a -> tactic - val ALLCASES : (int -> tactic) -> tactic - val to_TNF : Proof.context -> string list -> int -> thm -> thm Seq.seq - val uniformityI_tac : Proof.context -> string list -> int -> tactic - val CLOSURE : (int -> tactic) -> int -> thm -> thm Seq.seq - val case_tac_typ : Proof.context -> string list -> int -> thm -> thm Seq.seq - - val ONLY_POS : (int -> tactic) -> int -> tactic - - val finalize_tac : Proof.context -> string list -> tactic - - val abs_data_tac : Proof.context -> thm list -> int -> tactic - - val mp_fy : Proof.context -> int -> tactic - val all_ify : Proof.context -> string list -> int -> tactic - val thyp_ify : Proof.context -> int -> tactic - - val var_hyp_subst_tac : Proof.context -> int -> tactic - - val gen_test_case_tac : Proof.context -> string list -> tactic - (* "gen_test_case_tac ctxt prognames" is a - tactic that converts all subgoals of current proof state into - test-normal form - i.e. a conjunctive normal form - (instead of DNF), where the conclusions have the - form "P(progname x1 .. xn)", where progname does - not occur in the premisses (the "constraints") - of the test. As a prerequisite, a data-separation - theorem of depth k is resolved for up to >steps< number - of free variables in the current proof-state. 
- - NOTE: At present, the generation of data-separation - lemmas is limited to "real" free variables and will - not work for parameters of a test specification. Further, - the proof-state is assumed to have only one goal. - *) - - (* Included for complete backward compatibility *) - val gen_test_case_old_tac : Proof.context -> string list -> tactic - - val gen_test_data : string -> Context.generic -> Context.generic - (* "gen_test_data name context" constructs ground type instances of - test theorems associated to "name". In a further step, - "abstract" or local test data were resolved into the resulting test - theorems (if available in the testenv under "name"), remaining - meta variables were instantiated by a "random_solver" - that attempts to generate an instance and tries to solve - the constraints of a goal up to "iterations" times. - - The resulting simplified and "solved" test data theorems - were stored again in the test environment. - *) - - (* TODO: remove these three functions *) - val get_test_data : theory -> string -> thm list - (* get_test_data thy name extracts the test judgements - (i.e. the premisses of test data statement, that are not - test hypothesis or proof obligation.). *) - - val get_test_hyps : theory -> string -> thm list - (* get_test_hyps thy name extracts the test hypothesis - of a !!test data statement!!, not a test theorem. *) - - val get_pos : theory -> string -> thm list - (* extract remaining constraints (proof obligations) *) - - val animate : theory -> string -> unit - (* ground test data statements may be animated, if "P(progname x1 .. xn)" - is executable. "animate" presents test data statements by - unfolding "progname x1 .. xn" and simplifying on a stepwise - basis. (* for the moment, it's just asm_simp_tac with the - standard evaluation trace, in the future its more or less - the same except with slightly improved output *) - *) - val discharge_POs : string -> Proof.context -> tactic - - val setup: theory -> theory -end -*} - - -ML{* - -structure TestGen : TESTGEN = -struct - -open Thm; -open HOLogic; -open BNF_LFP_Compat ; -open Old_Datatype_Aux -(* open TestEnv; *) -(* open Clocks; *) -(* open BNF_LFP_Compat *) -open Int; -open List; -open Ctr_Sugar_Util ; (* rtac dtac etc. *) -open Rule_Insts; (* res_inst_tac etc *) - - - -val (trace, trace_setup) = Attrib.config_bool @{binding testgen_trace} (K false); -val (no_uniformity, no_uniformity_setup) = Attrib.config_bool @{binding no_uniformity} (K false); -val (profiling, profiling_setup) = Attrib.config_bool @{binding testgen_profiling} (K false); -val (no_finalize, no_finalize_setup) = Attrib.config_bool @{binding testgen_no_finalize} (K false); - - -(* ************************************************************************* *) -(* access to datatype package ... *) -(* ************************************************************************* *) - -fun isDataType thy (Type(tn,_)) = is_some (BNF_LFP_Compat.get_info thy [] tn) (* CHECK ME: empty preference list ok ? *) - |isDataType _ _ = false; - - -fun get_case_split_of_dt thy type_name = #exhaust (BNF_LFP_Compat.the_info thy [] type_name); - -fun get_nchotomy_of thy type_name = #nchotomy (BNF_LFP_Compat.the_info thy [] type_name); - - -(* ************************************************************************* *) -(* access to record package ... 
*) -(* ************************************************************************* *) - -fun isRecordType thy (Type(tn,_)) = is_some (Record.get_info thy tn) - |isRecordType _ _ = false; - -fun get_case_split_of_record thy type_name = #cases(the (Record.get_info thy type_name)); - - -(* ************************************************************************* *) -(* term computations ... *) -(* ************************************************************************* *) - -fun max_depth (Free h) = (0,[h]) - |max_depth (Const h)= (0,[]) - |max_depth (Abs(s,tt,t)) = max_depth t - |max_depth (s $ t) = let val (k,S) = max_depth (s) - val (k',S') = max_depth (t) - in if k > k'+1 then (k,S) - else if k = k'+1 then (k, S @ S') - else (k'+1, S') - end - |max_depth (_) = (0,[]); - -(* relies on definitions in Testing.thy ... *) -val is_thyp = exists_Const (fn (@{const_name THYP},_) => true | _ => false) - -fun is_po (Const (@{const_name Trueprop},_) $ (Const (@{const_name PO},_) $ _)) = true - |is_po _ = false; - -fun is_neither_thyp_nor_PO t = (not (is_thyp t)) andalso (not (is_po t)) - -fun is_test_data t = (is_neither_thyp_nor_PO t) andalso not (exists_subterm is_Var t) - -fun dest_TruepropAll (Const ("all", _) $ Abs(s,ty, t)) = - (Const (@{const_name "HOL.All"}, (ty --> HOLogic.boolT) --> HOLogic.boolT) - $ Abs(s,ty , dest_TruepropAll t)) - |dest_TruepropAll t = HOLogic.dest_Trueprop t; - - -fun exists_prog prog = Term.exists_subterm - (fn Free (x, _) => member (op =) prog x - | Const (c, _) => member (op =) prog (Long_Name.base_name c) (* FIXME proper internal name!? *) - | _ => false); - -(* complex type instantiations ... *) - -fun groundT_thm context bound candTs thm = - let val ctxt = Context.proof_of context - val thy = Proof_Context.theory_of ctxt - val thm = Thm.varifyT_global thm - val tvars = Term.add_tvars (list_comb(Thm.concl_of thm,Thm.prems_of thm)) [] - fun ctyp_of_sort x so t = if Sign.of_sort thy (t,so) - then (SOME((x,so), Thm.ctyp_of ctxt t) - handle _ => NONE) (* FIXME avoid handle _ *) - else NONE - fun prodS [] _ = [] - |prodS (a::R) B = (map(fn x => a::x) B) @ (prodS R B) - - fun compute_instsT [] = [[]] - |compute_instsT ((x,so)::R) = prodS (List.mapPartial (ctyp_of_sort x so) candTs) - (compute_instsT R) - in - map (fn x => Drule.instantiate_normalize (x,[]) thm) - (Library.take bound (compute_instsT tvars)) - end; - -(* test - - val thm' = groundT_thm thy - (#type_range_bound(rep_testenv te)) - (#type_candicates(rep_testenv te)) - thm - - *) - - - -(* *********************************************************************** *) -(* *) -(* SOME more universal tacticals -> tctical.ML *) -(* *) -(* *********************************************************************** *) - -fun thm |$> tac = - case Seq.pull (tac thm) of - SOME (m,_) => m - |_ => error"|$> : TACTIC FAILED" - -fun TRY' tac n = TRY(tac n) (* standard combinator, - not implemented so far in Isabelle *) - -fun REPEAT' tac n = REPEAT (tac n) - -fun COND' testfun thenf elsef x prf = - if testfun x prf then thenf x prf else elsef x prf; - - -fun (k DOWNTO (n:int)) = fn tac => (fn st => - let fun doall x = if x < n - then all_tac - else ((tac x) THEN doall(x-1)) - val m = Thm.nprems_of st - in if k <= 0 orelse k > m - then all_tac st - else doall k st - end) - - -fun DEPTH_SPLIT tac n st = -(* PRE tac should only exhausting - i.e. increase - the number of subgoals finitely many times - POST if tac never fails, DEPTH_SPLIT tac will not fail. 
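   As an aside on the DOWNTO combinator defined above (illustration only, not
   part of the original sources):

       (5 DOWNTO 2) (asm_full_simp_tac ctxt)

   applies the simplifier to subgoals 5, 4, 3, 2 in this order; working
   downwards keeps the indices of the subgoals still to be visited stable even
   when the tactic closes or splits the goals already treated.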
- *) - let val m = Thm.nprems_of st - val res = st |$> (tac n) - val m' = Thm.nprems_of res - in if m' > m - then ((n + m' - m) DOWNTO n)(DEPTH_SPLIT tac) (res) - else all_tac res - end - -fun SOLVE_ASMS ctxt tac n state = - let val goal = Logic.get_goal (Thm.prop_of state) n - val params = Logic.strip_params goal - val prems = map HOLogic.dest_Trueprop (rev(Logic.strip_imp_prems goal)) - val eqTrueI2 = @{lemma "[|P;P |] ==> True" by auto} - fun decomp thm n = let val mm = (thm RS conjunct1) - in (dtac ctxt (mm RS eqTrueI2) n) THEN - (etac ctxt @{thm TrueE} n) THEN - (decomp (thm RS conjunct2) n) end - handle THM _ => (dtac ctxt (thm RS eqTrueI2) n) THEN - (etac ctxt @{thm TrueE} n) - - in case prems of - [] => all_tac state - |a::r => let val prem = List.foldl HOLogic.mk_conj a r - val prem'= (Term.subst_bounds ((rev(map Free params)),prem)) - val thm = Goal.prove ctxt [] [] (HOLogic.mk_Trueprop prem') tac - in (decomp thm n) state end - handle _ => no_tac state (* could not solve asms ... *) (* FIXME avoid handle _ *) - end - - -fun auto_solver thms {prems: thm list, context: Proof.context} state = - let val thm' = state |$> (TRY(safe_tac (context delrules [notI]))) - val goals = Thm.nprems_of thm' - fun solve_thm thm = FIRSTGOAL (fn n => (rtac context thm n) THEN - (IF_UNSOLVED (SOLVE(auto_tac context)))) - fun solve_thms [] = no_tac - |solve_thms (a::R) = solve_thm a ORELSE solve_thms R - in if goals > 0 then (solve_thms thms) thm' else all_tac thm' - end - - -(* *********************************************************************** *) -(* *) -(* SOME Testing specific tacticals *) -(* *) -(* *********************************************************************** *) - -(* PRE n>0 and n <= nprems_of thm *) -fun is_THYP n thm = is_thyp(Logic.nth_prem(n, Thm.prop_of thm)) - -fun is_PO n thm = is_po(Logic.nth_prem(n, Thm.prop_of thm)) - -fun is_TEST n thm = is_neither_thyp_nor_PO(Logic.nth_prem(n, Thm.prop_of thm)) - -fun ONLY_CASES tac = COND' is_TEST tac (K all_tac) - -fun ALLCASES_before k tac = (k DOWNTO 1) (ONLY_CASES tac) - -fun ALLCASES tac thm = (ALLCASES_before (Thm.nprems_of thm) tac thm) - -fun ONLY_POS tac = COND' is_PO tac (K all_tac) - - -fun unfold ctxt rules = asm_full_simp_tac (put_simpset HOL_basic_ss ctxt addsimps rules) - -(* Optimization potential: safe_tac against something more - specialized should do ... *) -fun if_splitter ctxt cs = asm_full_simp_tac (fold Splitter.add_split @{thms if_splits} (put_simpset HOL_basic_ss ctxt)) - THEN' TRY' (fn n => SELECT_GOAL (safe_tac cs) n) - -(* *********************************************************************** *) -(* *) -(* basic tactics ... *) -(* *) -(* *********************************************************************** *) - -fun strip_tac ctxt i = REPEAT (resolve_tac ctxt [impI, allI] i); (* declared Legacy in 2013-2*) - -(* Pre : A1 ==> .. ==> An ==> An+1 - Post: A1 --> .. 
--> An --> An+1 *) -fun mp_fy ctxt n = (REPEAT(etac ctxt rev_mp n)); (* Version replaced by more efficient one *) -fun mp_fy ctxt n = (REPEAT(ematch_tac ctxt [rev_mp] n)); - -fun all_ify ctxt excpt n state = - let val subgoal = Logic.nth_prem(n, Thm.prop_of state) - val params = Logic.strip_params subgoal - val frees = Term.add_frees subgoal [] - fun sbs(n,t) = res_inst_tac ctxt [((("x",0), Position.none), n)] [] spec - val vars = filter_out (member (op =) excpt o fst) (params @ frees) - in (if null vars - then all_tac - else (List.foldr(fn(x,ta) => (sbs x n) THEN ta) (prune_params_tac ctxt) vars) - (* NEW PATCH rev 12197*) - THEN - all_ify ctxt excpt n (* sbs can fail to eliminate *all* variables - due to alpha-renaming of parameters in - res_inst_tac. This recursive call makes - all_ify exhaustive.*) - (* END NEW PATCH rev 12197*) - ) - (state) - end - - -(*FIXME Free's are actually local consts, and cannot be closed in general - without disrupting Isar locality*) -fun allify_frees_tac ctxt excpt = SUBGOAL (fn (subgoal, i) => - let val thy = Proof_Context.theory_of ctxt - val a = (("'a", 0), @{sort type}) - fun spec_tac (x, T) = - rtac ctxt (Thm.instantiate ([(a, Thm.ctyp_of ctxt T)], [((("x", 0), T), Thm.cterm_of ctxt (Free (x, T)))]) - (Thm.transfer thy @{thm spec})) i - val vars = filter_out (member (op =) excpt o fst) (Term.add_frees subgoal []) - in - if null vars then all_tac - else EVERY (map spec_tac vars) - end) - -fun thyp_ify ctxt n = - EVERY[mp_fy ctxt n, all_ify ctxt [] n, rtac ctxt (@{thm THYP_def} RS @{thm iffD1}) n] - -fun vars_to_frees t = let - val termvars = Term.add_vars t [] - val freenames = Term.add_free_names t [] - val varnames = map (fn ((s, a),_) => s^(Int.toString a)) termvars - val newfreenames = Name.variant_list freenames varnames - val insts = map (fn (x as (_, T), freename) => (Var x, Free (freename, T))) (termvars ~~ newfreenames) -in - Term.subst_atomic insts t -end - -fun convert_goals_to_metahyps (test:(term -> bool)) thm = - let val thy = Thm.theory_of_thm thm - in map ((fn prop => Thm.forall_elim_vars 0 (Thm.assume (Thm.global_cterm_of thy prop))) o vars_to_frees) - (filter test (Thm.prems_of thm)) - end; - - -fun instantiate_tac ctxt insts n thm = - let val goal = Thm.global_cterm_of (Thm.theory_of_thm thm) (Logic.nth_prem(n, Thm.prop_of thm)) - in (rtac ctxt (Drule.instantiate_normalize ([],insts)(Thm.trivial goal)) n -) thm end - -fun var_hyp_subst_tac ctxt no state = - let val prms = Logic.strip_imp_prems(Logic.nth_prem(no, Thm.prop_of state)); - fun is_eq (Const (@{const_name HOL.eq}, _) $ (Var _) $ _) = true - |is_eq (Const (@{const_name HOL.eq}, _) $ _ $ (Var _)) = true - |is_eq _ = false - val i = Library.find_index(fn (Const(@{const_name Trueprop},_)$X) => is_eq X) - (prms) - fun mks thm = if i >= 0 - then ((rotate_tac i no) THEN etac ctxt @{thm thin_refl} no) thm - else no_tac thm - in ((REPEAT mks) - THEN - (full_simp_tac ctxt no)) state - end; - - -(* Code seems to be superseeded by Goals.PARALLEL_GOALS in Isabelle2013-2 ... 
*) -(* parallel tactic application, without any sanity checks *) - -local exception FAILED of unit in - -fun PARALLEL tac st = - let - fun try_tac g = - (case SINGLE tac g of - NONE => raise FAILED () - | SOME g' => g'); - val goals = Drule.strip_imp_prems (Thm.cprop_of st); - val results = Par_List.map (try_tac o Goal.init) goals; - in all_tac st (* ALLGOALS (fn i => retrofit i 1 (nth results (i - 1))) st *) - end - handle FAILED () => Seq.empty; - -end; - - - -(* A specialized map-reduce pattern for constraint-solving in TestGen. The - PO's were extracted, attempts to their solution parallized, and the retrofit - of the results into the test-theorem is again sequential, since it instantiates - the meta-variables. *) -fun PARALLEL_TRYSOLVE_POs ctxt test_solve_tac st = - let - val pon = Config.get ctxt TestEnv.pon - fun filter_pos filter S = - let fun h _ _ [] = [] - | h f n (a::S) = if f a then (n,a)::(h f (n+1) S) else (h f (n+1) S) - fun h_pon _ _ _ [] = [] - | h_pon f n k (a::S) = if f a then (if k = pon then (n,a):: nil - else (h_pon f (n+1) (k+1) S)) - else (h_pon f (n+1) k S) - in if pon = 0 then h filter 1 S else h_pon filter 1 1 S end; - fun try_solve ct = ((Option.map (Goal.finish ctxt)) o - (SINGLE (test_solve_tac ctxt))) - (Goal.init ct); - val goals = Drule.strip_imp_prems (Thm.cprop_of st); - val po_trms = (filter_pos (is_po o Thm.term_of) goals); - val jdmts = Par_List.map (fn(n,ct) => (n, try_solve ct)) po_trms - - in Seq.succeed(foldr (fn ((k, SOME a),thm) => (a RSN (k, thm)) - |((_, NONE),thm) => thm) st jdmts) - end - - (* val PARALLEL = PARALLEL_GOALS *) (* Experiment. See above. bu *) - - -(* applies exhaustively tactic tac ctxt i on subgoal i and all its descendants. - tac can solve current goal i. - PRE: tac must eventually terminate, also in all descendants ( otherwise CLOSURE loops). - PRE: current goal i must be in {1 .. nprems_of st}. - PRE: ctxt must be the ProofContext of st. 
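   Usage sketch (illustration only, not part of the original sources):

       CLOSURE (CHANGED o asm_full_simp_tac ctxt) 1

   repeatedly simplifies subgoal 1 and every subgoal emerging from it and moves
   on once the simplifier makes no further progress; wrapping the tactic in
   CHANGED makes it fail on a fixed point, which provides the termination
   demanded by the preconditions above.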
-*) -fun CLOSURE tac i st = - let fun closure i k st = if i > k then all_tac st - else let val m = Thm.nprems_of st - in case SINGLE (tac i) st of - NONE => closure (i+1) k st - | SOME st' => let val m' = Thm.nprems_of st' - in if m' = m-1 (* tac eliminated subgoal *) - then closure i (k-1) st' - else closure i (k+(m'-m)) st' - (* superfluous case distinct *) - end - end - in closure i i st end - - - -(* *********************************************************************** *) -(* *) -(* special tactics for TestGen *) -(* *) -(* *********************************************************************** *) - -fun cpat ctxt str = Thm.cterm_of ctxt (Proof_Context.read_term_pattern ctxt str); - -(*PRE : Must not be THYP or PO *) -(* Known Bug: does not work for arbirary mixtures between HOL and Pure Quantifications *) -fun uniformityI_tac ctxt prog = - (if Config.get ctxt no_uniformity - then (fn _ => all_tac) - else SELECT_GOAL - let - val cert = Thm.cterm_of ctxt; - - val core_tac = - SUBGOAL (fn (term, i) => - let fun conv_All (Const (@{const_name All}, ty) $ Abs(n, ty', tt)) = - (Const (@{const_name Ex}, ty) $ Abs (n, ty', conv_All tt)) - |conv_All (Const (@{const_name HOL.implies}, _) $ P $ Q) = - (Const (@{const_name HOL.conj}, [boolT,boolT]--->boolT) $ P $ conv_All Q) - | conv_All tt = tt; - (* val tt = dest_TruepropAll term; *) - val tt = dest_Trueprop term; - val thyp = mk_Trueprop (@{const THYP} $ (@{const HOL.implies} $ conv_All tt $ tt)); - in rtac ctxt (Thm.instantiate ([], [((("psi",0),propT), cert thyp)]) cut_rl) i end); - - val x = cpat ctxt "?x::?'a"; - val T = Thm.typ_of_cterm x; - val count = Unsynchronized.ref 0; - fun new_var () = cert (Var (("?X" ^ Int.toString (Unsynchronized.inc count) ^ "X", 0), T)); - fun exI_tac i st = rtac ctxt (Thm.instantiate ([], [((("x",0),T), new_var ())]) @{thm exI}) i st; - fun check_tac' (t,i) = case t of - @{term Trueprop} $ t' => check_tac' (t',i) - | @{term HOL.conj} $ u $ _ => if exists_prog prog u then - no_tac - else all_tac - - val check_tac = SUBGOAL check_tac' - val intro_PO = (rtac ctxt @{thm PO_I} 1) - THEN (check_tac 1) - THEN REPEAT_DETERM ((rtac ctxt @{thm PO_grow} 1) - THEN (check_tac 1)) - - val finish_tac = (intro_PO THEN (rtac ctxt @{thm "conjI"} 1) THEN (strip_tac ctxt 2)) ORELSE (strip_tac ctxt 1) - in - EVERY [mp_fy ctxt 1, all_ify ctxt prog 1, - core_tac 1, etac ctxt @{thm THYP_app1} 1, - (REPEAT_DETERM o exI_tac) 1, - finish_tac] - end) - - -(* ************************************************************************* *) -(* data separation lemma generation ... *) -(* ************************************************************************* *) - - -fun refine_data_separation_lemma changed ctxt bal prem_no cands thm = -(* - bal controls of the refinement is in balancing mode - (only parameters whose occurrence in the term is - has strictly less than maximum height) - or not. - - prem_no indicates the number of the premise - - cands is the global list of candidates. - In balancing mode, parameters in maximal depth - position were eliminated. 
- - PRE maxidx is 0 for all mvars - - POST maxidx is 0 for all mvars - - *) -let - val thy = Proof_Context.theory_of ctxt - val prem = Logic.nth_prem(prem_no, Thm.prop_of thm) - val params = Logic.strip_params(prem) - val cands' = if bal then - let val term = HOLogic.dest_Trueprop(hd - (Logic.strip_assums_hyp prem)) - val (k,mp) = max_depth(Term.subst_bounds - ((rev(map Free params)),term)) - in filter(fn p => forall(fn p'=> not(p=p')) (mp)) cands end - else cands; - - val var_no = find_index (fn(s,t)=> exists(fn (name,ty)=> - s = name andalso t = ty) cands') - params - val (name,ty) = if var_no > ~1 then List.nth(params,var_no) - else raise (ERROR("no param found")) - val Type(tn,_) = ty; - val m = get_case_split_of_dt thy tn; - val n = m RSN (prem_no, thm); - (* pre: all occurrences of ?indname t1 ... tn have the same - arguments and types and occur in a critical pattern : - ?indname t1 ... tn = C x1 ... xn - ... - The Thm.adjust_maxidx_thm reflects the fact, that only one - MVar y should have been left - the others are erased due to - hypsubst internally - *) - fun find_mvar_pat indname goal_no thm = - let val goal = Logic.get_goal thm goal_no - val params = Logic.strip_params goal - fun cc(Const(@{const_name Pure.imp}, _) - $ (Const(@{const_name Trueprop}, _) - $ (Const(@{const_name HOL.eq},t) $ A $ B)) - $ C) = (case strip_comb(subst_bounds( - rev(map Free params), A)) of - (Var(s,_),S) =>(if s = indname - then (map dest_Free S, head_of B) - else cc(C)) - | (_, _) => cc C) - | cc _ = error("find_mvar_pat: No pattern found") - in cc (strip_all_body goal) end - fun lifter name S = - let fun ll k [] = if 0 <=k then Bound(k) - else error"find_mvar:impossible lifter" - |ll k ((s,t)::R) = - if s = name then Abs(s,t, ll 0 R) - else Abs(s,t, ll (k+1) R) - in ll (~(length S)) S end - fun typ_of_lifter (name:string) S = - let fun tll S [] = error"find_mvar:impossible type for lifter" - |tll S ((s,t)::R) = - if name = s - then ((rev S)@(t::(map snd R)), t) - else tll (t::S) R - in (op --->)(tll [] S) end - val (lifter_params, constr) = find_mvar_pat ("y",1) prem_no (Thm.prop_of n) - (* ("y",1) is a fact from the invariant maxidx=0 (see below) *) - val subst0 = lifter name lifter_params - val ty_lift = fastype_of subst0 - val ty_constr = fastype_of constr - val subst = Thm.cterm_of ctxt subst0 - val ty_lift_body = body_type ty_lift - val ty_lift_constr = body_type ty_constr - val tyenv = Sign.typ_match thy (ty_lift_constr,ty_lift_body) Vartab.empty - handle Type.TYPE_MATCH => Vartab.empty - (* Hack that works for cases, where non-recursive positions in a type - were GROUND types (like color in RBT_tree). *) - fun tysubs2ctyps(x,(s,t)) = ((x,s),Thm.ctyp_of ctxt t) (*(Thm.ctyp_of ctxt (TVar(x,s)),Thm.ctyp_of ctxt t) *) - val tsubst = map tysubs2ctyps (Vartab.dest tyenv) - val () = (changed:=true) - val var = (("y",1),ty_lift) - val result = (Drule.instantiate_normalize (tsubst, [(var,subst)]) n) - |$> (ALLGOALS(fn n => TRY(hyp_subst_tac_thin true ctxt n))) - |$> prune_params_tac ctxt -in Thm.adjust_maxidx_thm 0 result - (* establishes POST and makes this fact therefore invariant. *) - (* More safe, but also more expensive variant: standard result. *) -end - -(* This is the core procedure of the data_separation_lemma generation, and, - in a way, the core of the test case generator itself. - - It proceeds as follows: in a given theorem thm (usually an exhaustion lemma - itself), the premisses (having the form !!x1..xn. A x1 .. xn ==> B in - general) were selected and "refined" in a "sweep". 
This means that in a - sweep, for each premisse an xi is selected (which must have a data-type, - say ti) and the exhaustion theorem of ti is forward resolved into this - premisse; cleaning it up (by prems_hyp_subst_tac) also erases xi as - parameter of this premisse. - In a next sweep, another xj may be selected and so forth, up to the moment - were no parameters remained in the premisse that existed before the initial - sweep (this the list parameters is the "cand" (candidates) x1 .. xj for - all premisses which precomputed.) - - Then, the candidate list may be recomputed and the process repeated up to - n times. Since any group of sweeps erases one generation of parameters, - the parameter n corresponds to "depth+1" of the data term occuring in the - premisse increases, i.e. from: - - [| ?y = Leaf ==> ?P; - !!a tree1 tree2. ?y = Node a tree1 tree2 ==> ?P |] - ==> ?P - - we get: - - [| ?y = Leaf ==> ?P; !!a. ?y = Node a Leaf Leaf ==> ?P; - !!a aa tree1 tree2a. ?y = Node a Leaf (Node aa tree1 tree2a) ==> ?P; - !!a aa tree1a tree2a. ?y = Node a (Node aa tree1a tree2a) Leaf ==> ?P; - !!a aa tree1a tree2a ab tree1 tree2b. - ?y = Node a (Node aa tree1a tree2a) (Node ab tree1 tree2b) ==> ?P - |] ==> ?P - - This picture is slightly complicated by the fact that Isabelle does not - change the parameters, but may reintroduce parameter erased by a previous - sweep during the forward resolution step. Thus, the procedure may loop. - - Instead of not erasing the parameters during lemma-refinement (this - leads to very large liftings of the meta-variables over these - parameter lists and is thus expensive), we chose to use - a particular test for "maximal" parameters in order to avoid this problem. - - *) -fun nrefine_data_separation_lemma changed ctxt 0 thm = thm - | nrefine_data_separation_lemma changed ctxt n thm = - let - val thy = Proof_Context.theory_of ctxt - val isaDataType = fn(_,ty)=> isDataType thy ty - val thm_params = maps Logic.strip_params (Thm.prems_of thm) - val cands = distinct (op=) (filter isaDataType thm_params) - fun rds b(prno,t) = refine_data_separation_lemma changed ctxt b prno cands t - handle _ => t (* FIXME avoid handle _ *) - val () = (changed:=false) - fun sweep b th = List.foldr (rds b) th (1 upto (length(Thm.prems_of th))) - val thm1 = sweep false thm (* debalancing phase *) - fun repeat_till_stable thm = let val () = (changed:=false) - val thm' = sweep true thm - in if !changed - then repeat_till_stable thm' - else thm' - end - in nrefine_data_separation_lemma changed ctxt - (n-1) - (if !changed - then thm1 (* debalancing phase had no effect *) - else repeat_till_stable thm1) (* balancing phase *) - end; - -(* NOTE: Currently, this core routine is restricted to - "real free" variables; parameters will not be - considered as a simple means to avoid looping ... 
*) -fun data_separation_tac ctxt var no state = - let - val thy = Proof_Context.theory_of ctxt - val depth = Config.get ctxt TestEnv.depth - val (na,Type(tn,_)) = var - fun ds_thm tn = nrefine_data_separation_lemma (Unsynchronized.ref false) - ctxt depth - (get_case_split_of_dt thy tn) - - val data_sep_thm = Drule.export_without_context (ds_thm tn) - val brchs = length(Thm.prems_of data_sep_thm) - in ((res_inst_tac ctxt [((("y",0),Position.none),na)] [] data_sep_thm no) - THEN - (res_inst_tac ctxt [((("x",0),Position.none),na), - ((("k",0),Position.none),Int.toString depth)] - [] - @{thm THYP_E_reg} - (no + brchs - 1)) - THEN (thin_tac ctxt (na^"= _") [] (no+brchs))) state - end; - -(* Copied from Isabelle2011 old_term.ML sources, for providing old variable order *) -(*Accumulates the Frees in the term, suppressing duplicates.*) -fun add_term_frees (t, frees: term list) = case t of - Free _ => Ord_List.insert Term_Ord.term_ord t frees - | Abs (_,_,body) => add_term_frees(body,frees) - | f$t => add_term_frees (f, add_term_frees(t, frees)) - | _ => frees; - -fun term_frees t = add_term_frees(t,[]); - -(* Included for complete backward compatibility *) -fun data_separation_old_tac ctxt depth no state = - let - val thy = Proof_Context.theory_of ctxt - val prem = Logic.nth_prem(no, Thm.prop_of state) - val params = Logic.strip_params(prem) - val term = HOLogic.dest_Trueprop(Logic.strip_assums_concl prem) - val frees = map dest_Free (term_frees term) -(* val cands = filter (fn (_,ty) => isDataType thy ty) (params@frees) *) - val cands = filter (fn (_,ty) => isDataType thy ty) (frees) - fun ds_thm tn = - nrefine_data_separation_lemma (Unsynchronized.ref false) ctxt depth (get_case_split_of_dt thy tn) - in (case cands of - [] => no_tac - | (na,Type(tn,_))::_ => - let val data_sep_thm = Drule.export_without_context (ds_thm tn) - val brchs = length(Thm.prems_of data_sep_thm) - in (res_inst_tac ctxt [((("y",0),Position.none),na)] [] data_sep_thm no) - THEN (res_inst_tac ctxt [((("x",0),Position.none),na), - ((("k",0),Position.none),Int.toString depth)] [] - @{thm THYP_E_reg} - (no + brchs - 1)) - THEN (thin_tac ctxt (na^"= _") [] (no+brchs)) - end) - (state) -end - - -(* "Small", non-recursive Version of gen_test_cases: - (TODO for future releases: Integration into the testgen-Kernel). - searches parameter in goal no with typename tname and performs case-split on this - parameter. Note that the type-name must be the full name. - PRE: current goal i must be in {1 .. nprems_of st}. - PRE: ctxt must be the ProofContext of st. *) - (* op member : ('a * 'b -> bool) -> 'b list -> 'a -> bool *) - - fun case_tac_typ ctxt tnames no thm = - let - val prem = Logic.nth_prem(no, Thm.prop_of thm) - val params = Logic.strip_params(prem) - val max = Thm.nprems_of thm - val params = params (* @ free variables TODO !!! *) - fun ty_search (_,Type(s,_)) = member (op =) (tnames) (s) - |ty_search (_,_) = false - fun REP tac st = let val max' = Thm.nprems_of st - in (max'-max+no DOWNTO no) tac st end - in case List.find ty_search params of - SOME (X,_) => EVERY[Induct_Tacs.case_tac ctxt X [] NONE no , - REP (hyp_subst_tac_thin true ctxt), - unfold_tac ctxt [@{thm triv_forall_equality}] ] thm - (* probably not the most efficient - try simp only *) - | NONE => no_tac thm - end; - - - -(* ************************************************************************* *) -(* normal form computation ... 
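   Reminder with an example (illustrative only; "prog" stands for the program
   under test): a clause in test normal form keeps the program under test out
   of the premises, e.g.

       x ~= [] ==> prog x = rev x

   to_TNF below establishes this shape by moving every premise that still
   mentions the program into the conclusion as a negated disjunct, leaving the
   program-free premises as constraints of the test case.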
*) -(* *************************** ********************************************** *) - -fun to_TNF ctxt prog no state = - let - val thy = Proof_Context.theory_of ctxt; - val term = Thm.prop_of state - val prems = Logic.prems_of_goal term no - val concl = Logic.concl_of_goal term no - fun term_string_of_goal_no () = (Pretty.unformatted_string_of o (Syntax.pretty_term ctxt)) - (Logic.list_implies(prems,concl)) - val prems' = filter (exists_prog prog) prems; - val concl' = filter (exists_prog prog) [concl]; - fun subst t = (Thm.cterm_of ctxt (Var(("P",0),HOLogic.boolT)), - Thm.cterm_of ctxt t); - (* swap_tac : tactic * term -> tactic; takes one term containing - "prog" and swap it into the conclusion as disjoint *) - val swap_accumulate = @{lemma "[| P ; ~P | Q |] ==> Q" by auto} - fun swap_tac (tm,tc) = tc THEN - (Term_Tactics.eres_terminst_tac - ctxt - [] - [subst (HOLogic.dest_Trueprop tm)] - swap_accumulate - no) - (* rev_tac swaps term "a" into conclusion *) - fun rev_tac thy a = Term_Tactics.eres_terminst_tac - ctxt - [] - [subst (HOLogic.dest_Trueprop a)] - @{thm rev_notE} - no - in ((case (prems',concl') of - ([],[]) => error ("Subgoal " ^Int.toString(no) - ^" can not be converted into TNF: \n \n" ^ - term_string_of_goal_no()) - (* case that conclusion does not contain "prog", but several - assumptions do: pick one of them, revert first *) - |(a::R,[]) =>(rev_tac thy a - THEN - (List.foldl swap_tac all_tac R)) - |(R,_) => (List.foldl swap_tac all_tac R) - ) - THEN - (simp_tac (put_simpset HOL_ss ctxt) no)) state - end; - - - -fun normalize_TNF ctxt prog top = - List.foldl (fn (arg,t) => t THEN to_TNF ctxt prog arg) - all_tac - ((length (prems_of top)) downto 1) - top - - - -(* TODO: adb *) -fun minimize_TNF ctxt = distinct_subgoals_tac -(* This should do: - - sorting assumptions in all subgoals - - sorting parameters of subgoals - - sortieren der clauses - - weniger asms zuerst (defer_tac) - - eliminieren von assumptions subsumed in other clauses - [| P;R |] ==> A; - [| P;Q;R |] ==> A - ------------------ - [| P;R |] ==> A; - [| P;R |] ==> A - durch thin_tac - - eliminieren von subsumed or's, - a particular weakness of our procedure so far. - [| P;R |] ==> A; - [| ~P;Q |] ==> B - [| R; Q|] ==> A|B; - ------------------ - [| P;R |] ==> A; - [| ~P;Q |] ==> B - - finally distinct_subgoals_tac -*) - - -(* orig version *) -(* -fun gen_test_case_tac thy clasimp depth steps prog = - EVERY[ALLGOALS (asm_full_simp_tac (snd clasimp)), - safe_tac ((global_claset_of thy) addss (global_simpset_of thy)), - normalize_TNF thy prog, - minimize_TNF, - ALLGOALS (fn n => TRY(uniformityI_tac prog n))]; -*) - - - -(* ************************************************************************* *) -(* test case generation ... 
*) -(* ************************************************************************* *) - -fun SORT ctxt rules n thm = let - val matches = maps (fn rule => rule ctxt n thm) rules - val sorted = map (fn (i,tac) => tac) - (sort (fn (x,y) => rev_order (int_ord (fst x, fst y))) matches) -in - (EVERY sorted) thm -end - - - -fun EXEC ctxt tac str = EVERY[if Config.get ctxt profiling - then Clocks.start_clock_tac str - else all_tac, - if Config.get ctxt trace - then print_tac ctxt ("\n:"^str^"\n") - else all_tac, - tac, - if Config.get ctxt profiling - then Clocks.stop_clock_tac str - else all_tac] - -fun EXEC' ctxt tac str n = EXEC ctxt (tac n) str - -fun PRINT_THM ctxt no = - (fn state => (if Config.get ctxt trace - then let val prem = Logic.nth_prem(no, prop_of state) - val str = (Pretty.string_of o (Syntax.pretty_term ctxt)) prem - in print_tac ctxt ("\n goal "^Int.toString(no)^"::"^str^"\n") end - else all_tac) state) - -fun finalize_tac ctxt prog = - EXEC ctxt (ALLCASES(TRY'(uniformityI_tac ctxt prog))) "main uniformity NF" - -fun gen_test_case_tac ctxt prog = let - val steps = Config.get ctxt TestEnv.steps - val testenv = TestEnv.get_testenv ctxt - val rules = TestEnv.get_test_derivation_rules testenv - - val begin_profiling_tac = if Config.get ctxt profiling - then Clocks.start_clock_tac "unnamed_test_thm" THEN Clocks.start_clock_tac "gen_test_cases" - else all_tac - - val end_profiling_tac = if Config.get ctxt profiling - then Clocks.stop_clock_tac "gen_test_cases" THEN Clocks.stop_clock_tac "unnamed_test_thm" - else all_tac - - fun pre_normalize_TNF_tac ctxt = TestEnv.get_pre_normalize_TNF_tac (TestEnv.get_testenv ctxt) ctxt - - val sort_tac = SELECT_GOAL (SORT ctxt rules 1) -in - EVERY[begin_profiling_tac, - EXEC ctxt (ALLCASES((asm_full_simp_tac ctxt))) "pre-simplification", - EXEC ctxt (REPEAT_DETERM_N steps (ALLCASES sort_tac)) "main completed", - EXEC ctxt (pre_normalize_TNF_tac ctxt) "pre_norm", - EXEC ctxt (normalize_TNF ctxt prog) "TNF", - if Config.get ctxt no_finalize - then all_tac - else finalize_tac ctxt prog, - end_profiling_tac] -end - -fun data_separation_rule ctxt no state = -let - val thy = Proof_Context.theory_of ctxt - val prem = Logic.nth_prem(no, prop_of state) - (*val params = Logic.strip_params(prem) *) - val term = HOLogic.dest_Trueprop(Logic.strip_assums_concl prem) - val frees = Term.add_frees term [] - (*val cands = filter (fn (_,ty) => isDataType thy ty) (params@frees) - : LOOPS AT PRESENT SINCE gen_test_cases active on frees *) - val cands = filter (fn (_,ty) => isDataType thy ty) (frees) - fun tac cand = TRY (CHANGED_PROP (ALLCASES (data_separation_tac ctxt cand)) - THEN (ALLCASES (TRY' (hyp_subst_tac_thin true ctxt)))) - THEN (if Config.get ctxt trace - then print_tac ctxt "After data separation" - else all_tac) -in - map (fn cand => (100, tac cand)) cands -end - -fun to_HCNF_rule ctxt no state = - [(50, EXEC ctxt (TestEnv.get_pre_safe_tac (TestEnv.get_testenv ctxt) ctxt) "Simp" - THEN EXEC ctxt (TRY (safe_tac ctxt)) "HCN")] - -fun minimize_rule ctxt no state = - [(10, EXEC ctxt (TestEnv.get_pre_minimize_TNF_tac (TestEnv.get_testenv ctxt) ctxt) "pre_minimize" - THEN EXEC ctxt (minimize_TNF ctxt) "MinimTNF" - THEN (ALLCASES(asm_full_simp_tac ctxt)) - THEN (TRY (safe_tac ctxt)))] - -(* Included for complete backward compatibility *) -fun gen_test_case_old_tac ctxt prog = - let val testenv = TestEnv.get_testenv ctxt - val depth = Config.get ctxt TestEnv.depth - val steps = Config.get ctxt TestEnv.steps - - val prep_HCNF = TestEnv.get_pre_safe_tac testenv - val 
to_HCNF = TRY (safe_tac ctxt) - val pre_normalize_TNF_tac = TestEnv.get_pre_normalize_TNF_tac testenv - val pre_minimize_TNF_tac = TestEnv.get_pre_minimize_TNF_tac testenv - fun trace_tac b str = (if Config.get ctxt trace - then print_tac ctxt ( "\n"^b^":"^str^"\n") - else all_tac) - fun exec phase b str = EVERY[if Config.get ctxt profiling - then Clocks.start_clock_tac str - else all_tac, - phase, trace_tac b str, - if Config.get ctxt profiling - then Clocks.stop_clock_tac str - else all_tac] - - fun standard k b = - EVERY[if Config.get ctxt trace then print_tac ctxt "Enter" else all_tac, - exec (prep_HCNF ctxt) b "Simp", - exec (to_HCNF) b "HCN", - exec (pre_normalize_TNF_tac ctxt) b "pre_norm", - exec (normalize_TNF ctxt prog) b "TNF", - exec (pre_minimize_TNF_tac ctxt) b "pre_minimize", - exec (minimize_TNF ctxt) b "MinimTNF"] - val dst = data_separation_old_tac ctxt depth - - fun pairself f (x, y) = (f x, f y); - - (*True if the two theorems have the same prop field, ignoring hyps, der, etc.*) - val eq_thm_prop = op aconv o pairself Thm.full_prop_of; - - fun main k st =(if k < steps (* not yet all frees per goal - less steps exhausted*) - then (ALLCASES(fn n => TRY(dst n) )) THEN - (* try exhaustion per goal *) - (COND(fn st' => not (eq_thm_prop(st, st'))) - (* something 's changed *) - ( (ALLCASES (fn n => TRY((hyp_subst_tac_thin true ctxt) n) ) ) (* REALLY ? *) - THEN (standard k "1") - THEN (main (k+1)) - ) - (standard k "2")) - else standard k "0" (* run out of steps *)) (st) - val begin_profiling_tac = if Config.get ctxt profiling then - Clocks.start_clock_tac "unnamed_test_thm" THEN Clocks.start_clock_tac "gen_test_cases" - else - all_tac - - val end_profiling_tac = if Config.get ctxt profiling then - Clocks.stop_clock_tac "gen_test_cases" THEN Clocks.stop_clock_tac "unnamed_test_thm" - else - all_tac - in EVERY[begin_profiling_tac, - exec (ALLCASES((asm_full_simp_tac ctxt))) "" "pre-simplification", - exec (main 0) "" "main completed", - exec (ALLCASES(TRY'(uniformityI_tac ctxt prog))) "" "main uniformity NF", - end_profiling_tac] - end; - - -(* ************************************************************************* *) -(* *) -(* testdata generation ... 
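   For each test case, gen_test_data below first instantiates remaining type variables with
   the configured candidate types, then tries to solve the constraints with registered
   abstract test data (abs_data_tac), random instantiation, QuickCheck and finally SMT;
   variables left over in solved cases are filled with random terms, and the resulting
   concrete test data, test hypotheses and unsolved POs are stored in the test environment
   under the name of the test suite.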
*) -(* *) -(* ************************************************************************* *) - - -fun abs_data_tac ctxt atdata = - let fun single_abs_data_tac thm = - Subgoal.FOCUS_PARAMS(fn {context, ...} => - SOLVE (Method.insert_tac context [thm] 1 THEN auto_tac context)) ctxt - in - FIRST' (map single_abs_data_tac atdata) - end - - -fun test_solve_tac ctxt atdata = - let val thy = Proof_Context.theory_of ctxt - val remove_po = EqSubst.eqsubst_tac ctxt [0] [@{thm PO_def}] - val total_iterations = Config.get ctxt TestEnv.iterations - val max_simple_iterations = 50 - val simple_iterations = Int.min(total_iterations, max_simple_iterations) - val further_iterations = Int.max(total_iterations - max_simple_iterations,0) - in - remove_po - THEN' PRINT_THM ctxt - THEN' (FIRST'[EXEC' ctxt (abs_data_tac ctxt atdata) "abs_data", - EXEC' ctxt (RandomBackend.random_inst_tac ctxt simple_iterations) "random_inst", - EXEC' ctxt (QuickCheckBackend.quickcheck_tac ctxt further_iterations) "quickcheck", - EXEC' ctxt (SMTBackend.testgen_smt_tac ctxt) "smt"]) - end - - - -fun is_solved n thm = - let fun is_unsolved_po i = not (null (inter (op =) (BackendUtils.premvars n thm) (BackendUtils.premvars i thm))) - andalso i <> n - in not (exists is_unsolved_po (1 upto (nprems_of thm))) - end - -(* Instantiate the remaining variables in test cases -with random terms *) - -fun finalize_test_tac ctxt n thm = let -in - (COND (is_solved n) (RandomBackend.single_rand_inst_tac ctxt (BackendUtils.premvars n thm)) all_tac) thm -end - - - -fun solve_all ctxt atdata state = - let (* adding a let expression.. in order to check if all free vars were instatiated by smt.. - if true then error code else solve_all*) - val PARALLEL_TRYSOLVE = PARALLEL_TRYSOLVE_POs ctxt (fn ctxt => test_solve_tac ctxt atdata 1) state - - val term = PARALLEL_TRYSOLVE |> Seq.list_of |> - (List.map prop_of) |> HOLogic.mk_list @{typ "term"} - val use_smt = Config.get ctxt TestEnv.use_smt; -in - if (use_smt andalso (Term.exists_subterm (Term.is_Var) term)) - then error("One or more free variables were not instantiated by the solver!") - - else state |$> PARALLEL_TRYSOLVE_POs ctxt (fn ctxt => test_solve_tac ctxt atdata 1) - |$> ALLCASES (finalize_test_tac ctxt) (* cannot parallelize here *) - -end - - - (* *********************************************************************** *) - (* Normalizer *) - (* *********************************************************************** *) - (* motivation: type instantiation may make - constraints of test-cases unsolvable. - Unsolvable cases should be removed before random-solving. - Since constraints with Mvars were not considered - by simptac and arith_tac, they were substituted - against arbitrary free vars aforehand. - -fun norm_tac ctxt n thm = - let val thy = Proof_Context.theory_of ctxt - val m = Thm.nprems_of thm - val prem = Logic.nth_prem(n, prop_of thm) - val k = length (Logic.strip_imp_prems(prem)) - val goal = Thm.trivial(cterm_of thy prem) - val tvars = OldTerm.term_vars prem - val insts = map(fn(x as Var((s,a),t))=> - (cterm_of thy x, - cterm_of thy (Free("XXX"^s^(Int.toString a),t)))) - (tvars) - val cleanup = List.foldr (fn (_,tac) => (atac (n)) THEN tac) - all_tac (1 upto k) - in ( - (rtac (instantiate ([],insts) goal) n) - THEN - (full_simp_tac (simpset_of ctxt) n) (* may erase goal n. - --> test case unsolvable. 
*) - THEN - (fn thm' => if (nprems_of thm' < m+k) (* goal erased *) - then cleanup thm' - else no_tac thm )(* report failure *) - )(thm) - end *) - -fun gen_test_data name context = - let val ctxt = Context.proof_of context - val te = TestEnv.get_testenv ctxt - val bound = Config.get ctxt TestEnv.type_range_bound - val candTs= #type_candicates(TestEnv.rep_testenv te) - val type_grounds = groundT_thm context bound candTs - val testthm = (the(Symtab.lookup(TestEnv.get_test_thm_tab te)name) - handle Option.Option => error("No test theorem: "^name^" available!")) - val atdata = (the(Symtab.lookup(TestEnv.get_absdata_tab te)name) - handle Option.Option => []) - val jdmts = map (solve_all ctxt atdata) (type_grounds testthm) - val te1 = TestEnv.test_thm_inst_tab_update (Symtab.update(name,jdmts)(TestEnv.get_test_thm_inst_tab te)) te - val prems = List.concat (map Thm.prems_of jdmts) - val data = map (Thm.cterm_of ctxt) (filter is_test_data prems) - val te2 = TestEnv.set_test_data_tab (name,data) te1 - val hyps = map (Thm.cterm_of ctxt) (filter is_thyp prems) - val te3 = TestEnv.set_test_thyps_tab (name,hyps) te2 - val pos = map (Thm.cterm_of ctxt) (filter is_po prems) - val te4 = TestEnv.set_unsolved_PO_tab (name,pos) te3 - - val t = LogThy.get_td_delta () - val _ = writeln (String.concat ["Test theorem (gen_test_data) '",name,"': ", - Int.toString (List.length data)," test cases in ", - Time.toString t, " seconds"]) - val _ = if not (null pos) - then writeln (String.concat ["Warning: There were ", Int.toString (List.length pos) , " unsolved POs."]) - else () - val _ = LogThy.append (String.concat [ - Context.theory_name (Proof_Context.theory_of ctxt), ", " ,name, ", ", - "test data, " ,Int.toString (List.length data), - ", " ,Time.toString t,"\n"]) - - in TestEnv.map_data (K te4) context end; - -fun select_goals pred thy name = let - val te = TestEnv.get_testenv_global thy -in - maps (convert_goals_to_metahyps pred) - (the(Symtab.lookup(TestEnv.get_test_thm_inst_tab te)name) - handle Option.Option => error("No data statement: "^name^" available!")) -end - -val get_test_data = select_goals is_test_data - -val get_test_hyps = select_goals is_thyp - -val get_pos = select_goals is_po - -fun get_test_thm thy name = let - val te = TestEnv.get_testenv_global thy -in - (the(Symtab.lookup(TestEnv.get_test_thm_inst_tab te)name) - handle Option.Option => error("No data statement: "^name^" available!")) -end - -fun discharge_POs name context = - let fun use_test_instances_tac name ctxt = - let val te = TestEnv.get_testenv ctxt - val atdata = (the(Symtab.lookup(TestEnv.get_absdata_tab te)name) - handle Option.Option => []) - in (TRY' ( EqSubst.eqsubst_tac ctxt [0] [@{thm PO_def}] - THEN' abs_data_tac ctxt atdata)) - end - in ALLGOALS(ONLY_POS (use_test_instances_tac name context)) end - -(* *********************************************************************** *) -(* *) -(* Initial test environment *) -(* *) -(* *********************************************************************** *) - -val _ = Context.>> (TestEnv.map_data - ((TestEnv.pre_safe_tac_update (fn ctxt => ALLCASES (asm_full_simp_tac ctxt))) - #> (TestEnv.add_test_derivation_rule data_separation_rule) - #> (TestEnv.add_test_derivation_rule to_HCNF_rule) - #> (TestEnv.add_test_derivation_rule minimize_rule))); - -val mt_testenv = TestEnv.get_testenv (Context.the_local_context ()); - -val _ = - Outer_Syntax.command @{command_keyword "print_testenv"} "print test environment" - (Parse.name >> (fn name => Toplevel.keep ((TestEnv.print_testenv name) o 
Toplevel.context_of))); - -val options = Scan.optional (Parse.$$$ "(" |-- Parse.!!! (Scan.option Parse.nat--| Parse.$$$ ")")) (NONE); - -val _ = let fun print name opts ctxt = writeln (TestEnv.print_test_data ctxt name opts) in - Outer_Syntax.command @{command_keyword "print_conc_tests"} "print concrete tests" - (options -- Parse.name >> (fn (opts, name) => - Toplevel.keep ((print name opts) o Toplevel.context_of))) - end; - -val _ = let fun print name opts ctxt = writeln (TestEnv.print_test_case ctxt name opts) in - Outer_Syntax.command @{command_keyword "print_abs_tests"} "print abstract tests" - (options -- Parse.name >> (fn (opts, name) => - Toplevel.keep ((print name opts) o Toplevel.context_of))) - end; - -val _ = let fun print name opts ctxt = writeln (TestEnv.print_test_hyps ctxt name opts) in - Outer_Syntax.command @{command_keyword "print_thyps"} "print test hypotheses" - (options -- Parse.name >> (fn (opts, name) => - Toplevel.keep ((print name opts) o Toplevel.context_of))) - end; - -val _ = let fun print name opts ctxt = writeln (TestEnv.print_unsolved_pos ctxt name opts) in - Outer_Syntax.command @{command_keyword "print_upos"} "print unsolved proof obligations" - (options -- Parse.name >> (fn (opts, name) => - Toplevel.keep ((print name opts) o Toplevel.context_of))) - end; - - -(* ************************************************************************* *) -(* miscellaneous functions ... *) -(* ************************************************************************* *) - - -fun animate thy name = (* HACK - very simplistic implementation *) - let val ctxt = Proof_Context.init_global thy |> Config.put simp_trace true - fun animate_data_statement thm = - (Output.writeln ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"; - Pretty.writeln (Thm.pretty_thm ctxt thm); - Output.writeln "============================================"; - (* asm_full_simplify (global_simpset_of(theory_of_thm(thm))) thm; *) - asm_full_simplify ctxt thm; - Output.writeln "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<"); - in - List.app animate_data_statement (get_test_data thy name) - end; - -(* module setup *) - -val setup = - trace_setup #> - no_uniformity_setup #> - profiling_setup #> - no_finalize_setup; - -end (*struct *); - -*} - -setup {* TestEnv.setup #> TestGen.setup *} -text{* A key concept of HOL/TestGen is that of \emph{test suites}. These can be seen as a kind of -container data-structure that relates a test specification to a number of data, namely the -test theorem (the TNF), abstract and concrete test data, and specific configuration data. -Test suites are created at the end of a test-specification statement and its initial test refinement. -Later on they are enriched and finally used to generate test drivers and test executions. - -So, a test suite with name "X" is finally run in a test execution outside HOL/TestGen, -started by the shell command "run_X". 
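A typical session then looks roughly as follows; the test specification, the program under
test prog and the suite name "X" are purely illustrative placeholders:
\begin{verbatim}
  test_spec "pre x --> post x (prog x)"
    apply(gen_test_cases "prog")
    mk_test_suite "X"

  gen_test_data "X"
  print_conc_tests X
\end{verbatim}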
-*} - -ML{* - -fun mk_test_suite name = Toplevel.end_proof (fn _ => fn state => - let - val ctxt = Proof.context_of state; - val _ = if Config.get ctxt TestGen.profiling - then (Clocks.start_clock "unnamed_test_thm"; - Clocks.rename_clock "unnamed_test_thm" name; - Clocks.stop_clock name) - else () - - val _ = Proof.assert_backward state; - val {context = goal_ctxt, goal, ...} = Proof.goal state - fun up_thy thy = - let - fun add_to_dm name thm thy = - let - val thy = TestEnv.map_data_global (TestEnv.add_test_case (name,thm)) thy - val t = LogThy.get_tc_delta () - val num_of_tc = List.length - (List.filter (fn tc => TestGen.is_neither_thyp_nor_PO tc) (Thm.prems_of thm)) - val _ = writeln (String.concat ["Test theorem (gen_test_cases) '", - name,"': ", Int.toString num_of_tc, - " test cases in ",Time.toString t, - " seconds"]) - - val _ = LogThy.append (String.concat - [Context.theory_name thy, ", ", - name, ", " ,"test case, " ,Int.toString num_of_tc, - ", ", Time.toString t,"\n"]) - in - thy - end - in - thy |> Sign.add_path (space_implode "_" [name]) - |> (Global_Theory.add_thms [((@{binding test_thm}, goal), [])]) - |> snd - |> Sign.parent_path - |> add_to_dm (space_implode "_" [name]) goal - |> Clocks.flush_clocks - end - - in goal_ctxt |> Local_Theory.background_theory up_thy end); - - -val _ = Outer_Syntax.command - @{command_keyword mk_test_suite} - "store test state (theorem)" - (Parse.name >> mk_test_suite); - - - -(**********************) -fun gen_test_dataT name thy = - let val _ = LogThy.start_td_timer () - val _ = if Config.get_global thy TestGen.profiling - then (Clocks.init_clocks thy;Clocks.start_clock name;Clocks.start_clock "gen_test_data") - else () - - val thy = Context.theory_map (TestGen.gen_test_data name) thy (* call of the core-routine*) - - val t = LogThy.get_td_delta () - val thy = if Config.get_global thy TestGen.profiling - then (Clocks.stop_clock "gen_test_data"; Clocks.stop_clock name; Clocks.flush_clocks thy) - else thy - - val thy = Sign.add_path (space_implode "_" [name]) thy; - val thm = TestGen.get_test_thm thy name - val thy = snd(Global_Theory.add_thmss [((@{binding test_thm_inst},thm),[])] (thy)) - - val thy = Sign.parent_path thy; - in - thy - end - -val _ = Outer_Syntax.command - (*@{command_spec "gen_test_data"}*) - @{command_keyword gen_test_data} - "generate test data" - (Parse.name >> (Toplevel.theory o gen_test_dataT)); - -(**********************) - -val general_statement = - Parse_Spec.statement >> (fn x => ([], Element.Shows x)) || - Parse_Spec.long_statement; - -val _ = Outer_Syntax.local_theory_to_proof - @{command_keyword test_spec} - "define test specification" - (Scan.optional (Parse_Spec.opt_thm_name ":" --| - Scan.ahead (Parse_Spec.includes >> K "" || Parse_Spec.long_statement_keyword)) - Binding.empty_atts -- - Scan.optional Parse_Spec.includes [] -- - general_statement >> - (fn ((a, includes), (elems, concl)) => fn lthy => - let val _ = if Config.get lthy TestGen.profiling - then Clocks.init_clocks (Proof_Context.theory_of lthy) - else () - in - Specification.schematic_theorem_cmd true - "test_spec" NONE (K I) a includes elems concl false lthy - end)); - -*} -ML{* -fun set_wrapper str H src = - let val txt = Input.text_of src - val range = Input.range_of src - val txt' = str ^ "(" ^ txt ^ ")" - in H (Input.source true txt' range) end; - -(* BU : TO BE REVISED BY BU TO 2016. WAS SORT OF A HACK ANYWAY. 
- Chantal: did revise it; please double check*) -val _ = Outer_Syntax.command - (*@{command_spec "set_pre_safe_tac"}*) - @{command_keyword set_pre_safe_tac} - "configure gen_test_gen: set pre_safe_tac" - (Parse.ML_source >> (Toplevel.theory o - (set_wrapper - "(TestEnv.map_data_global o TestEnv.pre_safe_tac_update)" - Isar_Cmd.setup))); - - -val _ = Outer_Syntax.command - (*@{command_spec "set_pre_normalize_TNF_tac"}*) - @{command_keyword set_pre_normalize_TNF_tac} - "configure gen_test_gen: pre_normalize_TNF_tac" - (Parse.ML_source >> (Toplevel.theory o - (set_wrapper - "(TestEnv.map_data_global o TestEnv.pre_normalize_TNF_tac_update)" - Isar_Cmd.setup))) - -val _ = Outer_Syntax.command - (*@{command_spec "set_pre_minimize_TNF_tac"}*) - @{command_keyword set_pre_minimize_TNF_tac} - "configure gen_test_gen: set pre_minimize_TNF_tac" - (Parse.ML_source >> (Toplevel.theory o - (set_wrapper - "(TestEnv.map_data_global o TestEnv.pre_minimize_TNF_tac_update)" - Isar_Cmd.setup))) - - -fun pretty_cterm_style_generic f ctxt (style, (name,pos:Position.T)) = - let val termS = case (Symtab.lookup (f (TestEnv.get_testenv ctxt)) name) of - SOME k => List.map (style o Thm.term_of) k (* (nth(k,0)) *) - | NONE => error "No such test suite" - in Pretty.big_list "\\\\" (List.map (Thy_Output.pretty_term ctxt) ( termS)) end - -val pretty_thyp_style = pretty_cterm_style_generic TestEnv.get_test_thyps_tab -val pretty_conctest_style = pretty_cterm_style_generic TestEnv.get_test_data_tab -val pretty_uPO_style = pretty_cterm_style_generic TestEnv.get_unsolved_PO_tab - -(* code copy from Thy_Output (2016) since this is not exported there ... *) - -fun basic_entities name scan pretty = - Thy_Output.antiquotation name scan (fn {source, context = ctxt, ...} => - Thy_Output.output ctxt o Thy_Output.maybe_pretty_source pretty ctxt source); - -fun basic_entity name scan = basic_entities name (scan >> single); - -(* end copy *) - -val _ = Theory.setup -( (basic_entity @{binding thyps} (Term_Style.parse -- (Scan.lift (Parse.position Args.name))) - pretty_thyp_style) #> - (basic_entity @{binding uPOs} (Term_Style.parse -- (Scan.lift (Parse.position Args.name))) - pretty_uPO_style) #> - (basic_entity @{binding conctests} (Term_Style.parse -- (Scan.lift (Parse.position Args.name))) - pretty_conctest_style)) - ; -*} - - -attribute_setup test_inst = - {* Scan.lift Args.name >> (fn name => - Thm.declaration_attribute (fn thm => - TestEnv.map_data (TestEnv.add_abstest_data (name, thm)))) *} - "declare theorems for test case generation" - -attribute_setup testgen_type_candidates = - {* Scan.repeat1 Args.typ >> - (fn Ts => - Thm.declaration_attribute (fn _ => - TestEnv.map_data (TestEnv.type_candidates_update Ts))) *} - "declare testgen type candidates" - - -method_setup discharge_POs = - {* Scan.lift (Scan.option(Parse.string) >> - (fn params => fn ctxt => - let val _ = LogThy.start (); - val name = (case params of - NONE => "" - | SOME (s) => s); - in Method.SIMPLE_METHOD (CHANGED_PROP - (TestGen.discharge_POs name ctxt)) - end)) *} - "eliminate POs by test instances" - -method_setup gen_test_cases = - {* Scan.lift (Scan.option (Parse.nat -- Parse.nat) - -- Scan.repeat1 (Scan.unless (Scan.first clasimp_modifiers) Args.name)) - --| Method.sections clasimp_modifiers >> - (fn (params, prog) => fn ctxt => - let val _ = LogThy.start (); - val ctxt' = (case params of - NONE => ctxt - | SOME (depth, steps) => ctxt - |> Config.put TestEnv.depth depth - |> Config.put TestEnv.steps steps); - in Method.SIMPLE_METHOD (CHANGED_PROP - 
(TestGen.gen_test_case_tac ctxt' prog)) - end) *} - "generate symbolic test cases" - -method_setup gen_test_cases_old = - {* Scan.lift (Scan.option (Parse.nat -- Parse.nat) -- - Scan.repeat1 (Scan.unless (Scan.first clasimp_modifiers) Args.name)) --| - Method.sections clasimp_modifiers >> - (fn (params, prog) => fn ctxt => - let val _ = LogThy.start (); - val ctxt' = (case params of - NONE => ctxt - | SOME (depth, steps) => ctxt - |> Config.put TestEnv.depth depth - |> Config.put TestEnv.steps steps); - in Method.SIMPLE_METHOD (CHANGED_PROP - (TestGen.gen_test_case_old_tac ctxt' prog)) - end) - *} - "old variant of gen_test_cases" - -ML{* fn ctxt => HEADGOAL o (K(TestGen.thyp_ify ctxt ))*} -ML{* fn ctxt => METHOD (HEADGOAL o (K(TestGen.thyp_ify ctxt )))*} - - - -ML{* -val _ = - Theory.setup - (Method.setup @{binding thyp_ify} - (Attrib.thms >> (fn _ => fn ctxt => METHOD (HEADGOAL o (K(TestGen.thyp_ify ctxt ))))) - "making testhyps explicit" #> - Method.setup @{binding mp_ify} - (Attrib.thms >> (fn _ => fn ctxt => METHOD (HEADGOAL o (K(TestGen.mp_fy ctxt )) ))) - "fast destruction matching" #> - Method.setup @{binding all_ify} - (Attrib.thms >> (fn _ => fn ctxt => METHOD (HEADGOAL o (K(TestGen.all_ify ctxt [])) ))) - "replace all free variables by quantifiers") -*} - -end diff --git a/src/test-gen/src/test/TestLib.thy b/src/test-gen/src/test/TestLib.thy deleted file mode 100644 index b3ccc0a..0000000 --- a/src/test-gen/src/test/TestLib.thy +++ /dev/null @@ -1,57 +0,0 @@ -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * Testing.thy --- wiring everything together. - * This file is part of HOL-TestGen. - * - * Copyright (c) 2005-2007 ETH Zurich, Switzerland - * 2008-2012 Achim D. Brucker, Germany - * 2009-2012 Université Paris-Sud, France - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ******************************************************************************) -(* $Id:$ *) - -chapter {* Summing up the Library *} - -theory - TestLib -imports - Monads - Automata - Interleaving - Observers - EFSM_Toolkit - TestRefinements -begin - -end diff --git a/src/test-gen/src/test/TestRefinements.thy b/src/test-gen/src/test/TestRefinements.thy deleted file mode 100644 index 4fa18cf..0000000 --- a/src/test-gen/src/test/TestRefinements.thy +++ /dev/null @@ -1,248 +0,0 @@ -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * TestRefinements.thy --- for sequence testing. - * This file is part of HOL-TestGen. - * - * Copyright (c) 2005-2007, ETH Zurich, Switzerland - * 2009 B. Wolff, Univ. Paris-Sud, France - * 2009 Achim D. Brucker, Germany - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ******************************************************************************) -(* $Id:$ *) - -chapter {* Test-Refinements (IOCO and Friends) *} - -theory TestRefinements imports Monads Automata -begin - - - -text{* converts infinite trace sets to prefix-closed sets of finite traces, -reconciling the most common different concepts of traces ... *} - -consts cnv :: "(nat \ '\) \ '\ list" -(* NAIV !!! *) - -(* I WOULD PREFER A SYMMETRIC NOTION OF REFINEMENT ... However, this makes the use -of mbind less clear and evident. Sufficient conditions when these two things -are equivalent need to be explored ... *) - - -definition input_refine :: - "[('\,'o,'\) det_io_atm,('\,'o,'\) ndet_io_atm] \ bool" -where - input_refine_def: - "input_refine I SP = - (({det_io_atm.init I} = ndet_io_atm.init SP) \ - (\ t \ cnv ` (in_trace `(etraces SP)). 
- (det_io_atm.init I) - \ (os \ (mbind t (det_io_atm.step I)) ; - return(length t = length os))))" -notation input_refine ("(_/ \\<^sub>I _)" [51, 51] 50) - -text{* This testing notion essentially says: whenever we can run an -input sequence succesfully on the PUT (the program does not throw an exception), -it is ok. *} - -definition input_output_refine :: - "[('\,'o,'\) det_io_atm,('\,'o,'\) ndet_io_atm] \ bool" -where - input_output_refine_def: - "input_output_refine i s \ - ({det_io_atm.init i} = ndet_io_atm.init s) \ - (\ t \ prefixes (etraces s). - (det_io_atm.init i) - \ (os \ (mbind (map fst t) (det_io_atm.step i)); - return((map snd t) = os)))" - -notation input_output_refine ("(_/ \\<^sub>IO _)" [51, 51] 50) -text{* Our no-frills-approach to I/O conformance testing: -no quiescense, and strict alternation between input and output. *} - -text{* PROBLEM : Tretmanns / Belinfantes notion of ioco is inherently -different from Gaudels, which is refusal based. -See definition in accompanying IOCO.thy file: -\begin{verbatim} - -definition ioco :: "[('\,'o option,'\)IO_TS,('\,'o option,'\)IO_TS] \ bool" - (infixl "ioco" 200) - "i ioco s \ (\ t \ Straces(s). - out i (i after t) \ out s (s after t))" - -\end{verbatim} - -*} - - -definition after :: "[('\, 'o, '\) ndet_io_atm, ('\ \ 'o) list] \ '\ set" - (infixl "after" 100) -where "atm after l = {\' . \ t \ rtraces atm. (\' = fst(t (length l)) \ - (\ n \ {0 .. (length l) - 1}. l!n = fst(snd(t n))))}" - -definition out :: "[('\, 'o, '\) ndet_io_atm,'\ set, '\] \ 'o set" -where "out atm ss \ = {a. \\ \ ss. \\'. (a,\') \ ndet_io_atm.step atm \ \}" - -(* OLD: -definition ioco :: "[('\, 'o, '\) det_io_atm,('\, 'o, '\) ndet_io_atm] \ bool" - (infixl "ioco" 200) -where "i ioco s = (({det_io_atm.init i} = ndet_io_atm.init s) \ - (\ t \ cnv ` (etraces s). - \ \ \. - ((det_io_atm.init i) - \ (os \ (mbind (map fst t @ [\]) (det_io_atm.step i)) ; - return(os = map snd t @ [\]))) - \ - \ \ out s (s after t)))" *) - -definition ready :: "[('\, 'o, '\) ndet_io_atm,'\ set] \ '\ set" -where "ready atm ss = {\. \\ \ ss. ndet_io_atm.step atm \ \ \ {}}" - -definition ioco :: "[('\,'o,'\)ndet_io_atm, ('\,'o,'\)ndet_io_atm] \ bool" - (infixl "ioco" 200) -where "i ioco s = (\ t \ prefixes(etraces s). - \ \ \ ready s (s after t). out i (i after t) \ \ out s (s after t) \)" - -definition oico :: "[('\,'o,'\)ndet_io_atm,('\,'o,'\)ndet_io_atm] \ bool" - (infixl "oico" 200) -where "i oico s = (\ t \ prefixes(etraces s). - ready i (i after t) \ ready s (s after t))" - -definition ioco2 :: "[('\,'o,'\)ndet_io_atm, ('\,'o,'\)ndet_io_atm] \ bool" - (infixl "ioco2" 200) -where "i ioco2 s = (\ t \ eprefixes (ndet_io_atm.step s) (rtraces s). - \ \ \ ready s (s after t). - out i (i after t) \ \ out s (s after t) \)" - -definition ico :: "[('\, 'o, '\) det_io_atm,('\, 'o, '\) ndet_io_atm] \ bool" - (infixl "ico" 200) -where "i ico s = (\ t \ prefixes(etraces s). - let i' = det2ndet i in ready i' (i' after t) \ ready s (s after t))" - -lemma full_det_refine: "s = det2ndet s' \ - (det2ndet i) ioco s \ (det2ndet i) oico s \ input_output_refine i s" -apply(safe) -oops - -definition ico2 :: "[('\,'o,'\)ndet_io_atm,('\,'o,'\)ndet_io_atm] \ bool" - (infixl "ico2" 200) -where "i ico2 s \ \ t \ eprefixes (ndet_io_atm.step s) (rtraces s). - ready i (i after t) \ ready s (s after t)" - - -text{* There is lots of potential for optimization. 
-\begin{itemize} -\item only maximal prefixes -\item draw the $\omega$ tests inside the return -\item compute the $\omega$ by the $\ioprog$, not quantify over it. -\end{itemize} -*} - -section{* Generic Monadic Test Refinement Notions *} - -text{* Generic Notion *} - -definition monadic_test_refinement :: - "('\ \ ('o, '\)MON\<^sub>S\<^sub>E) \ - '\ set \ (* state invariant *) - '\ list set \ (* coverage or test-purpose *) - ('\ list \ 'o list \ '\ \ bool) \ (* post-cond *) - ('\ \ ('o, '\)MON\<^sub>S\<^sub>E) \ bool" - ("_ \\<^sub>\_,_,_\<^sub>\ _" [100,100,100,100,100]101) -where - - - "(MODEL \\<^sub>\Init,CovCrit,conf\<^sub>\ SUT) = - (\ \\<^sub>0 \ Init. \ \s \ CovCrit. \ res. - (\\<^sub>0 \ (os \ mbind \s MODEL; return (conf \s os res))) - \ - (\\<^sub>0 \ (os \ mbind \s SUT; return (conf \s os res))))" - -subsection{* Inclusion Test Refinements *} - -definition incl_ref :: "('\ \ ('o, '\)MON\<^sub>S\<^sub>E) \ - '\ set \ (* state invariant *) - '\ list set \ (* coverage or test-purpose *) - ('\ \ ('o, '\)MON\<^sub>S\<^sub>E) \ bool" - ("_ \\<^sub>I\<^sub>\_,_\<^sub>\ _" [100,100,100,100]101) -where "(S \\<^sub>I\<^sub>\Init,CC\<^sub>\ I) = (S \\<^sub>\Init,CC,(\ is os x. length is = length os \ os=x)\<^sub>\ I)" - - -lemma inclusion_test_I : - "(\\\<^sub>0 \s res. - \\<^sub>0 \ Init \ - \s \ CC \ - \\<^sub>0 \ ( os \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e \s S; unit\<^sub>S\<^sub>E (length \s = length os \ os = res)) - \ - \\<^sub>0 \ ( os \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e \s I; unit\<^sub>S\<^sub>E (length \s = length os \ os = res))) - \ (S \\<^sub>I\<^sub>\Init,CC\<^sub>\ I)" -unfolding incl_ref_def monadic_test_refinement_def -by auto - -lemma inclusion_test_I_opt : - "(\\\<^sub>0 \s res. - \\<^sub>0 \ Init \ - \s \ CC \ - \\<^sub>0 \ ( os \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p \s S; unit\<^sub>S\<^sub>E (os = res)) - \ - \\<^sub>0 \ ( os \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p \s I; unit\<^sub>S\<^sub>E (os = res))) - \ (S \\<^sub>I\<^sub>\Init,CC\<^sub>\ I)" -apply(rule inclusion_test_I) -apply(subst Monads.mbindFSave_vs_mbindFStop) -apply(subst (asm) Monads.mbindFSave_vs_mbindFStop) -by(auto) - - - -subsection{* Inclusion Test Refinements with Abort *} - -text{* The abort scenario allows operations to fail, so violations - to input-enabledness are permitted. The model assumes that a particular - abort-semantics is respected: the operation is assumed to fail explicitly, - \ie{} observably by the tester, and to leave the state unchanged. *} - - -definition incl_aborts_ref :: "('\ \ ('o, '\)MON\<^sub>S\<^sub>E) \ - '\ set \ (* state invariant *) - '\ list set \ (* coverage or test-purpose *) - ('\ \ ('o, '\)MON\<^sub>S\<^sub>E) \ bool" - ("_ \\<^sub>I\<^sub>A\<^sub>\_,_\<^sub>\ _" [100,100,100,100]101) -where "(S \\<^sub>I\<^sub>A\<^sub>\Inv,IS\<^sub>\ I) = (S \\<^sub>\Inv,IS,(\ is os x. 
os=x)\<^sub>\ I)" - - -(* TODO : Deadlock Refinement *) - -(* TODO : Monadic IOCO *) - - -end diff --git a/src/test-gen/src/test/TestScript.thy b/src/test-gen/src/test/TestScript.thy deleted file mode 100644 index 6cf2e42..0000000 --- a/src/test-gen/src/test/TestScript.thy +++ /dev/null @@ -1,164 +0,0 @@ -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * TestScript.thy --- Test Script and Test Data exporter - * This file is part of HOL-TestGen. - * - * Copyright (c) 2011-2015 Achim D. Brucker, Germany - * 2016-2017 The University of Sheffield, UK - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ******************************************************************************) -(* $Id: TestScript.thy 13147 2017-08-18 19:42:12Z brucker $ *) - - -theory TestScript -imports TestGen - "codegen_fsharp/code_fsharp" -keywords "generate_test_script" :: thy_decl - and "export_test_data" :: diag -begin - -code_printing constant "RSF" \ - (SML) "TestHarness.rsf" - and (Fsharp) "TestHarness.rsf" - and (Scala) "TestHarness.rsf" - -datatype Lazy = "lazy" bool - -code_printing constant "lazy" \ - (SML) "fn () => (_)" - and (Fsharp) "lazy ((_))" - and (Scala) "() => ((_))" - -code_printing type_constructor Lazy \ - (SML) "(unit -> bool)" - and (Fsharp) "Lazy" - and (Scala) "() => Boolean" - -code_printing code_module "Header" \ (SML) -{* - (********************************************************) - (* This file is generated automatically. 
Do not edit *) - (********************************************************) -*} - -ML{* -open List; - fun gen_ts_def dataname thy = let - - val tsname = "test_script" - val fqtsname = (Context.theory_name thy)^"."^dataname^"."^tsname - val tsbnd = Binding.name tsname - val tstype = HOLogic.listT (HOLogic.mk_tupleT[HOLogic.listT @{typ "Lazy"},@{typ "Lazy"}]) - - val (td,_) = (TestGen.get_test_data thy dataname,thy); - - fun def const bnd term thy = snd (Global_Theory.add_defs false [((bnd, - (Logic.mk_equals (const, term))),[Code.add_default_eqn_attribute Code.Equation])] thy) - - val prop_terms = List.map (fn p => let val _ $ ret = Thm.concl_of p in ret end) td; - val prems_terms = map (fn data => (map (fn p => let val _ $ ret = p in ret end) - (Thm.prems_of data))) td; - val spec = map2 (fn x => fn y => (x,y)) prems_terms prop_terms - - fun mk_abs t = Const("TestScript.Lazy.lazy",@{typ "bool"} --> @{typ "Lazy"})$t - - val ts_term = HOLogic.mk_list (HOLogic.mk_tupleT[HOLogic.listT @{typ "Lazy"},@{typ "Lazy"}]) - (map (fn (a,b) => HOLogic.mk_tuple [HOLogic.mk_list @{typ "Lazy"} - (map mk_abs a), mk_abs b]) spec) - in - thy |> (Sign.add_path (space_implode "_" [dataname])) - |> Sign.add_consts [(tsbnd, tstype, NoSyn)] - |> def (Const(fqtsname,tstype)) (tsbnd) ts_term - |> (Sign.parent_path) - end - -*} - - -ML {* (Outer_Syntax.command (("generate_test_script"), Position.none) - "generate code equation representing test script" - (Parse.name >> (fn name => Toplevel.theory (gen_ts_def name)))) -*} - -ML{* -fun export_test_data ctxt filename dataname = - let - val filename = Path.explode filename - val thy = Proof_Context.theory_of ctxt - val master_dir = Resources.master_directory thy - val td = TestGen.get_test_data thy dataname - val today = (Date.toString(Date.fromTimeUniv (Time.now())))^" (UTC)"; - val abs_filename = if (Path.is_absolute filename) then filename else Path.append master_dir filename - val thyname = Context.theory_name thy - (* fun string_of_term ctxt = Pretty.unformatted_string_of o (Syntax.unparse_term ctxt) *) - val string_of_term = Sledgehammer_Util.hackish_string_of_term - fun generate () = - let - val num = Int.toString(length td) - fun string_of_prems ctxt [] = "" - | string_of_prems ctxt (p::ps) = ("\n" - ^" "^(string_of_term ctxt p) ^ "\n" - ^" \n")^(string_of_prems ctxt ps) - val thmsstrings = String.concat (map (fn t => "\n" - ^(string_of_prems ctxt (Thm.prems_of t)) - ^" \n" - ^" "^string_of_term ctxt (Thm.prop_of t) ^ "\n" - ^" \n" - ^"\n" - ) td) - in - "\n\n" - ^thmsstrings - end - - val test_data_str = Print_Mode.setmp [] generate (); - val _ = File.write (abs_filename) test_data_str - handle (IO.Io{name=name,...}) => warning ("Could not write \""^(name)^"\".") - in () end -*} - -ML {* - Outer_Syntax.command (("export_test_data"), Position.none) - "export test data to an external file" - (Parse.name -- Parse.name - >> (fn (filename,name) => - (Toplevel.keep (fn state => export_test_data (Toplevel.context_of state) filename name)))); - - *} - -end diff --git a/src/test-gen/src/test/TestSequence.thy b/src/test-gen/src/test/TestSequence.thy deleted file mode 100644 index 861e8df..0000000 --- a/src/test-gen/src/test/TestSequence.thy +++ /dev/null @@ -1,1001 +0,0 @@ -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * Monads.thy --- a base testing theory for sequential computations. - * This file is part of HOL-TestGen. 
- * - * Copyright (c) 2005-2007, ETH Zurich, Switzerland - * 2009 B. Wolff, Univ. Paris-Sud, France - * 2009, 2012 Achim D. Brucker, Germany - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ******************************************************************************) -(* $Id:$ *) - -chapter {* Basic Monad Theory for Sequential Computations *} - -theory Monads imports Main -begin - - -section{*General Framework for Monad-based Sequence-Test *} -text{* As such, Higher-order Logic as a purely functional specification - formalism has no built-in mechanism for state and state-transitions. - Forms of testing involving state require therefore explicit mechanisms - for their treatment inside the logic; a well-known technique to model - states inside purely functional languages are \emph{monads} made popular - by Wadler and Moggi and extensively used in Haskell. \HOL is powerful - enough to represent the most important standard monads; - however, it is not possible to represent monads as such due to well-known - limitations of the Hindley-Milner type-system. *} - -text{* Here is a variant for state-exception monads, that models precisely - transition functions with preconditions. Next, we declare the - state-backtrack-monad. In all of them, our concept of i/o stepping - functions can be formulated; these are functions mapping input - to a given monad. Later on, we will build the usual concepts of: - \begin{enumerate} - \item deterministic i/o automata, - \item non-deterministic i/o automata, and - \item labelled transition systems (LTS) - \end{enumerate} -*} - -subsection{* Standard State Exception Monads *} -type_synonym ('o, '\) MON\<^sub>S\<^sub>E = "'\ \ ('o \ '\)" (* = '\ \ ('o \ '\)option *) - - -definition bind_SE :: "('o,'\)MON\<^sub>S\<^sub>E \ ('o \ ('o','\)MON\<^sub>S\<^sub>E) \ ('o','\)MON\<^sub>S\<^sub>E" -where "bind_SE f g = (\\. 
case f \ of None \ None - | Some (out, \') \ g out \')" - -notation bind_SE ("bind\<^sub>S\<^sub>E") - -syntax (xsymbols) - "_bind_SE" :: "[pttrn,('o,'\)MON\<^sub>S\<^sub>E,('o','\)MON\<^sub>S\<^sub>E] \ ('o','\)MON\<^sub>S\<^sub>E" - ("(2 _ \ _; _)" [5,8,8]8) -translations - "x \ f; g" == "CONST bind_SE f (% x . g)" - - -definition unit_SE :: "'o \ ('o, '\)MON\<^sub>S\<^sub>E" ("(return _)" 8) -where "unit_SE e = (\\. Some(e,\))" -notation unit_SE ("unit\<^sub>S\<^sub>E") - -definition fail_SE :: "('o, '\)MON\<^sub>S\<^sub>E" -where "fail_SE = (\\. None)" -notation fail_SE ("fail\<^sub>S\<^sub>E") - -definition assert_SE :: "('\ \ bool) \ (bool, '\)MON\<^sub>S\<^sub>E" -where "assert_SE P = (\\. if P \ then Some(True,\) else None)" -notation assert_SE ("assert\<^sub>S\<^sub>E") - -definition assume_SE :: "('\ \ bool) \ (unit, '\)MON\<^sub>S\<^sub>E" -where "assume_SE P = (\\. if \\ . P \ then Some((), SOME \ . P \) else None)" -notation assume_SE ("assume\<^sub>S\<^sub>E") - -text{* The standard monad theorems about unit and associativity: *} - -lemma bind_left_unit [simp]: "(x \ return c; P x) = P c" - by (simp add: unit_SE_def bind_SE_def) - - -lemma bind_left_fail_SE[simp] : "(x \ fail\<^sub>S\<^sub>E; P x) = fail\<^sub>S\<^sub>E" - by (simp add: fail_SE_def bind_SE_def) - - -lemma bind_right_unit[simp]: "(x \ m; return x) = m" - apply (simp add: unit_SE_def bind_SE_def) - apply (rule ext) - apply (case_tac "m \", simp_all) - done - -lemma bind_assoc[simp]: "(y \ (x \ m; k x); h y) = (x \ m; (y \ k x; h y))" - apply (simp add: unit_SE_def bind_SE_def, rule ext) - apply (case_tac "m \", simp_all) - apply (case_tac "a", simp_all) - done - - -text{* The bind-operator in the state-exception monad yields already - a semantics for the concept of an input sequence on the meta-level: *} -lemma syntax_test: "(o1 \ f1 ; o2 \ f2; return (post o1 o2)) = X" -oops - - - -subsection {* "Pipe-free" - variant of the bind. *} - -definition seq_SE :: "[('\, '\)MON\<^sub>S\<^sub>E, ('\, '\)MON\<^sub>S\<^sub>E] \ ('\, '\)MON\<^sub>S\<^sub>E" (infixl ";-" 65) -where "f ;- g = (_ \ f ; g)" - - - -subsection {* Monadic If *} - -definition if_SE :: "['\ \ bool, ('\, '\)MON\<^sub>S\<^sub>E, ('\, '\)MON\<^sub>S\<^sub>E] \ ('\, '\)MON\<^sub>S\<^sub>E" -where "if_SE c E F = (\\. if c \ then E \ else F \)" - -syntax (xsymbols) - "_if_SE" :: "['\ \ bool,('o,'\)MON\<^sub>S\<^sub>E,('o','\)MON\<^sub>S\<^sub>E] \ ('o','\)MON\<^sub>S\<^sub>E" - ("(if\<^sub>S\<^sub>E _ then _ else _fi)" [5,8,8]8) -translations - "(if\<^sub>S\<^sub>E cond then T1 else T2 fi)" == "CONST if_SE cond T1 T2" - - - -subsubsection{* Theory of a Monadic While *} - -text{* First Step : Establishing an embedding between partial functions and relations *} -(* plongement *) -definition Mon2Rel :: "(unit, '\)MON\<^sub>S\<^sub>E \ ('\ \ '\) set" -where "Mon2Rel f = {(x, y). (f x = Some((), y))}" -(* ressortir *) -definition Rel2Mon :: " ('\ \ '\) set \ (unit, '\)MON\<^sub>S\<^sub>E " -where "Rel2Mon S = (\ \. if \\'. (\, \') \ S then Some((), SOME \'. (\, \') \ S) else None)" - -lemma Mon2Rel_Rel2Mon_id: assumes det:"single_valued R" shows "(Mon2Rel \ Rel2Mon) R = R" -apply (simp add: comp_def Mon2Rel_def Rel2Mon_def,auto) -apply (case_tac "\\'. 
(a, \') \ R", auto) -apply (subst some_eq_ex) back -apply (insert det[simplified single_valued_def]) -apply (auto) -done - -lemma Rel2Mon_Id: "(Rel2Mon \ Mon2Rel) x = x" -apply (rule ext) -apply (auto simp: comp_def Mon2Rel_def Rel2Mon_def) -apply (erule contrapos_pp, drule HOL.not_sym, simp) -done - -lemma single_valued_Mon2Rel: "single_valued (Mon2Rel B)" -by (auto simp: single_valued_def Mon2Rel_def) - -text{* Second Step : Proving an induction principle allowing to establish that lfp remains - deterministic *} - - -(* Due to Tobias Nipkow *) -definition chain :: "(nat => 'a set) => bool" -where "chain S = (\i. S i \ S(Suc i))" - -lemma chain_total: "chain S ==> S i \ S j \ S j \ S i" -by (metis chain_def le_cases lift_Suc_mono_le) - -definition cont :: "('a set => 'b set) => bool" -where "cont f = (\S. chain S \ f(UN n. S n) = (UN n. f(S n)))" - -lemma mono_if_cont: fixes f :: "'a set => 'b set" - assumes "cont f" shows "mono f" -proof - fix a b :: "'a set" assume "a \ b" - let ?S = "\n::nat. if n=0 then a else b" - have "chain ?S" using `a \ b` by(auto simp: chain_def) - hence "f(UN n. ?S n) = (UN n. f(?S n))" - using assms by(simp add: cont_def) - moreover have "(UN n. ?S n) = b" using `a \ b` by (auto split: if_splits) - moreover have "(UN n. f(?S n)) = f a \ f b" by (auto split: if_splits) - ultimately show "f a \ f b" by (metis Un_upper1) -qed - -lemma chain_iterates: fixes f :: "'a set => 'a set" - assumes "mono f" shows "chain(\n. (f^^n) {})" -proof- - { fix n have "(f ^^ n) {} \ (f ^^ Suc n) {}" using assms - by(induction n) (auto simp: mono_def) } - thus ?thesis by(auto simp: chain_def) -qed - -theorem lfp_if_cont: - assumes "cont f" shows "lfp f = (UN n. (f^^n) {})" (is "_ = ?U") -proof - show "lfp f \ ?U" - proof (rule lfp_lowerbound) - have "f ?U = (UN n. (f^^Suc n){})" - using chain_iterates[OF mono_if_cont[OF assms]] assms - by(simp add: cont_def) - also have "\ = (f^^0){} \ \" by simp - also have "\ = ?U" - by(auto simp del: funpow.simps) (metis not0_implies_Suc) - finally show "f ?U \ ?U" by simp - qed -next - { fix n p assume "f p \ p" - have "(f^^n){} \ p" - proof(induction n) - case 0 show ?case by simp - next - case Suc - from monoD[OF mono_if_cont[OF assms] Suc] `f p \ p` - show ?case by simp - qed - } - thus "?U \ lfp f" by(auto simp: lfp_def) -qed - - -lemma single_valued_UN_chain: - assumes "chain S" "(!!n. single_valued (S n))" - shows "single_valued(UN n. S n)" -proof(auto simp: single_valued_def) - fix m n x y z assume "(x, y) \ S m" "(x, z) \ S n" - with chain_total[OF assms(1), of m n] assms(2) - show "y = z" by (auto simp: single_valued_def) -qed - -lemma single_valued_lfp: -fixes f :: "('a \ 'a) set => ('a \ 'a) set" -assumes "cont f" "\r. single_valued r \ single_valued (f r)" -shows "single_valued(lfp f)" -unfolding lfp_if_cont[OF assms(1)] -proof(rule single_valued_UN_chain[OF chain_iterates[OF mono_if_cont[OF assms(1)]]]) - fix n show "single_valued ((f ^^ n) {})" - by(induction n)(auto simp: assms(2)) -qed - - -text{* Third Step: Definition of the Monadic While *} -definition \ :: "['\ \ bool,('\ \ '\) set] \ (('\ \ '\) set \ ('\ \ '\) set)" -where "\ b cd = (\cw. {(s,t). 
if b s then (s, t) \ cd O cw else s = t})" - - -definition while_SE :: "['\ \ bool, (unit, '\)MON\<^sub>S\<^sub>E] \ (unit, '\)MON\<^sub>S\<^sub>E" -where "while_SE c B \ (Rel2Mon(lfp(\ c (Mon2Rel B))))" - -syntax (xsymbols) - "_while_SE" :: "['\ \ bool, (unit, '\)MON\<^sub>S\<^sub>E] \ (unit, '\)MON\<^sub>S\<^sub>E" - ("(while\<^sub>S\<^sub>E _ do _ od)" [8,8]8) -translations - "while\<^sub>S\<^sub>E c do b od" == "CONST while_SE c b" - -lemma cont_\: "cont (\ c b)" -by (auto simp: cont_def \_def) - -text{* The fixpoint theory now allows us to establish that the lfp constructed over - @{term Mon2Rel} remains deterministic *} - -theorem single_valued_lfp_Mon2Rel: "single_valued (lfp(\ c (Mon2Rel B)))" -apply(rule single_valued_lfp, simp_all add: cont_\) -apply(auto simp: \_def single_valued_def) -apply(metis single_valued_Mon2Rel[of "B"] single_valued_def) -done - - -lemma Rel2Mon_if: - "Rel2Mon {(s, t). if b s then (s, t) \ Mon2Rel c O lfp (\ b (Mon2Rel c)) else s = t} \ = - (if b \ then Rel2Mon (Mon2Rel c O lfp (\ b (Mon2Rel c))) \ else Some ((), \))" -by (simp add: Rel2Mon_def) - -lemma Rel2Mon_homomorphism: - assumes determ_X: "single_valued X" and determ_Y: "single_valued Y" - shows "Rel2Mon (X O Y) = (Rel2Mon X) ;- (Rel2Mon Y)" -proof - - have relational_partial_next_in_O: "!!x E F. (\y. (x, y) \ (E O F)) - \ (\y. (x, y) \ E)" by (auto) -show ?thesis -apply (simp add: Rel2Mon_def seq_SE_def bind_SE_def) -apply (rule ext) -apply (case_tac " \\'. (\, \') \ X O Y") -apply (simp only: HOL.if_True) -apply (frule relational_partial_next_in_O) -apply (auto) -apply (insert determ_X determ_Y) - -defer 1 - -apply (rule_tac x=z in exI) -apply (rule someI2) -apply (assumption)+ -apply (simp add: single_valued_def) -apply (metis) - -apply (erule contrapos_pp) -apply (simp) -apply (rule_tac x=\' in exI) - apply (subgoal_tac "(SOME \'. (\, \') \ X) = \''") - apply (auto) - apply (auto simp: single_valued_def) -apply (fold single_valued_def) - - apply (subgoal_tac "(SOME \'. (x, \') \ X) = y") - apply (simp) - apply (subgoal_tac "(SOME \'. (y, \') \ Y) = z") - apply (simp) - apply (subgoal_tac "(SOME \'. (x, \') \ X O Y) = z") - apply (simp) - apply (auto simp: single_valued_def) - (* TODO IMPORTANT *) - - apply (rule someI2) - apply (assumption) - apply (rule someI2) - apply (auto) -oops -(* qed *) - -text{* Putting everything together, the theory of embedding and the invariance of - determinism of the while-body, gives us the usual unfold-theorem: *} -theorem "(while\<^sub>S\<^sub>E b do c od) = (if\<^sub>S\<^sub>E b then (c ;- (while\<^sub>S\<^sub>E b do c od)) else return () fi)" -apply (simp add: if_SE_def seq_SE_def while_SE_def unit_SE_def) -apply (subst lfp_unfold [OF mono_if_cont, OF cont_\]) -apply (rule ext) -apply (subst \_def) -apply (auto simp: Rel2Mon_if Rel2Mon_homomorphism seq_SE_def Rel2Mon_Id [simplified comp_def] - single_valued_Mon2Rel single_valued_lfp_Mon2Rel ) -done - - - -subsection{* Multi-binds *} - -text{* In order to express test-sequences also on the object-level and -to make our theory amenable to formal reasoning over test-sequences, -we represent them as lists of input and generalize the bind-operator -of the state-exception monad accordingly. The approach is straightforward, -but comes with a price: we have to encapsulate all input and output data -into one type. Assume that we have a typed interface to a module with -the operations $op_1$, $op_2$, \ldots, $op_n$ with the inputs -$\iota_1$, $\iota_2$, \ldots, $\iota_n$ (outputs are treated analogously). 
-Then we can encode for this interface the general input - type: -\begin{displaymath} -\texttt{datatype}\ \texttt{in}\ =\ op_1\ ::\ \iota_1\ |\ ...\ |\ \iota_n -\end{displaymath} -Obviously, we lose some type-safety in this approach; we have to express -that in traces only \emph{corresponding} input and output belonging to the -same operation will occur; this form of side-conditions has to be expressed -inside \HOL. From the user perspective, this will not make much difference, -since junk-data resulting from too weak typing can be ruled out by adopted -front-ends. -*} - -text{* Note that the subsequent notion of a test-sequence allows the io stepping -function (and the special case of a program under test) to stop execution -\emph{within} the sequence; such premature terminations are characterized by an -output list which is shorter than the input list. *} - -fun mbind :: "'\ list \ ('\ \ ('o,'\) MON\<^sub>S\<^sub>E) \ ('o list,'\) MON\<^sub>S\<^sub>E" -where "mbind [] iostep \ = Some([], \)" | - "mbind (a#H) iostep \ = - (case iostep a \ of - None \ Some([], \) - | Some (out, \') \ (case mbind H iostep \' of - None \ Some([out],\') - | Some(outs,\'') \ Some(out#outs,\'')))" - -notation mbind ("mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e") (* future name: mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e *) - -text{* This definition is fail-safe; in case of an exception, the current state is maintained, - the computation as a whole is marked as success. - Compare to the fail-strict variant @{text "mbind'"}: *} - -lemma mbind_unit [simp]: - "mbind [] f = (return [])" - by(rule ext, simp add: unit_SE_def) - -text{* The characteristic property of @{term mbind} --- which distinguishes it from - @{text mbind'} defined in the sequel --- is that it never fails; it ``swallows'' internal - errors occurring during the computation. 
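As a small illustration (the inputs a and b, the output 1 and the step function iostep are
hypothetical): if iostep a s = Some(1, s') and iostep b s' = None, then
\begin{verbatim}
  mbind  [a, b] iostep s = Some([1], s')
  mbind' [a, b] iostep s = None
\end{verbatim}
i.e. the fail-safe variant keeps the output of the successful prefix and still reports
success, whereas the fail-strict variant discards the entire run.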
*} -lemma mbind_nofailure [simp]: - "mbind S f \ \ None" - apply(rule_tac x=\ in spec) - apply(induct S, auto simp:unit_SE_def) - apply(case_tac "f a x", auto) - apply(erule_tac x="b" in allE) - apply(erule exE, erule exE, simp) - done - - -fun mbind' :: "'\ list \ ('\ \ ('o,'\) MON\<^sub>S\<^sub>E) \ ('o list,'\) MON\<^sub>S\<^sub>E" -where "mbind' [] iostep \ = Some([], \)" | - "mbind' (a#S) iostep \ = - (case iostep a \ of - None \ None - | Some (out, \') \ (case mbind' S iostep \' of - None \ None (* fail-strict *) - | Some(outs,\'') \ Some(out#outs,\'')))" -notation mbind' ("mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p") (* future name: mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p *) - -lemma mbind'_unit [simp]: - "mbind' [] f = (return [])" - by(rule ext, simp add: unit_SE_def) - -lemma mbind'_bind [simp]: - "(x \ mbind' (a#S) F; M x) = (a \ (F a); (x \ mbind' S F; M (a # x)))" - by(rule ext, rename_tac "z",simp add: bind_SE_def split: option.split) - -declare mbind'.simps[simp del] (* use only more abstract definitions *) - - -fun mbind'' :: "'\ list \ ('\ \ ('o,'\) MON\<^sub>S\<^sub>E) \ ('o list,'\) MON\<^sub>S\<^sub>E" -where "mbind'' [] iostep \ = Some([], \)" | - "mbind'' (a#S) iostep \ = - (case iostep a \ of - None \ mbind'' S iostep \ - | Some (out, \') \ (case mbind'' S iostep \' of - None \ None (* does not occur *) - | Some(outs,\'') \ Some(out#outs,\'')))" - -notation mbind'' ("mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e") (* future name: mbind\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e\<^sub>F\<^sub>a\<^sub>i\<^sub>l *) -declare mbind''.simps[simp del] (* use only more abstract definitions *) - - -text{* mbind' as failure strict operator can be seen as a foldr on bind - - if the types would match \ldots *} - -definition try_SE :: "('o,'\) MON\<^sub>S\<^sub>E \ ('o option,'\) MON\<^sub>S\<^sub>E" ("try\<^sub>S\<^sub>E") -where "try\<^sub>S\<^sub>E ioprog = (\\. case ioprog \ of - None \ Some(None, \) - | Some(outs, \') \ Some(Some outs, \'))" -text{* In contrast, mbind as a failure safe operator can roughly be seen - as a foldr on bind - try: - m1 ; try m2 ; try m3; ... Note, that the rough equivalence only holds for - certain predicates in the sequence - length equivalence modulo None, - for example. However, if a conditional is added, the equivalence - can be made precise: *} - -lemma mbind_try: - "(x \ mbind (a#S) F; M x) = - (a' \ try\<^sub>S\<^sub>E(F a); - if a' = None - then (M []) - else (x \ mbind S F; M (the a' # x)))" -apply(rule ext) -apply(simp add: bind_SE_def try_SE_def) -apply(case_tac "F a x", auto) -apply(simp add: bind_SE_def try_SE_def) -apply(case_tac "mbind S F b", auto) -done - -text{* On this basis, a symbolic evaluation scheme can be established - that reduces mbind-code to try\_SE\_code and ite-cascades. *} - -definition alt_SE :: "[('o, '\)MON\<^sub>S\<^sub>E, ('o, '\)MON\<^sub>S\<^sub>E] \ ('o, '\)MON\<^sub>S\<^sub>E" (infixl "\\<^sub>S\<^sub>E" 10) -where "(f \\<^sub>S\<^sub>E g) = (\ \. 
case f \ of None \ g \ - | Some H \ Some H)" - -definition malt_SE :: "('o, '\)MON\<^sub>S\<^sub>E list \ ('o, '\)MON\<^sub>S\<^sub>E" -where "malt_SE S = foldr alt_SE S fail\<^sub>S\<^sub>E" -notation malt_SE ("\\<^sub>S\<^sub>E") - -lemma malt_SE_mt [simp]: "\\<^sub>S\<^sub>E [] = fail\<^sub>S\<^sub>E" -by(simp add: malt_SE_def) - -lemma malt_SE_cons [simp]: "\\<^sub>S\<^sub>E (a # S) = (a \\<^sub>S\<^sub>E (\\<^sub>S\<^sub>E S))" -by(simp add: malt_SE_def) - - -subsection{* State Backtrack Monads *} -text{*This subsection is still rudimentary and as such an interesting -formal analogue to the previous monad definitions. It is doubtful that it is -interesting for testing and as a cmputational stucture at all. -Clearly more relevant is ``sequence'' instead of ``set,'' which would -rephrase Isabelle's internal tactic concept. *} - -type_synonym ('o, '\) MON\<^sub>S\<^sub>B = "'\ \ ('o \ '\) set" - -definition bind_SB :: "('o, '\)MON\<^sub>S\<^sub>B \ ('o \ ('o', '\)MON\<^sub>S\<^sub>B) \ ('o', '\)MON\<^sub>S\<^sub>B" -where "bind_SB f g \ = \ ((\(out, \). (g out \)) ` (f \))" -notation bind_SB ("bind\<^sub>S\<^sub>B") - -definition unit_SB :: "'o \ ('o, '\)MON\<^sub>S\<^sub>B" ("(returns _)" 8) -where "unit_SB e = (\\. {(e,\)})" -notation unit_SB ("unit\<^sub>S\<^sub>B") - -syntax (xsymbols) - "_bind_SB" :: "[pttrn,('o,'\)MON\<^sub>S\<^sub>B,('o','\)MON\<^sub>S\<^sub>B] \ ('o','\)MON\<^sub>S\<^sub>B" - ("(2 _ := _; _)" [5,8,8]8) -translations - "x := f; g" == "CONST bind_SB f (% x . g)" - - - -lemma bind_left_unit_SB : "(x := returns a; m x) = m a" - by (rule ext,simp add: unit_SB_def bind_SB_def) - -lemma bind_right_unit_SB: "(x := m; returns x) = m" - by (rule ext, simp add: unit_SB_def bind_SB_def) - - -lemma bind_assoc_SB: "(y := (x := m; k x); h y) = (x := m; (y := k x; h y))" - by (rule ext, simp add: unit_SB_def bind_SB_def split_def) - - -subsection{* State Backtrack Exception Monad (vulgo: Boogie-PL) *} -text{* The following combination of the previous two Monad-Constructions -allows for the semantic foundation of a simple generic assertion language -in the style of Schirmers Simpl-Language or Rustan Leino's Boogie-PL language. -The key is to use the exceptional element None for violations of -the assert-statement. *} -type_synonym ('o, '\) MON\<^sub>S\<^sub>B\<^sub>E = "'\ \ (('o \ '\) set) option" - - -definition bind_SBE :: "('o,'\)MON\<^sub>S\<^sub>B\<^sub>E \ ('o \ ('o','\)MON\<^sub>S\<^sub>B\<^sub>E) \ ('o','\)MON\<^sub>S\<^sub>B\<^sub>E" -where "bind_SBE f g = (\\. case f \ of None \ None - | Some S \ (let S' = (\(out, \'). g out \') ` S - in if None \ S' then None - else Some(\ (the ` S'))))" - -syntax (xsymbols) - "_bind_SBE" :: "[pttrn,('o,'\)MON\<^sub>S\<^sub>B\<^sub>E,('o','\)MON\<^sub>S\<^sub>B\<^sub>E] \ ('o','\)MON\<^sub>S\<^sub>B\<^sub>E" - ("(2 _ :\ _; _)" [5,8,8]8) -translations - "x :\ f; g" == "CONST bind_SBE f (% x . g)" - -definition unit_SBE :: "'o \ ('o, '\)MON\<^sub>S\<^sub>B\<^sub>E" ("(returning _)" 8) -where "unit_SBE e = (\\. Some({(e,\)}))" - -definition assert_SBE :: "('\ \ bool) \ (unit, '\)MON\<^sub>S\<^sub>B\<^sub>E" -where "assert_SBE e = (\\. if e \ then Some({((),\)}) - else None)" -notation assert_SBE ("assert\<^sub>S\<^sub>B\<^sub>E") - -definition assume_SBE :: "('\ \ bool) \ (unit, '\)MON\<^sub>S\<^sub>B\<^sub>E" -where "assume_SBE e = (\\. if e \ then Some({((),\)}) - else Some {})" -notation assume_SBE ("assume\<^sub>S\<^sub>B\<^sub>E") - - -definition havoc_SBE :: " (unit, '\)MON\<^sub>S\<^sub>B\<^sub>E" -where "havoc_SBE = (\\. Some({x. 
True}))" -notation havoc_SBE ("havoc\<^sub>S\<^sub>B\<^sub>E") - - -lemma bind_left_unit_SBE : "(x :\ returning a; m x) = m a" - apply (rule ext,simp add: unit_SBE_def bind_SBE_def) - apply (case_tac "m a x",auto) - done - -lemma bind_right_unit_SBE: "(x :\ m; returning x) = m" - apply (rule ext, simp add: unit_SBE_def bind_SBE_def) - apply (case_tac "m x", simp_all add:Let_def) - apply (rule HOL.ccontr, simp add: Set.image_iff) - done - - -lemmas aux = trans[OF HOL.neq_commute,OF Option.not_None_eq] - -lemma bind_assoc_SBE: "(y :\ (x :\ m; k); h y) = (x :\ m; (y :\ k; h y))" -proof (rule ext, rename_tac z, simp add: unit_SBE_def bind_SBE_def, - case_tac "m z", simp_all add: Let_def Set.image_iff, safe) - case goal1 then show ?case - by(rule_tac x="(a, b)" in bexI, simp_all) -next - case goal2 then show ?case - apply(rule_tac x="(aa,b)" in bexI, simp_all add:split_def) - apply(erule_tac x="(aa,b)" in ballE) - apply(auto simp: aux image_def split_def intro!: rev_bexI) - done -next - case goal3 then show ?case - by(rule_tac x="(a, b)" in bexI, simp_all) -next - case goal4 then show ?case - apply(erule_tac Q="None = ?X" in contrapos_pp) - apply(erule_tac x="(aa,b)" and P="\ x. None \ split (\out. k) x" in ballE) - apply(auto simp: aux Option.not_None_eq image_def split_def intro!: rev_bexI) - done -next - case goal5 then show ?case - apply simp apply((erule_tac x="(ab,ba)" in ballE)+) - apply(simp_all add: aux Option.not_None_eq, (erule exE)+, simp add:split_def) - apply(erule rev_bexI,case_tac "None\(\p. h (fst p) (snd p))`y",auto simp:split_def) - done - -next - case goal6 then show ?case - apply simp apply((erule_tac x="(a,b)" in ballE)+) - apply(simp_all add: aux Option.not_None_eq, (erule exE)+, simp add:split_def) - apply(erule rev_bexI, case_tac "None\(\p. h(fst p)(snd p))`y",auto simp:split_def) - done -qed - - - -(* TODO: IF THEN ELSE and WHILE + Monadic Laws + Operational Rules. *) - - - -section{* Valid Execution Sequences in the State Exception Monad *} -text{* This is still an unstructured merge of executable monad concepts -and specification oriented high-level properties initiating test procedures. *} - -definition valid_SE :: "'\ \ (bool,'\) MON\<^sub>S\<^sub>E \ bool" (infix "\" 15) -where "(\ \ m) = (m \ \ None \ fst(the (m \)))" -text{* This notation consideres failures as valid -- a definition -inspired by I/O conformance. BUG: It is not possible to define -this concept once and for all in a Hindley-Milner type-system. -For the moment, we present it only for the state-exception -monad, although for the same definition, this notion is applicable -to other monads as well. *} - - -lemma exec_unit_SE [simp]: "(\ \ (return P)) = (P)" -by(auto simp: valid_SE_def unit_SE_def) - -lemma exec_unit_SE' [simp]: "(\\<^sub>0 \ (\\. Some (f \, \))) = (f \\<^sub>0)" -by(simp add: valid_SE_def ) - -lemma exec_fail_SE [simp]: "(\ \ fail\<^sub>S\<^sub>E) = False" -by(auto simp: valid_SE_def fail_SE_def) - - -lemma exec_fail_SE'[simp]: "\(\\<^sub>0 \ (\\. None))" -by(simp add: valid_SE_def ) - -lemma exec_bind_SE_failure: -"A \ = None \ \(\ \ ((s \ A ; M s)))" -by(simp add: valid_SE_def unit_SE_def bind_SE_def) - -lemma exec_bind_SE_success: -"A \ = Some(b,\') \ (\ \ ((s \ A ; M s))) = (\' \ (M b))" -by(simp add: valid_SE_def unit_SE_def bind_SE_def ) - -lemma exec_bind_SE_success': (* atomic boolean Monad "Query Functions" *) -"M \ = Some(f \,\) \ (\ \ M) = f \" -by(simp add: valid_SE_def unit_SE_def bind_SE_def ) - - - -lemma exec_bind_SE_success'': -"\ \ ((s \ A ; M s)) \ \ v \'. 
the(A \) = (v,\') \ \' \ (M v)" -apply(auto simp: valid_SE_def unit_SE_def bind_SE_def) -apply(cases "A \", simp_all) -apply(simp add: Product_Type.prod_case_unfold) -apply(drule_tac x="A \" and f=the in arg_cong, simp) -apply(rule_tac x="fst aa" in exI) -apply(rule_tac x="snd aa" in exI, auto) -done - - -lemma exec_bind_SE_success''': -"\ \ ((s \ A ; M s)) \ \ a. (A \) = Some a \ (snd a) \ (M (fst a))" -apply(auto simp: valid_SE_def unit_SE_def bind_SE_def) -apply(cases "A \", simp_all) -apply(simp_all add: Product_Type.prod_case_unfold - split: prod.splits) -apply(drule_tac x="A \" and f=the in arg_cong, simp) -apply(rule_tac x="fst aa" in exI) -apply(rule_tac x="snd aa" in exI, auto) -done - - -lemma exec_bind_SE_success'''' : -"\ \ ((s \ A ; M s)) \ \ v \'. A \ = Some(v,\') \ \' \ (M v)" -apply(auto simp: valid_SE_def unit_SE_def bind_SE_def) -apply(cases "A \", simp_all) -apply(simp add: Product_Type.prod_case_unfold) -apply(drule_tac x="A \" and f=the in arg_cong, simp) -apply(rule_tac x="fst aa" in exI) -apply(rule_tac x="snd aa" in exI, auto) -done - - -text{* Recall \verb+mbind_unit+ for the base case. *} - -lemma exec_mbindFSave_failure: -"ioprog a \ = None \ - (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (a#S) ioprog ; M s)) = (\ \ (M []))" -by(simp add: valid_SE_def unit_SE_def bind_SE_def) - -lemma exec_mbindFStop_failure: -"ioprog a \ = None \ - (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (a#S) ioprog ; M s)) = (False)" -by(simp add: exec_bind_SE_failure) - -lemma exec_mbindFPurge_failure: -"ioprog a \ = None \ - (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e (a#S) ioprog ; M s)) = (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e (S) ioprog ; M s))" -by(simp add: valid_SE_def unit_SE_def bind_SE_def mbind''.simps) - - -lemma exec_mbindFSave_success : -"ioprog a \ = Some(b,\') \ - (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (a#S) ioprog ; M s)) = - (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S ioprog ; M (b#s)))" -unfolding valid_SE_def unit_SE_def bind_SE_def -by(cases "mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S ioprog \'", auto) - -lemma exec_mbindFStop_success : -"ioprog a \ = Some(b,\') \ - (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (a#S) ioprog ; M s)) = - (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S ioprog ; M (b#s)))" -unfolding valid_SE_def unit_SE_def bind_SE_def -by(cases "mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S ioprog \'", auto simp: mbind'.simps) - -lemma exec_mbindFPurge_success : -"ioprog a \ = Some(b,\') \ - (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e (a#S) ioprog ; M s)) = - (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e S ioprog ; M (b#s)))" -unfolding valid_SE_def unit_SE_def bind_SE_def -by(cases "mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e S ioprog \'", auto simp: mbind''.simps) - -lemma exec_mbindFSave: -"(\ \ (s \ mbind (a#S) ioprog ; return (P s))) = - (case ioprog a \ of - None \ (\ \ (return (P []))) - | Some(b,\') \ (\' \ (s \ mbind S ioprog ; return (P (b#s)))))" -apply(case_tac "ioprog a \") -apply(auto simp: exec_mbindFSave_failure exec_mbindFSave_success split: prod.splits) 
-apply(subst (asm) exec_mbindFSave_success,assumption,simp_all) -by(subst exec_mbindFSave_success,assumption,simp)+ - - -text{* Universal splitting and symbolic execution rule *} -lemma exec_mbindFSave_E: -assumes seq : "(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e (a#S) ioprog ; (P s)))" - and none: "ioprog a \ = None \ (\ \ (P [])) \ Q" - and some: "\ b \'. ioprog a \ = Some(b,\') \ (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S ioprog;(P (b#s)))) \ Q " -shows "Q" -using seq -proof(cases "ioprog a \") - case None assume ass:"ioprog a \ = None" show "Q" - apply(rule none[OF ass]) - apply(insert ass, erule_tac ioprog1=ioprog in exec_mbindFSave_failure[THEN iffD1],rule seq) - done -next - case (Some aa) assume ass:"ioprog a \ = Some aa" show "Q" - apply(insert ass,cases "aa",simp, rename_tac "out" "\'") - apply(erule some) - apply(insert ass,simp) - apply(erule_tac ioprog1=ioprog in exec_mbindFSave_success[THEN iffD1],rule seq) - done -qed - -text{* The next rule reveals the particular interest in deduction; - as an elimination rule, it allows for a linear conversion of a validity judgement - @{term "mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p"} over an input list @{term "S"} into a constraint system; without any - branching ... Symbolic execution can even be stopped tactically whenever - @{term "ioprog a \ = Some(b,\')"} comes to a contradiction. *} -lemma exec_mbindFStop_E: -assumes seq : "(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p (a#S) ioprog ; (P s)))" - and some: "\b \'. ioprog a \ = Some(b,\') \ (\'\ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p S ioprog;(P(b#s)))) \ Q" -shows "Q" -using seq -proof(cases "ioprog a \") - case None assume ass:"ioprog a \ = None" show "Q" - apply(insert ass seq) - apply(drule_tac \=\ and S=S and M=P in exec_mbindFStop_failure, simp) - done -next - case (Some aa) assume ass:"ioprog a \ = Some aa" show "Q" - apply(insert ass,cases "aa",simp, rename_tac "out" "\'") - apply(erule some) - apply(insert ass,simp) - apply(erule_tac ioprog1=ioprog in exec_mbindFStop_success[THEN iffD1],rule seq) - done -qed - - -lemma exec_mbindFPurge_E: -assumes seq : "(\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e (a#S) ioprog ; (P s)))" - and none: "ioprog a \ = None \ (\ \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e S ioprog;(P (s)))) \ Q" - and some: "\ b \'. 
ioprog a \ = Some(b,\') \ (\' \ (s \ mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>P\<^sub>u\<^sub>r\<^sub>g\<^sub>e S ioprog;(P (b#s)))) \ Q " -shows "Q" -using seq -proof(cases "ioprog a \") - case None assume ass:"ioprog a \ = None" show "Q" - apply(rule none[OF ass]) - apply(insert ass, erule_tac ioprog1=ioprog in exec_mbindFPurge_failure[THEN iffD1],rule seq) - done -next - case (Some aa) assume ass:"ioprog a \ = Some aa" show "Q" - apply(insert ass,cases "aa",simp, rename_tac "out" "\'") - apply(erule some) - apply(insert ass,simp) - apply(erule_tac ioprog1=ioprog in exec_mbindFPurge_success[THEN iffD1],rule seq) - done -qed - - -lemma assert_disch1 :" P \ \ (\ \ (x \ assert\<^sub>S\<^sub>E P; M x)) = (\ \ (M True))" -by(auto simp: bind_SE_def assert_SE_def valid_SE_def) - -lemma assert_disch2 :" \ P \ \ \ (\ \ (x \ assert\<^sub>S\<^sub>E P ; M s))" -by(auto simp: bind_SE_def assert_SE_def valid_SE_def) - -lemma assert_disch3 :" \ P \ \ \ (\ \ (assert\<^sub>S\<^sub>E P))" -by(auto simp: bind_SE_def assert_SE_def valid_SE_def) - -lemma assert_D : "(\ \ (x \ assert\<^sub>S\<^sub>E P; M x)) \ P \ \ (\ \ (M True))" -by(auto simp: bind_SE_def assert_SE_def valid_SE_def split: HOL.split_if_asm) - -lemma assume_D : "(\ \ (x \ assume\<^sub>S\<^sub>E P; M x)) \ \ \. (P \ \ \ \ (M ()))" -apply(auto simp: bind_SE_def assume_SE_def valid_SE_def split: HOL.split_if_asm) -apply(rule_tac x="Eps P" in exI, auto) -apply(rule_tac x="True" in exI, rule_tac x="b" in exI) -apply(subst Hilbert_Choice.someI,assumption,simp) -apply(subst Hilbert_Choice.someI,assumption,simp) -done -text{* These two rule prove that the SE Monad in connection with the notion of valid sequence -is actually sufficient for a representation of a Boogie-like language. The SBE monad with explicit -sets of states --- to be shown below --- is strictly speaking not necessary (and will therefore -be discontinued in the development). *} - -term "if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi" - -lemma if_SE_D1 : "P \ \ (\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)) = (\ \ B\<^sub>1)" -by(auto simp: if_SE_def valid_SE_def) - -lemma if_SE_D2 : "\ P \ \ (\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)) = (\ \ B\<^sub>2)" -by(auto simp: if_SE_def valid_SE_def) - -lemma if_SE_split_asm : " (\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)) = ((P \ \ (\ \ B\<^sub>1)) \ (\ P \ \ (\ \ B\<^sub>2)))" -by(cases "P \",auto simp: if_SE_D1 if_SE_D2) - -lemma if_SE_split : " (\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)) = ((P \ \ (\ \ B\<^sub>1)) \ (\ P \ \ (\ \ B\<^sub>2)))" -by(cases "P \", auto simp: if_SE_D1 if_SE_D2) - - -lemma if_SE_execE: - assumes A: "\ \ (if\<^sub>S\<^sub>E P then B\<^sub>1 else B\<^sub>2 fi)" - and B: "P \ \ \ \ B\<^sub>1 \ Q" - and C: "\ P \\ \ \ B\<^sub>2 \ Q" - shows "Q" -by(insert A [simplified if_SE_split],cases "P \", simp_all, auto elim: B C) - - -lemma [code]: - "(\ \ m) = (case (m \) of None \ False | (Some (x,y)) \ x)" - apply(simp add: valid_SE_def) - apply(cases "m \ = None", simp_all) - apply(insert not_None_eq, auto) -done - - -text{* Test-Refinements will be stated in terms of the failsave @{term mbind}, opting - more generality. 
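Since the [code] equation above reduces the validity judgement to a case distinction on the result of the monadic computation, a test driver only has to run the computation and inspect the Boolean it yields. A minimal Standard ML sketch of such a driver follows (hypothetical names, reusing the fail-safe multi-bind from the earlier sketch; a hand-written illustration, not the code generator's output).

(* Executable form of the validity judgement (cf. the [code] lemma):
   sigma |= m  holds iff  m sigma evaluates to SOME (true, _).        *)
fun valid_SE m sigma =
    (case m sigma of
       NONE => false
     | SOME (b, _) => b)

(* A test driver: run the fail-safe multi-bind over the input list and
   check a post-condition on the inputs and the observed outputs.
   The NONE branch is dead code, since the fail-safe multi-bind never
   yields NONE (cf. mbind_nofailure), but it keeps the sketch total.  *)
fun run_test step post inputs sigma0 =
    valid_SE
      (fn sigma =>
         (case mbind_fail_save inputs step sigma of
            NONE => NONE
          | SOME (outs, sigma') => SOME (post inputs outs, sigma')))
      sigma0

Here post plays the role of the post-condition over the input sequence and the observed output sequence.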
The following lemma allows for an optimization both in - test execution as well as in symbolic execution for an important special case of - the post-codition: Whenever the latter has the constraint that the length of input and - output sequence equal each other (that is to say: no failure occured), failsave mbind - can be reduced to failstop mbind ... *} -lemma mbindFSave_vs_mbindFStop : - "(\ \ (os \ (mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e \s ioprog); return(length \s = length os \ P \s os))) = - (\ \ (os \ (mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p \s ioprog); return(P \s os)))" - apply(rule_tac x=P in spec) - apply(rule_tac x=\ in spec) - proof(induct "\s") - case Nil show ?case by(simp_all add: mbind_try try_SE_def del: Monads.mbind.simps) - case (Cons a \s) show ?case - apply(rule allI, rename_tac "\",rule allI, rename_tac "P") - apply(insert Cons.hyps) - apply(case_tac "ioprog a \") - apply(simp only: exec_mbindFSave_failure exec_mbindFStop_failure, simp) - apply(simp add: split_paired_all del: Monads.mbind.simps ) - apply(rename_tac "\'") - apply(subst exec_mbindFSave_success, assumption) - apply(subst (2) exec_bind_SE_success, assumption) - apply(erule_tac x="\'" in allE) - apply(erule_tac x="\\s s. P (a # \s) (aa # s)" in allE) (* heureka ! *) - apply(simp) - done - qed - - -lemma mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e_vs_mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p: -assumes A: "\ \ \. ioprog \ \ \ None" -shows "(\ \ (os \ (mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e \s ioprog); P os)) = - (\ \ (os \ (mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p \s ioprog); P os))" -proof(induct "\s") print_cases - case Nil show ?case by simp -next - case (Cons a \s) - from Cons.hyps - have B:"\ S f \. mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e S f \ \ None " by simp - have C:"\\. mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>t\<^sub>o\<^sub>p \s ioprog \ = mbind\<^sub>F\<^sub>a\<^sub>i\<^sub>l\<^sub>S\<^sub>a\<^sub>v\<^sub>e \s ioprog \" - apply(induct \s, simp) - apply(rule allI,rename_tac "\") - apply(simp add: Monads.mbind'.simps(2)) - apply(insert A, erule_tac x="a" in allE) - apply(erule_tac x="\" and P="\\ . ioprog a \ \ None" in allE) - apply(auto split:option.split) - done - show ?case - apply(insert A,erule_tac x="a" in allE,erule_tac x="\" in allE) - apply(simp, elim exE) - apply(rename_tac "out" "\'") - apply(insert B, erule_tac x=\s in allE, erule_tac x=ioprog in allE, erule_tac x=\' in allE) - apply(subst(asm) not_None_eq, elim exE) - apply(subst Monads.exec_bind_SE_success) - apply(simp split: option.split, auto) - apply(rule_tac s="(\ a b c. a # (fst c)) out \' (aa, b)" in trans, simp,rule refl) - apply(rule_tac s="(\ a b c. (snd c)) out \' (aa, b)" in trans, simp,rule refl) - apply(simp_all) - apply(subst Monads.exec_bind_SE_success, assumption) - apply(subst Monads.exec_bind_SE_success) - apply(rule_tac s="Some (aa, b)" in trans,simp_all add:C) - apply(subst(asm) Monads.exec_bind_SE_success, assumption) - apply(subst(asm) Monads.exec_bind_SE_success) - apply(rule_tac s="Some (aa, b)" in trans,simp_all add:C) - done -qed - - -section{* Valid Test Sequences in the State Exception Backtrack Monad *} -text{* This is still an unstructured merge of executable monad concepts -and specification oriented high-level properties initiating test procedures. 
*} - -definition valid_SBE :: "'\ \ ('a,'\) MON\<^sub>S\<^sub>B\<^sub>E \ bool" (infix "\\<^sub>S\<^sub>B\<^sub>E" 15) -where "\ \\<^sub>S\<^sub>B\<^sub>E m \ (m \ \ None)" -text{* This notation consideres all non-failures as valid. *} - - -lemma assume_assert: "(\ \\<^sub>S\<^sub>B\<^sub>E ( _ :\ assume\<^sub>S\<^sub>B\<^sub>E P ; assert\<^sub>S\<^sub>B\<^sub>E Q)) = (P \ \ Q \)" - by(simp add: valid_SBE_def assume_SBE_def assert_SBE_def bind_SBE_def) - -lemma assert_intro: "Q \ \ \ \\<^sub>S\<^sub>B\<^sub>E (assert\<^sub>S\<^sub>B\<^sub>E Q)" - by(simp add: valid_SBE_def assume_SBE_def assert_SBE_def bind_SBE_def) - -lemma assume_dest: - "\ \ \\<^sub>S\<^sub>B\<^sub>E (x :\ assume\<^sub>S\<^sub>B\<^sub>E Q; M x); Q \' \ \ \ \\<^sub>S\<^sub>B\<^sub>E M ()" - apply(auto simp: valid_SBE_def assume_SBE_def assert_SBE_def bind_SBE_def) - apply(cases "Q \",simp_all) - oops - -text{* This still needs work. What would be needed is a kind - of wp - calculus that comes out of that. So far: nope. *} - -subsection{* Legacy Bindings *} - - -lemma valid_true[simp]: (* legacy: special case *) - "(\ \ (s \ return x ; return (P s))) = P x" by simp - - -(* -lemmas valid_both = exec_mbindFSave (* legacy *) -lemmas valid_success = exec_mbindFSave_success (* legacy *) -lemmas valid_success'' = exec_mbindFSave_success(* legacy *) -lemmas valid_success' = exec_bind_SE_success (* legacy *) -lemmas valid_failure = exec_mbindFSave_failure (* legacy : *) -lemmas valid_failure' = exec_bind_SE_failure (* legacy *) -lemmas valid_failure''=valid_failure (* legacy : *) -lemmas valid_failure''' = exec_mbindFStop_failure (* legacy : *) -lemmas valid_propagate_fail = exec_fail_SE (* legacy *) -lemmas valid_propagate_fail' = exec_fail_SE' (* legacy *) -lemmas valid_propoagate_3' = valid_propagate_fail' (* legacy *) -lemmas valid_propagate_2 = exec_bind_SE_success''(* legacy *) -lemmas valid_propagate_1 = exec_unit_SE (* legacy *) -lemmas valid_successElem = exec_bind_SE_success' (* legacy *) -lemmas valid_propagate_2' = exec_bind_SE_success'''(* legacy *) -lemmas valid_propagate_2'' = exec_bind_SE_success'''' (* legacy *) -lemmas valid_propoagate_3 = exec_unit_SE' (* legacy *) - *) -(* legacy ?: -lemma valid_success'': -"ioprog a \ = Some(b,\') \ - (\ \ (s \ mbind (a#S) ioprog ; return (P s))) = - (\' \ (s \ mbind S ioprog ; return (P (b#s))))" -unfolding valid_SE_def unit_SE_def bind_SE_def -by(cases "mbind S ioprog \'", auto) -*) - -end diff --git a/src/test-gen/src/test/Testing.thy b/src/test-gen/src/test/Testing.thy deleted file mode 100644 index 94c6ff0..0000000 --- a/src/test-gen/src/test/Testing.thy +++ /dev/null @@ -1,57 +0,0 @@ -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * Testing.thy --- wiring everything together. - * This file is part of HOL-TestGen. - * - * Copyright (c) 2005-2007 ETH Zurich, Switzerland - * 2008-2015 Achim D. Brucker, Germany - * 2009-2016 Université Paris-Sud, France - * 2016 The University of Sheffield, UK - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ******************************************************************************) -(* $Id:$ *) - -chapter {* Summing up *} - -theory - Testing -imports - TestGen - TestLib - TestScript - "codegen_fsharp/code_fsharp" -begin - - -end diff --git a/src/test-gen/src/main/Automata.thy b/src/test/Automata.thy similarity index 100% rename from src/test-gen/src/main/Automata.thy rename to src/test/Automata.thy diff --git a/src/test-gen/src/main/EFSM_Toolkit.thy b/src/test/EFSM_Toolkit.thy similarity index 100% rename from src/test-gen/src/main/EFSM_Toolkit.thy rename to src/test/EFSM_Toolkit.thy diff --git a/src/test-gen/src/main/Interleaving.thy b/src/test/Interleaving.thy similarity index 100% rename from src/test-gen/src/main/Interleaving.thy rename to src/test/Interleaving.thy diff --git a/src/test-gen/src/main/Monads.thy b/src/test/Monads.thy similarity index 100% rename from src/test-gen/src/main/Monads.thy rename to src/test/Monads.thy diff --git a/src/test-gen/src/main/Observers.thy b/src/test/Observers.thy similarity index 100% rename from src/test-gen/src/main/Observers.thy rename to src/test/Observers.thy diff --git a/src/test-gen/src/main/TestEnv.ML b/src/test/TestEnv.ML similarity index 100% rename from src/test-gen/src/main/TestEnv.ML rename to src/test/TestEnv.ML diff --git a/src/test-gen/src/main/TestEnv.thy b/src/test/TestEnv.thy similarity index 100% rename from src/test-gen/src/main/TestEnv.thy rename to src/test/TestEnv.thy diff --git a/src/TestGen.thy b/src/test/TestGen.thy similarity index 100% rename from src/TestGen.thy rename to src/test/TestGen.thy diff --git a/src/test-gen/src/main/TestLib.thy b/src/test/TestLib.thy similarity index 100% rename from src/test-gen/src/main/TestLib.thy rename to src/test/TestLib.thy diff --git a/src/test-gen/src/main/TestRefinements.thy b/src/test/TestRefinements.thy similarity index 100% rename from src/test-gen/src/main/TestRefinements.thy rename to src/test/TestRefinements.thy diff --git a/src/test-gen/src/main/TestScript.thy b/src/test/TestScript.thy similarity index 100% rename from src/test-gen/src/main/TestScript.thy rename to src/test/TestScript.thy diff --git a/src/test-gen/src/main/TestSequence.thy b/src/test/TestSequence.thy similarity index 100% rename from src/test-gen/src/main/TestSequence.thy rename to 
src/test/TestSequence.thy diff --git a/src/test-gen/src/main/Testing.thy b/src/test/Testing.thy similarity index 100% rename from src/test-gen/src/main/Testing.thy rename to src/test/Testing.thy diff --git a/src/version.thy b/src/version.thy deleted file mode 100644 index 60b7b48..0000000 --- a/src/version.thy +++ /dev/null @@ -1,91 +0,0 @@ -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * version.thy --- Version information for HOL-TestGen - * This file is part of HOL-TestGen. - * - * Copyright (c) 2013 Achim D. Brucker, Germany - * 2016 The University of Sheffield, UK - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ******************************************************************************) -(* $Id: log_thy.thy 9570 2013-02-02 14:47:51Z brucker $ *) - -theory - version -imports - Main -begin - -ML_file "config.sml" - -ML {* -val testgen_version = Int.toString(testgen_ver_major)^"."^Int.toString(testgen_ver_minor)^"." - ^Int.toString(testgen_ver_micro)^testgen_ver_tag -val version = "HOL-TestGen "^testgen_version^" , based on "^(Distribution.version); -val _ = if (Distribution.version = (isabelle_version)) - then () - else let - val line0 = "Unsupported Isabelle version: \""^(Distribution.version)^"\"" - val line1 = "Only \""^isabelle_version^"\" is supported." - in - warning line0; - warning line1; - Output.physical_stderr("###\n"); - Output.physical_stderr("### "^line0^"\n"); - Output.physical_stderr("### "^line1^"\n"); - Output.physical_stderr("###\n") - end -*} - - -ML {* - let - val today = (Date.toString(Date.fromTimeUniv (Time.now())))^" (UTC)"; - val tex_header = "% This file is generated automatically. 
Do not edit.\n" - ^"% Generated by HOL-TestGen"^testgen_version^"\n" - ^"% on "^today^".\n" - ^"% \n"; - val tex_version = "\\newcommand{\\testgen}{HOL-TestGen}\n" - ^"\\newcommand{\\testgenFW}{HOL-TestGen/FW}\n" - ^"\\newcommand{\\isabelleversion}{"^(Distribution.version)^"}\n" - ^"\\newcommand{\\testgenversion}{"^testgen_version^"}\n" - ^"\\newcommand{\\testgenversiontag}{"^testgen_ver_tag^"}\n" - ^"\\newcommand{\\testgenurl}{\\url{"^testgen_url^"}}\n" - in - File.write (Path.explode "document/version.tex") (tex_header^tex_version) - handle (IO.Io{name=name,...}) => warning ("Could not create \""^name - ^"\". Document preparation might fail.") - end -*} - -end From 4c9e7d836801c7d1b874439fff7ba468056fa40e Mon Sep 17 00:00:00 2001 From: Serguei Mokhov Date: Fri, 25 Feb 2022 11:55:28 -0500 Subject: [PATCH 7/8] [i2021-1] commit older compat SMT folder from 2016 --- src/main/SMT/conj_disj_perm.ML | 127 ++++++ src/main/SMT/cvc4_interface.ML | 31 ++ src/main/SMT/cvc4_proof_parse.ML | 46 ++ src/main/SMT/smt_builtin.ML | 222 ++++++++++ src/main/SMT/smt_config.ML | 265 ++++++++++++ src/main/SMT/smt_datatypes.ML | 152 +++++++ src/main/SMT/smt_failure.ML | 40 ++ src/main/SMT/smt_normalize.ML | 444 +++++++++++++++++++ src/main/SMT/smt_real.ML | 115 +++++ src/main/SMT/smt_solver.ML | 307 +++++++++++++ src/main/SMT/smt_systems.ML | 154 +++++++ src/main/SMT/smt_translate.ML | 527 +++++++++++++++++++++++ src/main/SMT/smt_util.ML | 240 +++++++++++ src/main/SMT/smtlib.ML | 191 +++++++++ src/main/SMT/smtlib_interface.ML | 171 ++++++++ src/main/SMT/smtlib_isar.ML | 75 ++++ src/main/SMT/smtlib_proof.ML | 298 +++++++++++++ src/main/SMT/verit_isar.ML | 60 +++ src/main/SMT/verit_proof.ML | 324 ++++++++++++++ src/main/SMT/verit_proof_parse.ML | 78 ++++ src/main/SMT/z3_interface.ML | 192 +++++++++ src/main/SMT/z3_isar.ML | 120 ++++++ src/main/SMT/z3_proof.ML | 303 +++++++++++++ src/main/SMT/z3_real.ML | 32 ++ src/main/SMT/z3_replay.ML | 262 ++++++++++++ src/main/SMT/z3_replay_methods.ML | 685 ++++++++++++++++++++++++++++++ src/main/SMT/z3_replay_rules.ML | 54 +++ 27 files changed, 5515 insertions(+) create mode 100644 src/main/SMT/conj_disj_perm.ML create mode 100644 src/main/SMT/cvc4_interface.ML create mode 100644 src/main/SMT/cvc4_proof_parse.ML create mode 100644 src/main/SMT/smt_builtin.ML create mode 100644 src/main/SMT/smt_config.ML create mode 100644 src/main/SMT/smt_datatypes.ML create mode 100644 src/main/SMT/smt_failure.ML create mode 100644 src/main/SMT/smt_normalize.ML create mode 100644 src/main/SMT/smt_real.ML create mode 100644 src/main/SMT/smt_solver.ML create mode 100644 src/main/SMT/smt_systems.ML create mode 100644 src/main/SMT/smt_translate.ML create mode 100644 src/main/SMT/smt_util.ML create mode 100644 src/main/SMT/smtlib.ML create mode 100644 src/main/SMT/smtlib_interface.ML create mode 100644 src/main/SMT/smtlib_isar.ML create mode 100644 src/main/SMT/smtlib_proof.ML create mode 100644 src/main/SMT/verit_isar.ML create mode 100644 src/main/SMT/verit_proof.ML create mode 100644 src/main/SMT/verit_proof_parse.ML create mode 100644 src/main/SMT/z3_interface.ML create mode 100644 src/main/SMT/z3_isar.ML create mode 100644 src/main/SMT/z3_proof.ML create mode 100644 src/main/SMT/z3_real.ML create mode 100644 src/main/SMT/z3_replay.ML create mode 100644 src/main/SMT/z3_replay_methods.ML create mode 100644 src/main/SMT/z3_replay_rules.ML diff --git a/src/main/SMT/conj_disj_perm.ML b/src/main/SMT/conj_disj_perm.ML new file mode 100644 index 0000000..30b85d6 --- /dev/null +++ 
b/src/main/SMT/conj_disj_perm.ML @@ -0,0 +1,127 @@ +(* Title: HOL/Tools/SMT/conj_disj_perm.ML + Author: Sascha Boehme, TU Muenchen + +Tactic to prove equivalence of permutations of conjunctions and disjunctions. +*) + +signature CONJ_DISJ_PERM = +sig + val conj_disj_perm_tac: Proof.context -> int -> tactic +end + +structure Conj_Disj_Perm: CONJ_DISJ_PERM = +struct + +fun with_assumption ct f = + let val ct' = Thm.apply @{cterm HOL.Trueprop} ct + in Thm.implies_intr ct' (f (Thm.assume ct')) end + +fun eq_from_impls thm1 thm2 = thm2 INCR_COMP (thm1 INCR_COMP @{thm iffI}) + +fun add_lit thm = Termtab.update (HOLogic.dest_Trueprop (Thm.prop_of thm), thm) + +val ndisj1_rule = @{lemma "~(P | Q) ==> ~P" by auto} +val ndisj2_rule = @{lemma "~(P | Q) ==> ~Q" by auto} + +fun explode_thm thm = + (case HOLogic.dest_Trueprop (Thm.prop_of thm) of + @{const HOL.conj} $ _ $ _ => explode_conj_thm @{thm conjunct1} @{thm conjunct2} thm + | @{const HOL.Not} $ (@{const HOL.disj} $ _ $ _) => explode_conj_thm ndisj1_rule ndisj2_rule thm + | @{const HOL.Not} $ (@{const HOL.Not} $ _) => explode_thm (thm RS @{thm notnotD}) + | _ => add_lit thm) + +and explode_conj_thm rule1 rule2 thm lits = + explode_thm (thm RS rule1) (explode_thm (thm RS rule2) (add_lit thm lits)) + +val not_false_rule = @{lemma "~False" by auto} +fun explode thm = explode_thm thm (add_lit not_false_rule (add_lit @{thm TrueI} Termtab.empty)) + +fun find_dual_lit lits (@{const HOL.Not} $ t, thm) = Termtab.lookup lits t |> Option.map (pair thm) + | find_dual_lit _ _ = NONE + +fun find_dual_lits lits = Termtab.get_first (find_dual_lit lits) lits + +val not_not_rule = @{lemma "P ==> ~~P" by auto} +val ndisj_rule = @{lemma "~P ==> ~Q ==> ~(P | Q)" by auto} + +fun join lits t = + (case Termtab.lookup lits t of + SOME thm => thm + | NONE => join_term lits t) + +and join_term lits (@{const HOL.conj} $ t $ u) = @{thm conjI} OF (map (join lits) [t, u]) + | join_term lits (@{const HOL.Not} $ (@{const HOL.disj} $ t $ u)) = + ndisj_rule OF (map (join lits o HOLogic.mk_not) [t, u]) + | join_term lits (@{const HOL.Not} $ (@{const HOL.Not} $ t)) = join lits t RS not_not_rule + | join_term _ t = raise TERM ("join_term", [t]) + +fun prove_conj_disj_imp ct cu = with_assumption ct (fn thm => join (explode thm) (Thm.term_of cu)) + +fun prove_conj_disj_eq (clhs, crhs) = + let + val thm1 = prove_conj_disj_imp clhs crhs + val thm2 = prove_conj_disj_imp crhs clhs + in eq_from_impls thm1 thm2 end + +val not_not_false_rule = @{lemma "~~False ==> P" by auto} +val not_true_rule = @{lemma "~True ==> P" by auto} + +fun prove_any_imp ct = + (case Thm.term_of ct of + @{const HOL.False} => @{thm FalseE} + | @{const HOL.Not} $ (@{const HOL.Not} $ @{const HOL.False}) => not_not_false_rule + | @{const HOL.Not} $ @{const HOL.True} => not_true_rule + | _ => raise CTERM ("prove_any_imp", [ct])) + +fun prove_contradiction_imp ct = + with_assumption ct (fn thm => + let val lits = explode thm + in + (case Termtab.lookup lits @{const HOL.False} of + SOME thm' => thm' RS @{thm FalseE} + | NONE => + (case Termtab.lookup lits (@{const HOL.Not} $ @{const HOL.True}) of + SOME thm' => thm' RS not_true_rule + | NONE => + (case find_dual_lits lits of + SOME (not_lit_thm, lit_thm) => @{thm notE} OF [not_lit_thm, lit_thm] + | NONE => raise CTERM ("prove_contradiction", [ct])))) + end) + +fun prove_contradiction_eq to_right (clhs, crhs) = + let + val thm1 = if to_right then prove_contradiction_imp clhs else prove_any_imp clhs + val thm2 = if to_right then prove_any_imp crhs else 
prove_contradiction_imp crhs + in eq_from_impls thm1 thm2 end + +val contrapos_rule = @{lemma "(~P) = (~Q) ==> P = Q" by auto} +fun contrapos prove cp = contrapos_rule OF [prove (apply2 (Thm.apply @{cterm HOL.Not}) cp)] + +datatype kind = True | False | Conj | Disj | Other + +fun choose t _ _ _ @{const HOL.True} = t + | choose _ f _ _ @{const HOL.False} = f + | choose _ _ c _ (@{const HOL.conj} $ _ $ _) = c + | choose _ _ _ d (@{const HOL.disj} $ _ $ _) = d + | choose _ _ _ _ _ = Other + +fun kind_of (@{const HOL.Not} $ t) = choose False True Disj Conj t + | kind_of t = choose True False Conj Disj t + +fun prove_conj_disj_perm ct cp = + (case apply2 (kind_of o Thm.term_of) cp of + (Conj, Conj) => prove_conj_disj_eq cp + | (Disj, Disj) => contrapos prove_conj_disj_eq cp + | (Conj, False) => prove_contradiction_eq true cp + | (False, Conj) => prove_contradiction_eq false cp + | (Disj, True) => contrapos (prove_contradiction_eq true) cp + | (True, Disj) => contrapos (prove_contradiction_eq false) cp + | _ => raise CTERM ("prove_conj_disj_perm", [ct])) + +fun conj_disj_perm_tac ctxt = CSUBGOAL (fn (ct, i) => + (case Thm.term_of ct of + @{const HOL.Trueprop} $ (@{const HOL.eq(bool)} $ _ $ _) => + resolve_tac ctxt [prove_conj_disj_perm ct (Thm.dest_binop (Thm.dest_arg ct))] i + | _ => no_tac)) + +end diff --git a/src/main/SMT/cvc4_interface.ML b/src/main/SMT/cvc4_interface.ML new file mode 100644 index 0000000..68cad31 --- /dev/null +++ b/src/main/SMT/cvc4_interface.ML @@ -0,0 +1,31 @@ +(* Title: HOL/Tools/SMT/cvc4_interface.ML + Author: Jasmin Blanchette, TU Muenchen + +Interface to CVC4 based on an extended version of SMT-LIB. +*) + +signature CVC4_INTERFACE = +sig + val smtlib_cvc4C: SMT_Util.class +end; + +structure CVC4_Interface: CVC4_INTERFACE = +struct + +val smtlib_cvc4C = SMTLIB_Interface.smtlibC @ ["cvc4"] + + +(* interface *) + +local + fun translate_config ctxt = + {logic = K "(set-logic ALL_SUPPORTED)\n", fp_kinds = [BNF_Util.Least_FP, BNF_Util.Greatest_FP], + serialize = #serialize (SMTLIB_Interface.translate_config ctxt)} +in + +val _ = + Theory.setup (Context.theory_map (SMT_Translate.add_config (smtlib_cvc4C, translate_config))) + +end + +end; diff --git a/src/main/SMT/cvc4_proof_parse.ML b/src/main/SMT/cvc4_proof_parse.ML new file mode 100644 index 0000000..2807164 --- /dev/null +++ b/src/main/SMT/cvc4_proof_parse.ML @@ -0,0 +1,46 @@ +(* Title: HOL/Tools/SMT/cvc4_proof_parse.ML + Author: Jasmin Blanchette, TU Muenchen + +CVC4 proof (actually, unsat core) parsing. 
+*) + +signature CVC4_PROOF_PARSE = +sig + val parse_proof: SMT_Translate.replay_data -> + ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> + SMT_Solver.parsed_proof +end; + +structure CVC4_Proof_Parse: CVC4_PROOF_PARSE = +struct + +fun parse_proof ({ll_defs, assms, ...} : SMT_Translate.replay_data) xfacts prems _ output = + if exists (String.isPrefix "(error \"This build of CVC4 doesn't have proof support") output then + {outcome = NONE, fact_ids = NONE, atp_proof = K []} + else + let + val num_ll_defs = length ll_defs + + val id_of_index = Integer.add num_ll_defs + val index_of_id = Integer.add (~ num_ll_defs) + + val used_assert_ids = map_filter (try SMTLIB_Interface.assert_index_of_name) output + val used_assm_js = + map_filter (fn id => let val i = index_of_id id in if i >= 0 then SOME i else NONE end) + used_assert_ids + + val conjecture_i = 0 + val prems_i = conjecture_i + 1 + val num_prems = length prems + val facts_i = prems_i + num_prems + + val fact_ids' = + map_filter (fn j => + let val (i, _) = nth assms j in + try (apsnd (nth xfacts)) (id_of_index j, i - facts_i) + end) used_assm_js + in + {outcome = NONE, fact_ids = SOME fact_ids', atp_proof = K []} + end + +end; diff --git a/src/main/SMT/smt_builtin.ML b/src/main/SMT/smt_builtin.ML new file mode 100644 index 0000000..a5955c7 --- /dev/null +++ b/src/main/SMT/smt_builtin.ML @@ -0,0 +1,222 @@ +(* Title: HOL/Tools/SMT/smt_builtin.ML + Author: Sascha Boehme, TU Muenchen + +Tables of types and terms directly supported by SMT solvers. +*) + +signature SMT_BUILTIN = +sig + (*for experiments*) + val filter_builtins: (typ -> bool) -> Proof.context -> Proof.context + + (*built-in types*) + val add_builtin_typ: SMT_Util.class -> + typ * (typ -> string option) * (typ -> int -> string option) -> Context.generic -> + Context.generic + val add_builtin_typ_ext: typ * (typ -> bool) -> Context.generic -> + Context.generic + val dest_builtin_typ: Proof.context -> typ -> string option + val is_builtin_typ_ext: Proof.context -> typ -> bool + + (*built-in numbers*) + val dest_builtin_num: Proof.context -> term -> (string * typ) option + val is_builtin_num: Proof.context -> term -> bool + val is_builtin_num_ext: Proof.context -> term -> bool + + (*built-in functions*) + type 'a bfun = Proof.context -> typ -> term list -> 'a + type bfunr = string * int * term list * (term list -> term) + val add_builtin_fun: SMT_Util.class -> (string * typ) * bfunr option bfun -> Context.generic -> + Context.generic + val add_builtin_fun': SMT_Util.class -> term * string -> Context.generic -> Context.generic + val add_builtin_fun_ext: (string * typ) * term list bfun -> Context.generic -> Context.generic + val add_builtin_fun_ext': string * typ -> Context.generic -> Context.generic + val add_builtin_fun_ext'': string -> Context.generic -> Context.generic + val dest_builtin_fun: Proof.context -> string * typ -> term list -> bfunr option + val dest_builtin_eq: Proof.context -> term -> term -> bfunr option + val dest_builtin_pred: Proof.context -> string * typ -> term list -> bfunr option + val dest_builtin_conn: Proof.context -> string * typ -> term list -> bfunr option + val dest_builtin: Proof.context -> string * typ -> term list -> bfunr option + val dest_builtin_ext: Proof.context -> string * typ -> term list -> term list option + val is_builtin_fun: Proof.context -> string * typ -> term list -> bool + val is_builtin_fun_ext: Proof.context -> string * typ -> term list -> bool +end; + +structure SMT_Builtin: SMT_BUILTIN = 
+struct + + +(* built-in tables *) + +datatype ('a, 'b) kind = Ext of 'a | Int of 'b + +type ('a, 'b) ttab = ((typ * ('a, 'b) kind) Ord_List.T) SMT_Util.dict + +fun typ_ord ((T, _), (U, _)) = + let + fun tord (TVar _, Type _) = GREATER + | tord (Type _, TVar _) = LESS + | tord (Type (n, Ts), Type (m, Us)) = + if n = m then list_ord tord (Ts, Us) + else Term_Ord.typ_ord (T, U) + | tord TU = Term_Ord.typ_ord TU + in tord (T, U) end + +fun insert_ttab cs T f = + SMT_Util.dict_map_default (cs, []) + (Ord_List.insert typ_ord (perhaps (try Logic.varifyT_global) T, f)) + +fun merge_ttab ttabp = SMT_Util.dict_merge (Ord_List.merge typ_ord) ttabp + +fun lookup_ttab ctxt ttab T = + let fun match (U, _) = Sign.typ_instance (Proof_Context.theory_of ctxt) (T, U) + in + get_first (find_first match) (SMT_Util.dict_lookup ttab (SMT_Config.solver_class_of ctxt)) + end + +type ('a, 'b) btab = ('a, 'b) ttab Symtab.table + +fun insert_btab cs n T f = + Symtab.map_default (n, []) (insert_ttab cs T f) + +fun merge_btab btabp = Symtab.join (K merge_ttab) btabp + +fun lookup_btab ctxt btab (n, T) = + (case Symtab.lookup btab n of + NONE => NONE + | SOME ttab => lookup_ttab ctxt ttab T) + +type 'a bfun = Proof.context -> typ -> term list -> 'a + +type bfunr = string * int * term list * (term list -> term) + +structure Builtins = Generic_Data +( + type T = + (typ -> bool, (typ -> string option) * (typ -> int -> string option)) ttab * + (term list bfun, bfunr option bfun) btab + val empty = ([], Symtab.empty) + val extend = I + fun merge ((t1, b1), (t2, b2)) = (merge_ttab (t1, t2), merge_btab (b1, b2)) +) + +fun filter_ttab keep_T = map (apsnd (filter (keep_T o fst))) + +fun filter_builtins keep_T = + Context.proof_map (Builtins.map (fn (ttab, btab) => + (filter_ttab keep_T ttab, Symtab.map (K (filter_ttab keep_T)) btab))) + + +(* built-in types *) + +fun add_builtin_typ cs (T, f, g) = + Builtins.map (apfst (insert_ttab cs T (Int (f, g)))) + +fun add_builtin_typ_ext (T, f) = Builtins.map (apfst (insert_ttab SMT_Util.basicC T (Ext f))) + +fun lookup_builtin_typ ctxt = + lookup_ttab ctxt (fst (Builtins.get (Context.Proof ctxt))) + +fun dest_builtin_typ ctxt T = + (case lookup_builtin_typ ctxt T of + SOME (_, Int (f, _)) => f T + | _ => NONE) + +fun is_builtin_typ_ext ctxt T = + (case lookup_builtin_typ ctxt T of + SOME (_, Int (f, _)) => is_some (f T) + | SOME (_, Ext f) => f T + | NONE => false) + + +(* built-in numbers *) + +fun dest_builtin_num ctxt t = + (case try HOLogic.dest_number t of + NONE => NONE + | SOME (T, i) => + if i < 0 then NONE else + (case lookup_builtin_typ ctxt T of + SOME (_, Int (_, g)) => g T i |> Option.map (rpair T) + | _ => NONE)) + +val is_builtin_num = is_some oo dest_builtin_num + +fun is_builtin_num_ext ctxt t = + (case try HOLogic.dest_number t of + NONE => false + | SOME (T, _) => is_builtin_typ_ext ctxt T) + + +(* built-in functions *) + +fun add_builtin_fun cs ((n, T), f) = + Builtins.map (apsnd (insert_btab cs n T (Int f))) + +fun add_builtin_fun' cs (t, n) = + let + val c as (m, T) = Term.dest_Const t + fun app U ts = Term.list_comb (Const (m, U), ts) + fun bfun _ U ts = SOME (n, length (Term.binder_types T), ts, app U) + in add_builtin_fun cs (c, bfun) end + +fun add_builtin_fun_ext ((n, T), f) = + Builtins.map (apsnd (insert_btab SMT_Util.basicC n T (Ext f))) + +fun add_builtin_fun_ext' c = add_builtin_fun_ext (c, fn _ => fn _ => I) + +fun add_builtin_fun_ext'' n context = + let val thy = Context.theory_of context + in add_builtin_fun_ext' (n, Sign.the_const_type thy n) context 
end + +fun lookup_builtin_fun ctxt = + lookup_btab ctxt (snd (Builtins.get (Context.Proof ctxt))) + +fun dest_builtin_fun ctxt (c as (_, T)) ts = + (case lookup_builtin_fun ctxt c of + SOME (_, Int f) => f ctxt T ts + | _ => NONE) + +fun dest_builtin_eq ctxt t u = + let + val aT = TFree (Name.aT, @{sort type}) + val c = (@{const_name HOL.eq}, aT --> aT --> @{typ bool}) + fun mk ts = Term.list_comb (HOLogic.eq_const (Term.fastype_of (hd ts)), ts) + in + dest_builtin_fun ctxt c [] + |> Option.map (fn (n, i, _, _) => (n, i, [t, u], mk)) + end + +fun special_builtin_fun pred ctxt (c as (_, T)) ts = + if pred (Term.body_type T, Term.binder_types T) then + dest_builtin_fun ctxt c ts + else NONE + +fun dest_builtin_pred ctxt = special_builtin_fun (equal @{typ bool} o fst) ctxt + +fun dest_builtin_conn ctxt = + special_builtin_fun (forall (equal @{typ bool}) o (op ::)) ctxt + +fun dest_builtin ctxt c ts = + let val t = Term.list_comb (Const c, ts) + in + (case dest_builtin_num ctxt t of + SOME (n, _) => SOME (n, 0, [], K t) + | NONE => dest_builtin_fun ctxt c ts) + end + +fun dest_builtin_fun_ext ctxt (c as (_, T)) ts = + (case lookup_builtin_fun ctxt c of + SOME (_, Int f) => f ctxt T ts |> Option.map (fn (_, _, us, _) => us) + | SOME (_, Ext f) => SOME (f ctxt T ts) + | NONE => NONE) + +fun dest_builtin_ext ctxt c ts = + if is_builtin_num_ext ctxt (Term.list_comb (Const c, ts)) then SOME [] + else dest_builtin_fun_ext ctxt c ts + +fun is_builtin_fun ctxt c ts = is_some (dest_builtin_fun ctxt c ts) + +fun is_builtin_fun_ext ctxt c ts = is_some (dest_builtin_fun_ext ctxt c ts) + +end; diff --git a/src/main/SMT/smt_config.ML b/src/main/SMT/smt_config.ML new file mode 100644 index 0000000..8b8d029 --- /dev/null +++ b/src/main/SMT/smt_config.ML @@ -0,0 +1,265 @@ +(* Title: HOL/Tools/SMT/smt_config.ML + Author: Sascha Boehme, TU Muenchen + +Configuration options and diagnostic tools for SMT. 
+*) + +signature SMT_CONFIG = +sig + (*solver*) + type solver_info = { + name: string, + class: Proof.context -> SMT_Util.class, + avail: unit -> bool, + options: Proof.context -> string list } + val add_solver: solver_info -> Context.generic -> Context.generic + val set_solver_options: string * string -> Context.generic -> Context.generic + val is_available: Proof.context -> string -> bool + val available_solvers_of: Proof.context -> string list + val select_solver: string -> Context.generic -> Context.generic + val solver_of: Proof.context -> string + val solver_class_of: Proof.context -> SMT_Util.class + val solver_options_of: Proof.context -> string list + + (*options*) + val oracle: bool Config.T + val timeout: real Config.T + val reconstruction_step_timeout: real Config.T + val random_seed: int Config.T + val read_only_certificates: bool Config.T + val verbose: bool Config.T + val trace: bool Config.T + val statistics: bool Config.T + val monomorph_limit: int Config.T + val monomorph_instances: int Config.T + val infer_triggers: bool Config.T + val debug_files: string Config.T + val sat_solver: string Config.T + + (*tools*) + val with_time_limit: Proof.context -> real Config.T -> ('a -> 'b) -> 'a -> 'b + val with_timeout: Proof.context -> ('a -> 'b) -> 'a -> 'b + + (*diagnostics*) + val trace_msg: Proof.context -> ('a -> string) -> 'a -> unit + val verbose_msg: Proof.context -> ('a -> string) -> 'a -> unit + val statistics_msg: Proof.context -> ('a -> string) -> 'a -> unit + + (*certificates*) + val select_certificates: string -> Context.generic -> Context.generic + val certificates_of: Proof.context -> Cache_IO.cache option + + (*setup*) + val print_setup: Proof.context -> unit +end; + +structure SMT_Config: SMT_CONFIG = +struct + +(* solver *) + +type solver_info = { + name: string, + class: Proof.context -> SMT_Util.class, + avail: unit -> bool, + options: Proof.context -> string list} + +type data = { + solvers: (solver_info * string list) Symtab.table, + solver: string option, + certs: Cache_IO.cache option} + +fun mk_data solvers solver certs: data = {solvers=solvers, solver=solver, certs=certs} + +val empty_data = mk_data Symtab.empty NONE NONE + +fun solvers_of ({solvers, ...}: data) = solvers +fun solver_of ({solver, ...}: data) = solver +fun certs_of ({certs, ...}: data) = certs + +fun map_solvers f ({solvers, solver, certs}: data) = mk_data (f solvers) solver certs +fun map_solver f ({solvers, solver, certs}: data) = mk_data solvers (f solver) certs +fun put_certs c ({solvers, solver, ...}: data) = mk_data solvers solver c + +fun merge_data ({solvers=ss1,solver=s1,certs=c1}: data, {solvers=ss2,solver=s2,certs=c2}: data) = + mk_data (Symtab.merge (K true) (ss1, ss2)) (merge_options (s1, s2)) (merge_options (c1, c2)) + +structure Data = Generic_Data +( + type T = data + val empty = empty_data + val extend = I + val merge = merge_data +) + +fun set_solver_options (name, options) = + let val opts = String.tokens (Symbol.is_ascii_blank o str) options + in Data.map (map_solvers (Symtab.map_entry name (apsnd (K opts)))) end + +fun add_solver (info as {name, ...} : solver_info) context = + if Symtab.defined (solvers_of (Data.get context)) name then + error ("Solver already registered: " ^ quote name) + else + context + |> Data.map (map_solvers (Symtab.update (name, (info, [])))) + |> Context.map_theory (Attrib.setup (Binding.name (name ^ "_options")) + (Scan.lift (@{keyword "="} |-- Args.name) >> + (Thm.declaration_attribute o K o set_solver_options o pair name)) + ("additional 
command line options for SMT solver " ^ quote name)) + +fun all_solvers_of ctxt = Symtab.keys (solvers_of (Data.get (Context.Proof ctxt))) + +fun solver_name_of ctxt = solver_of (Data.get (Context.Proof ctxt)) + +fun is_available ctxt name = + (case Symtab.lookup (solvers_of (Data.get (Context.Proof ctxt))) name of + SOME ({avail, ...}, _) => avail () + | NONE => false) + +fun available_solvers_of ctxt = + filter (is_available ctxt) (all_solvers_of ctxt) + +fun warn_solver (Context.Proof ctxt) name = + if Context_Position.is_visible ctxt then + warning ("The SMT solver " ^ quote name ^ " is not installed") + else () + | warn_solver _ _ = () + +fun select_solver name context = + let + val ctxt = Context.proof_of context + val upd = Data.map (map_solver (K (SOME name))) + in + if not (member (op =) (all_solvers_of ctxt) name) then + error ("Trying to select unknown solver: " ^ quote name) + else if not (is_available ctxt name) then + (warn_solver context name; upd context) + else upd context + end + +fun no_solver_err () = error "No SMT solver selected" + +fun solver_of ctxt = + (case solver_name_of ctxt of + SOME name => name + | NONE => no_solver_err ()) + +fun solver_info_of default select ctxt = + (case solver_name_of ctxt of + NONE => default () + | SOME name => select (Symtab.lookup (solvers_of (Data.get (Context.Proof ctxt))) name)) + +fun solver_class_of ctxt = + let fun class_of ({class, ...}: solver_info, _) = class ctxt + in solver_info_of no_solver_err (class_of o the) ctxt end + +fun solver_options_of ctxt = + let + fun all_options NONE = [] + | all_options (SOME ({options, ...} : solver_info, opts)) = + opts @ options ctxt + in solver_info_of (K []) all_options ctxt end + +val setup_solver = + Attrib.setup @{binding smt_solver} + (Scan.lift (@{keyword "="} |-- Args.name) >> + (Thm.declaration_attribute o K o select_solver)) + "SMT solver configuration" + + +(* options *) + +val oracle = Attrib.setup_config_bool @{binding smt_oracle} (K true) +val timeout = Attrib.setup_config_real @{binding smt_timeout} (K 30.0) +val reconstruction_step_timeout = Attrib.setup_config_real @{binding smt_reconstruction_step_timeout} (K 10.0) +val random_seed = Attrib.setup_config_int @{binding smt_random_seed} (K 1) +val read_only_certificates = Attrib.setup_config_bool @{binding smt_read_only_certificates} (K false) +val verbose = Attrib.setup_config_bool @{binding smt_verbose} (K true) +val trace = Attrib.setup_config_bool @{binding smt_trace} (K false) +val statistics = Attrib.setup_config_bool @{binding smt_statistics} (K false) +val monomorph_limit = Attrib.setup_config_int @{binding smt_monomorph_limit} (K 10) +val monomorph_instances = Attrib.setup_config_int @{binding smt_monomorph_instances} (K 500) +val infer_triggers = Attrib.setup_config_bool @{binding smt_infer_triggers} (K false) +val debug_files = Attrib.setup_config_string @{binding smt_debug_files} (K "") +val sat_solver = Attrib.setup_config_string @{binding smt_sat_solver} (K "cdclite") + + +(* diagnostics *) + +fun cond_trace flag f x = if flag then tracing ("SMT: " ^ f x) else () + +fun verbose_msg ctxt = cond_trace (Config.get ctxt verbose) +fun trace_msg ctxt = cond_trace (Config.get ctxt trace) +fun statistics_msg ctxt = cond_trace (Config.get ctxt statistics) + + +(* tools *) + +fun with_time_limit ctxt timeout_config f x = + Timeout.apply (seconds (Config.get ctxt timeout_config)) f x + handle Timeout.TIMEOUT _ => raise SMT_Failure.SMT SMT_Failure.Time_Out + +fun with_timeout ctxt = with_time_limit ctxt timeout + + +(* 
certificates *) + +val certificates_of = certs_of o Data.get o Context.Proof + +val get_certificates_path = Option.map (Cache_IO.cache_path_of) o certificates_of + +fun select_certificates name context = context |> Data.map (put_certs ( + if name = "" then NONE + else + Path.explode name + |> Path.append (Resources.master_directory (Context.theory_of context)) + |> SOME o Cache_IO.unsynchronized_init)) + +val setup_certificates = + Attrib.setup @{binding smt_certificates} + (Scan.lift (@{keyword "="} |-- Args.name) >> + (Thm.declaration_attribute o K o select_certificates)) + "SMT certificates configuration" + + +(* setup *) + +val _ = Theory.setup ( + setup_solver #> + setup_certificates) + +fun print_setup ctxt = + let + fun string_of_bool b = if b then "true" else "false" + + val names = available_solvers_of ctxt + val ns = if null names then ["(none)"] else sort_strings names + val n = the_default "(none)" (solver_name_of ctxt) + val opts = solver_options_of ctxt + + val t = string_of_real (Config.get ctxt timeout) + + val certs_filename = + (case get_certificates_path ctxt of + SOME path => Path.print path + | NONE => "(disabled)") + in + Pretty.writeln (Pretty.big_list "SMT setup:" [ + Pretty.str ("Current SMT solver: " ^ n), + Pretty.str ("Current SMT solver options: " ^ space_implode " " opts), + Pretty.str_list "Available SMT solvers: " "" ns, + Pretty.str ("Current timeout: " ^ t ^ " seconds"), + Pretty.str ("With proofs: " ^ + string_of_bool (not (Config.get ctxt oracle))), + Pretty.str ("Certificates cache: " ^ certs_filename), + Pretty.str ("Fixed certificates: " ^ + string_of_bool (Config.get ctxt read_only_certificates))]) + end + +val _ = + Outer_Syntax.command @{command_keyword smt_status} + "show the available SMT solvers, the currently selected SMT solver, \ + \and the values of SMT configuration options" + (Scan.succeed (Toplevel.keep (print_setup o Toplevel.context_of))) + +end; diff --git a/src/main/SMT/smt_datatypes.ML b/src/main/SMT/smt_datatypes.ML new file mode 100644 index 0000000..2467cab --- /dev/null +++ b/src/main/SMT/smt_datatypes.ML @@ -0,0 +1,152 @@ +(* Title: HOL/Tools/SMT/smt_datatypes.ML + Author: Sascha Boehme, TU Muenchen + +Collector functions for common type declarations and their representation +as (co)algebraic datatypes. 
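+Declarations are gathered from free-constructor (Ctr_Sugar) information and
+from suitable typedef declarations, together with their constructors and
+selectors.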
+*) + +signature SMT_DATATYPES = +sig + val add_decls: BNF_Util.fp_kind list -> typ -> + (BNF_Util.fp_kind * (typ * (term * term list) list)) list list * Proof.context -> + (BNF_Util.fp_kind * (typ * (term * term list) list)) list list * Proof.context +end; + +structure SMT_Datatypes: SMT_DATATYPES = +struct + +fun mk_selectors T Ts sels = + if null sels then + Variable.variant_fixes (replicate (length Ts) "select") + #>> map2 (fn U => fn n => Free (n, T --> U)) Ts + else + pair sels + + +(* free constructor type declarations *) + +fun get_ctr_sugar_decl ({ctrs = ctrs0, selss = selss0, ...} : Ctr_Sugar.ctr_sugar) T Ts ctxt = + let + val selss = map (map (Ctr_Sugar.mk_disc_or_sel Ts)) selss0 + val ctrs = map (Ctr_Sugar.mk_ctr Ts) ctrs0 + + fun mk_constr ctr sels = + mk_selectors T (binder_types (fastype_of ctr)) sels #>> pair ctr + + val selss' = + (if has_duplicates (op aconv) (flat selss) orelse + exists (exists (can (dest_funT o range_type o fastype_of))) selss then + [] + else + selss) + |> Ctr_Sugar_Util.pad_list [] (length ctrs) + in + @{fold_map 2} mk_constr ctrs selss' ctxt + |>> (pair T #> single) + end + + +(* typedef declarations *) + +fun get_typedef_decl (({Abs_name, Rep_name, abs_type, rep_type, ...}, {Abs_inverse, ...}) + : Typedef.info) T Ts = + if can (curry (op RS) @{thm UNIV_I}) Abs_inverse then + let + val env = snd (Term.dest_Type abs_type) ~~ Ts + val instT = Term.map_atyps (perhaps (AList.lookup (op =) env)) + + val constr = Const (Abs_name, instT (rep_type --> abs_type)) + val select = Const (Rep_name, instT (abs_type --> rep_type)) + in [(T, [(constr, [select])])] end + else + [] + + +(* collection of declarations *) + +val extN = "_ext" (* cf. "HOL/Tools/typedef.ML" *) + +fun get_decls fps T n Ts ctxt = + let + fun maybe_typedef () = + (case Typedef.get_info ctxt n of + [] => ([], ctxt) + | info :: _ => (map (pair (hd fps)) (get_typedef_decl info T Ts), ctxt)) + in + (case BNF_FP_Def_Sugar.fp_sugar_of ctxt n of + SOME {fp, fp_res = {Ts = fp_Ts, ...}, fp_ctr_sugar = {ctr_sugar, ...}, ...} => + if member (op =) fps fp then + let + val ns = map (fst o dest_Type) fp_Ts + val mutual_fp_sugars = map_filter (BNF_FP_Def_Sugar.fp_sugar_of ctxt) ns + val Xs = map #X mutual_fp_sugars + val ctrXs_Tsss = map (#ctrXs_Tss o #fp_ctr_sugar) mutual_fp_sugars + + (* Datatypes nested through datatypes and codatatypes nested through codatatypes are + allowed. So are mutually (co)recursive (co)datatypes. *) + fun is_same_fp s = + (case BNF_FP_Def_Sugar.fp_sugar_of ctxt s of + SOME {fp = fp', ...} => fp' = fp + | NONE => false) + fun is_homogenously_nested_co_recursive (Type (s, Ts)) = + forall (if is_same_fp s then is_homogenously_nested_co_recursive + else not o BNF_FP_Rec_Sugar_Util.exists_subtype_in Xs) Ts + | is_homogenously_nested_co_recursive _ = true + + val Type (_, As) :: _ = fp_Ts + val substAs = Term.typ_subst_atomic (As ~~ Ts); + in + (* TODO/FIXME: The "bool" check is there to work around a CVC4 bug + (http://church.cims.nyu.edu/bugzilla3/show_bug.cgi?id=597). It should be removed once + the bug is fixed. 
*) + if forall (forall (forall (is_homogenously_nested_co_recursive))) ctrXs_Tsss andalso + forall (forall (forall (curry (op <>) @{typ bool}))) + (map (map (map substAs)) ctrXs_Tsss) then + get_ctr_sugar_decl ctr_sugar T Ts ctxt |>> map (pair fp) + else + maybe_typedef () + end + else + ([], ctxt) + | NONE => + if String.isSuffix extN n then + (* for records (FIXME: hack) *) + (case Ctr_Sugar.ctr_sugar_of ctxt n of + SOME ctr_sugar => + get_ctr_sugar_decl ctr_sugar T Ts ctxt |>> map (pair (hd fps)) + | NONE => maybe_typedef ()) + else + maybe_typedef ()) + end + +fun add_decls fps T (declss, ctxt) = + let + fun declared T = exists (exists (equal T o fst o snd)) + fun declared' T = exists (exists (equal T o fst o snd) o snd) + fun depends ds = exists (member (op =) (map (fst o snd) ds)) + + fun add (TFree _) = I + | add (TVar _) = I + | add (T as Type (@{type_name fun}, _)) = + fold add (Term.body_type T :: Term.binder_types T) + | add @{typ bool} = I + | add (T as Type (n, Ts)) = (fn (dss, ctxt1) => + if declared T declss orelse declared' T dss then + (dss, ctxt1) + else if SMT_Builtin.is_builtin_typ_ext ctxt1 T then + (dss, ctxt1) + else + (case get_decls fps T n Ts ctxt1 of + ([], _) => (dss, ctxt1) + | (ds, ctxt2) => + let + val constrTs = maps (map (snd o Term.dest_Const o fst) o snd o snd) ds + val Us = fold (union (op =) o Term.binder_types) constrTs [] + + fun ins [] = [(Us, ds)] + | ins ((Uds as (Us', _)) :: Udss) = + if depends ds Us' then (Us, ds) :: Uds :: Udss else Uds :: ins Udss + in fold add Us (ins dss, ctxt2) end)) + in add T ([], ctxt) |>> append declss o map snd end + +end; diff --git a/src/main/SMT/smt_failure.ML b/src/main/SMT/smt_failure.ML new file mode 100644 index 0000000..ba892ae --- /dev/null +++ b/src/main/SMT/smt_failure.ML @@ -0,0 +1,40 @@ +(* Title: HOL/Tools/SMT/smt_failure.ML + Author: Sascha Boehme, TU Muenchen + +Failures and exception of SMT. +*) + +signature SMT_FAILURE = +sig + datatype failure = + Counterexample of bool | + Time_Out | + Out_Of_Memory | + Abnormal_Termination of int | + Other_Failure of string + val string_of_failure: failure -> string + exception SMT of failure +end; + +structure SMT_Failure: SMT_FAILURE = +struct + +datatype failure = + Counterexample of bool | + Time_Out | + Out_Of_Memory | + Abnormal_Termination of int | + Other_Failure of string + +fun string_of_failure (Counterexample genuine) = + if genuine then "Counterexample found (possibly spurious)" + else "Potential counterexample found" + | string_of_failure Time_Out = "Timed out" + | string_of_failure Out_Of_Memory = "Ran out of memory" + | string_of_failure (Abnormal_Termination err) = + "Solver terminated abnormally with error code " ^ string_of_int err + | string_of_failure (Other_Failure msg) = msg + +exception SMT of failure + +end; diff --git a/src/main/SMT/smt_normalize.ML b/src/main/SMT/smt_normalize.ML new file mode 100644 index 0000000..98e820b --- /dev/null +++ b/src/main/SMT/smt_normalize.ML @@ -0,0 +1,444 @@ +(* Title: HOL/Tools/SMT/smt_normalize.ML + Author: Sascha Boehme, TU Muenchen + +Normalization steps on theorems required by SMT solvers. 
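+The steps comprise instantiation of elimination rules, normalization of
+definitions, atomization, unfolding of the special quantifiers Ex1, Ball and
+Bex, trigger checking and inference, rewriting of bool case expressions and of
+abs, min and max, normalization of numerals, and monomorphization.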
+*) + +signature SMT_NORMALIZE = +sig + val drop_fact_warning: Proof.context -> thm -> unit + val atomize_conv: Proof.context -> conv + + val special_quant_table: (string * thm) list + val case_bool_entry: string * thm + val abs_min_max_table: (string * thm) list + + type extra_norm = Proof.context -> thm list * thm list -> thm list * thm list + val add_extra_norm: SMT_Util.class * extra_norm -> Context.generic -> Context.generic + val normalize: Proof.context -> thm list -> (int * thm) list +end; + +structure SMT_Normalize: SMT_NORMALIZE = +struct + +fun drop_fact_warning ctxt = + SMT_Config.verbose_msg ctxt (prefix "Warning: dropping assumption: " o + Thm.string_of_thm ctxt) + + +(* general theorem normalizations *) + +(** instantiate elimination rules **) + +local + val (cpfalse, cfalse) = `SMT_Util.mk_cprop (Thm.cterm_of @{context} @{const False}) + + fun inst f ct thm = + let val cv = f (Drule.strip_imp_concl (Thm.cprop_of thm)) + in Thm.instantiate ([], [(dest_Var (Thm.term_of cv), ct)]) thm end +in + +fun instantiate_elim thm = + (case Thm.concl_of thm of + @{const Trueprop} $ Var (_, @{typ bool}) => inst Thm.dest_arg cfalse thm + | Var _ => inst I cpfalse thm + | _ => thm) + +end + + +(** normalize definitions **) + +fun norm_def thm = + (case Thm.prop_of thm of + @{const Trueprop} $ (Const (@{const_name HOL.eq}, _) $ _ $ Abs _) => + norm_def (thm RS @{thm fun_cong}) + | Const (@{const_name Pure.eq}, _) $ _ $ Abs _ => norm_def (thm RS @{thm meta_eq_to_obj_eq}) + | _ => thm) + + +(** atomization **) + +fun atomize_conv ctxt ct = + (case Thm.term_of ct of + @{const Pure.imp} $ _ $ _ => + Conv.binop_conv (atomize_conv ctxt) then_conv Conv.rewr_conv @{thm atomize_imp} + | Const (@{const_name Pure.eq}, _) $ _ $ _ => + Conv.binop_conv (atomize_conv ctxt) then_conv Conv.rewr_conv @{thm atomize_eq} + | Const (@{const_name Pure.all}, _) $ Abs _ => + Conv.binder_conv (atomize_conv o snd) ctxt then_conv Conv.rewr_conv @{thm atomize_all} + | _ => Conv.all_conv) ct + handle CTERM _ => Conv.all_conv ct + +val setup_atomize = + fold SMT_Builtin.add_builtin_fun_ext'' [@{const_name Pure.imp}, @{const_name Pure.eq}, + @{const_name Pure.all}, @{const_name Trueprop}] + + +(** unfold special quantifiers **) + +val special_quant_table = [ + (@{const_name Ex1}, @{thm Ex1_def_raw}), + (@{const_name Ball}, @{thm Ball_def_raw}), + (@{const_name Bex}, @{thm Bex_def_raw})] + +local + fun special_quant (Const (n, _)) = AList.lookup (op =) special_quant_table n + | special_quant _ = NONE + + fun special_quant_conv _ ct = + (case special_quant (Thm.term_of ct) of + SOME thm => Conv.rewr_conv thm + | NONE => Conv.all_conv) ct +in + +fun unfold_special_quants_conv ctxt = + SMT_Util.if_exists_conv (is_some o special_quant) (Conv.top_conv special_quant_conv ctxt) + +val setup_unfolded_quants = fold (SMT_Builtin.add_builtin_fun_ext'' o fst) special_quant_table + +end + + +(** trigger inference **) + +local + (*** check trigger syntax ***) + + fun dest_trigger (Const (@{const_name pat}, _) $ _) = SOME true + | dest_trigger (Const (@{const_name nopat}, _) $ _) = SOME false + | dest_trigger _ = NONE + + fun eq_list [] = false + | eq_list (b :: bs) = forall (equal b) bs + + fun proper_trigger t = + t + |> these o try SMT_Util.dest_symb_list + |> map (map_filter dest_trigger o these o try SMT_Util.dest_symb_list) + |> (fn [] => false | bss => forall eq_list bss) + + fun proper_quant inside f t = + (case t of + Const (@{const_name All}, _) $ Abs (_, _, u) => proper_quant true f u + | Const (@{const_name Ex}, _) $ Abs (_, 
_, u) => proper_quant true f u + | @{const trigger} $ p $ u => + (if inside then f p else false) andalso proper_quant false f u + | Abs (_, _, u) => proper_quant false f u + | u1 $ u2 => proper_quant false f u1 andalso proper_quant false f u2 + | _ => true) + + fun check_trigger_error ctxt t = + error ("SMT triggers must only occur under quantifier and multipatterns " ^ + "must have the same kind: " ^ Syntax.string_of_term ctxt t) + + fun check_trigger_conv ctxt ct = + if proper_quant false proper_trigger (SMT_Util.term_of ct) then Conv.all_conv ct + else check_trigger_error ctxt (Thm.term_of ct) + + + (*** infer simple triggers ***) + + fun dest_cond_eq ct = + (case Thm.term_of ct of + Const (@{const_name HOL.eq}, _) $ _ $ _ => Thm.dest_binop ct + | @{const HOL.implies} $ _ $ _ => dest_cond_eq (Thm.dest_arg ct) + | _ => raise CTERM ("no equation", [ct])) + + fun get_constrs thy (Type (n, _)) = these (BNF_LFP_Compat.get_constrs thy n) + | get_constrs _ _ = [] + + fun is_constr thy (n, T) = + let fun match (m, U) = m = n andalso Sign.typ_instance thy (T, U) + in can (the o find_first match o get_constrs thy o Term.body_type) T end + + fun is_constr_pat thy t = + (case Term.strip_comb t of + (Free _, []) => true + | (Const c, ts) => is_constr thy c andalso forall (is_constr_pat thy) ts + | _ => false) + + fun is_simp_lhs ctxt t = + (case Term.strip_comb t of + (Const c, ts as _ :: _) => + not (SMT_Builtin.is_builtin_fun_ext ctxt c ts) andalso + forall (is_constr_pat (Proof_Context.theory_of ctxt)) ts + | _ => false) + + fun has_all_vars vs t = + subset (op aconv) (vs, map Free (Term.add_frees t [])) + + fun minimal_pats vs ct = + if has_all_vars vs (Thm.term_of ct) then + (case Thm.term_of ct of + _ $ _ => + (case apply2 (minimal_pats vs) (Thm.dest_comb ct) of + ([], []) => [[ct]] + | (ctss, ctss') => union (eq_set (op aconvc)) ctss ctss') + | _ => []) + else [] + + fun proper_mpat _ _ _ [] = false + | proper_mpat thy gen u cts = + let + val tps = (op ~~) (`gen (map Thm.term_of cts)) + fun some_match u = tps |> exists (fn (t', t) => + Pattern.matches thy (t', u) andalso not (t aconv u)) + in not (Term.exists_subterm some_match u) end + + val pat = SMT_Util.mk_const_pat @{theory} @{const_name pat} SMT_Util.destT1 + fun mk_pat ct = Thm.apply (SMT_Util.instT' ct pat) ct + + fun mk_clist T = + apply2 (Thm.cterm_of @{context}) (SMT_Util.symb_cons_const T, SMT_Util.symb_nil_const T) + fun mk_list (ccons, cnil) f cts = fold_rev (Thm.mk_binop ccons o f) cts cnil + val mk_pat_list = mk_list (mk_clist @{typ pattern}) + val mk_mpat_list = mk_list (mk_clist @{typ "pattern symb_list"}) + fun mk_trigger ctss = mk_mpat_list (mk_pat_list mk_pat) ctss + + val trigger_eq = mk_meta_eq @{lemma "p = trigger t p" by (simp add: trigger_def)} + + fun insert_trigger_conv [] ct = Conv.all_conv ct + | insert_trigger_conv ctss ct = + let + val (ctr, cp) = Thm.dest_binop (Thm.rhs_of trigger_eq) ||> rpair ct + val inst = map (apfst (dest_Var o Thm.term_of)) [cp, (ctr, mk_trigger ctss)] + in Thm.instantiate ([], inst) trigger_eq end + + fun infer_trigger_eq_conv outer_ctxt (ctxt, cvs) ct = + let + val (lhs, rhs) = dest_cond_eq ct + + val vs = map Thm.term_of cvs + val thy = Proof_Context.theory_of ctxt + + fun get_mpats ct = + if is_simp_lhs ctxt (Thm.term_of ct) then minimal_pats vs ct + else [] + val gen = Variable.export_terms ctxt outer_ctxt + val filter_mpats = filter (proper_mpat thy gen (Thm.term_of rhs)) + + in insert_trigger_conv (filter_mpats (get_mpats lhs)) ct end + + fun has_trigger (@{const trigger} $ _ $ _) 
= true + | has_trigger _ = false + + fun try_trigger_conv cv ct = + if SMT_Util.under_quant has_trigger (SMT_Util.term_of ct) then Conv.all_conv ct + else Conv.try_conv cv ct + + fun infer_trigger_conv ctxt = + if Config.get ctxt SMT_Config.infer_triggers then + try_trigger_conv (SMT_Util.under_quant_conv (infer_trigger_eq_conv ctxt) ctxt) + else Conv.all_conv +in + +fun trigger_conv ctxt = + SMT_Util.prop_conv (check_trigger_conv ctxt then_conv infer_trigger_conv ctxt) + +val setup_trigger = + fold SMT_Builtin.add_builtin_fun_ext'' + [@{const_name pat}, @{const_name nopat}, @{const_name trigger}] + +end + + +(** combined general normalizations **) + +fun gen_normalize1_conv ctxt = + atomize_conv ctxt then_conv + unfold_special_quants_conv ctxt then_conv + Thm.beta_conversion true then_conv + trigger_conv ctxt + +fun gen_normalize1 ctxt = + instantiate_elim #> + norm_def #> + Conv.fconv_rule (Thm.beta_conversion true then_conv Thm.eta_conversion) #> + Drule.forall_intr_vars #> + Conv.fconv_rule (gen_normalize1_conv ctxt) #> + (* Z3 4.3.1 silently normalizes "P --> Q --> R" to "P & Q --> R" *) + Raw_Simplifier.rewrite_rule ctxt @{thms HOL.imp_conjL[symmetric, THEN eq_reflection]} + +fun gen_norm1_safe ctxt (i, thm) = + (case try (gen_normalize1 ctxt) thm of + SOME thm' => SOME (i, thm') + | NONE => (drop_fact_warning ctxt thm; NONE)) + +fun gen_normalize ctxt iwthms = map_filter (gen_norm1_safe ctxt) iwthms + + +(* unfolding of definitions and theory-specific rewritings *) + +fun expand_head_conv cv ct = + (case Thm.term_of ct of + _ $ _ => + Conv.fun_conv (expand_head_conv cv) then_conv + Conv.try_conv (Thm.beta_conversion false) + | _ => cv) ct + + +(** rewrite bool case expressions as if expressions **) + +val case_bool_entry = (@{const_name "bool.case_bool"}, @{thm case_bool_if}) + +local + fun is_case_bool (Const (@{const_name "bool.case_bool"}, _)) = true + | is_case_bool _ = false + + fun unfold_conv _ = + SMT_Util.if_true_conv (is_case_bool o Term.head_of) + (expand_head_conv (Conv.rewr_conv @{thm case_bool_if})) +in + +fun rewrite_case_bool_conv ctxt = + SMT_Util.if_exists_conv is_case_bool (Conv.top_conv unfold_conv ctxt) + +val setup_case_bool = SMT_Builtin.add_builtin_fun_ext'' @{const_name "bool.case_bool"} + +end + + +(** unfold abs, min and max **) + +val abs_min_max_table = [ + (@{const_name min}, @{thm min_def_raw}), + (@{const_name max}, @{thm max_def_raw}), + (@{const_name abs}, @{thm abs_if_raw})] + +local + fun abs_min_max ctxt (Const (n, Type (@{type_name fun}, [T, _]))) = + (case AList.lookup (op =) abs_min_max_table n of + NONE => NONE + | SOME thm => if SMT_Builtin.is_builtin_typ_ext ctxt T then SOME thm else NONE) + | abs_min_max _ _ = NONE + + fun unfold_amm_conv ctxt ct = + (case abs_min_max ctxt (Term.head_of (Thm.term_of ct)) of + SOME thm => expand_head_conv (Conv.rewr_conv thm) + | NONE => Conv.all_conv) ct +in + +fun unfold_abs_min_max_conv ctxt = + SMT_Util.if_exists_conv (is_some o abs_min_max ctxt) (Conv.top_conv unfold_amm_conv ctxt) + +val setup_abs_min_max = fold (SMT_Builtin.add_builtin_fun_ext'' o fst) abs_min_max_table + +end + + +(** normalize numerals **) + +local + (* + rewrite Numeral1 into 1 + rewrite - 0 into 0 + *) + + fun is_irregular_number (Const (@{const_name numeral}, _) $ Const (@{const_name num.One}, _)) = + true + | is_irregular_number (Const (@{const_name uminus}, _) $ Const (@{const_name Groups.zero}, _)) = + true + | is_irregular_number _ = false + + fun is_strange_number ctxt t = is_irregular_number t andalso 
SMT_Builtin.is_builtin_num ctxt t + + val proper_num_ss = + simpset_of (put_simpset HOL_ss @{context} addsimps @{thms Num.numeral_One minus_zero}) + + fun norm_num_conv ctxt = + SMT_Util.if_conv (is_strange_number ctxt) (Simplifier.rewrite (put_simpset proper_num_ss ctxt)) + Conv.no_conv +in + +fun normalize_numerals_conv ctxt = + SMT_Util.if_exists_conv (is_strange_number ctxt) (Conv.top_sweep_conv norm_num_conv ctxt) + +end + + +(** combined unfoldings and rewritings **) + +fun unfold_conv ctxt = + rewrite_case_bool_conv ctxt then_conv + unfold_abs_min_max_conv ctxt then_conv + Thm.beta_conversion true + +fun unfold_polymorph ctxt = map (apsnd (Conv.fconv_rule (unfold_conv ctxt))) +fun unfold_monomorph ctxt = map (apsnd (Conv.fconv_rule (normalize_numerals_conv ctxt))) + + +(* overall normalization *) + +fun burrow_ids f ithms = + let + val (is, thms) = split_list ithms + val (thms', extra_thms) = f thms + in (is ~~ thms') @ map (pair ~1) extra_thms end + +type extra_norm = Proof.context -> thm list * thm list -> thm list * thm list + +structure Extra_Norms = Generic_Data +( + type T = extra_norm SMT_Util.dict + val empty = [] + val extend = I + fun merge data = SMT_Util.dict_merge fst data +) + +fun add_extra_norm (cs, norm) = Extra_Norms.map (SMT_Util.dict_update (cs, norm)) + +fun apply_extra_norms ctxt ithms = + let + val cs = SMT_Config.solver_class_of ctxt + val es = SMT_Util.dict_lookup (Extra_Norms.get (Context.Proof ctxt)) cs + in burrow_ids (fold (fn e => e ctxt) es o rpair []) ithms end + +local + val ignored = member (op =) [@{const_name All}, @{const_name Ex}, + @{const_name Let}, @{const_name If}, @{const_name HOL.eq}] + + val schematic_consts_of = + let + fun collect (@{const trigger} $ p $ t) = collect_trigger p #> collect t + | collect (t $ u) = collect t #> collect u + | collect (Abs (_, _, t)) = collect t + | collect (t as Const (n, _)) = + if not (ignored n) then Monomorph.add_schematic_consts_of t else I + | collect _ = I + and collect_trigger t = + let val dest = these o try SMT_Util.dest_symb_list + in fold (fold collect_pat o dest) (dest t) end + and collect_pat (Const (@{const_name pat}, _) $ t) = collect t + | collect_pat (Const (@{const_name nopat}, _) $ t) = collect t + | collect_pat _ = I + in (fn t => collect t Symtab.empty) end +in + +fun monomorph ctxt xthms = + let val (xs, thms) = split_list xthms + in + map (pair 1) thms + |> Monomorph.monomorph schematic_consts_of ctxt + |> maps (uncurry (map o pair)) o map2 pair xs o map (map snd) + end + +end + +fun normalize ctxt wthms = + wthms + |> map_index I + |> gen_normalize ctxt + |> unfold_polymorph ctxt + |> monomorph ctxt + |> unfold_monomorph ctxt + |> apply_extra_norms ctxt + +val _ = Theory.setup (Context.theory_map ( + setup_atomize #> + setup_unfolded_quants #> + setup_trigger #> + setup_case_bool #> + setup_abs_min_max)) + +end; diff --git a/src/main/SMT/smt_real.ML b/src/main/SMT/smt_real.ML new file mode 100644 index 0000000..8e08d4c --- /dev/null +++ b/src/main/SMT/smt_real.ML @@ -0,0 +1,115 @@ +(* Title: HOL/Tools/SMT/smt_real.ML + Author: Sascha Boehme, TU Muenchen + +SMT setup for reals. 
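+This covers the SMT-LIB logic selection, the built-in real constants and
+operations for SMT-LIB and Z3, Z3 term reconstruction, and a linear-arithmetic
+simproc used during Z3 proof replay.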
+*) + +structure SMT_Real: sig end = +struct + + +(* SMT-LIB logic *) + +fun smtlib_logic ts = + if exists (Term.exists_type (Term.exists_subtype (equal @{typ real}))) ts + then SOME "AUFLIRA" + else NONE + + +(* SMT-LIB and Z3 built-ins *) + +local + fun real_num _ i = SOME (string_of_int i ^ ".0") + + fun is_linear [t] = SMT_Util.is_number t + | is_linear [t, u] = SMT_Util.is_number t orelse SMT_Util.is_number u + | is_linear _ = false + + fun mk_times ts = Term.list_comb (@{const times (real)}, ts) + + fun times _ _ ts = if is_linear ts then SOME ("*", 2, ts, mk_times) else NONE +in + +val setup_builtins = + SMT_Builtin.add_builtin_typ SMTLIB_Interface.smtlibC + (@{typ real}, K (SOME "Real"), real_num) #> + fold (SMT_Builtin.add_builtin_fun' SMTLIB_Interface.smtlibC) [ + (@{const less (real)}, "<"), + (@{const less_eq (real)}, "<="), + (@{const uminus (real)}, "-"), + (@{const plus (real)}, "+"), + (@{const minus (real)}, "-") ] #> + SMT_Builtin.add_builtin_fun SMTLIB_Interface.smtlibC + (Term.dest_Const @{const times (real)}, times) #> + SMT_Builtin.add_builtin_fun' Z3_Interface.smtlib_z3C + (@{const times (real)}, "*") #> + SMT_Builtin.add_builtin_fun' Z3_Interface.smtlib_z3C + (@{const divide (real)}, "/") + +end + + +(* Z3 constructors *) + +local + fun z3_mk_builtin_typ (Z3_Interface.Sym ("Real", _)) = SOME @{typ real} + | z3_mk_builtin_typ (Z3_Interface.Sym ("real", _)) = SOME @{typ real} + (*FIXME: delete*) + | z3_mk_builtin_typ _ = NONE + + fun z3_mk_builtin_num _ i T = + if T = @{typ real} then SOME (Numeral.mk_cnumber @{ctyp real} i) + else NONE + + fun mk_nary _ cu [] = cu + | mk_nary ct _ cts = uncurry (fold_rev (Thm.mk_binop ct)) (split_last cts) + + val mk_uminus = Thm.apply (Thm.cterm_of @{context} @{const uminus (real)}) + val add = Thm.cterm_of @{context} @{const plus (real)} + val real0 = Numeral.mk_cnumber @{ctyp real} 0 + val mk_sub = Thm.mk_binop (Thm.cterm_of @{context} @{const minus (real)}) + val mk_mul = Thm.mk_binop (Thm.cterm_of @{context} @{const times (real)}) + val mk_div = Thm.mk_binop (Thm.cterm_of @{context} @{const divide (real)}) + val mk_lt = Thm.mk_binop (Thm.cterm_of @{context} @{const less (real)}) + val mk_le = Thm.mk_binop (Thm.cterm_of @{context} @{const less_eq (real)}) + + fun z3_mk_builtin_fun (Z3_Interface.Sym ("-", _)) [ct] = SOME (mk_uminus ct) + | z3_mk_builtin_fun (Z3_Interface.Sym ("+", _)) cts = SOME (mk_nary add real0 cts) + | z3_mk_builtin_fun (Z3_Interface.Sym ("-", _)) [ct, cu] = SOME (mk_sub ct cu) + | z3_mk_builtin_fun (Z3_Interface.Sym ("*", _)) [ct, cu] = SOME (mk_mul ct cu) + | z3_mk_builtin_fun (Z3_Interface.Sym ("/", _)) [ct, cu] = SOME (mk_div ct cu) + | z3_mk_builtin_fun (Z3_Interface.Sym ("<", _)) [ct, cu] = SOME (mk_lt ct cu) + | z3_mk_builtin_fun (Z3_Interface.Sym ("<=", _)) [ct, cu] = SOME (mk_le ct cu) + | z3_mk_builtin_fun (Z3_Interface.Sym (">", _)) [ct, cu] = SOME (mk_lt cu ct) + | z3_mk_builtin_fun (Z3_Interface.Sym (">=", _)) [ct, cu] = SOME (mk_le cu ct) + | z3_mk_builtin_fun _ _ = NONE +in + +val z3_mk_builtins = { + mk_builtin_typ = z3_mk_builtin_typ, + mk_builtin_num = z3_mk_builtin_num, + mk_builtin_fun = (fn _ => fn sym => fn cts => + (case try (Thm.typ_of_cterm o hd) cts of + SOME @{typ real} => z3_mk_builtin_fun sym cts + | _ => NONE)) } + +end + + +(* Z3 proof replay *) + +val real_linarith_proc = + Simplifier.make_simproc @{context} "fast_real_arith" + {lhss = [@{term "(m::real) < n"}, @{term "(m::real) \ n"}, @{term "(m::real) = n"}], + proc = K Lin_Arith.simproc} + + +(* setup *) + +val _ = 
Theory.setup (Context.theory_map ( + SMTLIB_Interface.add_logic (10, smtlib_logic) #> + setup_builtins #> + Z3_Interface.add_mk_builtins z3_mk_builtins #> + Z3_Replay_Util.add_simproc real_linarith_proc)) + +end; diff --git a/src/main/SMT/smt_solver.ML b/src/main/SMT/smt_solver.ML new file mode 100644 index 0000000..7ff85dd --- /dev/null +++ b/src/main/SMT/smt_solver.ML @@ -0,0 +1,307 @@ +(* Title: HOL/Tools/SMT/smt_solver.ML + Author: Sascha Boehme, TU Muenchen + +SMT solvers registry and SMT tactic. +*) + +signature SMT_SOLVER = +sig + (*configuration*) + datatype outcome = Unsat | Sat | Unknown + + type parsed_proof = + {outcome: SMT_Failure.failure option, + fact_ids: (int * ((string * ATP_Problem_Generate.stature) * thm)) list option, + atp_proof: unit -> (term, string) ATP_Proof.atp_step list} + + type solver_config = + {name: string, + class: Proof.context -> SMT_Util.class, + avail: unit -> bool, + command: unit -> string list, + options: Proof.context -> string list, + smt_options: (string * string) list, + default_max_relevant: int, + outcome: string -> string list -> outcome * string list, + parse_proof: (Proof.context -> SMT_Translate.replay_data -> + ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> + parsed_proof) option, + replay: (Proof.context -> SMT_Translate.replay_data -> string list -> thm) option} + + (*registry*) + val add_solver: solver_config -> theory -> theory + val default_max_relevant: Proof.context -> string -> int + + (*filter*) + val smt_filter: Proof.context -> thm -> ((string * ATP_Problem_Generate.stature) * thm) list -> + int -> Time.time -> parsed_proof + + (*tactic*) + val smt_tac: Proof.context -> thm list -> int -> tactic + val smt_tac': Proof.context -> thm list -> int -> tactic +end; + +structure SMT_Solver: SMT_SOLVER = +struct + +(* interface to external solvers *) + +local + +fun make_command command options problem_path proof_path = + "(exec 2>&1;" :: map Bash.string (command () @ options) @ + [File.bash_path problem_path, ")", ">", File.bash_path proof_path] + |> space_implode " " + +fun with_trace ctxt msg f x = + let val _ = SMT_Config.trace_msg ctxt (fn () => msg) () + in f x end + +fun run ctxt name mk_cmd input = + (case SMT_Config.certificates_of ctxt of + NONE => + if not (SMT_Config.is_available ctxt name) then + error ("The SMT solver " ^ quote name ^ " is not installed") + else if Config.get ctxt SMT_Config.debug_files = "" then + with_trace ctxt ("Invoking SMT solver " ^ quote name ^ " ...") (Cache_IO.run mk_cmd) input + else + let + val base_path = Path.explode (Config.get ctxt SMT_Config.debug_files) + val in_path = Path.ext "smt_in" base_path + val out_path = Path.ext "smt_out" base_path + in Cache_IO.raw_run mk_cmd input in_path out_path end + | SOME certs => + (case Cache_IO.lookup certs input of + (NONE, key) => + if Config.get ctxt SMT_Config.read_only_certificates then + error ("Bad certificate cache: missing certificate") + else + Cache_IO.run_and_cache certs key mk_cmd input + | (SOME output, _) => + with_trace ctxt ("Using cached certificate from " ^ + Path.print (Cache_IO.cache_path_of certs) ^ " ...") I output)) + +(* Z3 returns 1 if "get-proof" or "get-model" fails. veriT returns 255. 
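Such exits are accepted as normal return codes below; any other return code is reported as abnormal termination.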
*) +val normal_return_codes = [0, 1, 255] + +fun run_solver ctxt name mk_cmd input = + let + fun pretty tag lines = Pretty.string_of (Pretty.big_list tag (map Pretty.str lines)) + + val _ = SMT_Config.trace_msg ctxt (pretty "Problem:" o split_lines) input + + val {redirected_output = res, output = err, return_code} = + SMT_Config.with_timeout ctxt (run ctxt name mk_cmd) input + val _ = SMT_Config.trace_msg ctxt (pretty "Solver:") err + + val output = fst (take_suffix (equal "") res) + val _ = SMT_Config.trace_msg ctxt (pretty "Result:") output + + val _ = member (op =) normal_return_codes return_code orelse + raise SMT_Failure.SMT (SMT_Failure.Abnormal_Termination return_code) + in output end + +fun trace_assms ctxt = + SMT_Config.trace_msg ctxt (Pretty.string_of o + Pretty.big_list "Assertions:" o map (Thm.pretty_thm ctxt o snd)) + +fun trace_replay_data ({context = ctxt, typs, terms, ...} : SMT_Translate.replay_data) = + let + fun pretty_eq n p = Pretty.block [Pretty.str n, Pretty.str " = ", p] + fun p_typ (n, T) = pretty_eq n (Syntax.pretty_typ ctxt T) + fun p_term (n, t) = pretty_eq n (Syntax.pretty_term ctxt t) + in + SMT_Config.trace_msg ctxt (fn () => + Pretty.string_of (Pretty.big_list "Names:" [ + Pretty.big_list "sorts:" (map p_typ (Symtab.dest typs)), + Pretty.big_list "functions:" (map p_term (Symtab.dest terms))])) () + end + +in + +fun invoke name command smt_options ithms ctxt = + let + val options = SMT_Config.solver_options_of ctxt + val comments = [space_implode " " options] + + val (str, replay_data as {context = ctxt', ...}) = + ithms + |> tap (trace_assms ctxt) + |> SMT_Translate.translate ctxt smt_options comments + ||> tap trace_replay_data + in (run_solver ctxt' name (make_command command options) str, replay_data) end + +end + + +(* configuration *) + +datatype outcome = Unsat | Sat | Unknown + +type parsed_proof = + {outcome: SMT_Failure.failure option, + fact_ids: (int * ((string * ATP_Problem_Generate.stature) * thm)) list option, + atp_proof: unit -> (term, string) ATP_Proof.atp_step list} + +type solver_config = + {name: string, + class: Proof.context -> SMT_Util.class, + avail: unit -> bool, + command: unit -> string list, + options: Proof.context -> string list, + smt_options: (string * string) list, + default_max_relevant: int, + outcome: string -> string list -> outcome * string list, + parse_proof: (Proof.context -> SMT_Translate.replay_data -> + ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> + parsed_proof) option, + replay: (Proof.context -> SMT_Translate.replay_data -> string list -> thm) option} + + +(* check well-sortedness *) + +val has_topsort = Term.exists_type (Term.exists_subtype (fn + TFree (_, []) => true + | TVar (_, []) => true + | _ => false)) + +(* top sorts cause problems with atomization *) +fun check_topsort ctxt thm = + if has_topsort (Thm.prop_of thm) then (SMT_Normalize.drop_fact_warning ctxt thm; TrueI) else thm + + +(* registry *) + +type solver_info = { + command: unit -> string list, + smt_options: (string * string) list, + default_max_relevant: int, + parse_proof: Proof.context -> SMT_Translate.replay_data -> + ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> + parsed_proof, + replay: Proof.context -> SMT_Translate.replay_data -> string list -> thm} + +structure Solvers = Generic_Data +( + type T = solver_info Symtab.table + val empty = Symtab.empty + val extend = I + fun merge data = Symtab.merge (K true) data +) + +local + fun parse_proof 
outcome parse_proof0 outer_ctxt replay_data xfacts prems concl output = + (case outcome output of + (Unsat, lines) => + (case parse_proof0 of + SOME pp => pp outer_ctxt replay_data xfacts prems concl lines + | NONE => {outcome = NONE, fact_ids = NONE, atp_proof = K []}) + | (result, _) => raise SMT_Failure.SMT (SMT_Failure.Counterexample (result = Sat))) + + fun replay outcome replay0 oracle outer_ctxt + (replay_data as {context = ctxt, ...} : SMT_Translate.replay_data) output = + (case outcome output of + (Unsat, lines) => + if not (Config.get ctxt SMT_Config.oracle) andalso is_some replay0 + then the replay0 outer_ctxt replay_data lines + else oracle () + | (result, _) => raise SMT_Failure.SMT (SMT_Failure.Counterexample (result = Sat))) + + val cfalse = Thm.cterm_of @{context} @{prop False} +in + +fun add_solver ({name, class, avail, command, options, smt_options, default_max_relevant, outcome, + parse_proof = parse_proof0, replay = replay0} : solver_config) = + let + fun solver oracle = { + command = command, + smt_options = smt_options, + default_max_relevant = default_max_relevant, + parse_proof = parse_proof (outcome name) parse_proof0, + replay = replay (outcome name) replay0 oracle} + + val info = {name = name, class = class, avail = avail, options = options} + in + Thm.add_oracle (Binding.name name, K cfalse) #-> (fn (_, oracle) => + Context.theory_map (Solvers.map (Symtab.update_new (name, solver oracle)))) #> + Context.theory_map (SMT_Config.add_solver info) + end + +end + +fun get_info ctxt name = the (Symtab.lookup (Solvers.get (Context.Proof ctxt)) name) + +fun name_and_info_of ctxt = + let val name = SMT_Config.solver_of ctxt + in (name, get_info ctxt name) end + +val default_max_relevant = #default_max_relevant oo get_info + +fun apply_solver_and_replay ctxt thms0 = + let + val thms = map (check_topsort ctxt) thms0 + val (name, {command, smt_options, replay, ...}) = name_and_info_of ctxt + val (output, replay_data) = + invoke name command smt_options (SMT_Normalize.normalize ctxt thms) ctxt + in replay ctxt replay_data output end + + +(* filter *) + +fun smt_filter ctxt0 goal xfacts i time_limit = + let + val ctxt = ctxt0 |> Config.put SMT_Config.timeout (Time.toReal time_limit) + + val ({context = ctxt, prems, concl, ...}, _) = Subgoal.focus ctxt i NONE goal + fun negate ct = Thm.dest_comb ct ||> Thm.apply @{cterm Not} |-> Thm.apply + val cprop = + (case try negate (Thm.rhs_of (SMT_Normalize.atomize_conv ctxt concl)) of + SOME ct => ct + | NONE => raise SMT_Failure.SMT (SMT_Failure.Other_Failure "cannot atomize goal")) + + val conjecture = Thm.assume cprop + val facts = map snd xfacts + val thms = conjecture :: prems @ facts + val thms' = map (check_topsort ctxt) thms + + val (name, {command, smt_options, parse_proof, ...}) = name_and_info_of ctxt + val (output, replay_data) = + invoke name command smt_options (SMT_Normalize.normalize ctxt thms') ctxt + in + parse_proof ctxt replay_data xfacts (map Thm.prop_of prems) (Thm.term_of concl) output + end + handle SMT_Failure.SMT fail => {outcome = SOME fail, fact_ids = NONE, atp_proof = K []} + + +(* SMT tactic *) + +local + fun str_of ctxt fail = + "Solver " ^ SMT_Config.solver_of ctxt ^ ": " ^ SMT_Failure.string_of_failure fail + + fun safe_solve ctxt facts = SOME (apply_solver_and_replay ctxt facts) + handle + SMT_Failure.SMT (fail as SMT_Failure.Counterexample _) => + (SMT_Config.verbose_msg ctxt (str_of ctxt) fail; NONE) + | SMT_Failure.SMT (fail as SMT_Failure.Time_Out) => + error ("SMT: Solver " ^ quote 
(SMT_Config.solver_of ctxt) ^ ": " ^ + SMT_Failure.string_of_failure fail ^ " (setting the " ^ + "configuration option " ^ quote (Config.name_of SMT_Config.timeout) ^ " might help)") + | SMT_Failure.SMT fail => error (str_of ctxt fail) + + fun resolve ctxt (SOME thm) = resolve_tac ctxt [thm] 1 + | resolve _ NONE = no_tac + + fun tac prove ctxt rules = + CONVERSION (SMT_Normalize.atomize_conv ctxt) + THEN' resolve_tac ctxt @{thms ccontr} + THEN' SUBPROOF (fn {context = ctxt', prems, ...} => + resolve ctxt' (prove ctxt' (rules @ prems))) ctxt +in + +val smt_tac = tac safe_solve +val smt_tac' = tac (SOME oo apply_solver_and_replay) + +end + +end; diff --git a/src/main/SMT/smt_systems.ML b/src/main/SMT/smt_systems.ML new file mode 100644 index 0000000..b7581cb --- /dev/null +++ b/src/main/SMT/smt_systems.ML @@ -0,0 +1,154 @@ +(* Title: HOL/Tools/SMT/smt_systems.ML + Author: Sascha Boehme, TU Muenchen + +Setup SMT solvers. +*) + +signature SMT_SYSTEMS = +sig + val cvc4_extensions: bool Config.T + val z3_extensions: bool Config.T +end; + +structure SMT_Systems: SMT_SYSTEMS = +struct + +(* helper functions *) + +fun make_avail name () = getenv (name ^ "_SOLVER") <> "" + +fun make_command name () = [getenv (name ^ "_SOLVER")] + +fun outcome_of unsat sat unknown solver_name line = + if String.isPrefix unsat line then SMT_Solver.Unsat + else if String.isPrefix sat line then SMT_Solver.Sat + else if String.isPrefix unknown line then SMT_Solver.Unknown + else raise SMT_Failure.SMT (SMT_Failure.Other_Failure ("Solver " ^ quote solver_name ^ + " failed -- enable tracing using the " ^ quote (Config.name_of SMT_Config.trace) ^ + " option for details")) + +fun is_blank_or_error_line "" = true + | is_blank_or_error_line s = String.isPrefix "(error " s + +fun on_first_line test_outcome solver_name lines = + let + val split_first = (fn [] => ("", []) | l :: ls => (l, ls)) + val (l, ls) = split_first (snd (take_prefix is_blank_or_error_line lines)) + in (test_outcome solver_name l, ls) end + +fun on_first_non_unsupported_line test_outcome solver_name lines = + on_first_line test_outcome solver_name (filter (curry (op <>) "unsupported") lines) + +(* CVC3 *) + +local + fun cvc3_options ctxt = [ + "-seed", string_of_int (Config.get ctxt SMT_Config.random_seed), + "-lang", "smt2", + "-timeout", string_of_int (Real.ceil (Config.get ctxt SMT_Config.timeout))] +in + +val cvc3: SMT_Solver.solver_config = { + name = "cvc3", + class = K SMTLIB_Interface.smtlibC, + avail = make_avail "CVC3", + command = make_command "CVC3", + options = cvc3_options, + smt_options = [], + default_max_relevant = 400 (* FUDGE *), + outcome = on_first_line (outcome_of "unsat" "sat" "unknown"), + parse_proof = NONE, + replay = NONE } + +end + +(* CVC4 *) + +val cvc4_extensions = Attrib.setup_config_bool @{binding cvc4_extensions} (K false) + +local + fun cvc4_options ctxt = [ + "--no-statistics", + "--random-seed=" ^ string_of_int (Config.get ctxt SMT_Config.random_seed), + "--lang=smt2", + "--continued-execution", + "--tlimit", string_of_int (Real.ceil (1000.0 * Config.get ctxt SMT_Config.timeout))] + + fun select_class ctxt = + if Config.get ctxt cvc4_extensions then CVC4_Interface.smtlib_cvc4C + else SMTLIB_Interface.smtlibC +in + +val cvc4: SMT_Solver.solver_config = { + name = "cvc4", + class = select_class, + avail = make_avail "CVC4", + command = make_command "CVC4", + options = cvc4_options, + smt_options = [(":produce-unsat-cores", "true")], + default_max_relevant = 400 (* FUDGE *), + outcome = on_first_line (outcome_of "unsat" "sat" 
"unknown"), + parse_proof = SOME (K CVC4_Proof_Parse.parse_proof), + replay = NONE } + +end + +(* veriT *) + +val veriT: SMT_Solver.solver_config = { + name = "verit", + class = K SMTLIB_Interface.smtlibC, + avail = make_avail "VERIT", + command = make_command "VERIT", + options = (fn ctxt => [ + "--proof-version=1", + "--proof-prune", + "--proof-merge", + "--disable-print-success", + "--disable-banner", + "--max-time=" ^ string_of_int (Real.ceil (Config.get ctxt SMT_Config.timeout))]), + smt_options = [(":produce-proofs", "true")], + default_max_relevant = 200 (* FUDGE *), + outcome = on_first_non_unsupported_line (outcome_of "unsat" "sat" "unknown"), + parse_proof = SOME (K VeriT_Proof_Parse.parse_proof), + replay = NONE } + +(* Z3 *) + +val z3_extensions = Attrib.setup_config_bool @{binding z3_extensions} (K false) + +local + fun z3_options ctxt = + ["smt.random_seed=" ^ string_of_int (Config.get ctxt SMT_Config.random_seed), + "smt.refine_inj_axioms=false", + "-T:" ^ string_of_int (Real.ceil (Config.get ctxt SMT_Config.timeout)), + "-smt2"] + + fun select_class ctxt = + if Config.get ctxt z3_extensions then Z3_Interface.smtlib_z3C else SMTLIB_Interface.smtlibC +in + +val z3: SMT_Solver.solver_config = { + name = "z3", + class = select_class, + avail = make_avail "Z3", + command = make_command "Z3", + options = z3_options, + smt_options = [(":produce-proofs", "true")], + default_max_relevant = 350 (* FUDGE *), + outcome = on_first_line (outcome_of "unsat" "sat" "unknown"), + parse_proof = SOME Z3_Replay.parse_proof, + replay = SOME Z3_Replay.replay } + +end + + +(* overall setup *) + +val _ = Theory.setup ( + SMT_Solver.add_solver cvc3 #> + SMT_Solver.add_solver cvc4 #> + SMT_Solver.add_solver veriT #> + SMT_Solver.add_solver z3) + +end; diff --git a/src/main/SMT/smt_translate.ML b/src/main/SMT/smt_translate.ML new file mode 100644 index 0000000..9e9bb6a --- /dev/null +++ b/src/main/SMT/smt_translate.ML @@ -0,0 +1,527 @@ +(* Title: HOL/Tools/SMT/smt_translate.ML + Author: Sascha Boehme, TU Muenchen + +Translate theorems into an SMT intermediate format and serialize them. 
+*) + +signature SMT_TRANSLATE = +sig + (*intermediate term structure*) + datatype squant = SForall | SExists + datatype 'a spattern = SPat of 'a list | SNoPat of 'a list + datatype sterm = + SVar of int | + SApp of string * sterm list | + SLet of string * sterm * sterm | + SQua of squant * string list * sterm spattern list * sterm + + (*translation configuration*) + type sign = { + logic: string, + sorts: string list, + dtyps: (BNF_Util.fp_kind * (string * (string * (string * string) list) list)) list, + funcs: (string * (string list * string)) list } + type config = { + logic: term list -> string, + fp_kinds: BNF_Util.fp_kind list, + serialize: (string * string) list -> string list -> sign -> sterm list -> string } + type replay_data = { + context: Proof.context, + typs: typ Symtab.table, + terms: term Symtab.table, + ll_defs: term list, + rewrite_rules: thm list, + assms: (int * thm) list } + + (*translation*) + val add_config: SMT_Util.class * (Proof.context -> config) -> Context.generic -> Context.generic + val translate: Proof.context -> (string * string) list -> string list -> (int * thm) list -> + string * replay_data +end; + +structure SMT_Translate: SMT_TRANSLATE = +struct + + +(* intermediate term structure *) + +datatype squant = SForall | SExists + +datatype 'a spattern = SPat of 'a list | SNoPat of 'a list + +datatype sterm = + SVar of int | + SApp of string * sterm list | + SLet of string * sterm * sterm | + SQua of squant * string list * sterm spattern list * sterm + + +(* translation configuration *) + +type sign = { + logic: string, + sorts: string list, + dtyps: (BNF_Util.fp_kind * (string * (string * (string * string) list) list)) list, + funcs: (string * (string list * string)) list } + +type config = { + logic: term list -> string, + fp_kinds: BNF_Util.fp_kind list, + serialize: (string * string) list -> string list -> sign -> sterm list -> string } + +type replay_data = { + context: Proof.context, + typs: typ Symtab.table, + terms: term Symtab.table, + ll_defs: term list, + rewrite_rules: thm list, + assms: (int * thm) list } + + +(* translation context *) + +fun add_components_of_typ (Type (s, Ts)) = + cons (Long_Name.base_name s) #> fold_rev add_components_of_typ Ts + | add_components_of_typ (TFree (s, _)) = cons (perhaps (try (unprefix "'")) s) + | add_components_of_typ _ = I; + +fun suggested_name_of_typ T = space_implode "_" (add_components_of_typ T []); + +fun suggested_name_of_term (Const (s, _)) = Long_Name.base_name s + | suggested_name_of_term (Free (s, _)) = s + | suggested_name_of_term _ = Name.uu + +val empty_tr_context = (Name.context, Typtab.empty, Termtab.empty) +val safe_suffix = "$" + +fun add_typ T proper (cx as (names, typs, terms)) = + (case Typtab.lookup typs T of + SOME (name, _) => (name, cx) + | NONE => + let + val sugg = Name.desymbolize (SOME true) (suggested_name_of_typ T) ^ safe_suffix + val (name, names') = Name.variant sugg names + val typs' = Typtab.update (T, (name, proper)) typs + in (name, (names', typs', terms)) end) + +fun add_fun t sort (cx as (names, typs, terms)) = + (case Termtab.lookup terms t of + SOME (name, _) => (name, cx) + | NONE => + let + val sugg = Name.desymbolize (SOME false) (suggested_name_of_term t) ^ safe_suffix + val (name, names') = Name.variant sugg names + val terms' = Termtab.update (t, (name, sort)) terms + in (name, (names', typs, terms')) end) + +fun sign_of logic dtyps (_, typs, terms) = { + logic = logic, + sorts = Typtab.fold (fn (_, (n, true)) => cons n | _ => I) typs [], + dtyps = dtyps, + funcs = 
Termtab.fold (fn (_, (n, SOME ss)) => cons (n,ss) | _ => I) terms []} + +fun replay_data_of ctxt ll_defs rules assms (_, typs, terms) = + let + fun add_typ (T, (n, _)) = Symtab.update (n, T) + val typs' = Typtab.fold add_typ typs Symtab.empty + + fun add_fun (t, (n, _)) = Symtab.update (n, t) + val terms' = Termtab.fold add_fun terms Symtab.empty + in + {context = ctxt, typs = typs', terms = terms', ll_defs = ll_defs, rewrite_rules = rules, + assms = assms} + end + + +(* preprocessing *) + +(** (co)datatype declarations **) + +fun collect_co_datatypes fp_kinds (tr_context, ctxt) ts = + let + val (fp_decls, ctxt') = + ([], ctxt) + |> fold (Term.fold_types (SMT_Datatypes.add_decls fp_kinds)) ts + |>> flat + + fun is_decl_typ T = exists (equal T o fst o snd) fp_decls + + fun add_typ' T proper = + (case SMT_Builtin.dest_builtin_typ ctxt' T of + SOME n => pair n + | NONE => add_typ T proper) + + fun tr_select sel = + let val T = Term.range_type (Term.fastype_of sel) + in add_fun sel NONE ##>> add_typ' T (not (is_decl_typ T)) end + fun tr_constr (constr, selects) = + add_fun constr NONE ##>> fold_map tr_select selects + fun tr_typ (fp, (T, cases)) = + add_typ' T false ##>> fold_map tr_constr cases #>> pair fp + + val (fp_decls', tr_context') = fold_map tr_typ fp_decls tr_context + + fun add (constr, selects) = + Termtab.update (constr, length selects) #> + fold (Termtab.update o rpair 1) selects + + val funcs = fold (fold add o snd o snd) fp_decls Termtab.empty + + in ((funcs, fp_decls', tr_context', ctxt'), ts) end + (* FIXME: also return necessary (co)datatype theorems *) + + +(** eta-expand quantifiers, let expressions and built-ins *) + +local + fun eta f T t = Abs (Name.uu, T, f (Term.incr_boundvars 1 t $ Bound 0)) + + fun exp f T = eta f (Term.domain_type (Term.domain_type T)) + + fun exp2 T q = + let val U = Term.domain_type T + in Abs (Name.uu, U, q $ eta I (Term.domain_type U) (Bound 0)) end + + fun expf k i T t = + let val Ts = drop i (fst (SMT_Util.dest_funT k T)) + in + Term.incr_boundvars (length Ts) t + |> fold_rev (fn i => fn u => u $ Bound i) (0 upto length Ts - 1) + |> fold_rev (fn T => fn u => Abs (Name.uu, T, u)) Ts + end +in + +fun eta_expand ctxt funcs = + let + fun exp_func t T ts = + (case Termtab.lookup funcs t of + SOME k => Term.list_comb (t, ts) |> k <> length ts ? 
expf k (length ts) T + | NONE => Term.list_comb (t, ts)) + + fun expand ((q as Const (@{const_name All}, _)) $ Abs a) = q $ abs_expand a + | expand ((q as Const (@{const_name All}, T)) $ t) = q $ exp expand T t + | expand (q as Const (@{const_name All}, T)) = exp2 T q + | expand ((q as Const (@{const_name Ex}, _)) $ Abs a) = q $ abs_expand a + | expand ((q as Const (@{const_name Ex}, T)) $ t) = q $ exp expand T t + | expand (q as Const (@{const_name Ex}, T)) = exp2 T q + | expand (Const (@{const_name Let}, _) $ t $ u) = expand (Term.betapply (u, t)) + | expand (Const (@{const_name Let}, T) $ t) = + let val U = Term.domain_type (Term.range_type T) + in Abs (Name.uu, U, Bound 0 $ Term.incr_boundvars 1 t) end + | expand (Const (@{const_name Let}, T)) = + let val U = Term.domain_type (Term.range_type T) + in Abs (Name.uu, Term.domain_type T, Abs (Name.uu, U, Bound 0 $ Bound 1)) end + | expand t = + (case Term.strip_comb t of + (u as Const (c as (_, T)), ts) => + (case SMT_Builtin.dest_builtin ctxt c ts of + SOME (_, k, us, mk) => + if k = length us then mk (map expand us) + else if k < length us then chop k (map expand us) |>> mk |> Term.list_comb + else expf k (length ts) T (mk (map expand us)) + | NONE => exp_func u T (map expand ts)) + | (u as Free (_, T), ts) => exp_func u T (map expand ts) + | (Abs a, ts) => Term.list_comb (abs_expand a, map expand ts) + | (u, ts) => Term.list_comb (u, map expand ts)) + + and abs_expand (n, T, t) = Abs (n, T, expand t) + + in map expand end + +end + + +(** introduce explicit applications **) + +local + (* + Make application explicit for functions with varying number of arguments. + *) + + fun add t i = apfst (Termtab.map_default (t, i) (Integer.min i)) + fun add_type T = apsnd (Typtab.update (T, ())) + + fun min_arities t = + (case Term.strip_comb t of + (u as Const _, ts) => add u (length ts) #> fold min_arities ts + | (u as Free _, ts) => add u (length ts) #> fold min_arities ts + | (Abs (_, T, u), ts) => (can dest_funT T ? 
add_type T) #> min_arities u #> fold min_arities ts + | (_, ts) => fold min_arities ts) + + fun minimize types t i = + let + fun find_min j [] _ = j + | find_min j (U :: Us) T = + if Typtab.defined types T then j else find_min (j + 1) Us (U --> T) + + val (Ts, T) = Term.strip_type (Term.type_of t) + in find_min 0 (take i (rev Ts)) T end + + fun app u (t, T) = (Const (@{const_name fun_app}, T --> T) $ t $ u, Term.range_type T) + + fun apply i t T ts = + let + val (ts1, ts2) = chop i ts + val (_, U) = SMT_Util.dest_funT i T + in fst (fold app ts2 (Term.list_comb (t, ts1), U)) end +in + +fun intro_explicit_application ctxt funcs ts = + let + val (arities, types) = fold min_arities ts (Termtab.empty, Typtab.empty) + val arities' = Termtab.map (minimize types) arities (* FIXME: highly suspicious *) + + fun app_func t T ts = + if is_some (Termtab.lookup funcs t) then Term.list_comb (t, ts) + else apply (the (Termtab.lookup arities' t)) t T ts + + fun in_list T f t = SMT_Util.mk_symb_list T (map f (SMT_Util.dest_symb_list t)) + + fun traverse Ts t = + (case Term.strip_comb t of + (q as Const (@{const_name All}, _), [Abs (x, T, u)]) => + q $ Abs (x, T, in_trigger (T :: Ts) u) + | (q as Const (@{const_name Ex}, _), [Abs (x, T, u)]) => + q $ Abs (x, T, in_trigger (T :: Ts) u) + | (q as Const (@{const_name Let}, _), [u1, u2 as Abs _]) => + q $ traverse Ts u1 $ traverse Ts u2 + | (u as Const (c as (_, T)), ts) => + (case SMT_Builtin.dest_builtin ctxt c ts of + SOME (_, k, us, mk) => + let + val (ts1, ts2) = chop k (map (traverse Ts) us) + val U = Term.strip_type T |>> snd o chop k |> (op --->) + in apply 0 (mk ts1) U ts2 end + | NONE => app_func u T (map (traverse Ts) ts)) + | (u as Free (_, T), ts) => app_func u T (map (traverse Ts) ts) + | (u as Bound i, ts) => apply 0 u (nth Ts i) (map (traverse Ts) ts) + | (Abs (n, T, u), ts) => traverses Ts (Abs (n, T, traverse (T::Ts) u)) ts + | (u, ts) => traverses Ts u ts) + and in_trigger Ts ((c as @{const trigger}) $ p $ t) = c $ in_pats Ts p $ traverse Ts t + | in_trigger Ts t = traverse Ts t + and in_pats Ts ps = + in_list @{typ "pattern symb_list"} (in_list @{typ pattern} (in_pat Ts)) ps + and in_pat Ts ((p as Const (@{const_name pat}, _)) $ t) = p $ traverse Ts t + | in_pat Ts ((p as Const (@{const_name nopat}, _)) $ t) = p $ traverse Ts t + | in_pat _ t = raise TERM ("bad pattern", [t]) + and traverses Ts t ts = Term.list_comb (t, map (traverse Ts) ts) + in map (traverse []) ts end + +val fun_app_eq = mk_meta_eq @{thm fun_app_def} + +end + + +(** map HOL formulas to FOL formulas (i.e., separate formulas froms terms) **) + +local + val is_quant = member (op =) [@{const_name All}, @{const_name Ex}] + + val fol_rules = [ + Let_def, + @{lemma "P = True == P" by (rule eq_reflection) simp}] + + exception BAD_PATTERN of unit + + fun is_builtin_conn_or_pred ctxt c ts = + is_some (SMT_Builtin.dest_builtin_conn ctxt c ts) orelse + is_some (SMT_Builtin.dest_builtin_pred ctxt c ts) +in + +fun folify ctxt = + let + fun in_list T f t = SMT_Util.mk_symb_list T (map_filter f (SMT_Util.dest_symb_list t)) + + fun in_term pat t = + (case Term.strip_comb t of + (@{const True}, []) => t + | (@{const False}, []) => t + | (u as Const (@{const_name If}, _), [t1, t2, t3]) => + if pat then raise BAD_PATTERN () else u $ in_form t1 $ in_term pat t2 $ in_term pat t3 + | (Const (c as (n, _)), ts) => + if is_builtin_conn_or_pred ctxt c ts orelse is_quant n then + if pat then raise BAD_PATTERN () else in_form t + else + Term.list_comb (Const c, map (in_term pat) ts) + | (Free c, ts) 
=> Term.list_comb (Free c, map (in_term pat) ts) + | _ => t) + + and in_pat ((p as Const (@{const_name pat}, _)) $ t) = + p $ in_term true t + | in_pat ((p as Const (@{const_name nopat}, _)) $ t) = + p $ in_term true t + | in_pat t = raise TERM ("bad pattern", [t]) + + and in_pats ps = + in_list @{typ "pattern symb_list"} (SOME o in_list @{typ pattern} (try in_pat)) ps + + and in_trigger ((c as @{const trigger}) $ p $ t) = c $ in_pats p $ in_form t + | in_trigger t = in_form t + + and in_form t = + (case Term.strip_comb t of + (q as Const (qn, _), [Abs (n, T, u)]) => + if is_quant qn then q $ Abs (n, T, in_trigger u) + else in_term false t + | (Const c, ts) => + (case SMT_Builtin.dest_builtin_conn ctxt c ts of + SOME (_, _, us, mk) => mk (map in_form us) + | NONE => + (case SMT_Builtin.dest_builtin_pred ctxt c ts of + SOME (_, _, us, mk) => mk (map (in_term false) us) + | NONE => in_term false t)) + | _ => in_term false t) + in + map in_form #> + pair (fol_rules, I) + end + +end + + +(* translation into intermediate format *) + +(** utility functions **) + +val quantifier = (fn + @{const_name All} => SOME SForall + | @{const_name Ex} => SOME SExists + | _ => NONE) + +fun group_quant qname Ts (t as Const (q, _) $ Abs (_, T, u)) = + if q = qname then group_quant qname (T :: Ts) u else (Ts, t) + | group_quant _ Ts t = (Ts, t) + +fun dest_pat (Const (@{const_name pat}, _) $ t) = (t, true) + | dest_pat (Const (@{const_name nopat}, _) $ t) = (t, false) + | dest_pat t = raise TERM ("bad pattern", [t]) + +fun dest_pats [] = I + | dest_pats ts = + (case map dest_pat ts |> split_list ||> distinct (op =) of + (ps, [true]) => cons (SPat ps) + | (ps, [false]) => cons (SNoPat ps) + | _ => raise TERM ("bad multi-pattern", ts)) + +fun dest_trigger (@{const trigger} $ tl $ t) = + (rev (fold (dest_pats o SMT_Util.dest_symb_list) (SMT_Util.dest_symb_list tl) []), t) + | dest_trigger t = ([], t) + +fun dest_quant qn T t = quantifier qn |> Option.map (fn q => + let + val (Ts, u) = group_quant qn [T] t + val (ps, p) = dest_trigger u + in (q, rev Ts, ps, p) end) + +fun fold_map_pat f (SPat ts) = fold_map f ts #>> SPat + | fold_map_pat f (SNoPat ts) = fold_map f ts #>> SNoPat + + +(** translation from Isabelle terms into SMT intermediate terms **) + +fun intermediate logic dtyps builtin ctxt ts trx = + let + fun transT (T as TFree _) = add_typ T true + | transT (T as TVar _) = (fn _ => raise TYPE ("bad SMT type", [T], [])) + | transT (T as Type _) = + (case SMT_Builtin.dest_builtin_typ ctxt T of + SOME n => pair n + | NONE => add_typ T true) + + fun app n ts = SApp (n, ts) + + fun trans t = + (case Term.strip_comb t of + (Const (qn, _), [Abs (_, T, t1)]) => + (case dest_quant qn T t1 of + SOME (q, Ts, ps, b) => + fold_map transT Ts ##>> fold_map (fold_map_pat trans) ps ##>> + trans b #>> (fn ((Ts', ps'), b') => SQua (q, Ts', ps', b')) + | NONE => raise TERM ("unsupported quantifier", [t])) + | (Const (@{const_name Let}, _), [t1, Abs (_, T, t2)]) => + transT T ##>> trans t1 ##>> trans t2 #>> (fn ((U, u1), u2) => SLet (U, u1, u2)) + | (u as Const (c as (_, T)), ts) => + (case builtin ctxt c ts of + SOME (n, _, us, _) => fold_map trans us #>> app n + | NONE => transs u T ts) + | (u as Free (_, T), ts) => transs u T ts + | (Bound i, []) => pair (SVar i) + | _ => raise TERM ("bad SMT term", [t])) + + and transs t T ts = + let val (Us, U) = SMT_Util.dest_funT (length ts) T + in + fold_map transT Us ##>> transT U #-> (fn Up => + add_fun t (SOME Up) ##>> fold_map trans ts #>> SApp) + end + + val (us, trx') = fold_map 
trans ts trx + in ((sign_of (logic ts) dtyps trx', us), trx') end + + +(* translation *) + +structure Configs = Generic_Data +( + type T = (Proof.context -> config) SMT_Util.dict + val empty = [] + val extend = I + fun merge data = SMT_Util.dict_merge fst data +) + +fun add_config (cs, cfg) = Configs.map (SMT_Util.dict_update (cs, cfg)) + +fun get_config ctxt = + let val cs = SMT_Config.solver_class_of ctxt + in + (case SMT_Util.dict_get (Configs.get (Context.Proof ctxt)) cs of + SOME cfg => cfg ctxt + | NONE => error ("SMT: no translation configuration found " ^ + "for solver class " ^ quote (SMT_Util.string_of_class cs))) + end + +fun translate ctxt smt_options comments ithms = + let + val {logic, fp_kinds, serialize} = get_config ctxt + + fun no_dtyps (tr_context, ctxt) ts = + ((Termtab.empty, [], tr_context, ctxt), ts) + + val ts1 = map (Envir.beta_eta_contract o SMT_Util.prop_of o snd) ithms + + val ((funcs, dtyps, tr_context, ctxt1), ts2) = + ((empty_tr_context, ctxt), ts1) + |-> (if null fp_kinds then no_dtyps else collect_co_datatypes fp_kinds) + + fun is_binder (Const (@{const_name Let}, _) $ _) = true + | is_binder t = Lambda_Lifting.is_quantifier t + + fun mk_trigger ((q as Const (@{const_name All}, _)) $ Abs (n, T, t)) = + q $ Abs (n, T, mk_trigger t) + | mk_trigger (eq as (Const (@{const_name HOL.eq}, T) $ lhs $ _)) = + Term.domain_type T --> @{typ pattern} + |> (fn T => Const (@{const_name pat}, T) $ lhs) + |> SMT_Util.mk_symb_list @{typ pattern} o single + |> SMT_Util.mk_symb_list @{typ "pattern symb_list"} o single + |> (fn t => @{const trigger} $ t $ eq) + | mk_trigger t = t + + val (ctxt2, (ts3, ll_defs)) = + ts2 + |> eta_expand ctxt1 funcs + |> rpair ctxt1 + |-> Lambda_Lifting.lift_lambdas NONE is_binder + |-> (fn (ts', ll_defs) => fn ctxt' => + (ctxt', (intro_explicit_application ctxt' funcs (map mk_trigger ll_defs @ ts'), ll_defs))) + + val ((rewrite_rules, builtin), ts4) = folify ctxt2 ts3 + |>> apfst (cons fun_app_eq) +val _ = dtyps : (BNF_Util.fp_kind * (string * (string * (string * string) list) list)) list (*###*) + in + (ts4, tr_context) + |-> intermediate logic dtyps (builtin SMT_Builtin.dest_builtin) ctxt2 + |>> uncurry (serialize smt_options comments) + ||> replay_data_of ctxt2 ll_defs rewrite_rules ithms + end + +end; diff --git a/src/main/SMT/smt_util.ML b/src/main/SMT/smt_util.ML new file mode 100644 index 0000000..387c204 --- /dev/null +++ b/src/main/SMT/smt_util.ML @@ -0,0 +1,240 @@ +(* Title: HOL/Tools/SMT/smt_util.ML + Author: Sascha Boehme, TU Muenchen + +General utility functions. 
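+These comprise basic combinators, class dictionaries indexed by solver class,
+helpers for types, terms and symbolic lists, certified-term utilities, and a
+small collection of conversions.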
+*) + +signature SMT_UTIL = +sig + (*basic combinators*) + val repeat: ('a -> 'a option) -> 'a -> 'a + val repeat_yield: ('a -> 'b -> ('a * 'b) option) -> 'a -> 'b -> 'a * 'b + + (*class dictionaries*) + type class = string list + val basicC: class + val string_of_class: class -> string + type 'a dict = (class * 'a) Ord_List.T + val dict_map_default: class * 'a -> ('a -> 'a) -> 'a dict -> 'a dict + val dict_update: class * 'a -> 'a dict -> 'a dict + val dict_merge: ('a * 'a -> 'a) -> 'a dict * 'a dict -> 'a dict + val dict_lookup: 'a dict -> class -> 'a list + val dict_get: 'a dict -> class -> 'a option + + (*types*) + val dest_funT: int -> typ -> typ list * typ + + (*terms*) + val dest_conj: term -> term * term + val dest_disj: term -> term * term + val under_quant: (term -> 'a) -> term -> 'a + val is_number: term -> bool + + (*symbolic lists*) + val symb_nil_const: typ -> term + val symb_cons_const: typ -> term + val mk_symb_list: typ -> term list -> term + val dest_symb_list: term -> term list + + (*patterns and instantiations*) + val mk_const_pat: theory -> string -> (ctyp -> 'a) -> 'a * cterm + val destT1: ctyp -> ctyp + val destT2: ctyp -> ctyp + val instTs: ctyp list -> ctyp list * cterm -> cterm + val instT: ctyp -> ctyp * cterm -> cterm + val instT': cterm -> ctyp * cterm -> cterm + + (*certified terms*) + val dest_cabs: cterm -> Proof.context -> cterm * Proof.context + val dest_all_cabs: cterm -> Proof.context -> cterm * Proof.context + val dest_cbinder: cterm -> Proof.context -> cterm * Proof.context + val dest_all_cbinders: cterm -> Proof.context -> cterm * Proof.context + val mk_cprop: cterm -> cterm + val dest_cprop: cterm -> cterm + val mk_cequals: cterm -> cterm -> cterm + val term_of: cterm -> term + val prop_of: thm -> term + + (*conversions*) + val if_conv: (term -> bool) -> conv -> conv -> conv + val if_true_conv: (term -> bool) -> conv -> conv + val if_exists_conv: (term -> bool) -> conv -> conv + val binders_conv: (Proof.context -> conv) -> Proof.context -> conv + val under_quant_conv: (Proof.context * cterm list -> conv) -> + Proof.context -> conv + val prop_conv: conv -> conv +end; + +structure SMT_Util: SMT_UTIL = +struct + +(* basic combinators *) + +fun repeat f = + let fun rep x = (case f x of SOME y => rep y | NONE => x) + in rep end + +fun repeat_yield f = + let fun rep x y = (case f x y of SOME (x', y') => rep x' y' | NONE => (x, y)) + in rep end + + +(* class dictionaries *) + +type class = string list + +val basicC = [] + +fun string_of_class [] = "basic" + | string_of_class cs = "basic." ^ space_implode "." 
cs + +type 'a dict = (class * 'a) Ord_List.T + +fun class_ord ((cs1, _), (cs2, _)) = + rev_order (list_ord fast_string_ord (cs1, cs2)) + +fun dict_insert (cs, x) d = + if AList.defined (op =) d cs then d + else Ord_List.insert class_ord (cs, x) d + +fun dict_map_default (cs, x) f = + dict_insert (cs, x) #> AList.map_entry (op =) cs f + +fun dict_update (e as (_, x)) = dict_map_default e (K x) + +fun dict_merge val_merge = sort class_ord o AList.join (op =) (K val_merge) + +fun dict_lookup d cs = + let fun match (cs', x) = if is_prefix (op =) cs' cs then SOME x else NONE + in map_filter match d end + +fun dict_get d cs = + (case AList.lookup (op =) d cs of + NONE => (case cs of [] => NONE | _ => dict_get d (take (length cs - 1) cs)) + | SOME x => SOME x) + + +(* types *) + +val dest_funT = + let + fun dest Ts 0 T = (rev Ts, T) + | dest Ts i (Type ("fun", [T, U])) = dest (T::Ts) (i-1) U + | dest _ _ T = raise TYPE ("not a function type", [T], []) + in dest [] end + + +(* terms *) + +fun dest_conj (@{const HOL.conj} $ t $ u) = (t, u) + | dest_conj t = raise TERM ("not a conjunction", [t]) + +fun dest_disj (@{const HOL.disj} $ t $ u) = (t, u) + | dest_disj t = raise TERM ("not a disjunction", [t]) + +fun under_quant f t = + (case t of + Const (@{const_name All}, _) $ Abs (_, _, u) => under_quant f u + | Const (@{const_name Ex}, _) $ Abs (_, _, u) => under_quant f u + | _ => f t) + +val is_number = + let + fun is_num env (Const (@{const_name Let}, _) $ t $ Abs (_, _, u)) = is_num (t :: env) u + | is_num env (Bound i) = i < length env andalso is_num env (nth env i) + | is_num _ t = can HOLogic.dest_number t + in is_num [] end + + +(* symbolic lists *) + +fun symb_listT T = Type (@{type_name symb_list}, [T]) + +fun symb_nil_const T = Const (@{const_name Symb_Nil}, symb_listT T) + +fun symb_cons_const T = + let val listT = symb_listT T in Const (@{const_name Symb_Cons}, T --> listT --> listT) end + +fun mk_symb_list T ts = + fold_rev (fn t => fn u => symb_cons_const T $ t $ u) ts (symb_nil_const T) + +fun dest_symb_list (Const (@{const_name Symb_Nil}, _)) = [] + | dest_symb_list (Const (@{const_name Symb_Cons}, _) $ t $ u) = t :: dest_symb_list u + + +(* patterns and instantiations *) + +fun mk_const_pat thy name destT = + let val cpat = Thm.global_cterm_of thy (Const (name, Sign.the_const_type thy name)) + in (destT (Thm.ctyp_of_cterm cpat), cpat) end + +val destT1 = hd o Thm.dest_ctyp +val destT2 = hd o tl o Thm.dest_ctyp + +fun instTs cUs (cTs, ct) = Thm.instantiate_cterm (map (dest_TVar o Thm.typ_of) cTs ~~ cUs, []) ct +fun instT cU (cT, ct) = instTs [cU] ([cT], ct) +fun instT' ct = instT (Thm.ctyp_of_cterm ct) + + +(* certified terms *) + +fun dest_cabs ct ctxt = + (case Thm.term_of ct of + Abs _ => + let val (n, ctxt') = yield_singleton Variable.variant_fixes Name.uu ctxt + in (snd (Thm.dest_abs (SOME n) ct), ctxt') end + | _ => raise CTERM ("no abstraction", [ct])) + +val dest_all_cabs = repeat_yield (try o dest_cabs) + +fun dest_cbinder ct ctxt = + (case Thm.term_of ct of + Const _ $ Abs _ => dest_cabs (Thm.dest_arg ct) ctxt + | _ => raise CTERM ("not a binder", [ct])) + +val dest_all_cbinders = repeat_yield (try o dest_cbinder) + +val mk_cprop = Thm.apply (Thm.cterm_of @{context} @{const Trueprop}) + +fun dest_cprop ct = + (case Thm.term_of ct of + @{const Trueprop} $ _ => Thm.dest_arg ct + | _ => raise CTERM ("not a property", [ct])) + +val equals = mk_const_pat @{theory} @{const_name Pure.eq} destT1 +fun mk_cequals ct cu = Thm.mk_binop (instT' ct equals) ct cu + +val dest_prop = (fn 
@{const Trueprop} $ t => t | t => t) +fun term_of ct = dest_prop (Thm.term_of ct) +fun prop_of thm = dest_prop (Thm.prop_of thm) + + +(* conversions *) + +fun if_conv pred cv1 cv2 ct = if pred (Thm.term_of ct) then cv1 ct else cv2 ct + +fun if_true_conv pred cv = if_conv pred cv Conv.all_conv + +fun if_exists_conv pred = if_true_conv (Term.exists_subterm pred) + +fun binders_conv cv ctxt = + Conv.binder_conv (binders_conv cv o snd) ctxt else_conv cv ctxt + +fun under_quant_conv cv ctxt = + let + fun quant_conv inside ctxt cvs ct = + (case Thm.term_of ct of + Const (@{const_name All}, _) $ Abs _ => + Conv.binder_conv (under_conv cvs) ctxt + | Const (@{const_name Ex}, _) $ Abs _ => + Conv.binder_conv (under_conv cvs) ctxt + | _ => if inside then cv (ctxt, cvs) else Conv.all_conv) ct + and under_conv cvs (cv, ctxt) = quant_conv true ctxt (cv :: cvs) + in quant_conv false ctxt [] end + +fun prop_conv cv ct = + (case Thm.term_of ct of + @{const Trueprop} $ _ => Conv.arg_conv cv ct + | _ => raise CTERM ("not a property", [ct])) + +end; diff --git a/src/main/SMT/smtlib.ML b/src/main/SMT/smtlib.ML new file mode 100644 index 0000000..e20b0ba --- /dev/null +++ b/src/main/SMT/smtlib.ML @@ -0,0 +1,191 @@ +(* Title: HOL/Tools/SMT/smtlib.ML + Author: Sascha Boehme, TU Muenchen + +Parsing and generating SMT-LIB 2. +*) + +signature SMTLIB = +sig + exception PARSE of int * string + datatype tree = + Num of int | + Dec of int * int | + Str of string | + Sym of string | + Key of string | + S of tree list + val parse: string list -> tree + val pretty_tree: tree -> Pretty.T + val str_of: tree -> string +end; + +structure SMTLIB: SMTLIB = +struct + +(* data structures *) + +exception PARSE of int * string + +datatype tree = + Num of int | + Dec of int * int | + Str of string | + Sym of string | + Key of string | + S of tree list + +datatype unfinished = None | String of string | Symbol of string + + +(* utilities *) + +fun read_raw pred l cs = + (case take_prefix pred cs of + ([], []) => raise PARSE (l, "empty token") + | ([], c :: _) => raise PARSE (l, "unexpected character " ^ quote c) + | x => x) + + +(* numerals and decimals *) + +fun int_of cs = fst (read_int cs) + +fun read_num l cs = + (case read_raw Symbol.is_ascii_digit l cs of + (cs1, "." 
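+(* for illustration: "42" is read as Num 42, "3.14" as Dec (3, 14) *)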
:: cs') => + let val (cs2, cs'') = read_raw Symbol.is_ascii_digit l cs' + in (Dec (int_of cs1, int_of cs2), cs'') end + | (cs1, cs2) => (Num (int_of cs1), cs2)) + + +(* binary numbers *) + +fun is_bin c = (c = "0" orelse c = "1") + +fun read_bin l cs = read_raw is_bin l cs |>> Num o fst o read_radix_int 2 + + +(* hex numbers *) + +val is_hex = member (op =) (raw_explode "0123456789abcdefABCDEF") + +fun within c1 c2 c = (ord c1 <= ord c andalso ord c <= ord c2) + +fun unhex i [] = i + | unhex i (c :: cs) = + if within "0" "9" c then unhex (i * 16 + (ord c - ord "0")) cs + else if within "a" "f" c then unhex (i * 16 + (ord c - ord "a" + 10)) cs + else if within "A" "F" c then unhex (i * 16 + (ord c - ord "A" + 10)) cs + else raise Fail ("bad hex character " ^ quote c) + +fun read_hex l cs = read_raw is_hex l cs |>> Num o unhex 0 + + +(* symbols *) + +val symbol_chars = raw_explode "~!@$%^&*_+=<>.?/-" + +fun is_sym c = + Symbol.is_ascii_letter c orelse + Symbol.is_ascii_digit c orelse + member (op =) symbol_chars c + +fun read_sym f l cs = read_raw is_sym l cs |>> f o implode + + +(* quoted tokens *) + +fun read_quoted stop (escape, replacement) cs = + let + fun read _ [] = NONE + | read rs (cs as (c :: cs')) = + if is_prefix (op =) stop cs then + SOME (implode (rev rs), drop (length stop) cs) + else if not (null escape) andalso is_prefix (op =) escape cs then + read (replacement :: rs) (drop (length escape) cs) + else read (c :: rs) cs' + in read [] cs end + +fun read_string cs = read_quoted ["\\", "\""] (["\\", "\\"], "\\") cs +fun read_symbol cs = read_quoted ["|"] ([], "") cs + + +(* core parser *) + +fun read _ [] rest tss = (rest, tss) + | read l ("(" :: cs) None tss = read l cs None ([] :: tss) + | read l (")" :: cs) None (ts1 :: ts2 :: tss) = + read l cs None ((S (rev ts1) :: ts2) :: tss) + | read l ("#" :: "x" :: cs) None (ts :: tss) = + token read_hex l cs ts tss + | read l ("#" :: "b" :: cs) None (ts :: tss) = + token read_bin l cs ts tss + | read l (":" :: cs) None (ts :: tss) = + token (read_sym Key) l cs ts tss + | read l ("\"" :: cs) None (ts :: tss) = + quoted read_string String Str l "" cs ts tss + | read l ("|" :: cs) None (ts :: tss) = + quoted read_symbol Symbol Sym l "" cs ts tss + | read l ((c as "!") :: cs) None (ts :: tss) = + token (fn _ => pair (Sym c)) l cs ts tss + | read l (c :: cs) None (ts :: tss) = + if Symbol.is_ascii_blank c then read l cs None (ts :: tss) + else if Symbol.is_digit c then token read_num l (c :: cs) ts tss + else token (read_sym Sym) l (c :: cs) ts tss + | read l cs (String s) (ts :: tss) = + quoted read_string String Str l s cs ts tss + | read l cs (Symbol s) (ts :: tss) = + quoted read_symbol Symbol Sym l s cs ts tss + | read l _ _ [] = raise PARSE (l, "bad parser state") + +and token f l cs ts tss = + let val (t, cs') = f l cs + in read l cs' None ((t :: ts) :: tss) end + +and quoted r f g l s cs ts tss = + (case r cs of + NONE => (f (s ^ implode cs), ts :: tss) + | SOME (s', cs') => read l cs' None ((g (s ^ s') :: ts) :: tss)) + + + +(* overall parser *) + +fun read_line l line = read l (raw_explode line) + +fun add_line line (l, (None, tss)) = + if size line = 0 orelse nth_string line 0 = ";" then (l + 1, (None, tss)) + else (l + 1, read_line l line None tss) + | add_line line (l, (unfinished, tss)) = + (l + 1, read_line l line unfinished tss) + +fun finish (_, (None, [[t]])) = t + | finish (l, _) = raise PARSE (l, "bad nesting") + +fun parse lines = finish (fold add_line lines (1, (None, [[]]))) + + +(* pretty printer *) + +fun 
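+(* For illustration: parse ["(assert (< x 0))"] yields
+   S [Sym "assert", S [Sym "<", Sym "x", Num 0]], and str_of below maps such a
+   tree back to the concrete syntax "(assert (< x 0))". *)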
pretty_tree (Num i) = Pretty.str (string_of_int i) + | pretty_tree (Dec (i, j)) = + Pretty.str (string_of_int i ^ "." ^ string_of_int j) + | pretty_tree (Str s) = + raw_explode s + |> maps (fn "\"" => ["\\", "\""] | "\\" => ["\\", "\\"] | c => [c]) + |> implode + |> enclose "\"" "\"" + |> Pretty.str + | pretty_tree (Sym s) = + if String.isPrefix "(" s (* for bit vector functions *) orelse + forall is_sym (raw_explode s) then + Pretty.str s + else + Pretty.str ("|" ^ s ^ "|") + | pretty_tree (Key s) = Pretty.str (":" ^ s) + | pretty_tree (S trees) = + Pretty.enclose "(" ")" (Pretty.separate "" (map pretty_tree trees)) + +val str_of = Pretty.unformatted_string_of o pretty_tree + +end; diff --git a/src/main/SMT/smtlib_interface.ML b/src/main/SMT/smtlib_interface.ML new file mode 100644 index 0000000..37ffb50 --- /dev/null +++ b/src/main/SMT/smtlib_interface.ML @@ -0,0 +1,171 @@ +(* Title: HOL/Tools/SMT/smtlib_interface.ML + Author: Sascha Boehme, TU Muenchen + Author: Jasmin Blanchette, TU Muenchen + +Interface to SMT solvers based on the SMT-LIB 2 format. +*) + +signature SMTLIB_INTERFACE = +sig + val smtlibC: SMT_Util.class + val add_logic: int * (term list -> string option) -> Context.generic -> Context.generic + val translate_config: Proof.context -> SMT_Translate.config + val assert_name_of_index: int -> string + val assert_index_of_name: string -> int + val assert_prefix : string +end; + +structure SMTLIB_Interface: SMTLIB_INTERFACE = +struct + +val smtlibC = ["smtlib"] + + +(* builtins *) + +local + fun int_num _ i = SOME (string_of_int i) + + fun is_linear [t] = SMT_Util.is_number t + | is_linear [t, u] = SMT_Util.is_number t orelse SMT_Util.is_number u + | is_linear _ = false + + fun times _ _ ts = + let val mk = Term.list_comb o pair @{const times (int)} + in if is_linear ts then SOME ("*", 2, ts, mk) else NONE end +in + +val setup_builtins = + fold (SMT_Builtin.add_builtin_typ smtlibC) [ + (@{typ bool}, K (SOME "Bool"), K (K NONE)), + (@{typ int}, K (SOME "Int"), int_num)] #> + fold (SMT_Builtin.add_builtin_fun' smtlibC) [ + (@{const True}, "true"), + (@{const False}, "false"), + (@{const Not}, "not"), + (@{const HOL.conj}, "and"), + (@{const HOL.disj}, "or"), + (@{const HOL.implies}, "=>"), + (@{const HOL.eq ('a)}, "="), + (@{const If ('a)}, "ite"), + (@{const less (int)}, "<"), + (@{const less_eq (int)}, "<="), + (@{const uminus (int)}, "-"), + (@{const plus (int)}, "+"), + (@{const minus (int)}, "-")] #> + SMT_Builtin.add_builtin_fun smtlibC + (Term.dest_Const @{const times (int)}, times) + +end + + +(* serialization *) + +(** logic **) + +fun fst_int_ord ((i1, _), (i2, _)) = int_ord (i1, i2) + +structure Logics = Generic_Data +( + type T = (int * (term list -> string option)) list + val empty = [] + val extend = I + fun merge data = Ord_List.merge fst_int_ord data +) + +fun add_logic pf = Logics.map (Ord_List.insert fst_int_ord pf) + +fun choose_logic ctxt ts = + let + fun choose [] = "AUFLIA" + | choose ((_, f) :: fs) = (case f ts of SOME s => s | NONE => choose fs) + in + (case choose (Logics.get (Context.Proof ctxt)) of + "" => "" (* for default Z3 logic, a subset of everything *) + | logic => "(set-logic " ^ logic ^ ")\n") + end + + +(** serialization **) + +fun var i = "?v" ^ string_of_int i + +fun tree_of_sterm l (SMT_Translate.SVar i) = SMTLIB.Sym (var (l - i - 1)) + | tree_of_sterm _ (SMT_Translate.SApp (n, [])) = SMTLIB.Sym n + | tree_of_sterm l (SMT_Translate.SApp (n, ts)) = + SMTLIB.S (SMTLIB.Sym n :: map (tree_of_sterm l) ts) + | tree_of_sterm _ (SMT_Translate.SLet 
_) = + raise Fail "SMT-LIB: unsupported let expression" + | tree_of_sterm l (SMT_Translate.SQua (q, ss, pats, t)) = + let + val l' = l + length ss + + fun quant_name SMT_Translate.SForall = "forall" + | quant_name SMT_Translate.SExists = "exists" + + fun gen_trees_of_pat keyword ps = + [SMTLIB.Key keyword, SMTLIB.S (map (tree_of_sterm l') ps)] + fun trees_of_pat (SMT_Translate.SPat ps) = gen_trees_of_pat "pattern" ps + | trees_of_pat (SMT_Translate.SNoPat ps) = gen_trees_of_pat "no-pattern" ps + fun tree_of_pats [] t = t + | tree_of_pats pats t = SMTLIB.S (SMTLIB.Sym "!" :: t :: maps trees_of_pat pats) + + val vs = map_index (fn (i, ty) => + SMTLIB.S [SMTLIB.Sym (var (l + i)), SMTLIB.Sym ty]) ss + + val body = t + |> tree_of_sterm l' + |> tree_of_pats pats + in + SMTLIB.S [SMTLIB.Sym (quant_name q), SMTLIB.S vs, body] + end + + +fun sctrarg (sel, typ) = "(" ^ sel ^ " " ^ typ ^ ")" +fun sctr (name, args) = enclose "(" ")" (space_implode " " (name :: map sctrarg args)) +fun sdatatype (name, ctrs) = enclose "(" ")" (space_implode " " (name :: map sctr ctrs)) + +fun string_of_fun (f, (ss, s)) = f ^ " (" ^ space_implode " " ss ^ ") " ^ s + +fun named_sterm s t = SMTLIB.S [SMTLIB.Sym "!", t, SMTLIB.Key "named", SMTLIB.Sym s] + +val assert_prefix = "a" + +fun assert_name_of_index i = assert_prefix ^ string_of_int i +fun assert_index_of_name s = the_default ~1 (Int.fromString (unprefix assert_prefix s)) + +fun sdtyp (fp, dtyps) = + Buffer.add (enclose ("(declare-" ^ BNF_FP_Util.co_prefix fp ^ "datatypes () (") "))\n" + (space_implode "\n " (map sdatatype dtyps))) + +fun serialize smt_options comments {logic, sorts, dtyps, funcs} ts = + let + val unsat_core = member (op =) smt_options (":produce-unsat-cores", "true") + in + Buffer.empty + |> fold (Buffer.add o enclose "; " "\n") comments + |> fold (fn (k, v) => Buffer.add ("(set-option " ^ k ^ " " ^ v ^ ")\n")) smt_options + |> Buffer.add logic + |> fold (Buffer.add o enclose "(declare-sort " " 0)\n") (sort fast_string_ord sorts) + |> fold sdtyp (AList.coalesce (op =) dtyps) + |> fold (Buffer.add o enclose "(declare-fun " ")\n" o string_of_fun) + (sort (fast_string_ord o apply2 fst) funcs) + |> fold (fn (i, t) => Buffer.add (enclose "(assert " ")\n" + (SMTLIB.str_of (named_sterm (assert_name_of_index i) (tree_of_sterm 0 t))))) (map_index I ts) + |> Buffer.add "(check-sat)\n" + |> Buffer.add (if unsat_core then "(get-unsat-core)\n" else "(get-proof)\n") + |> Buffer.content + end + +(* interface *) + +fun translate_config ctxt = { + logic = choose_logic ctxt, + fp_kinds = [], + serialize = serialize} + +val _ = Theory.setup (Context.theory_map + (setup_builtins #> + SMT_Translate.add_config (smtlibC, translate_config))) + +end; diff --git a/src/main/SMT/smtlib_isar.ML b/src/main/SMT/smtlib_isar.ML new file mode 100644 index 0000000..2f7ae04 --- /dev/null +++ b/src/main/SMT/smtlib_isar.ML @@ -0,0 +1,75 @@ +(* Title: HOL/Tools/SMT/smtlib_isar.ML + Author: Jasmin Blanchette, TU Muenchen + Author: Mathias Fleury, ENS Rennes + +General tools for Isar proof reconstruction. 
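+
+The helpers below map a solver-level conclusion back to user level: the
+normalization rewrite rules are applied, the statement is atomized,
+lambda-lifted definitions are unfolded again (unlift_term), Skolem name
+suffixes are stripped (unskolemize_names) and the result is wrapped in
+Trueprop so that it can appear as the conclusion of an Isar step.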
+*) + +signature SMTLIB_ISAR = +sig + val unlift_term: term list -> term -> term + val postprocess_step_conclusion: Proof.context -> thm list -> term list -> term -> term + val normalizing_prems : Proof.context -> term -> (string * string list) list + val distinguish_conjecture_and_hypothesis : ''a list -> ''b -> ''b -> ''b list -> + (''a * term) list -> term list -> term -> (ATP_Problem.atp_formula_role * term) option + val unskolemize_names: Proof.context -> term -> term +end; + +structure SMTLIB_Isar: SMTLIB_ISAR = +struct + +open ATP_Util +open ATP_Problem +open ATP_Proof_Reconstruct + +fun unlift_term ll_defs = + let + val lifted = map (ATP_Util.extract_lambda_def dest_Free o ATP_Util.hol_open_form I) ll_defs + + fun un_free (t as Free (s, _)) = + (case AList.lookup (op =) lifted s of + SOME t => un_term t + | NONE => t) + | un_free t = t + and un_term t = map_aterms un_free t + in un_term end + +(* Remove the "__" suffix for newly introduced variables (Skolems). It is not clear why "__" is + generated also for abstraction variables, but this is repaired here. *) +fun unskolemize_names ctxt = + Term.map_abs_vars (perhaps (try Name.dest_skolem)) + #> Term.map_aterms (perhaps (try (fn Free (s, T) => + Free (s |> not (Variable.is_fixed ctxt s) ? Name.dest_skolem, T)))) + +fun postprocess_step_conclusion ctxt rewrite_rules ll_defs = + let val thy = Proof_Context.theory_of ctxt in + Raw_Simplifier.rewrite_term thy rewrite_rules [] + #> Object_Logic.atomize_term ctxt + #> not (null ll_defs) ? unlift_term ll_defs + #> simplify_bool + #> unskolemize_names ctxt + #> HOLogic.mk_Trueprop + end + +fun normalizing_prems ctxt concl0 = + SMT_Normalize.case_bool_entry :: SMT_Normalize.special_quant_table @ + SMT_Normalize.abs_min_max_table + |> map_filter (fn (c, th) => + if exists_Const (curry (op =) c o fst) concl0 then + let val s = short_thm_name ctxt th in SOME (s, [s]) end + else + NONE) + +fun distinguish_conjecture_and_hypothesis ss id conjecture_id prem_ids fact_helper_ts hyp_ts + concl_t = + (case ss of + [s] => SOME (Axiom, the (AList.lookup (op =) fact_helper_ts s)) + | _ => + if id = conjecture_id then + SOME (Conjecture, concl_t) + else + (case find_index (curry (op =) id) prem_ids of + ~1 => NONE (* lambda-lifting definition *) + | i => SOME (Hypothesis, close_form (nth hyp_ts i)))) + +end; diff --git a/src/main/SMT/smtlib_proof.ML b/src/main/SMT/smtlib_proof.ML new file mode 100644 index 0000000..909b7a5 --- /dev/null +++ b/src/main/SMT/smtlib_proof.ML @@ -0,0 +1,298 @@ +(* Title: HOL/Tools/SMT/smtlib_proof.ML + Author: Sascha Boehme, TU Muenchen + Author: Mathias Fleury, ENS Rennes + Author: Jasmin Blanchette, TU Muenchen + +SMT-LIB-2-style proofs: parsing and abstract syntax tree. 
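+
+Terms are rebuilt from parsed S-expressions through the context defined below,
+which carries the known types and functions plus let and quantifier bindings;
+for illustration, assuming p and q are declared as Bool constants, the tree
+for (and p q) is turned by term_of into HOLogic.mk_conj (p, q), and
+(forall ((x Int)) ...) becomes a HOLogic.mk_all binding.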
+*) + +signature SMTLIB_PROOF = +sig + datatype 'b shared = Tree of SMTLIB.tree | Term of term | Proof of 'b | None + type ('a, 'b) context + + val mk_context: Proof.context -> int -> 'b shared Symtab.table -> typ Symtab.table -> + term Symtab.table -> 'a -> ('a, 'b) context + val empty_context: Proof.context -> typ Symtab.table -> term Symtab.table -> ('a list, 'b) context + val ctxt_of: ('a, 'b) context -> Proof.context + val lookup_binding: ('a, 'b) context -> string -> 'b shared + val update_binding: string * 'b shared -> ('a, 'b) context -> ('a, 'b) context + val with_bindings: (string * 'b shared) list -> (('a, 'b) context -> 'c * ('d, 'b) context) -> + ('a, 'b) context -> 'c * ('d, 'b) context + val next_id: ('a, 'b) context -> int * ('a, 'b) context + val with_fresh_names: (('a list, 'b) context -> + term * ((string * (string * typ)) list, 'b) context) -> ('c, 'b) context -> (term * string list) + + (*type and term parsers*) + type type_parser = SMTLIB.tree * typ list -> typ option + type term_parser = SMTLIB.tree * term list -> term option + val add_type_parser: type_parser -> Context.generic -> Context.generic + val add_term_parser: term_parser -> Context.generic -> Context.generic + + exception SMTLIB_PARSE of string * SMTLIB.tree + + val declare_fun: string -> typ -> ((string * typ) list, 'a) context -> + ((string * typ) list, 'a) context + val dest_binding: SMTLIB.tree -> string * 'a shared + val type_of: ('a, 'b) context -> SMTLIB.tree -> typ + val term_of: SMTLIB.tree -> ((string * (string * typ)) list, 'a) context -> + term * ((string * (string * typ)) list, 'a) context +end; + +structure SMTLIB_Proof: SMTLIB_PROOF = +struct + +(* proof parser context *) + +datatype 'b shared = Tree of SMTLIB.tree | Term of term | Proof of 'b | None + +type ('a, 'b) context = { + ctxt: Proof.context, + id: int, + syms: 'b shared Symtab.table, + typs: typ Symtab.table, + funs: term Symtab.table, + extra: 'a} + +fun mk_context ctxt id syms typs funs extra: ('a, 'b) context = + {ctxt = ctxt, id = id, syms = syms, typs = typs, funs = funs, extra = extra} + +fun empty_context ctxt typs funs = mk_context ctxt 1 Symtab.empty typs funs [] + +fun ctxt_of ({ctxt, ...}: ('a, 'b) context) = ctxt + +fun lookup_binding ({syms, ...}: ('a, 'b) context) = + the_default None o Symtab.lookup syms + +fun map_syms f ({ctxt, id, syms, typs, funs, extra}: ('a, 'b) context) = + mk_context ctxt id (f syms) typs funs extra + +fun update_binding b = map_syms (Symtab.update b) + +fun with_bindings bs f cx = + let val bs' = map (lookup_binding cx o fst) bs + in + cx + |> fold update_binding bs + |> f + ||> fold2 (fn (name, _) => update_binding o pair name) bs bs' + end + +fun next_id ({ctxt, id, syms, typs, funs, extra}: ('a, 'b) context) = + (id, mk_context ctxt (id + 1) syms typs funs extra) + +fun with_fresh_names f ({ctxt, id, syms, typs, funs, ...}: ('a, 'b) context) = + let + fun bind (_, v as (_, T)) t = Logic.all_const T $ Term.absfree v t + + val needs_inferT = equal Term.dummyT orf Term.is_TVar + val needs_infer = Term.exists_type (Term.exists_subtype needs_inferT) + fun infer_types ctxt = + singleton (Type_Infer_Context.infer_types ctxt) #> + singleton (Proof_Context.standard_term_check_finish ctxt) + fun infer ctxt t = if needs_infer t then infer_types ctxt t else t + + val (t, {ctxt = ctxt', extra = names, ...}: ((string * (string * typ)) list, 'b) context) = + f (mk_context ctxt id syms typs funs []) + val t' = infer ctxt' (fold_rev bind names (HOLogic.mk_Trueprop t)) + in + (t', map fst names) + end + 
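+(* Summary of with_fresh_names above: names invented while parsing a term are
+   collected in the extra field of the context, the resulting proposition is
+   universally closed over them (via Pure's all), and the names are returned
+   next to the term so that callers can record the fixed variables of a step. *)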
+fun lookup_typ ({typs, ...}: ('a, 'b) context) = Symtab.lookup typs +fun lookup_fun ({funs, ...}: ('a, 'b) context) = Symtab.lookup funs + + +(* core type and term parser *) + +fun core_type_parser (SMTLIB.Sym "Bool", []) = SOME @{typ HOL.bool} + | core_type_parser (SMTLIB.Sym "Int", []) = SOME @{typ Int.int} + | core_type_parser _ = NONE + +fun mk_unary n t = + let val T = fastype_of t + in Const (n, T --> T) $ t end + +fun mk_binary' n T U t1 t2 = Const (n, [T, T] ---> U) $ t1 $ t2 + +fun mk_binary n t1 t2 = + let val T = fastype_of t1 + in mk_binary' n T T t1 t2 end + +fun mk_rassoc f t ts = + let val us = rev (t :: ts) + in fold f (tl us) (hd us) end + +fun mk_lassoc f t ts = fold (fn u1 => fn u2 => f u2 u1) ts t + +fun mk_lassoc' n = mk_lassoc (mk_binary n) + +fun mk_binary_pred n S t1 t2 = + let + val T1 = fastype_of t1 + val T2 = fastype_of t2 + val T = + if T1 <> Term.dummyT then T1 + else if T2 <> Term.dummyT then T2 + else TVar (("?a", serial ()), S) + in mk_binary' n T @{typ HOL.bool} t1 t2 end + +fun mk_less t1 t2 = mk_binary_pred @{const_name ord_class.less} @{sort linorder} t1 t2 +fun mk_less_eq t1 t2 = mk_binary_pred @{const_name ord_class.less_eq} @{sort linorder} t1 t2 + +fun core_term_parser (SMTLIB.Sym "true", _) = SOME @{const HOL.True} + | core_term_parser (SMTLIB.Sym "false", _) = SOME @{const HOL.False} + | core_term_parser (SMTLIB.Sym "not", [t]) = SOME (HOLogic.mk_not t) + | core_term_parser (SMTLIB.Sym "and", t :: ts) = SOME (mk_rassoc (curry HOLogic.mk_conj) t ts) + | core_term_parser (SMTLIB.Sym "or", t :: ts) = SOME (mk_rassoc (curry HOLogic.mk_disj) t ts) + | core_term_parser (SMTLIB.Sym "=>", [t1, t2]) = SOME (HOLogic.mk_imp (t1, t2)) + | core_term_parser (SMTLIB.Sym "implies", [t1, t2]) = SOME (HOLogic.mk_imp (t1, t2)) + | core_term_parser (SMTLIB.Sym "=", [t1, t2]) = SOME (HOLogic.mk_eq (t1, t2)) + | core_term_parser (SMTLIB.Sym "~", [t1, t2]) = SOME (HOLogic.mk_eq (t1, t2)) + | core_term_parser (SMTLIB.Sym "ite", [t1, t2, t3]) = + let + val T = fastype_of t2 + val c = Const (@{const_name HOL.If}, [@{typ HOL.bool}, T, T] ---> T) + in SOME (c $ t1 $ t2 $ t3) end + | core_term_parser (SMTLIB.Num i, []) = SOME (HOLogic.mk_number @{typ Int.int} i) + | core_term_parser (SMTLIB.Sym "-", [t]) = SOME (mk_unary @{const_name uminus_class.uminus} t) + | core_term_parser (SMTLIB.Sym "~", [t]) = SOME (mk_unary @{const_name uminus_class.uminus} t) + | core_term_parser (SMTLIB.Sym "+", t :: ts) = + SOME (mk_lassoc' @{const_name plus_class.plus} t ts) + | core_term_parser (SMTLIB.Sym "-", t :: ts) = + SOME (mk_lassoc' @{const_name minus_class.minus} t ts) + | core_term_parser (SMTLIB.Sym "*", t :: ts) = + SOME (mk_lassoc' @{const_name times_class.times} t ts) + | core_term_parser (SMTLIB.Sym "div", [t1, t2]) = SOME (mk_binary @{const_name z3div} t1 t2) + | core_term_parser (SMTLIB.Sym "mod", [t1, t2]) = SOME (mk_binary @{const_name z3mod} t1 t2) + | core_term_parser (SMTLIB.Sym "<", [t1, t2]) = SOME (mk_less t1 t2) + | core_term_parser (SMTLIB.Sym ">", [t1, t2]) = SOME (mk_less t2 t1) + | core_term_parser (SMTLIB.Sym "<=", [t1, t2]) = SOME (mk_less_eq t1 t2) + | core_term_parser (SMTLIB.Sym ">=", [t1, t2]) = SOME (mk_less_eq t2 t1) + | core_term_parser _ = NONE + + +(* custom type and term parsers *) + +type type_parser = SMTLIB.tree * typ list -> typ option + +type term_parser = SMTLIB.tree * term list -> term option + +fun id_ord ((id1, _), (id2, _)) = int_ord (id1, id2) + +structure Parsers = Generic_Data +( + type T = (int * type_parser) list * (int * term_parser) 
list + val empty : T = ([(serial (), core_type_parser)], [(serial (), core_term_parser)]) + val extend = I + fun merge ((tys1, ts1), (tys2, ts2)) = + (Ord_List.merge id_ord (tys1, tys2), Ord_List.merge id_ord (ts1, ts2)) +) + +fun add_type_parser type_parser = + Parsers.map (apfst (Ord_List.insert id_ord (serial (), type_parser))) + +fun add_term_parser term_parser = + Parsers.map (apsnd (Ord_List.insert id_ord (serial (), term_parser))) + +fun get_type_parsers ctxt = map snd (fst (Parsers.get (Context.Proof ctxt))) +fun get_term_parsers ctxt = map snd (snd (Parsers.get (Context.Proof ctxt))) + +fun apply_parsers parsers x = + let + fun apply [] = NONE + | apply (parser :: parsers) = + (case parser x of + SOME y => SOME y + | NONE => apply parsers) + in apply parsers end + + +(* type and term parsing *) + +exception SMTLIB_PARSE of string * SMTLIB.tree + +val desymbolize = Name.desymbolize (SOME false) o perhaps (try (unprefix "?")) + +fun fresh_fun add name n T ({ctxt, id, syms, typs, funs, extra}: ('a, 'b) context) = + let + val (n', ctxt') = yield_singleton Variable.variant_fixes n ctxt + val t = Free (n', T) + val funs' = Symtab.update (name, t) funs + in (t, mk_context ctxt' id syms typs funs' (add (n', T) extra)) end + +fun declare_fun name = snd oo fresh_fun cons name (desymbolize name) +fun declare_free name = fresh_fun (cons o pair name) name (desymbolize name) + +fun parse_type cx ty Ts = + (case apply_parsers (get_type_parsers (ctxt_of cx)) (ty, Ts) of + SOME T => T + | NONE => + (case ty of + SMTLIB.Sym name => + (case lookup_typ cx name of + SOME T => T + | NONE => raise SMTLIB_PARSE ("unknown SMT type", ty)) + | _ => raise SMTLIB_PARSE ("bad SMT type format", ty))) + +fun parse_term t ts cx = + (case apply_parsers (get_term_parsers (ctxt_of cx)) (t, ts) of + SOME u => (u, cx) + | NONE => + (case t of + SMTLIB.Sym name => + (case lookup_fun cx name of + SOME u => (Term.list_comb (u, ts), cx) + | NONE => + if null ts then declare_free name Term.dummyT cx + else raise SMTLIB_PARSE ("bad SMT term", t)) + | _ => raise SMTLIB_PARSE ("bad SMT term format", t))) + +fun type_of cx ty = + (case try (parse_type cx ty) [] of + SOME T => T + | NONE => + (case ty of + SMTLIB.S (ty' :: tys) => parse_type cx ty' (map (type_of cx) tys) + | _ => raise SMTLIB_PARSE ("bad SMT type", ty))) + +fun dest_var cx (SMTLIB.S [SMTLIB.Sym name, ty]) = (name, (desymbolize name, type_of cx ty)) + | dest_var _ v = raise SMTLIB_PARSE ("bad SMT quantifier variable format", v) + +fun dest_body (SMTLIB.S (SMTLIB.Sym "!" :: body :: _)) = dest_body body + | dest_body body = body + +fun dest_binding (SMTLIB.S [SMTLIB.Sym name, t]) = (name, Tree t) + | dest_binding b = raise SMTLIB_PARSE ("bad SMT let binding format", b) + +fun term_of t cx = + (case t of + SMTLIB.S [SMTLIB.Sym "forall", SMTLIB.S vars, body] => quant HOLogic.mk_all vars body cx + | SMTLIB.S [SMTLIB.Sym "exists", SMTLIB.S vars, body] => quant HOLogic.mk_exists vars body cx + | SMTLIB.S [SMTLIB.Sym "let", SMTLIB.S bindings, body] => + with_bindings (map dest_binding bindings) (term_of body) cx + | SMTLIB.S (SMTLIB.Sym "!" 
:: t :: _) => term_of t cx + | SMTLIB.S (f :: args) => + cx + |> fold_map term_of args + |-> parse_term f + | SMTLIB.Sym name => + (case lookup_binding cx name of + Tree u => + cx + |> term_of u + |-> (fn u' => pair u' o update_binding (name, Term u')) + | Term u => (u, cx) + | None => parse_term t [] cx + | _ => raise SMTLIB_PARSE ("bad SMT term format", t)) + | _ => parse_term t [] cx) + +and quant q vars body cx = + let val vs = map (dest_var cx) vars + in + cx + |> with_bindings (map (apsnd (Term o Free)) vs) (term_of (dest_body body)) + |>> fold_rev (fn (_, (n, T)) => fn t => q (n, T, t)) vs + end + +end; diff --git a/src/main/SMT/verit_isar.ML b/src/main/SMT/verit_isar.ML new file mode 100644 index 0000000..28ee6d9 --- /dev/null +++ b/src/main/SMT/verit_isar.ML @@ -0,0 +1,60 @@ +(* Title: HOL/Tools/SMT/verit_isar.ML + Author: Mathias Fleury, TU Muenchen + Author: Jasmin Blanchette, TU Muenchen + +VeriT proofs as generic ATP proofs for Isar proof reconstruction. +*) + +signature VERIT_ISAR = +sig + type ('a, 'b) atp_step = ('a, 'b) ATP_Proof.atp_step + val atp_proof_of_veriT_proof: Proof.context -> term list -> thm list -> term list -> term -> + (string * term) list -> int list -> int -> (int * string) list -> VeriT_Proof.veriT_step list -> + (term, string) ATP_Proof.atp_step list +end; + +structure VeriT_Isar: VERIT_ISAR = +struct + +open ATP_Util +open ATP_Problem +open ATP_Proof +open ATP_Proof_Reconstruct +open SMTLIB_Interface +open SMTLIB_Isar +open VeriT_Proof + +fun atp_proof_of_veriT_proof ctxt ll_defs rewrite_rules hyp_ts concl_t fact_helper_ts prem_ids + conjecture_id fact_helper_ids = + let + fun steps_of (VeriT_Proof.VeriT_Step {id, rule, prems, concl, ...}) = + let + val concl' = postprocess_step_conclusion ctxt rewrite_rules ll_defs concl + fun standard_step role = ((id, []), role, concl', rule, map (rpair []) prems) + in + if rule = veriT_input_rule then + let + val id_num = the (Int.fromString (unprefix assert_prefix id)) + val ss = the_list (AList.lookup (op =) fact_helper_ids id_num) + in + (case distinguish_conjecture_and_hypothesis ss id_num conjecture_id prem_ids + fact_helper_ts hyp_ts concl_t of + NONE => [] + | SOME (role0, concl00) => + let + val name0 = (id ^ "a", ss) + val concl0 = unskolemize_names ctxt concl00 + in + [(name0, role0, concl0, rule, []), + ((id, []), Plain, concl', veriT_rewrite_rule, + name0 :: normalizing_prems ctxt concl0)] + end) + end + else + [standard_step (if null prems then Lemma else Plain)] + end + in + maps steps_of + end + +end; diff --git a/src/main/SMT/verit_proof.ML b/src/main/SMT/verit_proof.ML new file mode 100644 index 0000000..1dab112 --- /dev/null +++ b/src/main/SMT/verit_proof.ML @@ -0,0 +1,324 @@ +(* Title: HOL/Tools/SMT/verit_proof.ML + Author: Mathias Fleury, ENS Rennes + Author: Sascha Boehme, TU Muenchen + +VeriT proofs: parsing and abstract syntax tree. 
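+
+Each step is read from an S-expression of the shape documented at the parser
+below, (set <id> (<rule> :clauses (...) :args (...) :conclusion (...))), and
+becomes a veriT_step carrying the step id, the rule name, the ids of its
+premises and the conclusion as a HOL term; subproofs are nested (set ...)
+forms whose input assumptions get inlined into the conclusions.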
+*) + +signature VERIT_PROOF = +sig + (*proofs*) + datatype veriT_step = VeriT_Step of { + id: string, + rule: string, + prems: string list, + concl: term, + fixes: string list} + + (*proof parser*) + val parse: typ Symtab.table -> term Symtab.table -> string list -> + Proof.context -> veriT_step list * Proof.context + + val veriT_step_prefix : string + val veriT_input_rule: string + val veriT_la_generic_rule : string + val veriT_rewrite_rule : string + val veriT_simp_arith_rule : string + val veriT_tmp_ite_elim_rule : string + val veriT_tmp_skolemize_rule : string +end; + +structure VeriT_Proof: VERIT_PROOF = +struct + +open SMTLIB_Proof + +datatype veriT_node = VeriT_Node of { + id: string, + rule: string, + prems: string list, + concl: term, + bounds: string list} + +fun mk_node id rule prems concl bounds = + VeriT_Node {id = id, rule = rule, prems = prems, concl = concl, bounds = bounds} + +datatype veriT_step = VeriT_Step of { + id: string, + rule: string, + prems: string list, + concl: term, + fixes: string list} + +fun mk_step id rule prems concl fixes = + VeriT_Step {id = id, rule = rule, prems = prems, concl = concl, fixes = fixes} + +val veriT_step_prefix = ".c" +val veriT_input_rule = "input" +val veriT_la_generic_rule = "la_generic" +val veriT_rewrite_rule = "__rewrite" (* arbitrary *) +val veriT_simp_arith_rule = "simp_arith" +val veriT_tmp_alphaconv_rule = "tmp_alphaconv" +val veriT_tmp_ite_elim_rule = "tmp_ite_elim" +val veriT_tmp_skolemize_rule = "tmp_skolemize" + +(* proof parser *) + +fun node_of p cx = + ([], cx) + ||>> `(with_fresh_names (term_of p)) + |>> snd + +(*in order to get Z3-style quantification*) +fun repair_quantification (SMTLIB.S (SMTLIB.Sym "forall" :: l)) = + let val (quantified_vars, t) = split_last (map repair_quantification l) + in + SMTLIB.S (SMTLIB.Sym "forall" :: SMTLIB.S quantified_vars :: t :: []) + end + | repair_quantification (SMTLIB.S (SMTLIB.Sym "exists" :: l)) = + let val (quantified_vars, t) = split_last (map repair_quantification l) + in + SMTLIB.S (SMTLIB.Sym "exists" :: SMTLIB.S quantified_vars :: t :: []) + end + | repair_quantification (SMTLIB.S l) = SMTLIB.S (map repair_quantification l) + | repair_quantification x = x + +fun replace_bound_var_by_free_var (q $ Abs (var, ty, u)) free_var = + (case List.find (fn v => String.isPrefix v var) free_var of + NONE => q $ Abs (var, ty, replace_bound_var_by_free_var u free_var) + | SOME _ => replace_bound_var_by_free_var (Term.subst_bound (Free (var, ty), u)) free_var) + | replace_bound_var_by_free_var (u $ v) free_vars = replace_bound_var_by_free_var u free_vars $ + replace_bound_var_by_free_var v free_vars + | replace_bound_var_by_free_var u _ = u + +fun find_type_in_formula (Abs (v, T, u)) var_name = + if String.isPrefix var_name v then SOME T else find_type_in_formula u var_name + | find_type_in_formula (u $ v) var_name = + (case find_type_in_formula u var_name of + NONE => find_type_in_formula v var_name + | some_T => some_T) + | find_type_in_formula _ _ = NONE + +fun add_bound_variables_to_ctxt concl = + fold (update_binding o + (fn s => (s, Term (Free (s, the_default dummyT (find_type_in_formula concl s)))))) + +fun update_step_and_cx (node as VeriT_Node {id, rule, prems, concl, bounds}) cx = + if rule = veriT_tmp_ite_elim_rule then + (mk_node id rule prems concl bounds, add_bound_variables_to_ctxt concl bounds cx) + else if rule = veriT_tmp_skolemize_rule then + let val concl' = replace_bound_var_by_free_var concl bounds in + (mk_node id rule prems concl' [], add_bound_variables_to_ctxt 
concl bounds cx) + end + else + (node, cx) + +fun fix_subproof_steps ((((id_of_father_step, rule), prems), subproof), ((step_concl, bounds), + cx)) = + let + fun mk_prop_of_term concl = + concl |> fastype_of concl = @{typ bool} ? curry (op $) @{term Trueprop} + fun update_prems assumption_id prems = + map (fn prem => id_of_father_step ^ prem) + (filter_out (curry (op =) assumption_id) prems) + fun inline_assumption assumption assumption_id + (VeriT_Node {id, rule, prems, concl, bounds}) = + mk_node id rule (update_prems assumption_id prems) + (@{const Pure.imp} $ mk_prop_of_term assumption $ mk_prop_of_term concl) bounds + fun find_input_steps_and_inline [] last_step = ([], last_step) + | find_input_steps_and_inline (VeriT_Node {id = id', rule, prems, concl, bounds} :: steps) + last_step = + if rule = veriT_input_rule then + find_input_steps_and_inline (map (inline_assumption concl id') steps) last_step + else + apfst (cons (mk_node (id_of_father_step ^ id') rule prems concl bounds)) + (find_input_steps_and_inline steps (id_of_father_step ^ id')) + val (subproof', last_step_id) = find_input_steps_and_inline subproof "" + val prems' = + if last_step_id = "" then + prems + else + (case prems of + NONE => SOME [last_step_id] + | SOME l => SOME (last_step_id :: l)) + in + (subproof', (((((id_of_father_step, rule), prems'), step_concl), bounds), cx)) + end + +(* +(set id rule :clauses(...) :args(..) :conclusion (...)). +or +(set id subproof (set ...) :conclusion (...)). +*) + +fun parse_proof_step cx = + let + fun rotate_pair (a, (b, c)) = ((a, b), c) + fun get_id (SMTLIB.S [SMTLIB.Sym "set", SMTLIB.Sym id, SMTLIB.S l]) = (id, l) + | get_id t = raise Fail ("unrecognized VeriT proof " ^ @{make_string} t) + fun parse_rule (SMTLIB.Sym rule :: l) = (rule, l) + fun parse_source (SMTLIB.Key "clauses" :: SMTLIB.S source ::l) = + (SOME (map (fn (SMTLIB.Sym id) => id) source), l) + | parse_source l = (NONE, l) + fun parse_subproof cx id_of_father_step ((subproof_step as SMTLIB.S (SMTLIB.Sym "set" :: _)) :: l) = + let val (subproof_steps, cx') = parse_proof_step cx subproof_step in + apfst (apfst (curry (op @) subproof_steps)) (parse_subproof cx' id_of_father_step l) + end + | parse_subproof cx _ l = (([], cx), l) + fun skip_args (SMTLIB.Key "args" :: SMTLIB.S _ :: l) = l + | skip_args l = l + fun parse_conclusion (SMTLIB.Key "conclusion" :: SMTLIB.S concl :: []) = concl + fun make_or_from_clausification l = + foldl1 (fn ((concl1, bounds1), (concl2, bounds2)) => + (HOLogic.mk_disj (perhaps (try HOLogic.dest_Trueprop) concl1, + perhaps (try HOLogic.dest_Trueprop) concl2), bounds1 @ bounds2)) l + fun to_node (((((id, rule), prems), concl), bounds), cx) = + (mk_node id rule (the_default [] prems) concl bounds, cx) + in + get_id + ##> parse_rule + #> rotate_pair + ##> parse_source + #> rotate_pair + ##> skip_args + #> (fn (((id, rule), prems), sub) => (((id, rule), prems), parse_subproof cx id sub)) + #> rotate_pair + ##> parse_conclusion + ##> map repair_quantification + #> (fn ((((id, rule), prems), (subproof, cx)), terms) => + (((((id, rule), prems), subproof), fold_map (fn t => fn cx => node_of t cx) terms cx))) + ##> apfst (fn [] => (@{const False}, []) | concls => make_or_from_clausification concls) + #> fix_subproof_steps + ##> to_node + #> (fn (subproof, (step, cx)) => (subproof @ [step], cx)) + #-> fold_map update_step_and_cx + end + +(*subproofs are written on multiple lines: SMTLIB can not parse then, because parentheses are +unbalanced on each line*) +fun seperate_into_steps lines = + let + fun 
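+(* count keeps a running balance of parentheses; lines are concatenated until
+   that balance returns to 0, so a step printed across several lines reaches
+   SMTLIB.parse as one string.  For illustration, a hypothetical split such as
+   "(set .c1 (input" followed by " :conclusion (p)))" is reassembled into
+   "(set .c1 (input :conclusion (p)))". *)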
count ("(" :: l) n = count l (n + 1) + | count (")" :: l) n = count l (n - 1) + | count (_ :: l) n = count l n + | count [] n = n + fun seperate (line :: l) actual_lines m = + let val n = count (raw_explode line) 0 in + if m + n = 0 then + [actual_lines ^ line] :: seperate l "" 0 + else + seperate l (actual_lines ^ line) (m + n) + end + | seperate [] _ 0 = [] + in + seperate lines "" 0 + end + +(* VeriT adds "@" before every variable. *) +fun remove_all_at (SMTLIB.Sym v :: l) = + SMTLIB.Sym (perhaps (try (unprefix "@")) v) :: remove_all_at l + | remove_all_at (SMTLIB.S l :: l') = SMTLIB.S (remove_all_at l) :: remove_all_at l' + | remove_all_at (SMTLIB.Key v :: l) = SMTLIB.Key v :: remove_all_at l + | remove_all_at (v :: l) = v :: remove_all_at l + | remove_all_at [] = [] + +fun find_in_which_step_defined var (VeriT_Node {id, bounds, ...} :: l) = + (case List.find (fn v => String.isPrefix v var) bounds of + NONE => find_in_which_step_defined var l + | SOME _ => id) + | find_in_which_step_defined var _ = raise Fail ("undefined " ^ var) + +(*Yes every case is possible: the introduced var is not on a special size of the equality sign.*) +fun find_ite_var_in_term (Const (@{const_name If}, _) $ _ $ + (Const (@{const_name HOL.eq}, _) $ Free (var1, _) $ Free (var2, _) ) $ + (Const (@{const_name HOL.eq}, _) $ Free (var3, _) $ Free (var4, _) )) = + let + fun get_number_of_ite_transformed_var var = + perhaps (try (unprefix "ite")) var + |> Int.fromString + fun is_equal_and_has_correct_substring var var' var'' = + if var = var' andalso String.isPrefix "ite" var then SOME var' + else if var = var'' andalso String.isPrefix "ite" var then SOME var'' else NONE + val var1_introduced_var = is_equal_and_has_correct_substring var1 var3 var4 + val var2_introduced_var = is_equal_and_has_correct_substring var3 var1 var2 + in + (case (var1_introduced_var, var2_introduced_var) of + (SOME a, SOME b) => + (*ill-generated case, might be possible when applying the rule to max a a. Only if the + variable have been introduced before. Probably an impossible edge case*) + (case (get_number_of_ite_transformed_var a, get_number_of_ite_transformed_var b) of + (SOME a, SOME b) => if a < b then var2_introduced_var else var1_introduced_var + (*Otherwise, it is a name clase between a parameter name and the introduced variable. 
+ Or the name convention has been changed.*) + | (NONE, SOME _) => var2_introduced_var + | (SOME _, NONE) => var2_introduced_var) + | (_, SOME _) => var2_introduced_var + | (SOME _, _) => var1_introduced_var) + end + | find_ite_var_in_term (Const (@{const_name If}, _) $ _ $ + (Const (@{const_name HOL.eq}, _) $ Free (var, _) $ _ ) $ + (Const (@{const_name HOL.eq}, _) $ Free (var', _) $ _ )) = + if var = var' then SOME var else NONE + | find_ite_var_in_term (Const (@{const_name If}, _) $ _ $ + (Const (@{const_name HOL.eq}, _) $ _ $ Free (var, _)) $ + (Const (@{const_name HOL.eq}, _) $ _ $ Free (var', _))) = + if var = var' then SOME var else NONE + | find_ite_var_in_term (p $ q) = + (case find_ite_var_in_term p of + NONE => find_ite_var_in_term q + | x => x) + | find_ite_var_in_term (Abs (_, _, body)) = find_ite_var_in_term body + | find_ite_var_in_term _ = NONE + +fun correct_veriT_step steps (node as VeriT_Node {id, rule, prems, concl, bounds}) = + if rule = veriT_tmp_ite_elim_rule then + if bounds = [] then + (*if the introduced var has already been defined, adding the definition as a dependency*) + let + val new_prems = prems + |> (case find_ite_var_in_term concl of + NONE => I + | SOME var => cons (find_in_which_step_defined var steps)) + in + VeriT_Node {id = id, rule = rule, prems = new_prems, concl = concl, bounds = bounds} + end + else + (*some new variables are created*) + let val concl' = replace_bound_var_by_free_var concl bounds in + mk_node id rule prems concl' [] + end + else + node + +fun remove_alpha_conversion _ [] = [] + | remove_alpha_conversion replace_table (VeriT_Node {id, rule, prems, concl, bounds} :: steps) = + let + val correct_dependency = map (perhaps (Symtab.lookup replace_table)) + val find_predecessor = perhaps (Symtab.lookup replace_table) + in + if rule = veriT_tmp_alphaconv_rule then + remove_alpha_conversion (Symtab.update (id, find_predecessor (hd prems)) + replace_table) steps + else + VeriT_Node {id = id, rule = rule, prems = correct_dependency prems, + concl = concl, bounds = bounds} :: remove_alpha_conversion replace_table steps + end + +fun correct_veriT_steps steps = + steps + |> map (correct_veriT_step steps) + |> remove_alpha_conversion Symtab.empty + +fun parse typs funs lines ctxt = + let + val smtlib_lines_without_at = remove_all_at (map SMTLIB.parse (seperate_into_steps lines)) + val (u, env) = apfst flat (fold_map (fn l => fn cx => parse_proof_step cx l) + smtlib_lines_without_at (empty_context ctxt typs funs)) + val t = correct_veriT_steps u + fun node_to_step (VeriT_Node {id, rule, prems, concl, bounds, ...}) = + mk_step id rule prems concl bounds + in + (map node_to_step t, ctxt_of env) + end + +end; diff --git a/src/main/SMT/verit_proof_parse.ML b/src/main/SMT/verit_proof_parse.ML new file mode 100644 index 0000000..cddc609 --- /dev/null +++ b/src/main/SMT/verit_proof_parse.ML @@ -0,0 +1,78 @@ +(* Title: HOL/Tools/SMT/verit_proof_parse.ML + Author: Mathias Fleury, TU Muenchen + Author: Jasmin Blanchette, TU Muenchen + +VeriT proof parsing. 
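+
+The index bookkeeping below assumes the assertion layout produced by the
+translation: assertion i is named "a" ^ string_of_int i, the lambda-lifting
+definitions come first (hence the num_ll_defs offset), followed by the
+conjecture, the premises, the facts and the helper theorems, in that order.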
+*) + +signature VERIT_PROOF_PARSE = +sig + type ('a, 'b) atp_step = ('a, 'b) ATP_Proof.atp_step + val parse_proof: SMT_Translate.replay_data -> + ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> + SMT_Solver.parsed_proof +end; + +structure VeriT_Proof_Parse: VERIT_PROOF_PARSE = +struct + +open ATP_Util +open ATP_Problem +open ATP_Proof +open ATP_Proof_Reconstruct +open VeriT_Isar +open VeriT_Proof + +fun add_used_asserts_in_step (VeriT_Proof.VeriT_Step {prems, ...}) = + union (op =) (map_filter (try SMTLIB_Interface.assert_index_of_name) prems) + +fun parse_proof + ({context = ctxt, typs, terms, ll_defs, rewrite_rules, assms} : SMT_Translate.replay_data) + xfacts prems concl output = + let + val num_ll_defs = length ll_defs + + val id_of_index = Integer.add num_ll_defs + val index_of_id = Integer.add (~ num_ll_defs) + + fun step_of_assume j (_, th) = + VeriT_Proof.VeriT_Step {id = SMTLIB_Interface.assert_name_of_index (id_of_index j), + rule = veriT_input_rule, prems = [], concl = Thm.prop_of th, fixes = []} + + val (actual_steps, _) = VeriT_Proof.parse typs terms output ctxt + val used_assert_ids = fold add_used_asserts_in_step actual_steps [] + val used_assm_js = + map_filter (fn id => let val i = index_of_id id in if i >= 0 then SOME i else NONE end) + used_assert_ids + val used_assms = map (nth assms) used_assm_js + val assm_steps = map2 step_of_assume used_assm_js used_assms + val steps = assm_steps @ actual_steps + + val conjecture_i = 0 + val prems_i = conjecture_i + 1 + val num_prems = length prems + val facts_i = prems_i + num_prems + val num_facts = length xfacts + val helpers_i = facts_i + num_facts + + val conjecture_id = id_of_index conjecture_i + val prem_ids = map id_of_index (prems_i upto prems_i + num_prems - 1) + val fact_ids' = + map_filter (fn j => + let val (i, _) = nth assms j in + try (apsnd (nth xfacts)) (id_of_index j, i - facts_i) + end) used_assm_js + val helper_ids' = filter (fn (i, _) => i >= helpers_i) used_assms + + val fact_helper_ts = + map (fn (_, th) => (ATP_Util.short_thm_name ctxt th, Thm.prop_of th)) helper_ids' @ + map (fn (_, ((s, _), th)) => (s, Thm.prop_of th)) fact_ids' + val fact_helper_ids' = + map (apsnd (ATP_Util.short_thm_name ctxt)) helper_ids' @ map (apsnd (fst o fst)) fact_ids' + in + {outcome = NONE, fact_ids = SOME fact_ids', + atp_proof = fn () => atp_proof_of_veriT_proof ctxt ll_defs rewrite_rules prems concl + fact_helper_ts prem_ids conjecture_id fact_helper_ids' steps} + end + +end; diff --git a/src/main/SMT/z3_interface.ML b/src/main/SMT/z3_interface.ML new file mode 100644 index 0000000..588458a --- /dev/null +++ b/src/main/SMT/z3_interface.ML @@ -0,0 +1,192 @@ +(* Title: HOL/Tools/SMT/z3_interface.ML + Author: Sascha Boehme, TU Muenchen + +Interface to Z3 based on a relaxed version of SMT-LIB. 
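+
+"Relaxed" shows up below as a few Z3-specific deviations from plain SMT-LIB:
+integer div and mod are mapped to the constants z3div and z3mod (with
+div_as_z3div and mod_as_z3mod added as extra rewrites when needed), algebraic
+datatypes are declared (Least_FP), and "*" is accepted as a builtin without
+the linearity restriction imposed by the generic SMT-LIB interface.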
+*) + +signature Z3_INTERFACE = +sig + val smtlib_z3C: SMT_Util.class + + datatype sym = Sym of string * sym list + type mk_builtins = { + mk_builtin_typ: sym -> typ option, + mk_builtin_num: theory -> int -> typ -> cterm option, + mk_builtin_fun: theory -> sym -> cterm list -> cterm option } + val add_mk_builtins: mk_builtins -> Context.generic -> Context.generic + val mk_builtin_typ: Proof.context -> sym -> typ option + val mk_builtin_num: Proof.context -> int -> typ -> cterm option + val mk_builtin_fun: Proof.context -> sym -> cterm list -> cterm option + + val is_builtin_theory_term: Proof.context -> term -> bool +end; + +structure Z3_Interface: Z3_INTERFACE = +struct + +val smtlib_z3C = SMTLIB_Interface.smtlibC @ ["z3"] + + +(* interface *) + +local + fun translate_config ctxt = + {logic = K "", fp_kinds = [BNF_Util.Least_FP], + serialize = #serialize (SMTLIB_Interface.translate_config ctxt)} + + fun is_div_mod @{const divide (int)} = true + | is_div_mod @{const modulo (int)} = true + | is_div_mod _ = false + + val have_int_div_mod = exists (Term.exists_subterm is_div_mod o Thm.prop_of) + + fun add_div_mod _ (thms, extra_thms) = + if have_int_div_mod thms orelse have_int_div_mod extra_thms then + (thms, @{thms div_as_z3div mod_as_z3mod} @ extra_thms) + else (thms, extra_thms) + + val setup_builtins = + SMT_Builtin.add_builtin_fun' smtlib_z3C (@{const times (int)}, "*") #> + SMT_Builtin.add_builtin_fun' smtlib_z3C (@{const z3div}, "div") #> + SMT_Builtin.add_builtin_fun' smtlib_z3C (@{const z3mod}, "mod") +in + +val _ = Theory.setup (Context.theory_map ( + setup_builtins #> + SMT_Normalize.add_extra_norm (smtlib_z3C, add_div_mod) #> + SMT_Translate.add_config (smtlib_z3C, translate_config))) + +end + + +(* constructors *) + +datatype sym = Sym of string * sym list + + +(** additional constructors **) + +type mk_builtins = { + mk_builtin_typ: sym -> typ option, + mk_builtin_num: theory -> int -> typ -> cterm option, + mk_builtin_fun: theory -> sym -> cterm list -> cterm option } + +fun chained _ [] = NONE + | chained f (b :: bs) = (case f b of SOME y => SOME y | NONE => chained f bs) + +fun chained_mk_builtin_typ bs sym = + chained (fn {mk_builtin_typ=mk, ...} : mk_builtins => mk sym) bs + +fun chained_mk_builtin_num ctxt bs i T = + let val thy = Proof_Context.theory_of ctxt + in chained (fn {mk_builtin_num=mk, ...} : mk_builtins => mk thy i T) bs end + +fun chained_mk_builtin_fun ctxt bs s cts = + let val thy = Proof_Context.theory_of ctxt + in chained (fn {mk_builtin_fun=mk, ...} : mk_builtins => mk thy s cts) bs end + +fun fst_int_ord ((i1, _), (i2, _)) = int_ord (i1, i2) + +structure Mk_Builtins = Generic_Data +( + type T = (int * mk_builtins) list + val empty = [] + val extend = I + fun merge data = Ord_List.merge fst_int_ord data +) + +fun add_mk_builtins mk = Mk_Builtins.map (Ord_List.insert fst_int_ord (serial (), mk)) + +fun get_mk_builtins ctxt = map snd (Mk_Builtins.get (Context.Proof ctxt)) + + +(** basic and additional constructors **) + +fun mk_builtin_typ _ (Sym ("Bool", _)) = SOME @{typ bool} + | mk_builtin_typ _ (Sym ("Int", _)) = SOME @{typ int} + | mk_builtin_typ _ (Sym ("bool", _)) = SOME @{typ bool} (*FIXME: legacy*) + | mk_builtin_typ _ (Sym ("int", _)) = SOME @{typ int} (*FIXME: legacy*) + | mk_builtin_typ ctxt sym = chained_mk_builtin_typ (get_mk_builtins ctxt) sym + +fun mk_builtin_num _ i @{typ int} = SOME (Numeral.mk_cnumber @{ctyp int} i) + | mk_builtin_num ctxt i T = + chained_mk_builtin_num ctxt (get_mk_builtins ctxt) i T + +val mk_true = Thm.cterm_of 
@{context} (@{const Not} $ @{const False}) +val mk_false = Thm.cterm_of @{context} @{const False} +val mk_not = Thm.apply (Thm.cterm_of @{context} @{const Not}) +val mk_implies = Thm.mk_binop (Thm.cterm_of @{context} @{const HOL.implies}) +val mk_iff = Thm.mk_binop (Thm.cterm_of @{context} @{const HOL.eq (bool)}) +val conj = Thm.cterm_of @{context} @{const HOL.conj} +val disj = Thm.cterm_of @{context} @{const HOL.disj} + +fun mk_nary _ cu [] = cu + | mk_nary ct _ cts = uncurry (fold_rev (Thm.mk_binop ct)) (split_last cts) + +val eq = SMT_Util.mk_const_pat @{theory} @{const_name HOL.eq} SMT_Util.destT1 +fun mk_eq ct cu = Thm.mk_binop (SMT_Util.instT' ct eq) ct cu + +val if_term = + SMT_Util.mk_const_pat @{theory} @{const_name If} (SMT_Util.destT1 o SMT_Util.destT2) +fun mk_if cc ct = Thm.mk_binop (Thm.apply (SMT_Util.instT' ct if_term) cc) ct + +val access = SMT_Util.mk_const_pat @{theory} @{const_name fun_app} SMT_Util.destT1 +fun mk_access array = Thm.apply (SMT_Util.instT' array access) array + +val update = + SMT_Util.mk_const_pat @{theory} @{const_name fun_upd} (Thm.dest_ctyp o SMT_Util.destT1) +fun mk_update array index value = + let val cTs = Thm.dest_ctyp (Thm.ctyp_of_cterm array) + in Thm.apply (Thm.mk_binop (SMT_Util.instTs cTs update) array index) value end + +val mk_uminus = Thm.apply (Thm.cterm_of @{context} @{const uminus (int)}) +val add = Thm.cterm_of @{context} @{const plus (int)} +val int0 = Numeral.mk_cnumber @{ctyp int} 0 +val mk_sub = Thm.mk_binop (Thm.cterm_of @{context} @{const minus (int)}) +val mk_mul = Thm.mk_binop (Thm.cterm_of @{context} @{const times (int)}) +val mk_div = Thm.mk_binop (Thm.cterm_of @{context} @{const z3div}) +val mk_mod = Thm.mk_binop (Thm.cterm_of @{context} @{const z3mod}) +val mk_lt = Thm.mk_binop (Thm.cterm_of @{context} @{const less (int)}) +val mk_le = Thm.mk_binop (Thm.cterm_of @{context} @{const less_eq (int)}) + +fun mk_builtin_fun ctxt sym cts = + (case (sym, cts) of + (Sym ("true", _), []) => SOME mk_true + | (Sym ("false", _), []) => SOME mk_false + | (Sym ("not", _), [ct]) => SOME (mk_not ct) + | (Sym ("and", _), _) => SOME (mk_nary conj mk_true cts) + | (Sym ("or", _), _) => SOME (mk_nary disj mk_false cts) + | (Sym ("implies", _), [ct, cu]) => SOME (mk_implies ct cu) + | (Sym ("iff", _), [ct, cu]) => SOME (mk_iff ct cu) + | (Sym ("~", _), [ct, cu]) => SOME (mk_iff ct cu) + | (Sym ("xor", _), [ct, cu]) => SOME (mk_not (mk_iff ct cu)) + | (Sym ("if", _), [ct1, ct2, ct3]) => SOME (mk_if ct1 ct2 ct3) + | (Sym ("ite", _), [ct1, ct2, ct3]) => SOME (mk_if ct1 ct2 ct3) (* FIXME: remove *) + | (Sym ("=", _), [ct, cu]) => SOME (mk_eq ct cu) + | (Sym ("select", _), [ca, ck]) => SOME (Thm.apply (mk_access ca) ck) + | (Sym ("store", _), [ca, ck, cv]) => SOME (mk_update ca ck cv) + | _ => + (case (sym, try (Thm.typ_of_cterm o hd) cts, cts) of + (Sym ("+", _), SOME @{typ int}, _) => SOME (mk_nary add int0 cts) + | (Sym ("-", _), SOME @{typ int}, [ct]) => SOME (mk_uminus ct) + | (Sym ("-", _), SOME @{typ int}, [ct, cu]) => SOME (mk_sub ct cu) + | (Sym ("*", _), SOME @{typ int}, [ct, cu]) => SOME (mk_mul ct cu) + | (Sym ("div", _), SOME @{typ int}, [ct, cu]) => SOME (mk_div ct cu) + | (Sym ("mod", _), SOME @{typ int}, [ct, cu]) => SOME (mk_mod ct cu) + | (Sym ("<", _), SOME @{typ int}, [ct, cu]) => SOME (mk_lt ct cu) + | (Sym ("<=", _), SOME @{typ int}, [ct, cu]) => SOME (mk_le ct cu) + | (Sym (">", _), SOME @{typ int}, [ct, cu]) => SOME (mk_lt cu ct) + | (Sym (">=", _), SOME @{typ int}, [ct, cu]) => SOME (mk_le cu ct) + | _ => 
chained_mk_builtin_fun ctxt (get_mk_builtins ctxt) sym cts)) + + +(* abstraction *) + +fun is_builtin_theory_term ctxt t = + if SMT_Builtin.is_builtin_num ctxt t then true + else + (case Term.strip_comb t of + (Const c, ts) => SMT_Builtin.is_builtin_fun ctxt c ts + | _ => false) + +end; diff --git a/src/main/SMT/z3_isar.ML b/src/main/SMT/z3_isar.ML new file mode 100644 index 0000000..5b73931 --- /dev/null +++ b/src/main/SMT/z3_isar.ML @@ -0,0 +1,120 @@ +(* Title: HOL/Tools/SMT/z3_isar.ML + Author: Jasmin Blanchette, TU Muenchen + +Z3 proofs as generic ATP proofs for Isar proof reconstruction. +*) + +signature Z3_ISAR = +sig + val atp_proof_of_z3_proof: Proof.context -> term list -> thm list -> term list -> term -> + (string * term) list -> int list -> int -> (int * string) list -> Z3_Proof.z3_step list -> + (term, string) ATP_Proof.atp_step list +end; + +structure Z3_Isar: Z3_ISAR = +struct + +open ATP_Util +open ATP_Problem +open ATP_Proof +open ATP_Proof_Reconstruct +open SMTLIB_Isar + +val z3_apply_def_rule = Z3_Proof.string_of_rule Z3_Proof.Apply_Def +val z3_hypothesis_rule = Z3_Proof.string_of_rule Z3_Proof.Hypothesis +val z3_intro_def_rule = Z3_Proof.string_of_rule Z3_Proof.Intro_Def +val z3_lemma_rule = Z3_Proof.string_of_rule Z3_Proof.Lemma + +fun inline_z3_defs _ [] = [] + | inline_z3_defs defs ((name, role, t, rule, deps) :: lines) = + if rule = z3_intro_def_rule then + let val def = t |> HOLogic.dest_Trueprop |> HOLogic.dest_eq |> swap in + inline_z3_defs (insert (op =) def defs) + (map (replace_dependencies_in_line (name, [])) lines) + end + else if rule = z3_apply_def_rule then + inline_z3_defs defs (map (replace_dependencies_in_line (name, [])) lines) + else + (name, role, Term.subst_atomic defs t, rule, deps) :: inline_z3_defs defs lines + +fun add_z3_hypotheses [] = I + | add_z3_hypotheses hyps = + HOLogic.dest_Trueprop + #> curry s_imp (Library.foldr1 s_conj (map HOLogic.dest_Trueprop hyps)) + #> HOLogic.mk_Trueprop + +fun inline_z3_hypotheses _ _ [] = [] + | inline_z3_hypotheses hyp_names hyps ((name, role, t, rule, deps) :: lines) = + if rule = z3_hypothesis_rule then + inline_z3_hypotheses (name :: hyp_names) (AList.map_default (op =) (t, []) (cons name) hyps) + lines + else + let val deps' = subtract (op =) hyp_names deps in + if rule = z3_lemma_rule then + (name, role, t, rule, deps') :: inline_z3_hypotheses hyp_names hyps lines + else + let + val add_hyps = filter_out (null o inter (op =) deps o snd) hyps + val t' = add_z3_hypotheses (map fst add_hyps) t + val hyps' = fold (AList.update (op =) o apsnd (insert (op =) name)) add_hyps hyps + in + (name, role, t', rule, deps') :: inline_z3_hypotheses hyp_names hyps' lines + end + end + +fun dest_alls (Const (@{const_name Pure.all}, _) $ Abs (abs as (_, T, _))) = + let val (s', t') = Term.dest_abs abs in + dest_alls t' |>> cons (s', T) + end + | dest_alls t = ([], t) + +val reorder_foralls = + dest_alls + #>> sort_by fst + #-> fold_rev (Logic.all o Free); + +fun atp_proof_of_z3_proof ctxt ll_defs rewrite_rules hyp_ts concl_t fact_helper_ts prem_ids + conjecture_id fact_helper_ids proof = + let + fun steps_of (Z3_Proof.Z3_Step {id, rule, prems, concl, ...}) = + let + val sid = string_of_int id + + val concl' = concl + |> reorder_foralls (* crucial for skolemization steps *) + |> postprocess_step_conclusion ctxt rewrite_rules ll_defs + fun standard_step role = + ((sid, []), role, concl', Z3_Proof.string_of_rule rule, + map (fn id => (string_of_int id, [])) prems) + in + (case rule of + Z3_Proof.Asserted => + let val ss = 
the_list (AList.lookup (op =) fact_helper_ids id) in + (case distinguish_conjecture_and_hypothesis ss id conjecture_id prem_ids fact_helper_ts + hyp_ts concl_t of + NONE => [] + | SOME (role0, concl00) => + let + val name0 = (sid ^ "a", ss) + val concl0 = unskolemize_names ctxt concl00 + in + (if role0 = Axiom then [] + else [(name0, role0, concl0, Z3_Proof.string_of_rule rule, [])]) @ + [((sid, []), Plain, concl', Z3_Proof.string_of_rule Z3_Proof.Rewrite, + name0 :: normalizing_prems ctxt concl0)] + end) + end + | Z3_Proof.Rewrite => [standard_step Lemma] + | Z3_Proof.Rewrite_Star => [standard_step Lemma] + | Z3_Proof.Skolemize => [standard_step Lemma] + | Z3_Proof.Th_Lemma _ => [standard_step Lemma] + | _ => [standard_step Plain]) + end + in + proof + |> maps steps_of + |> inline_z3_defs [] + |> inline_z3_hypotheses [] [] + end + +end; diff --git a/src/main/SMT/z3_proof.ML b/src/main/SMT/z3_proof.ML new file mode 100644 index 0000000..2c3ab4e --- /dev/null +++ b/src/main/SMT/z3_proof.ML @@ -0,0 +1,303 @@ +(* Title: HOL/Tools/SMT/z3_proof.ML + Author: Sascha Boehme, TU Muenchen + +Z3 proofs: parsing and abstract syntax tree. +*) + +signature Z3_PROOF = +sig + (*proof rules*) + datatype z3_rule = + True_Axiom | Asserted | Goal | Modus_Ponens | Reflexivity | Symmetry | Transitivity | + Transitivity_Star | Monotonicity | Quant_Intro | Distributivity | And_Elim | Not_Or_Elim | + Rewrite | Rewrite_Star | Pull_Quant | Pull_Quant_Star | Push_Quant | Elim_Unused_Vars | + Dest_Eq_Res | Quant_Inst | Hypothesis | Lemma | Unit_Resolution | Iff_True | Iff_False | + Commutativity | Def_Axiom | Intro_Def | Apply_Def | Iff_Oeq | Nnf_Pos | Nnf_Neg | Nnf_Star | + Cnf_Star | Skolemize | Modus_Ponens_Oeq | Th_Lemma of string + + val is_assumption: z3_rule -> bool + val string_of_rule: z3_rule -> string + + (*proofs*) + datatype z3_step = Z3_Step of { + id: int, + rule: z3_rule, + prems: int list, + concl: term, + fixes: string list, + is_fix_step: bool} + + (*proof parser*) + val parse: typ Symtab.table -> term Symtab.table -> string list -> + Proof.context -> z3_step list * Proof.context +end; + +structure Z3_Proof: Z3_PROOF = +struct + +open SMTLIB_Proof + + +(* proof rules *) + +datatype z3_rule = + True_Axiom | Asserted | Goal | Modus_Ponens | Reflexivity | Symmetry | Transitivity | + Transitivity_Star | Monotonicity | Quant_Intro | Distributivity | And_Elim | Not_Or_Elim | + Rewrite | Rewrite_Star | Pull_Quant | Pull_Quant_Star | Push_Quant | Elim_Unused_Vars | + Dest_Eq_Res | Quant_Inst | Hypothesis | Lemma | Unit_Resolution | Iff_True | Iff_False | + Commutativity | Def_Axiom | Intro_Def | Apply_Def | Iff_Oeq | Nnf_Pos | Nnf_Neg | Nnf_Star | + Cnf_Star | Skolemize | Modus_Ponens_Oeq | Th_Lemma of string + (* some proof rules include further information that is currently dropped by the parser *) + +val rule_names = Symtab.make [ + ("true-axiom", True_Axiom), + ("asserted", Asserted), + ("goal", Goal), + ("mp", Modus_Ponens), + ("refl", Reflexivity), + ("symm", Symmetry), + ("trans", Transitivity), + ("trans*", Transitivity_Star), + ("monotonicity", Monotonicity), + ("quant-intro", Quant_Intro), + ("distributivity", Distributivity), + ("and-elim", And_Elim), + ("not-or-elim", Not_Or_Elim), + ("rewrite", Rewrite), + ("rewrite*", Rewrite_Star), + ("pull-quant", Pull_Quant), + ("pull-quant*", Pull_Quant_Star), + ("push-quant", Push_Quant), + ("elim-unused", Elim_Unused_Vars), + ("der", Dest_Eq_Res), + ("quant-inst", Quant_Inst), + ("hypothesis", Hypothesis), + ("lemma", Lemma), + ("unit-resolution", 
Unit_Resolution), + ("iff-true", Iff_True), + ("iff-false", Iff_False), + ("commutativity", Commutativity), + ("def-axiom", Def_Axiom), + ("intro-def", Intro_Def), + ("apply-def", Apply_Def), + ("iff~", Iff_Oeq), + ("nnf-pos", Nnf_Pos), + ("nnf-neg", Nnf_Neg), + ("nnf*", Nnf_Star), + ("cnf*", Cnf_Star), + ("sk", Skolemize), + ("mp~", Modus_Ponens_Oeq)] + +fun is_assumption Asserted = true + | is_assumption Goal = true + | is_assumption Hypothesis = true + | is_assumption Intro_Def = true + | is_assumption Skolemize = true + | is_assumption _ = false + +fun rule_of_string name = + (case Symtab.lookup rule_names name of + SOME rule => rule + | NONE => error ("unknown Z3 proof rule " ^ quote name)) + +fun string_of_rule (Th_Lemma kind) = "th-lemma" ^ (if kind = "" then "" else " " ^ kind) + | string_of_rule r = + let fun eq_rule (s, r') = if r = r' then SOME s else NONE + in the (Symtab.get_first eq_rule rule_names) end + + +(* proofs *) + +datatype z3_node = Z3_Node of { + id: int, + rule: z3_rule, + prems: z3_node list, + concl: term, + bounds: string list} + +fun mk_node id rule prems concl bounds = + Z3_Node {id = id, rule = rule, prems = prems, concl = concl, bounds = bounds} + +datatype z3_step = Z3_Step of { + id: int, + rule: z3_rule, + prems: int list, + concl: term, + fixes: string list, + is_fix_step: bool} + +fun mk_step id rule prems concl fixes is_fix_step = + Z3_Step {id = id, rule = rule, prems = prems, concl = concl, fixes = fixes, + is_fix_step = is_fix_step} + + +(* proof parser *) + +fun rule_of (SMTLIB.Sym name) = rule_of_string name + | rule_of (SMTLIB.S (SMTLIB.Sym "_" :: SMTLIB.Sym name :: args)) = + (case (name, args) of + ("th-lemma", SMTLIB.Sym kind :: _) => Th_Lemma kind + | _ => rule_of_string name) + | rule_of r = raise SMTLIB_PARSE ("bad Z3 proof rule format", r) + +fun node_of p cx = + (case p of + SMTLIB.Sym name => + (case lookup_binding cx name of + Proof node => (node, cx) + | Tree p' => + cx + |> node_of p' + |-> (fn node => pair node o update_binding (name, Proof node)) + | _ => raise SMTLIB_PARSE ("bad Z3 proof format", p)) + | SMTLIB.S [SMTLIB.Sym "let", SMTLIB.S bindings, p] => + with_bindings (map dest_binding bindings) (node_of p) cx + | SMTLIB.S (name :: parts) => + let + val (ps, p) = split_last parts + val r = rule_of name + in + cx + |> fold_map node_of ps + ||>> `(with_fresh_names (term_of p)) + ||>> next_id + |>> (fn ((prems, (t, ns)), id) => mk_node id r prems t ns) + end + | _ => raise SMTLIB_PARSE ("bad Z3 proof format", p)) + +fun dest_name (SMTLIB.Sym name) = name + | dest_name t = raise SMTLIB_PARSE ("bad name", t) + +fun dest_seq (SMTLIB.S ts) = ts + | dest_seq t = raise SMTLIB_PARSE ("bad Z3 proof format", t) + +fun parse' (SMTLIB.S (SMTLIB.Sym "set-logic" :: _) :: ts) cx = parse' ts cx + | parse' (SMTLIB.S [SMTLIB.Sym "declare-fun", n, tys, ty] :: ts) cx = + let + val name = dest_name n + val Ts = map (type_of cx) (dest_seq tys) + val T = type_of cx ty + in parse' ts (declare_fun name (Ts ---> T) cx) end + | parse' (SMTLIB.S [SMTLIB.Sym "proof", p] :: _) cx = node_of p cx + | parse' ts _ = raise SMTLIB_PARSE ("bad Z3 proof declarations", SMTLIB.S ts) + +fun parse_proof typs funs lines ctxt = + let + val ts = dest_seq (SMTLIB.parse lines) + val (node, cx) = parse' ts (empty_context ctxt typs funs) + in (node, ctxt_of cx) end + handle SMTLIB.PARSE (l, msg) => error ("parsing error at line " ^ string_of_int l ^ ": " ^ msg) + | SMTLIB_PARSE (msg, t) => error (msg ^ ": " ^ SMTLIB.str_of t) + + +(* handling of bound variables *) + +fun 
subst_of tyenv = + let fun add (ix, (S, T)) = cons (TVar (ix, S), T) + in Vartab.fold add tyenv [] end + +fun substTs_same subst = + let val applyT = Same.function (AList.lookup (op =) subst) + in Term_Subst.map_atypsT_same applyT end + +fun subst_types ctxt env bounds t = + let + val match = Sign.typ_match (Proof_Context.theory_of ctxt) + + val t' = singleton (Variable.polymorphic ctxt) t + val patTs = map snd (Term.strip_qnt_vars @{const_name Pure.all} t') + val objTs = map (the o Symtab.lookup env) bounds + val subst = subst_of (fold match (patTs ~~ objTs) Vartab.empty) + in Same.commit (Term_Subst.map_types_same (substTs_same subst)) t' end + +fun eq_quant (@{const_name HOL.All}, _) (@{const_name HOL.All}, _) = true + | eq_quant (@{const_name HOL.Ex}, _) (@{const_name HOL.Ex}, _) = true + | eq_quant _ _ = false + +fun opp_quant (@{const_name HOL.All}, _) (@{const_name HOL.Ex}, _) = true + | opp_quant (@{const_name HOL.Ex}, _) (@{const_name HOL.All}, _) = true + | opp_quant _ _ = false + +fun with_quant pred i (Const q1 $ Abs (_, T1, t1), Const q2 $ Abs (_, T2, t2)) = + if pred q1 q2 andalso T1 = T2 then + let val t = Var (("", i), T1) + in SOME (apply2 Term.subst_bound ((t, t1), (t, t2))) end + else NONE + | with_quant _ _ _ = NONE + +fun dest_quant_pair i (@{term HOL.Not} $ t1, t2) = + Option.map (apfst HOLogic.mk_not) (with_quant opp_quant i (t1, t2)) + | dest_quant_pair i (t1, t2) = with_quant eq_quant i (t1, t2) + +fun dest_quant i t = + (case dest_quant_pair i (HOLogic.dest_eq (HOLogic.dest_Trueprop t)) of + SOME (t1, t2) => HOLogic.mk_Trueprop (HOLogic.mk_eq (t1, t2)) + | NONE => raise TERM ("lift_quant", [t])) + +fun match_types ctxt pat obj = + (Vartab.empty, Vartab.empty) + |> Pattern.first_order_match (Proof_Context.theory_of ctxt) (pat, obj) + +fun strip_match ctxt pat i obj = + (case try (match_types ctxt pat) obj of + SOME (tyenv, _) => subst_of tyenv + | NONE => strip_match ctxt pat (i + 1) (dest_quant i obj)) + +fun dest_all i (Const (@{const_name Pure.all}, _) $ (a as Abs (_, T, _))) = + dest_all (i + 1) (Term.betapply (a, Var (("", i), T))) + | dest_all i t = (i, t) + +fun dest_alls t = dest_all (Term.maxidx_of_term t + 1) t + +fun match_rule ctxt env (Z3_Node {bounds = bs', concl = t', ...}) bs t = + let + val t'' = singleton (Variable.polymorphic ctxt) t' + val (i, obj) = dest_alls (subst_types ctxt env bs t) + in + (case try (strip_match ctxt (snd (dest_alls t'')) i) obj of + NONE => NONE + | SOME subst => + let + val applyT = Same.commit (substTs_same subst) + val patTs = map snd (Term.strip_qnt_vars @{const_name Pure.all} t'') + in SOME (Symtab.make (bs' ~~ map applyT patTs)) end) + end + + +(* linearizing proofs and resolving types of bound variables *) + +fun has_step (tab, _) = Inttab.defined tab + +fun add_step id rule bounds concl is_fix_step ids (tab, sts) = + let val step = mk_step id rule ids concl bounds is_fix_step + in (id, (Inttab.update (id, ()) tab, step :: sts)) end + +fun is_fix_rule rule prems = + member (op =) [Quant_Intro, Nnf_Pos, Nnf_Neg] rule andalso length prems = 1 + +fun lin_proof ctxt env (Z3_Node {id, rule, prems, concl, bounds}) steps = + if has_step steps id then (id, steps) + else + let + val t = subst_types ctxt env bounds concl + val add = add_step id rule bounds t + fun rec_apply e b = fold_map (lin_proof ctxt e) prems #-> add b + in + if is_fix_rule rule prems then + (case match_rule ctxt env (hd prems) bounds t of + NONE => rec_apply env false steps + | SOME env' => rec_apply env' true steps) + else rec_apply env false steps + 
end + +fun linearize ctxt node = + rev (snd (snd (lin_proof ctxt Symtab.empty node (Inttab.empty, [])))) + + +(* overall proof parser *) + +fun parse typs funs lines ctxt = + let val (node, ctxt') = parse_proof typs funs lines ctxt + in (linearize ctxt' node, ctxt') end + +end; diff --git a/src/main/SMT/z3_real.ML b/src/main/SMT/z3_real.ML new file mode 100644 index 0000000..15ef469 --- /dev/null +++ b/src/main/SMT/z3_real.ML @@ -0,0 +1,32 @@ +(* Title: HOL/Tools/SMT/z3_real.ML + Author: Sascha Boehme, TU Muenchen + +Z3 setup for reals. +*) + +structure Z3_Real: sig end = +struct + +fun real_type_parser (SMTLIB.Sym "Real", []) = SOME @{typ Real.real} + | real_type_parser _ = NONE + +fun real_term_parser (SMTLIB.Dec (i, 0), []) = SOME (HOLogic.mk_number @{typ Real.real} i) + | real_term_parser (SMTLIB.Sym "/", [t1, t2]) = + SOME (@{term "Rings.divide :: real => _"} $ t1 $ t2) + | real_term_parser (SMTLIB.Sym "to_real", [t]) = SOME (@{term "Int.of_int :: int => _"} $ t) + | real_term_parser _ = NONE + +fun abstract abs t = + (case t of + (c as @{term "Rings.divide :: real => _"}) $ t1 $ t2 => + abs t1 ##>> abs t2 #>> (fn (u1, u2) => SOME (c $ u1 $ u2)) + | (c as @{term "Int.of_int :: int => _"}) $ t => + abs t #>> (fn u => SOME (c $ u)) + | _ => pair NONE) + +val _ = Theory.setup (Context.theory_map ( + SMTLIB_Proof.add_type_parser real_type_parser #> + SMTLIB_Proof.add_term_parser real_term_parser #> + Z3_Replay_Methods.add_arith_abstracter abstract)) + +end; diff --git a/src/main/SMT/z3_replay.ML b/src/main/SMT/z3_replay.ML new file mode 100644 index 0000000..b9ecce8 --- /dev/null +++ b/src/main/SMT/z3_replay.ML @@ -0,0 +1,262 @@ +(* Title: HOL/Tools/SMT/z3_replay.ML + Author: Sascha Boehme, TU Muenchen + Author: Jasmin Blanchette, TU Muenchen + +Z3 proof parsing and replay. 
+*) + +signature Z3_REPLAY = +sig + val parse_proof: Proof.context -> SMT_Translate.replay_data -> + ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> + SMT_Solver.parsed_proof + val replay: Proof.context -> SMT_Translate.replay_data -> string list -> thm +end; + +structure Z3_Replay: Z3_REPLAY = +struct + +fun params_of t = Term.strip_qnt_vars @{const_name Pure.all} t + +fun varify ctxt thm = + let + val maxidx = Thm.maxidx_of thm + 1 + val vs = params_of (Thm.prop_of thm) + val vars = map_index (fn (i, (n, T)) => Var ((n, i + maxidx), T)) vs + in Drule.forall_elim_list (map (Thm.cterm_of ctxt) vars) thm end + +fun add_paramTs names t = + fold2 (fn n => fn (_, T) => AList.update (op =) (n, T)) names (params_of t) + +fun new_fixes ctxt nTs = + let + val (ns, ctxt') = Variable.variant_fixes (replicate (length nTs) "") ctxt + fun mk (n, T) n' = (n, Thm.cterm_of ctxt' (Free (n', T))) + in (ctxt', Symtab.make (map2 mk nTs ns)) end + +fun forall_elim_term ct (Const (@{const_name Pure.all}, _) $ (a as Abs _)) = + Term.betapply (a, Thm.term_of ct) + | forall_elim_term _ qt = raise TERM ("forall_elim'", [qt]) + +fun apply_fixes elim env = fold (elim o the o Symtab.lookup env) + +val apply_fixes_prem = uncurry o apply_fixes Thm.forall_elim +val apply_fixes_concl = apply_fixes forall_elim_term + +fun export_fixes env names = Drule.forall_intr_list (map (the o Symtab.lookup env) names) + +fun under_fixes f ctxt (prems, nthms) names concl = + let + val thms1 = map (varify ctxt) prems + val (ctxt', env) = + add_paramTs names concl [] + |> fold (uncurry add_paramTs o apsnd Thm.prop_of) nthms + |> new_fixes ctxt + val thms2 = map (apply_fixes_prem env) nthms + val t = apply_fixes_concl env names concl + in export_fixes env names (f ctxt' (thms1 @ thms2) t) end + +fun replay_thm ctxt assumed nthms (Z3_Proof.Z3_Step {id, rule, concl, fixes, is_fix_step, ...}) = + if Z3_Proof.is_assumption rule then + (case Inttab.lookup assumed id of + SOME (_, thm) => thm + | NONE => Thm.assume (Thm.cterm_of ctxt concl)) + else + under_fixes (Z3_Replay_Methods.method_for rule) ctxt + (if is_fix_step then (map snd nthms, []) else ([], nthms)) fixes concl + +fun replay_step ctxt assumed (step as Z3_Proof.Z3_Step {id, rule, prems, fixes, ...}) state = + let + val (proofs, stats) = state + val nthms = map (the o Inttab.lookup proofs) prems + val replay = Timing.timing (replay_thm ctxt assumed nthms) + val ({elapsed, ...}, thm) = + SMT_Config.with_time_limit ctxt SMT_Config.reconstruction_step_timeout replay step + handle Timeout.TIMEOUT _ => raise SMT_Failure.SMT SMT_Failure.Time_Out + val stats' = Symtab.cons_list (Z3_Proof.string_of_rule rule, Time.toMilliseconds elapsed) stats + in (Inttab.update (id, (fixes, thm)) proofs, stats') end + +local + val remove_trigger = mk_meta_eq @{thm trigger_def} + val remove_fun_app = mk_meta_eq @{thm fun_app_def} + + fun rewrite_conv _ [] = Conv.all_conv + | rewrite_conv ctxt eqs = Simplifier.full_rewrite (empty_simpset ctxt addsimps eqs) + + val rewrite_true_rule = @{lemma "True == ~ False" by simp} + val prep_rules = [@{thm Let_def}, remove_trigger, remove_fun_app, rewrite_true_rule] + + fun rewrite _ [] = I + | rewrite ctxt eqs = Conv.fconv_rule (rewrite_conv ctxt eqs) + + fun lookup_assm assms_net ct = + Z3_Replay_Util.net_instances assms_net ct + |> map (fn ithm as (_, thm) => (ithm, Thm.cprop_of thm aconvc ct)) +in + +fun add_asserted outer_ctxt rewrite_rules assms steps ctxt = + let + val eqs = map (rewrite ctxt [rewrite_true_rule]) 
rewrite_rules + val eqs' = union Thm.eq_thm eqs prep_rules + + val assms_net = + assms + |> map (apsnd (rewrite ctxt eqs')) + |> map (apsnd (Conv.fconv_rule Thm.eta_conversion)) + |> Z3_Replay_Util.thm_net_of snd + + fun revert_conv ctxt = rewrite_conv ctxt eqs' then_conv Thm.eta_conversion + + fun assume thm ctxt = + let + val ct = Thm.cprem_of thm 1 + val (thm', ctxt') = yield_singleton Assumption.add_assumes ct ctxt + in (thm' RS thm, ctxt') end + + fun add1 id fixes thm1 ((i, th), exact) ((iidths, thms), (ctxt, ptab)) = + let + val (thm, ctxt') = if exact then (Thm.implies_elim thm1 th, ctxt) else assume thm1 ctxt + val thms' = if exact then thms else th :: thms + in (((i, (id, th)) :: iidths, thms'), (ctxt', Inttab.update (id, (fixes, thm)) ptab)) end + + fun add (Z3_Proof.Z3_Step {id, rule, concl, fixes, ...}) + (cx as ((iidths, thms), (ctxt, ptab))) = + if Z3_Proof.is_assumption rule andalso rule <> Z3_Proof.Hypothesis then + let + val ct = Thm.cterm_of ctxt concl + val thm1 = Thm.trivial ct |> Conv.fconv_rule (Conv.arg1_conv (revert_conv outer_ctxt)) + val thm2 = singleton (Variable.export ctxt outer_ctxt) thm1 + in + (case lookup_assm assms_net (Thm.cprem_of thm2 1) of + [] => + let val (thm, ctxt') = assume thm1 ctxt + in ((iidths, thms), (ctxt', Inttab.update (id, (fixes, thm)) ptab)) end + | ithms => fold (add1 id fixes thm1) ithms cx) + end + else + cx + in fold add steps (([], []), (ctxt, Inttab.empty)) end + +end + +(* |- (EX x. P x) = P c |- ~ (ALL x. P x) = ~ P c *) +local + val sk_rules = @{lemma + "c = (SOME x. P x) ==> (EX x. P x) = P c" + "c = (SOME x. ~ P x) ==> (~ (ALL x. P x)) = (~ P c)" + by (metis someI_ex)+} +in + +fun discharge_sk_tac ctxt i st = + (resolve_tac ctxt @{thms trans} i + THEN resolve_tac ctxt sk_rules i + THEN (resolve_tac ctxt @{thms refl} ORELSE' discharge_sk_tac ctxt) (i+1) + THEN resolve_tac ctxt @{thms refl} i) st + +end + +val true_thm = @{lemma "~False" by simp} +fun make_discharge_rules rules = rules @ [@{thm allI}, @{thm refl}, @{thm reflexive}, true_thm] + +val intro_def_rules = @{lemma + "(~ P | P) & (P | ~ P)" + "(P | ~ P) & (~ P | P)" + by fast+} + +fun discharge_assms_tac ctxt rules = + REPEAT + (HEADGOAL (resolve_tac ctxt (intro_def_rules @ rules) ORELSE' + SOLVED' (discharge_sk_tac ctxt))) + +fun discharge_assms ctxt rules thm = + (if Thm.nprems_of thm = 0 then + thm + else + (case Seq.pull (discharge_assms_tac ctxt rules thm) of + SOME (thm', _) => thm' + | NONE => raise THM ("failed to discharge premise", 1, [thm]))) + |> Goal.norm_result ctxt + +fun discharge rules outer_ctxt inner_ctxt = + singleton (Proof_Context.export inner_ctxt outer_ctxt) + #> discharge_assms outer_ctxt (make_discharge_rules rules) + +fun parse_proof outer_ctxt + ({context = ctxt, typs, terms, ll_defs, rewrite_rules, assms} : SMT_Translate.replay_data) + xfacts prems concl output = + let + val (steps, ctxt2) = Z3_Proof.parse typs terms output ctxt + val ((iidths, _), _) = add_asserted outer_ctxt rewrite_rules assms steps ctxt2 + + fun id_of_index i = the_default ~1 (Option.map fst (AList.lookup (op =) iidths i)) + + val conjecture_i = 0 + val prems_i = 1 + val facts_i = prems_i + length prems + + val conjecture_id = id_of_index conjecture_i + val prem_ids = map id_of_index (prems_i upto facts_i - 1) + val fact_ids' = + map_filter (fn (i, (id, _)) => try (apsnd (nth xfacts)) (id, i - facts_i)) iidths + val helper_ids' = map_filter (try (fn (~1, idth) => idth)) iidths + + val fact_helper_ts = + map (fn (_, th) => (ATP_Util.short_thm_name ctxt th, Thm.prop_of 
th)) helper_ids' @ + map (fn (_, ((s, _), th)) => (s, Thm.prop_of th)) fact_ids' + val fact_helper_ids' = + map (apsnd (ATP_Util.short_thm_name ctxt)) helper_ids' @ map (apsnd (fst o fst)) fact_ids' + in + {outcome = NONE, fact_ids = SOME fact_ids', + atp_proof = fn () => Z3_Isar.atp_proof_of_z3_proof ctxt ll_defs rewrite_rules prems concl + fact_helper_ts prem_ids conjecture_id fact_helper_ids' steps} + end + +fun intermediate_statistics ctxt start total = + SMT_Config.statistics_msg ctxt (fn current => + "Reconstructed " ^ string_of_int current ^ " of " ^ string_of_int total ^ " steps in " ^ + string_of_int (Time.toMilliseconds (#elapsed (Timing.result start))) ^ " ms") + +fun pretty_statistics total stats = + let + fun mean_of is = + let + val len = length is + val mid = len div 2 + in if len mod 2 = 0 then (nth is (mid - 1) + nth is mid) div 2 else nth is mid end + fun pretty_item name p = Pretty.item (Pretty.separate ":" [Pretty.str name, p]) + fun pretty (name, milliseconds) = pretty_item name (Pretty.block (Pretty.separate "," [ + Pretty.str (string_of_int (length milliseconds) ^ " occurrences") , + Pretty.str (string_of_int (mean_of milliseconds) ^ " ms mean time"), + Pretty.str (string_of_int (fold Integer.max milliseconds 0) ^ " ms maximum time"), + Pretty.str (string_of_int (fold Integer.add milliseconds 0) ^ " ms total time")])) + in + Pretty.big_list "Z3 proof reconstruction statistics:" ( + pretty_item "total time" (Pretty.str (string_of_int total ^ " ms")) :: + map pretty (Symtab.dest stats)) + end + +fun replay outer_ctxt + ({context = ctxt, typs, terms, rewrite_rules, assms, ...} : SMT_Translate.replay_data) output = + let + val (steps, ctxt2) = Z3_Proof.parse typs terms output ctxt + val ((_, rules), (ctxt3, assumed)) = add_asserted outer_ctxt rewrite_rules assms steps ctxt2 + val ctxt4 = + ctxt3 + |> put_simpset (Z3_Replay_Util.make_simpset ctxt3 []) + |> Config.put SAT.solver (Config.get ctxt3 SMT_Config.sat_solver) + val len = length steps + val start = Timing.start () + val print_runtime_statistics = intermediate_statistics ctxt4 start len + fun blockwise f (i, x) y = + (if i > 0 andalso i mod 100 = 0 then print_runtime_statistics i else (); f x y) + val (proofs, stats) = + fold_index (blockwise (replay_step ctxt4 assumed)) steps (assumed, Symtab.empty) + val _ = print_runtime_statistics len + val total = Time.toMilliseconds (#elapsed (Timing.result start)) + val (_, Z3_Proof.Z3_Step {id, ...}) = split_last steps + val _ = SMT_Config.statistics_msg ctxt4 (Pretty.string_of o pretty_statistics total) stats + in + Inttab.lookup proofs id |> the |> snd |> discharge rules outer_ctxt ctxt4 + end + +end; diff --git a/src/main/SMT/z3_replay_methods.ML b/src/main/SMT/z3_replay_methods.ML new file mode 100644 index 0000000..e4fbb73 --- /dev/null +++ b/src/main/SMT/z3_replay_methods.ML @@ -0,0 +1,685 @@ +(* Title: HOL/Tools/SMT/z3_replay_methods.ML + Author: Sascha Boehme, TU Muenchen + Author: Jasmin Blanchette, TU Muenchen + +Proof methods for replaying Z3 proofs. 
+*) + +signature Z3_REPLAY_METHODS = +sig + (*abstraction*) + type abs_context = int * term Termtab.table + type 'a abstracter = term -> abs_context -> 'a * abs_context + val add_arith_abstracter: (term abstracter -> term option abstracter) -> + Context.generic -> Context.generic + + (*theory lemma methods*) + type th_lemma_method = Proof.context -> thm list -> term -> thm + val add_th_lemma_method: string * th_lemma_method -> Context.generic -> + Context.generic + + (*methods for Z3 proof rules*) + type z3_method = Proof.context -> thm list -> term -> thm + val true_axiom: z3_method + val mp: z3_method + val refl: z3_method + val symm: z3_method + val trans: z3_method + val cong: z3_method + val quant_intro: z3_method + val distrib: z3_method + val and_elim: z3_method + val not_or_elim: z3_method + val rewrite: z3_method + val rewrite_star: z3_method + val pull_quant: z3_method + val push_quant: z3_method + val elim_unused: z3_method + val dest_eq_res: z3_method + val quant_inst: z3_method + val lemma: z3_method + val unit_res: z3_method + val iff_true: z3_method + val iff_false: z3_method + val comm: z3_method + val def_axiom: z3_method + val apply_def: z3_method + val iff_oeq: z3_method + val nnf_pos: z3_method + val nnf_neg: z3_method + val mp_oeq: z3_method + val th_lemma: string -> z3_method + val method_for: Z3_Proof.z3_rule -> z3_method +end; + +structure Z3_Replay_Methods: Z3_REPLAY_METHODS = +struct + +type z3_method = Proof.context -> thm list -> term -> thm + + +(* utility functions *) + +fun trace ctxt f = SMT_Config.trace_msg ctxt f () + +fun pretty_thm ctxt thm = Syntax.pretty_term ctxt (Thm.concl_of thm) + +fun pretty_goal ctxt msg rule thms t = + let + val full_msg = msg ^ ": " ^ quote (Z3_Proof.string_of_rule rule) + val assms = + if null thms then [] + else [Pretty.big_list "assumptions:" (map (pretty_thm ctxt) thms)] + val concl = Pretty.big_list "proposition:" [Syntax.pretty_term ctxt t] + in Pretty.big_list full_msg (assms @ [concl]) end + +fun replay_error ctxt msg rule thms t = error (Pretty.string_of (pretty_goal ctxt msg rule thms t)) + +fun replay_rule_error ctxt = replay_error ctxt "Failed to replay Z3 proof step" + +fun trace_goal ctxt rule thms t = + trace ctxt (fn () => Pretty.string_of (pretty_goal ctxt "Goal" rule thms t)) + +fun as_prop (t as Const (@{const_name Trueprop}, _) $ _) = t + | as_prop t = HOLogic.mk_Trueprop t + +fun dest_prop (Const (@{const_name Trueprop}, _) $ t) = t + | dest_prop t = t + +fun dest_thm thm = dest_prop (Thm.concl_of thm) + +fun certify_prop ctxt t = Thm.cterm_of ctxt (as_prop t) + +fun try_provers ctxt rule [] thms t = replay_rule_error ctxt rule thms t + | try_provers ctxt rule ((name, prover) :: named_provers) thms t = + (case (trace ctxt (K ("Trying prover " ^ quote name)); try prover t) of + SOME thm => thm + | NONE => try_provers ctxt rule named_provers thms t) + +fun match ctxt pat t = + (Vartab.empty, Vartab.empty) + |> Pattern.first_order_match (Proof_Context.theory_of ctxt) (pat, t) + +fun gen_certify_inst sel cert ctxt thm t = + let + val inst = match ctxt (dest_thm thm) (dest_prop t) + fun cert_inst (ix, (a, b)) = ((ix, a), cert b) + in Vartab.fold (cons o cert_inst) (sel inst) [] end + +fun match_instantiateT ctxt t thm = + if Term.exists_type (Term.exists_subtype Term.is_TVar) (dest_thm thm) then + Thm.instantiate (gen_certify_inst fst (Thm.ctyp_of ctxt) ctxt thm t, []) thm + else thm + +fun match_instantiate ctxt t thm = + let val thm' = match_instantiateT ctxt t thm in + Thm.instantiate ([], gen_certify_inst 
snd (Thm.cterm_of ctxt) ctxt thm' t) thm' + end + +fun apply_rule ctxt t = + (case Z3_Replay_Rules.apply ctxt (certify_prop ctxt t) of + SOME thm => thm + | NONE => raise Fail "apply_rule") + +fun discharge _ [] thm = thm + | discharge i (rule :: rules) thm = discharge (i + Thm.nprems_of rule) rules (rule RSN (i, thm)) + +fun by_tac ctxt thms ns ts t tac = + Goal.prove ctxt [] (map as_prop ts) (as_prop t) + (fn {context, prems} => HEADGOAL (tac context prems)) + |> Drule.generalize ([], ns) + |> discharge 1 thms + +fun prove ctxt t tac = by_tac ctxt [] [] [] t (K o tac) + +fun prop_tac ctxt prems = + Method.insert_tac ctxt prems + THEN' SUBGOAL (fn (prop, i) => + if Term.size_of_term prop > 100 then SAT.satx_tac ctxt i + else (Classical.fast_tac ctxt ORELSE' Clasimp.force_tac ctxt) i) + +fun quant_tac ctxt = Blast.blast_tac ctxt + + +(* plug-ins *) + +type abs_context = int * term Termtab.table + +type 'a abstracter = term -> abs_context -> 'a * abs_context + +type th_lemma_method = Proof.context -> thm list -> term -> thm + +fun id_ord ((id1, _), (id2, _)) = int_ord (id1, id2) + +structure Plugins = Generic_Data +( + type T = + (int * (term abstracter -> term option abstracter)) list * + th_lemma_method Symtab.table + val empty = ([], Symtab.empty) + val extend = I + fun merge ((abss1, ths1), (abss2, ths2)) = ( + Ord_List.merge id_ord (abss1, abss2), + Symtab.merge (K true) (ths1, ths2)) +) + +fun add_arith_abstracter abs = Plugins.map (apfst (Ord_List.insert id_ord (serial (), abs))) +fun get_arith_abstracters ctxt = map snd (fst (Plugins.get (Context.Proof ctxt))) + +fun add_th_lemma_method method = Plugins.map (apsnd (Symtab.update_new method)) +fun get_th_lemma_method ctxt = snd (Plugins.get (Context.Proof ctxt)) + + +(* abstraction *) + +fun prove_abstract ctxt thms t tac f = + let + val ((prems, concl), (_, ts)) = f (1, Termtab.empty) + val ns = Termtab.fold (fn (_, v) => cons (fst (Term.dest_Free v))) ts [] + in + by_tac ctxt [] ns prems concl tac + |> match_instantiate ctxt t + |> discharge 1 thms + end + +fun prove_abstract' ctxt t tac f = + prove_abstract ctxt [] t tac (f #>> pair []) + +fun lookup_term (_, terms) t = Termtab.lookup terms t + +fun abstract_sub t f cx = + (case lookup_term cx t of + SOME v => (v, cx) + | NONE => f cx) + +fun mk_fresh_free t (i, terms) = + let val v = Free ("t" ^ string_of_int i, fastype_of t) + in (v, (i + 1, Termtab.update (t, v) terms)) end + +fun apply_abstracters _ [] _ cx = (NONE, cx) + | apply_abstracters abs (abstracter :: abstracters) t cx = + (case abstracter abs t cx of + (NONE, _) => apply_abstracters abs abstracters t cx + | x as (SOME _, _) => x) + +fun abstract_term (t as _ $ _) = abstract_sub t (mk_fresh_free t) + | abstract_term (t as Abs _) = abstract_sub t (mk_fresh_free t) + | abstract_term t = pair t + +fun abstract_bin abs f t t1 t2 = abstract_sub t (abs t1 ##>> abs t2 #>> f) + +fun abstract_ter abs f t t1 t2 t3 = + abstract_sub t (abs t1 ##>> abs t2 ##>> abs t3 #>> (Scan.triple1 #> f)) + +fun abstract_lit (@{const HOL.Not} $ t) = abstract_term t #>> HOLogic.mk_not + | abstract_lit t = abstract_term t + +fun abstract_not abs (t as @{const HOL.Not} $ t1) = + abstract_sub t (abs t1 #>> HOLogic.mk_not) + | abstract_not _ t = abstract_lit t + +fun abstract_conj (t as @{const HOL.conj} $ t1 $ t2) = + abstract_bin abstract_conj HOLogic.mk_conj t t1 t2 + | abstract_conj t = abstract_lit t + +fun abstract_disj (t as @{const HOL.disj} $ t1 $ t2) = + abstract_bin abstract_disj HOLogic.mk_disj t t1 t2 + | abstract_disj t = abstract_lit 
t + +fun abstract_prop (t as (c as @{const If (bool)}) $ t1 $ t2 $ t3) = + abstract_ter abstract_prop (fn (t1, t2, t3) => c $ t1 $ t2 $ t3) t t1 t2 t3 + | abstract_prop (t as @{const HOL.disj} $ t1 $ t2) = + abstract_bin abstract_prop HOLogic.mk_disj t t1 t2 + | abstract_prop (t as @{const HOL.conj} $ t1 $ t2) = + abstract_bin abstract_prop HOLogic.mk_conj t t1 t2 + | abstract_prop (t as @{const HOL.implies} $ t1 $ t2) = + abstract_bin abstract_prop HOLogic.mk_imp t t1 t2 + | abstract_prop (t as @{term "HOL.eq :: bool => _"} $ t1 $ t2) = + abstract_bin abstract_prop HOLogic.mk_eq t t1 t2 + | abstract_prop t = abstract_not abstract_prop t + +fun abstract_arith ctxt u = + let + fun abs (t as (c as Const _) $ Abs (s, T, t')) = + abstract_sub t (abs t' #>> (fn u' => c $ Abs (s, T, u'))) + | abs (t as (c as Const (@{const_name If}, _)) $ t1 $ t2 $ t3) = + abstract_ter abs (fn (t1, t2, t3) => c $ t1 $ t2 $ t3) t t1 t2 t3 + | abs (t as @{const HOL.Not} $ t1) = abstract_sub t (abs t1 #>> HOLogic.mk_not) + | abs (t as @{const HOL.disj} $ t1 $ t2) = + abstract_sub t (abs t1 ##>> abs t2 #>> HOLogic.mk_disj) + | abs (t as (c as Const (@{const_name uminus_class.uminus}, _)) $ t1) = + abstract_sub t (abs t1 #>> (fn u => c $ u)) + | abs (t as (c as Const (@{const_name plus_class.plus}, _)) $ t1 $ t2) = + abstract_sub t (abs t1 ##>> abs t2 #>> (fn (u1, u2) => c $ u1 $ u2)) + | abs (t as (c as Const (@{const_name minus_class.minus}, _)) $ t1 $ t2) = + abstract_sub t (abs t1 ##>> abs t2 #>> (fn (u1, u2) => c $ u1 $ u2)) + | abs (t as (c as Const (@{const_name times_class.times}, _)) $ t1 $ t2) = + abstract_sub t (abs t1 ##>> abs t2 #>> (fn (u1, u2) => c $ u1 $ u2)) + | abs (t as (c as Const (@{const_name z3div}, _)) $ t1 $ t2) = + abstract_sub t (abs t1 ##>> abs t2 #>> (fn (u1, u2) => c $ u1 $ u2)) + | abs (t as (c as Const (@{const_name z3mod}, _)) $ t1 $ t2) = + abstract_sub t (abs t1 ##>> abs t2 #>> (fn (u1, u2) => c $ u1 $ u2)) + | abs (t as (c as Const (@{const_name HOL.eq}, _)) $ t1 $ t2) = + abstract_sub t (abs t1 ##>> abs t2 #>> (fn (u1, u2) => c $ u1 $ u2)) + | abs (t as (c as Const (@{const_name ord_class.less}, _)) $ t1 $ t2) = + abstract_sub t (abs t1 ##>> abs t2 #>> (fn (u1, u2) => c $ u1 $ u2)) + | abs (t as (c as Const (@{const_name ord_class.less_eq}, _)) $ t1 $ t2) = + abstract_sub t (abs t1 ##>> abs t2 #>> (fn (u1, u2) => c $ u1 $ u2)) + | abs t = abstract_sub t (fn cx => + if can HOLogic.dest_number t then (t, cx) + else + (case apply_abstracters abs (get_arith_abstracters ctxt) t cx of + (SOME u, cx') => (u, cx') + | (NONE, _) => abstract_term t cx)) + in abs u end + + +(* truth axiom *) + +fun true_axiom _ _ _ = @{thm TrueI} + + +(* modus ponens *) + +fun mp _ [p, p_eq_q] _ = discharge 1 [p_eq_q, p] iffD1 + | mp ctxt thms t = replay_rule_error ctxt Z3_Proof.Modus_Ponens thms t + +val mp_oeq = mp + + +(* reflexivity *) + +fun refl ctxt _ t = match_instantiate ctxt t @{thm refl} + + +(* symmetry *) + +fun symm _ [thm] _ = thm RS @{thm sym} + | symm ctxt thms t = replay_rule_error ctxt Z3_Proof.Reflexivity thms t + + +(* transitivity *) + +fun trans _ [thm1, thm2] _ = thm1 RSN (1, thm2 RSN (2, @{thm trans})) + | trans ctxt thms t = replay_rule_error ctxt Z3_Proof.Transitivity thms t + + +(* congruence *) + +fun ctac ctxt prems i st = st |> ( + resolve_tac ctxt (@{thm refl} :: prems) i + ORELSE (cong_tac ctxt i THEN ctac ctxt prems (i + 1) THEN ctac ctxt prems i)) + +fun cong_basic ctxt thms t = + let val st = Thm.trivial (certify_prop ctxt t) + in + (case Seq.pull (ctac ctxt thms 1 st) of 
+ SOME (thm, _) => thm + | NONE => raise THM ("cong", 0, thms @ [st])) + end + +val cong_dest_rules = @{lemma + "(~ P | Q) & (P | ~ Q) ==> P = Q" + "(P | ~ Q) & (~ P | Q) ==> P = Q" + by fast+} + +fun cong_full ctxt thms t = prove ctxt t (fn ctxt' => + Method.insert_tac ctxt thms + THEN' (Classical.fast_tac ctxt' + ORELSE' dresolve_tac ctxt cong_dest_rules + THEN' Classical.fast_tac ctxt')) + +fun cong ctxt thms = try_provers ctxt Z3_Proof.Monotonicity [ + ("basic", cong_basic ctxt thms), + ("full", cong_full ctxt thms)] thms + + +(* quantifier introduction *) + +val quant_intro_rules = @{lemma + "(!!x. P x = Q x) ==> (ALL x. P x) = (ALL x. Q x)" + "(!!x. P x = Q x) ==> (EX x. P x) = (EX x. Q x)" + "(!!x. (~ P x) = Q x) ==> (~ (EX x. P x)) = (ALL x. Q x)" + "(!!x. (~ P x) = Q x) ==> (~ (ALL x. P x)) = (EX x. Q x)" + by fast+} + +fun quant_intro ctxt [thm] t = + prove ctxt t (K (REPEAT_ALL_NEW (resolve_tac ctxt (thm :: quant_intro_rules)))) + | quant_intro ctxt thms t = replay_rule_error ctxt Z3_Proof.Quant_Intro thms t + + +(* distributivity of conjunctions and disjunctions *) + +(* TODO: there are no tests with this proof rule *) +fun distrib ctxt _ t = + prove_abstract' ctxt t prop_tac (abstract_prop (dest_prop t)) + + +(* elimination of conjunctions *) + +fun and_elim ctxt [thm] t = + prove_abstract ctxt [thm] t prop_tac ( + abstract_lit (dest_prop t) ##>> + abstract_conj (dest_thm thm) #>> + apfst single o swap) + | and_elim ctxt thms t = replay_rule_error ctxt Z3_Proof.And_Elim thms t + + +(* elimination of negated disjunctions *) + +fun not_or_elim ctxt [thm] t = + prove_abstract ctxt [thm] t prop_tac ( + abstract_lit (dest_prop t) ##>> + abstract_not abstract_disj (dest_thm thm) #>> + apfst single o swap) + | not_or_elim ctxt thms t = + replay_rule_error ctxt Z3_Proof.Not_Or_Elim thms t + + +(* rewriting *) + +local + +fun dest_all (Const (@{const_name HOL.All}, _) $ Abs (_, T, t)) nctxt = + let + val (n, nctxt') = Name.variant "" nctxt + val f = Free (n, T) + val t' = Term.subst_bound (f, t) + in dest_all t' nctxt' |>> cons f end + | dest_all t _ = ([], t) + +fun dest_alls t = + let + val nctxt = Name.make_context (Term.add_free_names t []) + val (lhs, rhs) = HOLogic.dest_eq (dest_prop t) + val (ls, lhs') = dest_all lhs nctxt + val (rs, rhs') = dest_all rhs nctxt + in + if eq_list (op aconv) (ls, rs) then SOME (ls, (HOLogic.mk_eq (lhs', rhs'))) + else NONE + end + +fun forall_intr ctxt t thm = + let val ct = Thm.cterm_of ctxt t + in Thm.forall_intr ct thm COMP_INCR @{thm iff_allI} end + +in + +fun focus_eq f ctxt t = + (case dest_alls t of + NONE => f ctxt t + | SOME (vs, t') => fold (forall_intr ctxt) vs (f ctxt t')) + +end + +fun abstract_eq f (Const (@{const_name HOL.eq}, _) $ t1 $ t2) = + f t1 ##>> f t2 #>> HOLogic.mk_eq + | abstract_eq _ t = abstract_term t + +fun prove_prop_rewrite ctxt t = + prove_abstract' ctxt t prop_tac ( + abstract_eq abstract_prop (dest_prop t)) + +fun arith_rewrite_tac ctxt _ = + TRY o Simplifier.simp_tac ctxt + THEN_ALL_NEW (Arith_Data.arith_tac ctxt ORELSE' Clasimp.force_tac ctxt) + +fun prove_arith_rewrite ctxt t = + prove_abstract' ctxt t arith_rewrite_tac ( + abstract_eq (abstract_arith ctxt) (dest_prop t)) + +val lift_ite_thm = @{thm HOL.if_distrib} RS @{thm eq_reflection} + +fun ternary_conv cv = Conv.combination_conv (Conv.binop_conv cv) cv + +fun if_context_conv ctxt ct = + (case Thm.term_of ct of + Const (@{const_name HOL.If}, _) $ _ $ _ $ _ => + ternary_conv (if_context_conv ctxt) + | _ $ (Const (@{const_name HOL.If}, _) $ _ $ _ $ _) => + 
Conv.rewr_conv lift_ite_thm then_conv ternary_conv (if_context_conv ctxt) + | _ => Conv.sub_conv (Conv.top_sweep_conv if_context_conv) ctxt) ct + +fun lift_ite_rewrite ctxt t = + prove ctxt t (fn ctxt => + CONVERSION (HOLogic.Trueprop_conv (Conv.binop_conv (if_context_conv ctxt))) + THEN' resolve_tac ctxt @{thms refl}) + +fun prove_conj_disj_perm ctxt t = prove ctxt t Conj_Disj_Perm.conj_disj_perm_tac + +fun rewrite ctxt _ = try_provers ctxt Z3_Proof.Rewrite [ + ("rules", apply_rule ctxt), + ("conj_disj_perm", prove_conj_disj_perm ctxt), + ("prop_rewrite", prove_prop_rewrite ctxt), + ("arith_rewrite", focus_eq prove_arith_rewrite ctxt), + ("if_rewrite", lift_ite_rewrite ctxt)] [] + +fun rewrite_star ctxt = rewrite ctxt + + +(* pulling quantifiers *) + +fun pull_quant ctxt _ t = prove ctxt t quant_tac + + +(* pushing quantifiers *) + +fun push_quant _ _ _ = raise Fail "unsupported" (* FIXME *) + + +(* elimination of unused bound variables *) + +val elim_all = @{lemma "P = Q ==> (ALL x. P) = Q" by fast} +val elim_ex = @{lemma "P = Q ==> (EX x. P) = Q" by fast} + +fun elim_unused_tac ctxt i st = ( + match_tac ctxt [@{thm refl}] + ORELSE' (match_tac ctxt [elim_all, elim_ex] THEN' elim_unused_tac ctxt) + ORELSE' ( + match_tac ctxt [@{thm iff_allI}, @{thm iff_exI}] + THEN' elim_unused_tac ctxt)) i st + +fun elim_unused ctxt _ t = prove ctxt t elim_unused_tac + + +(* destructive equality resolution *) + +fun dest_eq_res _ _ _ = raise Fail "dest_eq_res" (* FIXME *) + + +(* quantifier instantiation *) + +val quant_inst_rule = @{lemma "~P x | Q ==> ~(ALL x. P x) | Q" by fast} + +fun quant_inst ctxt _ t = prove ctxt t (fn _ => + REPEAT_ALL_NEW (resolve_tac ctxt [quant_inst_rule]) + THEN' resolve_tac ctxt @{thms excluded_middle}) + + +(* propositional lemma *) + +exception LEMMA of unit + +val intro_hyp_rule1 = @{lemma "(~P ==> Q) ==> P | Q" by fast} +val intro_hyp_rule2 = @{lemma "(P ==> Q) ==> ~P | Q" by fast} + +fun norm_lemma thm = + (thm COMP_INCR intro_hyp_rule1) + handle THM _ => thm COMP_INCR intro_hyp_rule2 + +fun negated_prop (@{const HOL.Not} $ t) = HOLogic.mk_Trueprop t + | negated_prop t = HOLogic.mk_Trueprop (HOLogic.mk_not t) + +fun intro_hyps tab (t as @{const HOL.disj} $ t1 $ t2) cx = + lookup_intro_hyps tab t (fold (intro_hyps tab) [t1, t2]) cx + | intro_hyps tab t cx = + lookup_intro_hyps tab t (fn _ => raise LEMMA ()) cx + +and lookup_intro_hyps tab t f (cx as (thm, terms)) = + (case Termtab.lookup tab (negated_prop t) of + NONE => f cx + | SOME hyp => (norm_lemma (Thm.implies_intr hyp thm), t :: terms)) + +fun lemma ctxt (thms as [thm]) t = + (let + val tab = Termtab.make (map (`Thm.term_of) (Thm.chyps_of thm)) + val (thm', terms) = intro_hyps tab (dest_prop t) (thm, []) + in + prove_abstract ctxt [thm'] t prop_tac ( + fold (snd oo abstract_lit) terms #> + abstract_disj (dest_thm thm') #>> single ##>> + abstract_disj (dest_prop t)) + end + handle LEMMA () => replay_error ctxt "Bad proof state" Z3_Proof.Lemma thms t) + | lemma ctxt thms t = replay_rule_error ctxt Z3_Proof.Lemma thms t + + +(* unit resolution *) + +fun abstract_unit (t as (@{const HOL.Not} $ (@{const HOL.disj} $ t1 $ t2))) = + abstract_sub t (abstract_unit t1 ##>> abstract_unit t2 #>> + HOLogic.mk_not o HOLogic.mk_disj) + | abstract_unit (t as (@{const HOL.disj} $ t1 $ t2)) = + abstract_sub t (abstract_unit t1 ##>> abstract_unit t2 #>> + HOLogic.mk_disj) + | abstract_unit t = abstract_lit t + +fun unit_res ctxt thms t = + prove_abstract ctxt thms t prop_tac ( + fold_map (abstract_unit o dest_thm) thms ##>> + 
abstract_unit (dest_prop t) #>> + (fn (prems, concl) => (prems, concl))) + + +(* iff-true *) + +val iff_true_rule = @{lemma "P ==> P = True" by fast} + +fun iff_true _ [thm] _ = thm RS iff_true_rule + | iff_true ctxt thms t = replay_rule_error ctxt Z3_Proof.Iff_True thms t + + +(* iff-false *) + +val iff_false_rule = @{lemma "~P ==> P = False" by fast} + +fun iff_false _ [thm] _ = thm RS iff_false_rule + | iff_false ctxt thms t = replay_rule_error ctxt Z3_Proof.Iff_False thms t + + +(* commutativity *) + +fun comm ctxt _ t = match_instantiate ctxt t @{thm eq_commute} + + +(* definitional axioms *) + +fun def_axiom_disj ctxt t = + (case dest_prop t of + @{const HOL.disj} $ u1 $ u2 => + prove_abstract' ctxt t prop_tac ( + abstract_prop u2 ##>> abstract_prop u1 #>> HOLogic.mk_disj o swap) + | u => prove_abstract' ctxt t prop_tac (abstract_prop u)) + +fun def_axiom ctxt _ = try_provers ctxt Z3_Proof.Def_Axiom [ + ("rules", apply_rule ctxt), + ("disj", def_axiom_disj ctxt)] [] + + +(* application of definitions *) + +fun apply_def _ [thm] _ = thm (* TODO: cover also the missing cases *) + | apply_def ctxt thms t = replay_rule_error ctxt Z3_Proof.Apply_Def thms t + + +(* iff-oeq *) + +fun iff_oeq _ _ _ = raise Fail "iff_oeq" (* FIXME *) + + +(* negation normal form *) + +fun nnf_prop ctxt thms t = + prove_abstract ctxt thms t prop_tac ( + fold_map (abstract_prop o dest_thm) thms ##>> + abstract_prop (dest_prop t)) + +fun nnf ctxt rule thms = try_provers ctxt rule [ + ("prop", nnf_prop ctxt thms), + ("quant", quant_intro ctxt [hd thms])] thms + +fun nnf_pos ctxt = nnf ctxt Z3_Proof.Nnf_Pos +fun nnf_neg ctxt = nnf ctxt Z3_Proof.Nnf_Neg + + +(* theory lemmas *) + +fun arith_th_lemma_tac ctxt prems = + Method.insert_tac ctxt prems + THEN' SELECT_GOAL (Local_Defs.unfold0_tac ctxt @{thms z3div_def z3mod_def}) + THEN' Arith_Data.arith_tac ctxt + +fun arith_th_lemma ctxt thms t = + prove_abstract ctxt thms t arith_th_lemma_tac ( + fold_map (abstract_arith ctxt o dest_thm) thms ##>> + abstract_arith ctxt (dest_prop t)) + +val _ = Theory.setup (Context.theory_map (add_th_lemma_method ("arith", arith_th_lemma))) + +fun th_lemma name ctxt thms = + (case Symtab.lookup (get_th_lemma_method ctxt) name of + SOME method => method ctxt thms + | NONE => replay_error ctxt "Bad theory" (Z3_Proof.Th_Lemma name) thms) + + +(* mapping of rules to methods *) + +fun unsupported rule ctxt = replay_error ctxt "Unsupported" rule +fun assumed rule ctxt = replay_error ctxt "Assumed" rule + +fun choose Z3_Proof.True_Axiom = true_axiom + | choose (r as Z3_Proof.Asserted) = assumed r + | choose (r as Z3_Proof.Goal) = assumed r + | choose Z3_Proof.Modus_Ponens = mp + | choose Z3_Proof.Reflexivity = refl + | choose Z3_Proof.Symmetry = symm + | choose Z3_Proof.Transitivity = trans + | choose (r as Z3_Proof.Transitivity_Star) = unsupported r + | choose Z3_Proof.Monotonicity = cong + | choose Z3_Proof.Quant_Intro = quant_intro + | choose Z3_Proof.Distributivity = distrib + | choose Z3_Proof.And_Elim = and_elim + | choose Z3_Proof.Not_Or_Elim = not_or_elim + | choose Z3_Proof.Rewrite = rewrite + | choose Z3_Proof.Rewrite_Star = rewrite_star + | choose Z3_Proof.Pull_Quant = pull_quant + | choose (r as Z3_Proof.Pull_Quant_Star) = unsupported r + | choose Z3_Proof.Push_Quant = push_quant + | choose Z3_Proof.Elim_Unused_Vars = elim_unused + | choose Z3_Proof.Dest_Eq_Res = dest_eq_res + | choose Z3_Proof.Quant_Inst = quant_inst + | choose (r as Z3_Proof.Hypothesis) = assumed r + | choose Z3_Proof.Lemma = lemma + | choose 
Z3_Proof.Unit_Resolution = unit_res + | choose Z3_Proof.Iff_True = iff_true + | choose Z3_Proof.Iff_False = iff_false + | choose Z3_Proof.Commutativity = comm + | choose Z3_Proof.Def_Axiom = def_axiom + | choose (r as Z3_Proof.Intro_Def) = assumed r + | choose Z3_Proof.Apply_Def = apply_def + | choose Z3_Proof.Iff_Oeq = iff_oeq + | choose Z3_Proof.Nnf_Pos = nnf_pos + | choose Z3_Proof.Nnf_Neg = nnf_neg + | choose (r as Z3_Proof.Nnf_Star) = unsupported r + | choose (r as Z3_Proof.Cnf_Star) = unsupported r + | choose (r as Z3_Proof.Skolemize) = assumed r + | choose Z3_Proof.Modus_Ponens_Oeq = mp_oeq + | choose (Z3_Proof.Th_Lemma name) = th_lemma name + +fun with_tracing rule method ctxt thms t = + let val _ = trace_goal ctxt rule thms t + in method ctxt thms t end + +fun method_for rule = with_tracing rule (choose rule) + +end; diff --git a/src/main/SMT/z3_replay_rules.ML b/src/main/SMT/z3_replay_rules.ML new file mode 100644 index 0000000..966100b --- /dev/null +++ b/src/main/SMT/z3_replay_rules.ML @@ -0,0 +1,54 @@ +(* Title: HOL/Tools/SMT/z3_replay_rules.ML + Author: Sascha Boehme, TU Muenchen + +Custom rules for Z3 proof replay. +*) + +signature Z3_REPLAY_RULES = +sig + val apply: Proof.context -> cterm -> thm option +end; + +structure Z3_Replay_Rules: Z3_REPLAY_RULES = +struct + +structure Data = Generic_Data +( + type T = thm Net.net + val empty = Net.empty + val extend = I + val merge = Net.merge Thm.eq_thm +) + +fun maybe_instantiate ct thm = + try Thm.first_order_match (Thm.cprop_of thm, ct) + |> Option.map (fn inst => Thm.instantiate inst thm) + +fun apply ctxt ct = + let + val net = Data.get (Context.Proof ctxt) + val xthms = Net.match_term net (Thm.term_of ct) + + fun select ct = map_filter (maybe_instantiate ct) xthms + fun select' ct = + let val thm = Thm.trivial ct + in map_filter (try (fn rule => rule COMP thm)) xthms end + + in try hd (case select ct of [] => select' ct | xthms' => xthms') end + +val prep = `Thm.prop_of + +fun ins thm net = Net.insert_term Thm.eq_thm (prep thm) net handle Net.INSERT => net +fun del thm net = Net.delete_term Thm.eq_thm (prep thm) net handle Net.DELETE => net + +val add = Thm.declaration_attribute (Data.map o ins) +val del = Thm.declaration_attribute (Data.map o del) + +val name = Binding.name "z3_rule" + +val description = "declaration of Z3 proof rules" + +val _ = Theory.setup (Attrib.setup name (Attrib.add_del add del) description #> + Global_Theory.add_thms_dynamic (name, Net.content o Data.get)) + +end; From 79b0ce9de918e3e64e6a7ff4007a52003bc58487 Mon Sep 17 00:00:00 2001 From: Serguei Mokhov Date: Fri, 25 Feb 2022 11:57:04 -0500 Subject: [PATCH 8/8] [i2021-1] clean up test-gen copy of SMT --- src/test-gen/Old_SMT/old_smt_builtin.ML | 231 ----- src/test-gen/Old_SMT/old_smt_config.ML | 254 ----- src/test-gen/Old_SMT/old_smt_datatypes.ML | 94 -- src/test-gen/Old_SMT/old_smt_failure.ML | 61 -- src/test-gen/Old_SMT/old_smt_normalize.ML | 652 ------------- src/test-gen/Old_SMT/old_smt_real.ML | 134 --- src/test-gen/Old_SMT/old_smt_setup_solvers.ML | 189 ---- src/test-gen/Old_SMT/old_smt_solver.ML | 374 -------- src/test-gen/Old_SMT/old_smt_translate.ML | 589 ------------ src/test-gen/Old_SMT/old_smt_utils.ML | 221 ----- src/test-gen/Old_SMT/old_smt_word.ML | 146 --- src/test-gen/Old_SMT/old_smtlib_interface.ML | 161 ---- src/test-gen/Old_SMT/old_z3_interface.ML | 239 ----- src/test-gen/Old_SMT/old_z3_model.ML | 337 ------- src/test-gen/Old_SMT/old_z3_proof_literals.ML | 363 ------- src/test-gen/Old_SMT/old_z3_proof_methods.ML | 149 --- 
src/test-gen/Old_SMT/old_z3_proof_parser.ML | 446 ---------
.../Old_SMT/old_z3_proof_reconstruction.ML | 891 ------------------
src/test-gen/Old_SMT/old_z3_proof_tools.ML | 374 --------
src/test-gen/SMT/conj_disj_perm.ML | 127 ---
src/test-gen/SMT/cvc4_interface.ML | 31 -
src/test-gen/SMT/cvc4_proof_parse.ML | 46 -
src/test-gen/SMT/smt_builtin.ML | 222 -----
src/test-gen/SMT/smt_config.ML | 265 ------
src/test-gen/SMT/smt_datatypes.ML | 152 ---
src/test-gen/SMT/smt_failure.ML | 40 -
src/test-gen/SMT/smt_normalize.ML | 444 ---------
src/test-gen/SMT/smt_real.ML | 115 ---
src/test-gen/SMT/smt_solver.ML | 307 ------
src/test-gen/SMT/smt_systems.ML | 154 ---
src/test-gen/SMT/smt_translate.ML | 527 -----------
src/test-gen/SMT/smt_util.ML | 240 -----
src/test-gen/SMT/smtlib.ML | 191 ----
src/test-gen/SMT/smtlib_interface.ML | 171 ----
src/test-gen/SMT/smtlib_isar.ML | 75 --
src/test-gen/SMT/smtlib_proof.ML | 298 ------
src/test-gen/SMT/verit_isar.ML | 60 --
src/test-gen/SMT/verit_proof.ML | 324 -------
src/test-gen/SMT/verit_proof_parse.ML | 78 --
src/test-gen/SMT/z3_interface.ML | 192 ----
src/test-gen/SMT/z3_isar.ML | 120 ---
src/test-gen/SMT/z3_proof.ML | 303 ------
src/test-gen/SMT/z3_real.ML | 32 -
src/test-gen/SMT/z3_replay.ML | 262 -----
src/test-gen/SMT/z3_replay_methods.ML | 685 --------------
src/test-gen/SMT/z3_replay_rules.ML | 54 --
src/test-gen/SMT/z3_replay_util.ML | 155 ---
47 files changed, 11575 deletions(-)
delete mode 100644 src/test-gen/Old_SMT/old_smt_builtin.ML
delete mode 100644 src/test-gen/Old_SMT/old_smt_config.ML
delete mode 100644 src/test-gen/Old_SMT/old_smt_datatypes.ML
delete mode 100644 src/test-gen/Old_SMT/old_smt_failure.ML
delete mode 100644 src/test-gen/Old_SMT/old_smt_normalize.ML
delete mode 100644 src/test-gen/Old_SMT/old_smt_real.ML
delete mode 100644 src/test-gen/Old_SMT/old_smt_setup_solvers.ML
delete mode 100644 src/test-gen/Old_SMT/old_smt_solver.ML
delete mode 100644 src/test-gen/Old_SMT/old_smt_translate.ML
delete mode 100644 src/test-gen/Old_SMT/old_smt_utils.ML
delete mode 100644 src/test-gen/Old_SMT/old_smt_word.ML
delete mode 100644 src/test-gen/Old_SMT/old_smtlib_interface.ML
delete mode 100644 src/test-gen/Old_SMT/old_z3_interface.ML
delete mode 100644 src/test-gen/Old_SMT/old_z3_model.ML
delete mode 100644 src/test-gen/Old_SMT/old_z3_proof_literals.ML
delete mode 100644 src/test-gen/Old_SMT/old_z3_proof_methods.ML
delete mode 100644
src/test-gen/SMT/verit_proof.ML delete mode 100644 src/test-gen/SMT/verit_proof_parse.ML delete mode 100644 src/test-gen/SMT/z3_interface.ML delete mode 100644 src/test-gen/SMT/z3_isar.ML delete mode 100644 src/test-gen/SMT/z3_proof.ML delete mode 100644 src/test-gen/SMT/z3_real.ML delete mode 100644 src/test-gen/SMT/z3_replay.ML delete mode 100644 src/test-gen/SMT/z3_replay_methods.ML delete mode 100644 src/test-gen/SMT/z3_replay_rules.ML delete mode 100644 src/test-gen/SMT/z3_replay_util.ML diff --git a/src/test-gen/Old_SMT/old_smt_builtin.ML b/src/test-gen/Old_SMT/old_smt_builtin.ML deleted file mode 100644 index e492ed4..0000000 --- a/src/test-gen/Old_SMT/old_smt_builtin.ML +++ /dev/null @@ -1,231 +0,0 @@ -(* Title: HOL/Library/Old_SMT/old_smt_builtin.ML - Author: Sascha Boehme, TU Muenchen - -Tables of types and terms directly supported by SMT solvers. -*) - -signature OLD_SMT_BUILTIN = -sig - (*for experiments*) - val filter_builtins: (typ -> bool) -> Proof.context -> Proof.context - - (*built-in types*) - val add_builtin_typ: Old_SMT_Utils.class -> - typ * (typ -> string option) * (typ -> int -> string option) -> - Context.generic -> Context.generic - val add_builtin_typ_ext: typ * (typ -> bool) -> Context.generic -> - Context.generic - val dest_builtin_typ: Proof.context -> typ -> string option - val is_builtin_typ_ext: Proof.context -> typ -> bool - - (*built-in numbers*) - val dest_builtin_num: Proof.context -> term -> (string * typ) option - val is_builtin_num: Proof.context -> term -> bool - val is_builtin_num_ext: Proof.context -> term -> bool - - (*built-in functions*) - type 'a bfun = Proof.context -> typ -> term list -> 'a - type bfunr = string * int * term list * (term list -> term) - val add_builtin_fun: Old_SMT_Utils.class -> - (string * typ) * bfunr option bfun -> Context.generic -> Context.generic - val add_builtin_fun': Old_SMT_Utils.class -> term * string -> Context.generic -> - Context.generic - val add_builtin_fun_ext: (string * typ) * term list bfun -> - Context.generic -> Context.generic - val add_builtin_fun_ext': string * typ -> Context.generic -> Context.generic - val add_builtin_fun_ext'': string -> Context.generic -> Context.generic - val dest_builtin_fun: Proof.context -> string * typ -> term list -> - bfunr option - val dest_builtin_eq: Proof.context -> term -> term -> bfunr option - val dest_builtin_pred: Proof.context -> string * typ -> term list -> - bfunr option - val dest_builtin_conn: Proof.context -> string * typ -> term list -> - bfunr option - val dest_builtin: Proof.context -> string * typ -> term list -> bfunr option - val dest_builtin_ext: Proof.context -> string * typ -> term list -> - term list option - val is_builtin_fun: Proof.context -> string * typ -> term list -> bool - val is_builtin_fun_ext: Proof.context -> string * typ -> term list -> bool -end - -structure Old_SMT_Builtin: OLD_SMT_BUILTIN = -struct - - -(* built-in tables *) - -datatype ('a, 'b) kind = Ext of 'a | Int of 'b - -type ('a, 'b) ttab = ((typ * ('a, 'b) kind) Ord_List.T) Old_SMT_Utils.dict - -fun typ_ord ((T, _), (U, _)) = - let - fun tord (TVar _, Type _) = GREATER - | tord (Type _, TVar _) = LESS - | tord (Type (n, Ts), Type (m, Us)) = - if n = m then list_ord tord (Ts, Us) - else Term_Ord.typ_ord (T, U) - | tord TU = Term_Ord.typ_ord TU - in tord (T, U) end - -fun insert_ttab cs T f = - Old_SMT_Utils.dict_map_default (cs, []) - (Ord_List.insert typ_ord (perhaps (try Logic.varifyT_global) T, f)) - -fun merge_ttab ttabp = - Old_SMT_Utils.dict_merge (Ord_List.merge 
typ_ord) ttabp - -fun lookup_ttab ctxt ttab T = - let fun match (U, _) = Sign.typ_instance (Proof_Context.theory_of ctxt) (T, U) - in - get_first (find_first match) - (Old_SMT_Utils.dict_lookup ttab (Old_SMT_Config.solver_class_of ctxt)) - end - -type ('a, 'b) btab = ('a, 'b) ttab Symtab.table - -fun insert_btab cs n T f = - Symtab.map_default (n, []) (insert_ttab cs T f) - -fun merge_btab btabp = Symtab.join (K merge_ttab) btabp - -fun lookup_btab ctxt btab (n, T) = - (case Symtab.lookup btab n of - NONE => NONE - | SOME ttab => lookup_ttab ctxt ttab T) - -type 'a bfun = Proof.context -> typ -> term list -> 'a - -type bfunr = string * int * term list * (term list -> term) - -structure Builtins = Generic_Data -( - type T = - (typ -> bool, (typ -> string option) * (typ -> int -> string option)) ttab * - (term list bfun, bfunr option bfun) btab - val empty = ([], Symtab.empty) - val extend = I - fun merge ((t1, b1), (t2, b2)) = (merge_ttab (t1, t2), merge_btab (b1, b2)) -) - -fun filter_ttab keep_T = map (apsnd (filter (keep_T o fst))) - -fun filter_builtins keep_T = - Context.proof_map (Builtins.map (fn (ttab, btab) => - (filter_ttab keep_T ttab, Symtab.map (K (filter_ttab keep_T)) btab))) - - -(* built-in types *) - -fun add_builtin_typ cs (T, f, g) = - Builtins.map (apfst (insert_ttab cs T (Int (f, g)))) - -fun add_builtin_typ_ext (T, f) = - Builtins.map (apfst (insert_ttab Old_SMT_Utils.basicC T (Ext f))) - -fun lookup_builtin_typ ctxt = - lookup_ttab ctxt (fst (Builtins.get (Context.Proof ctxt))) - -fun dest_builtin_typ ctxt T = - (case lookup_builtin_typ ctxt T of - SOME (_, Int (f, _)) => f T - | _ => NONE) - -fun is_builtin_typ_ext ctxt T = - (case lookup_builtin_typ ctxt T of - SOME (_, Int (f, _)) => is_some (f T) - | SOME (_, Ext f) => f T - | NONE => false) - - -(* built-in numbers *) - -fun dest_builtin_num ctxt t = - (case try HOLogic.dest_number t of - NONE => NONE - | SOME (T, i) => - if i < 0 then NONE else - (case lookup_builtin_typ ctxt T of - SOME (_, Int (_, g)) => g T i |> Option.map (rpair T) - | _ => NONE)) - -val is_builtin_num = is_some oo dest_builtin_num - -fun is_builtin_num_ext ctxt t = - (case try HOLogic.dest_number t of - NONE => false - | SOME (T, _) => is_builtin_typ_ext ctxt T) - - -(* built-in functions *) - -fun add_builtin_fun cs ((n, T), f) = - Builtins.map (apsnd (insert_btab cs n T (Int f))) - -fun add_builtin_fun' cs (t, n) = - let - val c as (m, T) = Term.dest_Const t - fun app U ts = Term.list_comb (Const (m, U), ts) - fun bfun _ U ts = SOME (n, length (Term.binder_types T), ts, app U) - in add_builtin_fun cs (c, bfun) end - -fun add_builtin_fun_ext ((n, T), f) = - Builtins.map (apsnd (insert_btab Old_SMT_Utils.basicC n T (Ext f))) - -fun add_builtin_fun_ext' c = add_builtin_fun_ext (c, fn _ => fn _ => I) - -fun add_builtin_fun_ext'' n context = - let val thy = Context.theory_of context - in add_builtin_fun_ext' (n, Sign.the_const_type thy n) context end - -fun lookup_builtin_fun ctxt = - lookup_btab ctxt (snd (Builtins.get (Context.Proof ctxt))) - -fun dest_builtin_fun ctxt (c as (_, T)) ts = - (case lookup_builtin_fun ctxt c of - SOME (_, Int f) => f ctxt T ts - | _ => NONE) - -fun dest_builtin_eq ctxt t u = - let - val aT = TFree (Name.aT, @{sort type}) - val c = (@{const_name HOL.eq}, aT --> aT --> @{typ bool}) - fun mk ts = Term.list_comb (HOLogic.eq_const (Term.fastype_of (hd ts)), ts) - in - dest_builtin_fun ctxt c [] - |> Option.map (fn (n, i, _, _) => (n, i, [t, u], mk)) - end - -fun special_builtin_fun pred ctxt (c as (_, T)) ts = - if 
pred (Term.body_type T, Term.binder_types T) then - dest_builtin_fun ctxt c ts - else NONE - -fun dest_builtin_pred ctxt = special_builtin_fun (equal @{typ bool} o fst) ctxt - -fun dest_builtin_conn ctxt = - special_builtin_fun (forall (equal @{typ bool}) o (op ::)) ctxt - -fun dest_builtin ctxt c ts = - let val t = Term.list_comb (Const c, ts) - in - (case dest_builtin_num ctxt t of - SOME (n, _) => SOME (n, 0, [], K t) - | NONE => dest_builtin_fun ctxt c ts) - end - -fun dest_builtin_fun_ext ctxt (c as (_, T)) ts = - (case lookup_builtin_fun ctxt c of - SOME (_, Int f) => f ctxt T ts |> Option.map (fn (_, _, us, _) => us) - | SOME (_, Ext f) => SOME (f ctxt T ts) - | NONE => NONE) - -fun dest_builtin_ext ctxt c ts = - if is_builtin_num_ext ctxt (Term.list_comb (Const c, ts)) then SOME [] - else dest_builtin_fun_ext ctxt c ts - -fun is_builtin_fun ctxt c ts = is_some (dest_builtin_fun ctxt c ts) - -fun is_builtin_fun_ext ctxt c ts = is_some (dest_builtin_fun_ext ctxt c ts) - -end diff --git a/src/test-gen/Old_SMT/old_smt_config.ML b/src/test-gen/Old_SMT/old_smt_config.ML deleted file mode 100644 index 318b2ce..0000000 --- a/src/test-gen/Old_SMT/old_smt_config.ML +++ /dev/null @@ -1,254 +0,0 @@ -(* Title: HOL/Library/Old_SMT/old_smt_config.ML - Author: Sascha Boehme, TU Muenchen - -Configuration options and diagnostic tools for SMT. -*) - -signature OLD_SMT_CONFIG = -sig - (*solver*) - type solver_info = { - name: string, - class: Proof.context -> Old_SMT_Utils.class, - avail: unit -> bool, - options: Proof.context -> string list } - val add_solver: solver_info -> Context.generic -> Context.generic - val set_solver_options: string * string -> Context.generic -> Context.generic - val is_available: Proof.context -> string -> bool - val available_solvers_of: Proof.context -> string list - val select_solver: string -> Context.generic -> Context.generic - val solver_of: Proof.context -> string - val solver_class_of: Proof.context -> Old_SMT_Utils.class - val solver_options_of: Proof.context -> string list - - (*options*) - val oracle: bool Config.T - val datatypes: bool Config.T - val timeout: real Config.T - val random_seed: int Config.T - val read_only_certificates: bool Config.T - val verbose: bool Config.T - val trace: bool Config.T - val trace_used_facts: bool Config.T - val monomorph_limit: int Config.T - val monomorph_instances: int Config.T - val infer_triggers: bool Config.T - val filter_only_facts: bool Config.T - val debug_files: string Config.T - - (*tools*) - val with_timeout: Proof.context -> ('a -> 'b) -> 'a -> 'b - - (*diagnostics*) - val trace_msg: Proof.context -> ('a -> string) -> 'a -> unit - val verbose_msg: Proof.context -> ('a -> string) -> 'a -> unit - - (*certificates*) - val select_certificates: string -> Context.generic -> Context.generic - val certificates_of: Proof.context -> Cache_IO.cache option - - (*setup*) - val setup: theory -> theory - val print_setup: Proof.context -> unit -end - -structure Old_SMT_Config: OLD_SMT_CONFIG = -struct - -(* solver *) - -type solver_info = { - name: string, - class: Proof.context -> Old_SMT_Utils.class, - avail: unit -> bool, - options: Proof.context -> string list } - -(* FIXME just one data slot (record) per program unit *) -structure Solvers = Generic_Data -( - type T = (solver_info * string list) Symtab.table * string option - val empty = (Symtab.empty, NONE) - val extend = I - fun merge ((ss1, s1), (ss2, s2)) = - (Symtab.merge (K true) (ss1, ss2), merge_options (s1, s2)) -) - -fun set_solver_options (name, options) = - let 
val opts = String.tokens (Symbol.is_ascii_blank o str) options - in Solvers.map (apfst (Symtab.map_entry name (apsnd (K opts)))) end - -fun add_solver (info as {name, ...} : solver_info) context = - if Symtab.defined (fst (Solvers.get context)) name then - error ("Solver already registered: " ^ quote name) - else - context - |> Solvers.map (apfst (Symtab.update (name, (info, [])))) - |> Context.map_theory (Attrib.setup (Binding.name ("old_" ^ name ^ "_options")) - (Scan.lift (@{keyword "="} |-- Args.name) >> - (Thm.declaration_attribute o K o set_solver_options o pair name)) - ("additional command line options for SMT solver " ^ quote name)) - -fun all_solvers_of ctxt = Symtab.keys (fst (Solvers.get (Context.Proof ctxt))) - -fun solver_name_of ctxt = snd (Solvers.get (Context.Proof ctxt)) - -fun is_available ctxt name = - (case Symtab.lookup (fst (Solvers.get (Context.Proof ctxt))) name of - SOME ({avail, ...}, _) => avail () - | NONE => false) - -fun available_solvers_of ctxt = - filter (is_available ctxt) (all_solvers_of ctxt) - -fun warn_solver (Context.Proof ctxt) name = - if Context_Position.is_visible ctxt then - warning ("The SMT solver " ^ quote name ^ " is not installed.") - else () - | warn_solver _ _ = (); - -fun select_solver name context = - let - val ctxt = Context.proof_of context - val upd = Solvers.map (apsnd (K (SOME name))) - in - if not (member (op =) (all_solvers_of ctxt) name) then - error ("Trying to select unknown solver: " ^ quote name) - else if not (is_available ctxt name) then - (warn_solver context name; upd context) - else upd context - end - -fun no_solver_err () = error "No SMT solver selected" - -fun solver_of ctxt = - (case solver_name_of ctxt of - SOME name => name - | NONE => no_solver_err ()) - -fun solver_info_of default select ctxt = - (case Solvers.get (Context.Proof ctxt) of - (solvers, SOME name) => select (Symtab.lookup solvers name) - | (_, NONE) => default ()) - -fun solver_class_of ctxt = - let fun class_of ({class, ...}: solver_info, _) = class ctxt - in solver_info_of no_solver_err (class_of o the) ctxt end - -fun solver_options_of ctxt = - let - fun all_options NONE = [] - | all_options (SOME ({options, ...} : solver_info, opts)) = - opts @ options ctxt - in solver_info_of (K []) all_options ctxt end - -val setup_solver = - Attrib.setup @{binding old_smt_solver} - (Scan.lift (@{keyword "="} |-- Args.name) >> - (Thm.declaration_attribute o K o select_solver)) - "SMT solver configuration" - - -(* options *) - -val oracle = Attrib.setup_config_bool @{binding old_smt_oracle} (K true) -val datatypes = Attrib.setup_config_bool @{binding old_smt_datatypes} (K false) -val timeout = Attrib.setup_config_real @{binding old_smt_timeout} (K 30.0) -val random_seed = Attrib.setup_config_int @{binding old_smt_random_seed} (K 1) -val read_only_certificates = Attrib.setup_config_bool @{binding old_smt_read_only_certificates} (K false) -val verbose = Attrib.setup_config_bool @{binding old_smt_verbose} (K true) -val trace = Attrib.setup_config_bool @{binding old_smt_trace} (K false) -val trace_used_facts = Attrib.setup_config_bool @{binding old_smt_trace_used_facts} (K false) -val monomorph_limit = Attrib.setup_config_int @{binding old_smt_monomorph_limit} (K 10) -val monomorph_instances = Attrib.setup_config_int @{binding old_smt_monomorph_instances} (K 500) -val infer_triggers = Attrib.setup_config_bool @{binding old_smt_infer_triggers} (K false) -val filter_only_facts = Attrib.setup_config_bool @{binding old_smt_filter_only_facts} (K false) -val debug_files 
= Attrib.setup_config_string @{binding old_smt_debug_files} (K "") - - -(* diagnostics *) - -fun cond_trace flag f x = if flag then tracing ("SMT: " ^ f x) else () - -fun verbose_msg ctxt = cond_trace (Config.get ctxt verbose) - -fun trace_msg ctxt = cond_trace (Config.get ctxt trace) - - -(* tools *) - -fun with_timeout ctxt f x = - Timeout.apply (seconds (Config.get ctxt timeout)) f x - handle Timeout.TIMEOUT _ => raise Old_SMT_Failure.SMT Old_SMT_Failure.Time_Out - - -(* certificates *) - -(* FIXME just one data slot (record) per program unit *) -structure Certificates = Generic_Data -( - type T = Cache_IO.cache option - val empty = NONE - val extend = I - fun merge (s, _) = s (* FIXME merge options!? *) -) - -val get_certificates_path = - Option.map (Cache_IO.cache_path_of) o Certificates.get o Context.Proof - -fun select_certificates name context = context |> Certificates.put ( - if name = "" then NONE - else - Path.explode name - |> Path.append (Resources.master_directory (Context.theory_of context)) - |> SOME o Cache_IO.unsynchronized_init) - -val certificates_of = Certificates.get o Context.Proof - -val setup_certificates = - Attrib.setup @{binding old_smt_certificates} - (Scan.lift (@{keyword "="} |-- Args.name) >> - (Thm.declaration_attribute o K o select_certificates)) - "SMT certificates configuration" - - -(* setup *) - -val setup = - setup_solver #> - setup_certificates - -fun print_setup ctxt = - let - fun string_of_bool b = if b then "true" else "false" - - val names = available_solvers_of ctxt - val ns = if null names then ["(none)"] else sort_strings names - val n = the_default "(none)" (solver_name_of ctxt) - val opts = solver_options_of ctxt - - val t = string_of_real (Config.get ctxt timeout) - - val certs_filename = - (case get_certificates_path ctxt of - SOME path => Path.print path - | NONE => "(disabled)") - in - Pretty.writeln (Pretty.big_list "SMT setup:" [ - Pretty.str ("Current SMT solver: " ^ n), - Pretty.str ("Current SMT solver options: " ^ space_implode " " opts), - Pretty.str_list "Available SMT solvers: " "" ns, - Pretty.str ("Current timeout: " ^ t ^ " seconds"), - Pretty.str ("With proofs: " ^ - string_of_bool (not (Config.get ctxt oracle))), - Pretty.str ("Certificates cache: " ^ certs_filename), - Pretty.str ("Fixed certificates: " ^ - string_of_bool (Config.get ctxt read_only_certificates))]) - end - -val _ = - Outer_Syntax.command @{command_keyword old_smt_status} - "show the available SMT solvers, the currently selected SMT solver, \ - \and the values of SMT configuration options" - (Scan.succeed (Toplevel.keep (print_setup o Toplevel.context_of))) - -end diff --git a/src/test-gen/Old_SMT/old_smt_datatypes.ML b/src/test-gen/Old_SMT/old_smt_datatypes.ML deleted file mode 100644 index 971dc74..0000000 --- a/src/test-gen/Old_SMT/old_smt_datatypes.ML +++ /dev/null @@ -1,94 +0,0 @@ -(* Title: HOL/Library/Old_SMT/old_smt_datatypes.ML - Author: Sascha Boehme, TU Muenchen - -Collector functions for common type declarations and their representation -as algebraic datatypes. 
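(* Illustrative sketch (not from the deleted sources): the collector below
   returns, for each type, its constructors paired with their selectors.  A
   standalone Standard ML stand-in for that shape, with strings in place of
   Isabelle typs and terms -- all names here are made up. *)
type decl = string * (string * string list) list;

val list_decl : decl =
  ("'a list",
   [("Nil",  []),
    ("Cons", ["hd", "tl"])]);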
-*) - -signature OLD_SMT_DATATYPES = -sig - val add_decls: typ -> - (typ * (term * term list) list) list list * Proof.context -> - (typ * (term * term list) list) list list * Proof.context -end - -structure Old_SMT_Datatypes: OLD_SMT_DATATYPES = -struct - -fun mk_selectors T Ts = - Variable.variant_fixes (replicate (length Ts) "select") - #>> map2 (fn U => fn n => Free (n, T --> U)) Ts - - -(* free constructor type declarations *) - -fun get_ctr_sugar_decl ({ctrs, ...} : Ctr_Sugar.ctr_sugar) T Ts ctxt = - let - fun mk_constr ctr0 = - let val ctr = Ctr_Sugar.mk_ctr Ts ctr0 in - mk_selectors T (binder_types (fastype_of ctr)) #>> pair ctr - end - in - fold_map mk_constr ctrs ctxt - |>> (pair T #> single) - end - - -(* typedef declarations *) - -fun get_typedef_decl (({Abs_name, Rep_name, abs_type, rep_type, ...}, {Abs_inverse, ...}) - : Typedef.info) T Ts = - if can (curry (op RS) @{thm UNIV_I}) Abs_inverse then - let - val env = snd (Term.dest_Type abs_type) ~~ Ts - val instT = Term.map_atyps (perhaps (AList.lookup (op =) env)) - - val constr = Const (Abs_name, instT (rep_type --> abs_type)) - val select = Const (Rep_name, instT (abs_type --> rep_type)) - in [(T, [(constr, [select])])] end - else - [] - - -(* collection of declarations *) - -fun declared declss T = exists (exists (equal T o fst)) declss -fun declared' dss T = exists (exists (equal T o fst) o snd) dss - -fun get_decls T n Ts ctxt = - (case Ctr_Sugar.ctr_sugar_of ctxt n of - SOME ctr_sugar => get_ctr_sugar_decl ctr_sugar T Ts ctxt - | NONE => - (case Typedef.get_info ctxt n of - [] => ([], ctxt) - | info :: _ => (get_typedef_decl info T Ts, ctxt))) - -fun add_decls T (declss, ctxt) = - let - fun depends Ts ds = exists (member (op =) (map fst ds)) Ts - - fun add (TFree _) = I - | add (TVar _) = I - | add (T as Type (@{type_name fun}, _)) = - fold add (Term.body_type T :: Term.binder_types T) - | add @{typ bool} = I - | add (T as Type (n, Ts)) = (fn (dss, ctxt1) => - if declared declss T orelse declared' dss T then (dss, ctxt1) - else if Old_SMT_Builtin.is_builtin_typ_ext ctxt1 T then (dss, ctxt1) - else - (case get_decls T n Ts ctxt1 of - ([], _) => (dss, ctxt1) - | (ds, ctxt2) => - let - val constrTs = - maps (map (snd o Term.dest_Const o fst) o snd) ds - val Us = fold (union (op =) o Term.binder_types) constrTs [] - - fun ins [] = [(Us, ds)] - | ins ((Uds as (Us', _)) :: Udss) = - if depends Us' ds then (Us, ds) :: Uds :: Udss - else Uds :: ins Udss - in fold add Us (ins dss, ctxt2) end)) - in add T ([], ctxt) |>> append declss o map snd end - -end diff --git a/src/test-gen/Old_SMT/old_smt_failure.ML b/src/test-gen/Old_SMT/old_smt_failure.ML deleted file mode 100644 index 394287c..0000000 --- a/src/test-gen/Old_SMT/old_smt_failure.ML +++ /dev/null @@ -1,61 +0,0 @@ -(* Title: HOL/Library/Old_SMT/old_smt_failure.ML - Author: Sascha Boehme, TU Muenchen - -Failures and exception of SMT. 
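(* Illustrative sketch (not from the deleted sources): a minimal standalone
   Standard ML model of the failure datatype declared below, omitting the
   Counterexample case and any Isabelle context. *)
datatype failure =
    Time_Out
  | Out_Of_Memory
  | Abnormal_Termination of int
  | Other_Failure of string;

fun string_of_failure Time_Out = "Timed out"
  | string_of_failure Out_Of_Memory = "Ran out of memory"
  | string_of_failure (Abnormal_Termination err) =
      "Solver terminated abnormally with error code " ^ Int.toString err
  | string_of_failure (Other_Failure msg) = msg;

(* string_of_failure (Abnormal_Termination 137) evaluates to
   "Solver terminated abnormally with error code 137" *)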
-*) - -signature OLD_SMT_FAILURE = -sig - type counterexample = { - is_real_cex: bool, - free_constraints: term list, - const_defs: term list} - datatype failure = - Counterexample of counterexample | - Time_Out | - Out_Of_Memory | - Abnormal_Termination of int | - Other_Failure of string - val pretty_counterexample: Proof.context -> counterexample -> Pretty.T - val string_of_failure: Proof.context -> failure -> string - exception SMT of failure -end - -structure Old_SMT_Failure: OLD_SMT_FAILURE = -struct - -type counterexample = { - is_real_cex: bool, - free_constraints: term list, - const_defs: term list} - -datatype failure = - Counterexample of counterexample | - Time_Out | - Out_Of_Memory | - Abnormal_Termination of int | - Other_Failure of string - -fun pretty_counterexample ctxt {is_real_cex, free_constraints, const_defs} = - let - val msg = - if is_real_cex then "Counterexample found (possibly spurious)" - else "Potential counterexample found" - in - if null free_constraints andalso null const_defs then Pretty.str msg - else - Pretty.big_list (msg ^ ":") - (map (Syntax.pretty_term ctxt) (free_constraints @ const_defs)) - end - -fun string_of_failure ctxt (Counterexample cex) = - Pretty.string_of (pretty_counterexample ctxt cex) - | string_of_failure _ Time_Out = "Timed out" - | string_of_failure _ Out_Of_Memory = "Ran out of memory" - | string_of_failure _ (Abnormal_Termination err) = - "Solver terminated abnormally with error code " ^ string_of_int err - | string_of_failure _ (Other_Failure msg) = msg - -exception SMT of failure - -end diff --git a/src/test-gen/Old_SMT/old_smt_normalize.ML b/src/test-gen/Old_SMT/old_smt_normalize.ML deleted file mode 100644 index 18cf0b7..0000000 --- a/src/test-gen/Old_SMT/old_smt_normalize.ML +++ /dev/null @@ -1,652 +0,0 @@ -(* Title: HOL/Library/Old_SMT/old_smt_normalize.ML - Author: Sascha Boehme, TU Muenchen - -Normalization steps on theorems required by SMT solvers. 
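(* Illustrative sketch (not from the deleted sources): the normalization below
   chains several passes over the input theorems; a minimal standalone Standard
   ML picture of such a pipeline, shown over plain strings and with made-up
   passes rather than the actual conversions. *)
type 'a pass = 'a -> 'a;

fun pipeline (passes : 'a pass list) : 'a pass =
  fn x => List.foldl (fn (p, y) => p y) x passes;

val strip_spaces : string pass =
  String.translate (fn c => if Char.isSpace c then "" else String.str c);
val lowercase : string pass = String.map Char.toLower;

(* pipeline [strip_spaces, lowercase] "A  B" evaluates to "ab" *)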
-*) - -signature OLD_SMT_NORMALIZE = -sig - val drop_fact_warning: Proof.context -> thm -> unit - val atomize_conv: Proof.context -> conv - type extra_norm = Proof.context -> thm list * thm list -> thm list * thm list - val add_extra_norm: Old_SMT_Utils.class * extra_norm -> Context.generic -> - Context.generic - val normalize: (int * (int option * thm)) list -> Proof.context -> - (int * thm) list * Proof.context - val setup: theory -> theory -end - -structure Old_SMT_Normalize: OLD_SMT_NORMALIZE = -struct - -fun drop_fact_warning ctxt = - Old_SMT_Config.verbose_msg ctxt (prefix "Warning: dropping assumption: " o - Thm.string_of_thm ctxt) - - -(* general theorem normalizations *) - -(** instantiate elimination rules **) - -local - val (cpfalse, cfalse) = - `Old_SMT_Utils.mk_cprop (Thm.cterm_of @{context} @{const False}) - - fun inst f ct thm = - let val cv = f (Drule.strip_imp_concl (Thm.cprop_of thm)) - in Thm.instantiate ([], [(dest_Var (Thm.term_of cv), ct)]) thm end -in - -fun instantiate_elim thm = - (case Thm.concl_of thm of - @{const Trueprop} $ Var (_, @{typ bool}) => inst Thm.dest_arg cfalse thm - | Var _ => inst I cpfalse thm - | _ => thm) - -end - - -(** normalize definitions **) - -fun norm_def thm = - (case Thm.prop_of thm of - @{const Trueprop} $ (Const (@{const_name HOL.eq}, _) $ _ $ Abs _) => - norm_def (thm RS @{thm fun_cong}) - | Const (@{const_name Pure.eq}, _) $ _ $ Abs _ => - norm_def (thm RS @{thm meta_eq_to_obj_eq}) - | _ => thm) - - -(** atomization **) - -fun atomize_conv ctxt ct = - (case Thm.term_of ct of - @{const Pure.imp} $ _ $ _ => - Conv.binop_conv (atomize_conv ctxt) then_conv - Conv.rewr_conv @{thm atomize_imp} - | Const (@{const_name Pure.eq}, _) $ _ $ _ => - Conv.binop_conv (atomize_conv ctxt) then_conv - Conv.rewr_conv @{thm atomize_eq} - | Const (@{const_name Pure.all}, _) $ Abs _ => - Conv.binder_conv (atomize_conv o snd) ctxt then_conv - Conv.rewr_conv @{thm atomize_all} - | _ => Conv.all_conv) ct - -val setup_atomize = - fold Old_SMT_Builtin.add_builtin_fun_ext'' [@{const_name Pure.imp}, - @{const_name Pure.eq}, @{const_name Pure.all}, @{const_name Trueprop}] - - -(** unfold special quantifiers **) - -local - val ex1_def = mk_meta_eq @{lemma - "Ex1 = (%P. EX x. P x & (ALL y. P y --> y = x))" - by (rule ext) (simp only: Ex1_def)} - - val ball_def = mk_meta_eq @{lemma "Ball = (%A P. ALL x. x : A --> P x)" - by (rule ext)+ (rule Ball_def)} - - val bex_def = mk_meta_eq @{lemma "Bex = (%A P. EX x. 
x : A & P x)" - by (rule ext)+ (rule Bex_def)} - - val special_quants = [(@{const_name Ex1}, ex1_def), - (@{const_name Ball}, ball_def), (@{const_name Bex}, bex_def)] - - fun special_quant (Const (n, _)) = AList.lookup (op =) special_quants n - | special_quant _ = NONE - - fun special_quant_conv _ ct = - (case special_quant (Thm.term_of ct) of - SOME thm => Conv.rewr_conv thm - | NONE => Conv.all_conv) ct -in - -fun unfold_special_quants_conv ctxt = - Old_SMT_Utils.if_exists_conv (is_some o special_quant) - (Conv.top_conv special_quant_conv ctxt) - -val setup_unfolded_quants = - fold (Old_SMT_Builtin.add_builtin_fun_ext'' o fst) special_quants - -end - - -(** trigger inference **) - -local - (*** check trigger syntax ***) - - fun dest_trigger (Const (@{const_name pat}, _) $ _) = SOME true - | dest_trigger (Const (@{const_name nopat}, _) $ _) = SOME false - | dest_trigger _ = NONE - - fun eq_list [] = false - | eq_list (b :: bs) = forall (equal b) bs - - fun proper_trigger t = - t - |> these o try HOLogic.dest_list - |> map (map_filter dest_trigger o these o try HOLogic.dest_list) - |> (fn [] => false | bss => forall eq_list bss) - - fun proper_quant inside f t = - (case t of - Const (@{const_name All}, _) $ Abs (_, _, u) => proper_quant true f u - | Const (@{const_name Ex}, _) $ Abs (_, _, u) => proper_quant true f u - | @{const trigger} $ p $ u => - (if inside then f p else false) andalso proper_quant false f u - | Abs (_, _, u) => proper_quant false f u - | u1 $ u2 => proper_quant false f u1 andalso proper_quant false f u2 - | _ => true) - - fun check_trigger_error ctxt t = - error ("SMT triggers must only occur under quantifier and multipatterns " ^ - "must have the same kind: " ^ Syntax.string_of_term ctxt t) - - fun check_trigger_conv ctxt ct = - if proper_quant false proper_trigger (Old_SMT_Utils.term_of ct) then - Conv.all_conv ct - else check_trigger_error ctxt (Thm.term_of ct) - - - (*** infer simple triggers ***) - - fun dest_cond_eq ct = - (case Thm.term_of ct of - Const (@{const_name HOL.eq}, _) $ _ $ _ => Thm.dest_binop ct - | @{const HOL.implies} $ _ $ _ => dest_cond_eq (Thm.dest_arg ct) - | _ => raise CTERM ("no equation", [ct])) - - fun get_constrs thy (Type (n, _)) = these (Old_Datatype_Data.get_constrs thy n) - | get_constrs _ _ = [] - - fun is_constr thy (n, T) = - let fun match (m, U) = m = n andalso Sign.typ_instance thy (T, U) - in can (the o find_first match o get_constrs thy o Term.body_type) T end - - fun is_constr_pat thy t = - (case Term.strip_comb t of - (Free _, []) => true - | (Const c, ts) => is_constr thy c andalso forall (is_constr_pat thy) ts - | _ => false) - - fun is_simp_lhs ctxt t = - (case Term.strip_comb t of - (Const c, ts as _ :: _) => - not (Old_SMT_Builtin.is_builtin_fun_ext ctxt c ts) andalso - forall (is_constr_pat (Proof_Context.theory_of ctxt)) ts - | _ => false) - - fun has_all_vars vs t = - subset (op aconv) (vs, map Free (Term.add_frees t [])) - - fun minimal_pats vs ct = - if has_all_vars vs (Thm.term_of ct) then - (case Thm.term_of ct of - _ $ _ => - (case apply2 (minimal_pats vs) (Thm.dest_comb ct) of - ([], []) => [[ct]] - | (ctss, ctss') => union (eq_set (op aconvc)) ctss ctss') - | _ => []) - else [] - - fun proper_mpat _ _ _ [] = false - | proper_mpat thy gen u cts = - let - val tps = (op ~~) (`gen (map Thm.term_of cts)) - fun some_match u = tps |> exists (fn (t', t) => - Pattern.matches thy (t', u) andalso not (t aconv u)) - in not (Term.exists_subterm some_match u) end - - val pat = - Old_SMT_Utils.mk_const_pat @{theory} 
@{const_name pat} Old_SMT_Utils.destT1 - fun mk_pat ct = Thm.apply (Old_SMT_Utils.instT' ct pat) ct - - fun mk_clist T = apply2 (Thm.cterm_of @{context}) (HOLogic.cons_const T, HOLogic.nil_const T) - fun mk_list (ccons, cnil) f cts = fold_rev (Thm.mk_binop ccons o f) cts cnil - val mk_pat_list = mk_list (mk_clist @{typ pattern}) - val mk_mpat_list = mk_list (mk_clist @{typ "pattern list"}) - fun mk_trigger ctss = mk_mpat_list (mk_pat_list mk_pat) ctss - - val trigger_eq = - mk_meta_eq @{lemma "p = trigger t p" by (simp add: trigger_def)} - - fun insert_trigger_conv [] ct = Conv.all_conv ct - | insert_trigger_conv ctss ct = - let val (ctr, cp) = Thm.dest_binop (Thm.rhs_of trigger_eq) ||> rpair ct - in Thm.instantiate ([], map (apfst (dest_Var o Thm.term_of)) [cp, (ctr, mk_trigger ctss)]) trigger_eq end - - fun infer_trigger_eq_conv outer_ctxt (ctxt, cvs) ct = - let - val (lhs, rhs) = dest_cond_eq ct - - val vs = map Thm.term_of cvs - val thy = Proof_Context.theory_of ctxt - - fun get_mpats ct = - if is_simp_lhs ctxt (Thm.term_of ct) then minimal_pats vs ct - else [] - val gen = Variable.export_terms ctxt outer_ctxt - val filter_mpats = filter (proper_mpat thy gen (Thm.term_of rhs)) - - in insert_trigger_conv (filter_mpats (get_mpats lhs)) ct end - - fun has_trigger (@{const trigger} $ _ $ _) = true - | has_trigger _ = false - - fun try_trigger_conv cv ct = - if Old_SMT_Utils.under_quant has_trigger (Old_SMT_Utils.term_of ct) then - Conv.all_conv ct - else Conv.try_conv cv ct - - fun infer_trigger_conv ctxt = - if Config.get ctxt Old_SMT_Config.infer_triggers then - try_trigger_conv - (Old_SMT_Utils.under_quant_conv (infer_trigger_eq_conv ctxt) ctxt) - else Conv.all_conv -in - -fun trigger_conv ctxt = - Old_SMT_Utils.prop_conv - (check_trigger_conv ctxt then_conv infer_trigger_conv ctxt) - -val setup_trigger = - fold Old_SMT_Builtin.add_builtin_fun_ext'' - [@{const_name pat}, @{const_name nopat}, @{const_name trigger}] - -end - - -(** adding quantifier weights **) - -local - (*** check weight syntax ***) - - val has_no_weight = - not o Term.exists_subterm (fn @{const weight} => true | _ => false) - - fun is_weight (@{const weight} $ w $ t) = - (case try HOLogic.dest_number w of - SOME (_, i) => i >= 0 andalso has_no_weight t - | _ => false) - | is_weight t = has_no_weight t - - fun proper_trigger (@{const trigger} $ _ $ t) = is_weight t - | proper_trigger t = is_weight t - - fun check_weight_error ctxt t = - error ("SMT weight must be a non-negative number and must only occur " ^ - "under the top-most quantifier and an optional trigger: " ^ - Syntax.string_of_term ctxt t) - - fun check_weight_conv ctxt ct = - if Old_SMT_Utils.under_quant proper_trigger (Old_SMT_Utils.term_of ct) then - Conv.all_conv ct - else check_weight_error ctxt (Thm.term_of ct) - - - (*** insertion of weights ***) - - fun under_trigger_conv cv ct = - (case Thm.term_of ct of - @{const trigger} $ _ $ _ => Conv.arg_conv cv - | _ => cv) ct - - val weight_eq = - mk_meta_eq @{lemma "p = weight i p" by (simp add: weight_def)} - fun mk_weight_eq w = - let val cv = Thm.dest_arg1 (Thm.rhs_of weight_eq) - in - Thm.instantiate ([], [(dest_Var (Thm.term_of cv), Numeral.mk_cnumber @{ctyp int} w)]) - weight_eq - end - - fun add_weight_conv NONE _ = Conv.all_conv - | add_weight_conv (SOME weight) ctxt = - let val cv = Conv.rewr_conv (mk_weight_eq weight) - in Old_SMT_Utils.under_quant_conv (K (under_trigger_conv cv)) ctxt end -in - -fun weight_conv weight ctxt = - Old_SMT_Utils.prop_conv - (check_weight_conv ctxt then_conv 
add_weight_conv weight ctxt) - -val setup_weight = Old_SMT_Builtin.add_builtin_fun_ext'' @{const_name weight} - -end - - -(** combined general normalizations **) - -fun gen_normalize1_conv ctxt weight = - atomize_conv ctxt then_conv - unfold_special_quants_conv ctxt then_conv - Thm.beta_conversion true then_conv - trigger_conv ctxt then_conv - weight_conv weight ctxt - -fun gen_normalize1 ctxt weight thm = - thm - |> instantiate_elim - |> norm_def - |> Conv.fconv_rule (Thm.beta_conversion true then_conv Thm.eta_conversion) - |> Drule.forall_intr_vars - |> Conv.fconv_rule (gen_normalize1_conv ctxt weight) - -fun gen_norm1_safe ctxt (i, (weight, thm)) = - (case try (gen_normalize1 ctxt weight) thm of - SOME thm' => SOME (i, thm') - | NONE => (drop_fact_warning ctxt thm; NONE)) - -fun gen_normalize ctxt iwthms = map_filter (gen_norm1_safe ctxt) iwthms - - - -(* unfolding of definitions and theory-specific rewritings *) - -fun expand_head_conv cv ct = - (case Thm.term_of ct of - _ $ _ => - Conv.fun_conv (expand_head_conv cv) then_conv - Conv.try_conv (Thm.beta_conversion false) - | _ => cv) ct - - -(** rewrite bool case expressions as if expressions **) - -local - fun is_case_bool (Const (@{const_name "bool.case_bool"}, _)) = true - | is_case_bool _ = false - - val thm = mk_meta_eq @{lemma - "case_bool = (%x y P. if P then x else y)" by (rule ext)+ simp} - - fun unfold_conv _ = - Old_SMT_Utils.if_true_conv (is_case_bool o Term.head_of) - (expand_head_conv (Conv.rewr_conv thm)) -in - -fun rewrite_case_bool_conv ctxt = - Old_SMT_Utils.if_exists_conv is_case_bool (Conv.top_conv unfold_conv ctxt) - -val setup_case_bool = - Old_SMT_Builtin.add_builtin_fun_ext'' @{const_name "bool.case_bool"} - -end - - -(** unfold abs, min and max **) - -local - val abs_def = mk_meta_eq @{lemma - "abs = (%a::'a::abs_if. if a < 0 then - a else a)" - by (rule ext) (rule abs_if)} - - val min_def = mk_meta_eq @{lemma "min = (%a b. if a <= b then a else b)" - by (rule ext)+ (rule min_def)} - - val max_def = mk_meta_eq @{lemma "max = (%a b. if a <= b then b else a)" - by (rule ext)+ (rule max_def)} - - val defs = [(@{const_name min}, min_def), (@{const_name max}, max_def), - (@{const_name abs}, abs_def)] - - fun is_builtinT ctxt T = - Old_SMT_Builtin.is_builtin_typ_ext ctxt (Term.domain_type T) - - fun abs_min_max ctxt (Const (n, T)) = - (case AList.lookup (op =) defs n of - NONE => NONE - | SOME thm => if is_builtinT ctxt T then SOME thm else NONE) - | abs_min_max _ _ = NONE - - fun unfold_amm_conv ctxt ct = - (case abs_min_max ctxt (Term.head_of (Thm.term_of ct)) of - SOME thm => expand_head_conv (Conv.rewr_conv thm) - | NONE => Conv.all_conv) ct -in - -fun unfold_abs_min_max_conv ctxt = - Old_SMT_Utils.if_exists_conv (is_some o abs_min_max ctxt) - (Conv.top_conv unfold_amm_conv ctxt) - -val setup_abs_min_max = fold (Old_SMT_Builtin.add_builtin_fun_ext'' o fst) defs - -end - - -(** embedding of standard natural number operations into integer operations **) - -local - val nat_embedding = @{lemma - "ALL n. nat (int n) = n" - "ALL i. i >= 0 --> int (nat i) = i" - "ALL i. 
i < 0 --> int (nat i) = 0" - by simp_all} - - val simple_nat_ops = [ - @{const less (nat)}, @{const less_eq (nat)}, - @{const Suc}, @{const plus (nat)}, @{const minus (nat)}] - - val mult_nat_ops = - [@{const times (nat)}, @{const divide (nat)}, @{const modulo (nat)}] - - val nat_ops = simple_nat_ops @ mult_nat_ops - - val nat_consts = nat_ops @ [@{const numeral (nat)}, - @{const zero_class.zero (nat)}, @{const one_class.one (nat)}] - - val nat_int_coercions = [@{const of_nat (int)}, @{const nat}] - - val builtin_nat_ops = nat_int_coercions @ simple_nat_ops - - val is_nat_const = member (op aconv) nat_consts - - fun is_nat_const' @{const of_nat (int)} = true - | is_nat_const' t = is_nat_const t - - val expands = map mk_meta_eq @{lemma - "0 = nat 0" - "1 = nat 1" - "(numeral :: num => nat) = (%i. nat (numeral i))" - "op < = (%a b. int a < int b)" - "op <= = (%a b. int a <= int b)" - "Suc = (%a. nat (int a + 1))" - "op + = (%a b. nat (int a + int b))" - "op - = (%a b. nat (int a - int b))" - "op * = (%a b. nat (int a * int b))" - "op div = (%a b. nat (int a div int b))" - "op mod = (%a b. nat (int a mod int b))" - by (fastforce simp add: nat_mult_distrib nat_div_distrib nat_mod_distrib)+} - - val ints = map mk_meta_eq @{lemma - "int 0 = 0" - "int 1 = 1" - "int (Suc n) = int n + 1" - "int (n + m) = int n + int m" - "int (n - m) = int (nat (int n - int m))" - "int (n * m) = int n * int m" - "int (n div m) = int n div int m" - "int (n mod m) = int n mod int m" - by (auto simp add: of_nat_mult zdiv_int zmod_int)} - - val int_if = mk_meta_eq @{lemma - "int (if P then n else m) = (if P then int n else int m)" - by simp} - - fun mk_number_eq ctxt i lhs = - let - val eq = Old_SMT_Utils.mk_cequals lhs (Numeral.mk_cnumber @{ctyp int} i) - val tac = - Simplifier.simp_tac (put_simpset HOL_ss ctxt addsimps [@{thm of_nat_numeral}]) 1 - in Goal.norm_result ctxt (Goal.prove_internal ctxt [] eq (K tac)) end - - fun ite_conv cv1 cv2 = - Conv.combination_conv (Conv.combination_conv (Conv.arg_conv cv1) cv2) cv2 - - fun int_conv ctxt ct = - (case Thm.term_of ct of - @{const of_nat (int)} $ (n as (@{const numeral (nat)} $ _)) => - Conv.rewr_conv (mk_number_eq ctxt (snd (HOLogic.dest_number n)) ct) - | @{const of_nat (int)} $ _ => - (Conv.rewrs_conv ints then_conv Conv.sub_conv ints_conv ctxt) else_conv - (Conv.rewr_conv int_if then_conv - ite_conv (nat_conv ctxt) (int_conv ctxt)) else_conv - Conv.sub_conv (Conv.top_sweep_conv nat_conv) ctxt - | _ => Conv.no_conv) ct - - and ints_conv ctxt = Conv.top_sweep_conv int_conv ctxt - - and expand_conv ctxt = - Old_SMT_Utils.if_conv (is_nat_const o Term.head_of) - (expand_head_conv (Conv.rewrs_conv expands) then_conv ints_conv ctxt) - (int_conv ctxt) - - and nat_conv ctxt = Old_SMT_Utils.if_exists_conv is_nat_const' - (Conv.top_sweep_conv expand_conv ctxt) - - val uses_nat_int = Term.exists_subterm (member (op aconv) nat_int_coercions) -in - -val nat_as_int_conv = nat_conv - -fun add_nat_embedding thms = - if exists (uses_nat_int o Thm.prop_of) thms then (thms, nat_embedding) - else (thms, []) - -val setup_nat_as_int = - Old_SMT_Builtin.add_builtin_typ_ext (@{typ nat}, K true) #> - fold (Old_SMT_Builtin.add_builtin_fun_ext' o Term.dest_Const) builtin_nat_ops - -end - - -(** normalize numerals **) - -local - (* - rewrite Numeral1 into 1 - rewrite - 0 into 0 - *) - - fun is_irregular_number (Const (@{const_name numeral}, _) $ Const (@{const_name num.One}, _)) = - true - | is_irregular_number (Const (@{const_name uminus}, _) $ Const (@{const_name Groups.zero}, _)) = - true - 
| is_irregular_number _ = - false; - - fun is_strange_number ctxt t = is_irregular_number t andalso Old_SMT_Builtin.is_builtin_num ctxt t; - - val proper_num_ss = - simpset_of (put_simpset HOL_ss @{context} - addsimps @{thms Num.numeral_One minus_zero}) - - fun norm_num_conv ctxt = - Old_SMT_Utils.if_conv (is_strange_number ctxt) - (Simplifier.rewrite (put_simpset proper_num_ss ctxt)) Conv.no_conv -in - -fun normalize_numerals_conv ctxt = - Old_SMT_Utils.if_exists_conv (is_strange_number ctxt) - (Conv.top_sweep_conv norm_num_conv ctxt) - -end - - -(** combined unfoldings and rewritings **) - -fun unfold_conv ctxt = - rewrite_case_bool_conv ctxt then_conv - unfold_abs_min_max_conv ctxt then_conv - nat_as_int_conv ctxt then_conv - Thm.beta_conversion true - -fun unfold1 ctxt = map (apsnd (Conv.fconv_rule (unfold_conv ctxt))) - -fun burrow_ids f ithms = - let - val (is, thms) = split_list ithms - val (thms', extra_thms) = f thms - in (is ~~ thms') @ map (pair ~1) extra_thms end - -fun unfold2 ctxt ithms = - ithms - |> map (apsnd (Conv.fconv_rule (normalize_numerals_conv ctxt))) - |> burrow_ids add_nat_embedding - - - -(* overall normalization *) - -type extra_norm = Proof.context -> thm list * thm list -> thm list * thm list - -structure Extra_Norms = Generic_Data -( - type T = extra_norm Old_SMT_Utils.dict - val empty = [] - val extend = I - fun merge data = Old_SMT_Utils.dict_merge fst data -) - -fun add_extra_norm (cs, norm) = - Extra_Norms.map (Old_SMT_Utils.dict_update (cs, norm)) - -fun apply_extra_norms ctxt ithms = - let - val cs = Old_SMT_Config.solver_class_of ctxt - val es = Old_SMT_Utils.dict_lookup (Extra_Norms.get (Context.Proof ctxt)) cs - in burrow_ids (fold (fn e => e ctxt) es o rpair []) ithms end - -local - val ignored = member (op =) [@{const_name All}, @{const_name Ex}, - @{const_name Let}, @{const_name If}, @{const_name HOL.eq}] - - val schematic_consts_of = - let - fun collect (@{const trigger} $ p $ t) = - collect_trigger p #> collect t - | collect (t $ u) = collect t #> collect u - | collect (Abs (_, _, t)) = collect t - | collect (t as Const (n, _)) = - if not (ignored n) then Monomorph.add_schematic_consts_of t else I - | collect _ = I - and collect_trigger t = - let val dest = these o try HOLogic.dest_list - in fold (fold collect_pat o dest) (dest t) end - and collect_pat (Const (@{const_name pat}, _) $ t) = collect t - | collect_pat (Const (@{const_name nopat}, _) $ t) = collect t - | collect_pat _ = I - in (fn t => collect t Symtab.empty) end -in - -fun monomorph ctxt xthms = - let val (xs, thms) = split_list xthms - in - map (pair 1) thms - |> Monomorph.monomorph schematic_consts_of ctxt - |> maps (uncurry (map o pair)) o map2 pair xs o map (map snd) - end - -end - -fun normalize iwthms ctxt = - iwthms - |> gen_normalize ctxt - |> unfold1 ctxt - |> monomorph ctxt - |> unfold2 ctxt - |> apply_extra_norms ctxt - |> rpair ctxt - -val setup = Context.theory_map ( - setup_atomize #> - setup_unfolded_quants #> - setup_trigger #> - setup_weight #> - setup_case_bool #> - setup_abs_min_max #> - setup_nat_as_int) - -end diff --git a/src/test-gen/Old_SMT/old_smt_real.ML b/src/test-gen/Old_SMT/old_smt_real.ML deleted file mode 100644 index 6a2a793..0000000 --- a/src/test-gen/Old_SMT/old_smt_real.ML +++ /dev/null @@ -1,134 +0,0 @@ -(* Title: HOL/Library/Old_SMT/old_smt_real.ML - Author: Sascha Boehme, TU Muenchen - -SMT setup for reals. 
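(* Illustrative sketch (not from the deleted sources): the setup below maps
   real-valued Isabelle operations to SMT-LIB symbols; a standalone Standard ML
   stand-in for that table, with informal string labels instead of constants. *)
val real_builtin_ops : (string * string) list =
  [("less",    "<"),
   ("less_eq", "<="),
   ("uminus",  "~"),
   ("plus",    "+"),
   ("minus",   "-")];

fun smtlib_symbol_of name =
  case List.find (fn (n, _) => n = name) real_builtin_ops of
      SOME (_, sym) => SOME sym
    | NONE => NONE;

(* smtlib_symbol_of "plus" evaluates to SOME "+" *)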
-*) - -structure Old_SMT_Real: sig end = -struct - - -(* SMT-LIB logic *) - -fun smtlib_logic ts = - if exists (Term.exists_type (Term.exists_subtype (equal @{typ real}))) ts - then SOME "AUFLIRA" - else NONE - - -(* SMT-LIB and Z3 built-ins *) - -local - fun real_num _ i = SOME (string_of_int i ^ ".0") - - fun is_linear [t] = Old_SMT_Utils.is_number t - | is_linear [t, u] = Old_SMT_Utils.is_number t orelse Old_SMT_Utils.is_number u - | is_linear _ = false - - fun mk_times ts = Term.list_comb (@{const times (real)}, ts) - - fun times _ _ ts = if is_linear ts then SOME ("*", 2, ts, mk_times) else NONE -in - -val setup_builtins = - Old_SMT_Builtin.add_builtin_typ Old_SMTLIB_Interface.smtlibC - (@{typ real}, K (SOME "Real"), real_num) #> - fold (Old_SMT_Builtin.add_builtin_fun' Old_SMTLIB_Interface.smtlibC) [ - (@{const less (real)}, "<"), - (@{const less_eq (real)}, "<="), - (@{const uminus (real)}, "~"), - (@{const plus (real)}, "+"), - (@{const minus (real)}, "-") ] #> - Old_SMT_Builtin.add_builtin_fun Old_SMTLIB_Interface.smtlibC - (Term.dest_Const @{const times (real)}, times) #> - Old_SMT_Builtin.add_builtin_fun' Old_Z3_Interface.smtlib_z3C - (@{const times (real)}, "*") #> - Old_SMT_Builtin.add_builtin_fun' Old_Z3_Interface.smtlib_z3C - (@{const divide (real)}, "/") - -end - - -(* Z3 constructors *) - -local - fun z3_mk_builtin_typ (Old_Z3_Interface.Sym ("Real", _)) = SOME @{typ real} - | z3_mk_builtin_typ (Old_Z3_Interface.Sym ("real", _)) = SOME @{typ real} - (*FIXME: delete*) - | z3_mk_builtin_typ _ = NONE - - fun z3_mk_builtin_num _ i T = - if T = @{typ real} then SOME (Numeral.mk_cnumber @{ctyp real} i) - else NONE - - fun mk_nary _ cu [] = cu - | mk_nary ct _ cts = uncurry (fold_rev (Thm.mk_binop ct)) (split_last cts) - - val mk_uminus = Thm.apply (Thm.cterm_of @{context} @{const uminus (real)}) - val add = Thm.cterm_of @{context} @{const plus (real)} - val real0 = Numeral.mk_cnumber @{ctyp real} 0 - val mk_sub = Thm.mk_binop (Thm.cterm_of @{context} @{const minus (real)}) - val mk_mul = Thm.mk_binop (Thm.cterm_of @{context} @{const times (real)}) - val mk_div = Thm.mk_binop (Thm.cterm_of @{context} @{const divide (real)}) - val mk_lt = Thm.mk_binop (Thm.cterm_of @{context} @{const less (real)}) - val mk_le = Thm.mk_binop (Thm.cterm_of @{context} @{const less_eq (real)}) - - fun z3_mk_builtin_fun (Old_Z3_Interface.Sym ("-", _)) [ct] = SOME (mk_uminus ct) - | z3_mk_builtin_fun (Old_Z3_Interface.Sym ("+", _)) cts = - SOME (mk_nary add real0 cts) - | z3_mk_builtin_fun (Old_Z3_Interface.Sym ("-", _)) [ct, cu] = - SOME (mk_sub ct cu) - | z3_mk_builtin_fun (Old_Z3_Interface.Sym ("*", _)) [ct, cu] = - SOME (mk_mul ct cu) - | z3_mk_builtin_fun (Old_Z3_Interface.Sym ("/", _)) [ct, cu] = - SOME (mk_div ct cu) - | z3_mk_builtin_fun (Old_Z3_Interface.Sym ("<", _)) [ct, cu] = - SOME (mk_lt ct cu) - | z3_mk_builtin_fun (Old_Z3_Interface.Sym ("<=", _)) [ct, cu] = - SOME (mk_le ct cu) - | z3_mk_builtin_fun (Old_Z3_Interface.Sym (">", _)) [ct, cu] = - SOME (mk_lt cu ct) - | z3_mk_builtin_fun (Old_Z3_Interface.Sym (">=", _)) [ct, cu] = - SOME (mk_le cu ct) - | z3_mk_builtin_fun _ _ = NONE -in - -val z3_mk_builtins = { - mk_builtin_typ = z3_mk_builtin_typ, - mk_builtin_num = z3_mk_builtin_num, - mk_builtin_fun = (fn _ => fn sym => fn cts => - (case try (Thm.typ_of_cterm o hd) cts of - SOME @{typ real} => z3_mk_builtin_fun sym cts - | _ => NONE)) } - -end - - -(* Z3 proof reconstruction *) - -val real_rules = @{lemma - "0 + (x::real) = x" - "x + 0 = x" - "0 * x = 0" - "1 * x = x" - "x + y = y + x" - 
by auto} - -val real_linarith_proc = - Simplifier.make_simproc @{context} "fast_real_arith" - {lhss = [@{term "(m::real) < n"}, @{term "(m::real) \ n"}, @{term "(m::real) = n"}], - proc = K Lin_Arith.simproc} - - -(* setup *) - -val _ = - Theory.setup - (Context.theory_map ( - Old_SMTLIB_Interface.add_logic (10, smtlib_logic) #> - setup_builtins #> - Old_Z3_Interface.add_mk_builtins z3_mk_builtins #> - fold Old_Z3_Proof_Reconstruction.add_z3_rule real_rules #> - Old_Z3_Proof_Tools.add_simproc real_linarith_proc)) - -end diff --git a/src/test-gen/Old_SMT/old_smt_setup_solvers.ML b/src/test-gen/Old_SMT/old_smt_setup_solvers.ML deleted file mode 100644 index 15e01db..0000000 --- a/src/test-gen/Old_SMT/old_smt_setup_solvers.ML +++ /dev/null @@ -1,189 +0,0 @@ -(* Title: HOL/Library/Old_SMT/old_smt_setup_solvers.ML - Author: Sascha Boehme, TU Muenchen - -Setup SMT solvers. -*) - -signature OLD_SMT_SETUP_SOLVERS = -sig - datatype z3_non_commercial = - Z3_Non_Commercial_Unknown | - Z3_Non_Commercial_Accepted | - Z3_Non_Commercial_Declined - val z3_non_commercial: unit -> z3_non_commercial - val z3_with_extensions: bool Config.T - val setup: theory -> theory -end - -structure Old_SMT_Setup_Solvers: OLD_SMT_SETUP_SOLVERS = -struct - -(* helper functions *) - -fun make_avail name () = getenv ("OLD_" ^ name ^ "_SOLVER") <> "" -fun make_command name () = [getenv ("OLD_" ^ name ^ "_SOLVER")] - -fun outcome_of unsat sat unknown solver_name line = - if String.isPrefix unsat line then Old_SMT_Solver.Unsat - else if String.isPrefix sat line then Old_SMT_Solver.Sat - else if String.isPrefix unknown line then Old_SMT_Solver.Unknown - else raise Old_SMT_Failure.SMT (Old_SMT_Failure.Other_Failure ("Solver " ^ - quote solver_name ^ " failed. Enable SMT tracing by setting the " ^ - "configuration option " ^ quote (Config.name_of Old_SMT_Config.trace) ^ " and " ^ - "see the trace for details.")) - -fun on_first_line test_outcome solver_name lines = - let - val empty_line = (fn "" => true | _ => false) - val split_first = (fn [] => ("", []) | l :: ls => (l, ls)) - val (l, ls) = split_first (snd (take_prefix empty_line lines)) - in (test_outcome solver_name l, ls) end - - -(* CVC3 *) - -local - fun cvc3_options ctxt = [ - "-seed", string_of_int (Config.get ctxt Old_SMT_Config.random_seed), - "-lang", "smtlib", "-output-lang", "presentation", - "-timeout", string_of_int (Real.ceil (Config.get ctxt Old_SMT_Config.timeout))] -in - -val cvc3: Old_SMT_Solver.solver_config = { - name = "cvc3", - class = K Old_SMTLIB_Interface.smtlibC, - avail = make_avail "CVC3", - command = make_command "CVC3", - options = cvc3_options, - default_max_relevant = 400 (* FUDGE *), - supports_filter = false, - outcome = - on_first_line (outcome_of "Unsatisfiable." "Satisfiable." 
"Unknown."), - cex_parser = NONE, - reconstruct = NONE } - -end - - -(* Yices *) - -val yices: Old_SMT_Solver.solver_config = { - name = "yices", - class = K Old_SMTLIB_Interface.smtlibC, - avail = make_avail "YICES", - command = make_command "YICES", - options = (fn ctxt => [ - "--rand-seed=" ^ string_of_int (Config.get ctxt Old_SMT_Config.random_seed), - "--timeout=" ^ - string_of_int (Real.ceil (Config.get ctxt Old_SMT_Config.timeout)), - "--smtlib"]), - default_max_relevant = 350 (* FUDGE *), - supports_filter = false, - outcome = on_first_line (outcome_of "unsat" "sat" "unknown"), - cex_parser = NONE, - reconstruct = NONE } - - -(* Z3 *) - -datatype z3_non_commercial = - Z3_Non_Commercial_Unknown | - Z3_Non_Commercial_Accepted | - Z3_Non_Commercial_Declined - - -local - val accepted = member (op =) ["yes", "Yes", "YES"] - val declined = member (op =) ["no", "No", "NO"] -in - -fun z3_non_commercial () = - let - val flag2 = getenv "OLD_Z3_NON_COMMERCIAL" - in - if accepted flag2 then Z3_Non_Commercial_Accepted - else if declined flag2 then Z3_Non_Commercial_Declined - else Z3_Non_Commercial_Unknown - end - -fun if_z3_non_commercial f = - (case z3_non_commercial () of - Z3_Non_Commercial_Accepted => f () - | Z3_Non_Commercial_Declined => - error (Pretty.string_of (Pretty.para - "The SMT solver Z3 may only be used for non-commercial applications.")) - | Z3_Non_Commercial_Unknown => - error - (Pretty.string_of - (Pretty.para - ("The SMT solver Z3 is not activated. To activate it, set the Isabelle \ - \system option \"z3_non_commercial\" to \"yes\" (e.g. via \ - \the Isabelle/jEdit menu Plugin Options / Isabelle / General).")) ^ - "\n\nSee also " ^ Url.print (Url.explode "http://z3.codeplex.com/license"))) - -end - - -val z3_with_extensions = - Attrib.setup_config_bool @{binding old_z3_with_extensions} (K false) - -local - fun z3_make_command name () = if_z3_non_commercial (make_command name) - - fun z3_options ctxt = - ["-rs:" ^ string_of_int (Config.get ctxt Old_SMT_Config.random_seed), - "MODEL=true", - "SOFT_TIMEOUT=" ^ - string_of_int (Real.ceil (1000.0 * Config.get ctxt Old_SMT_Config.timeout)), - "-smt"] - |> not (Config.get ctxt Old_SMT_Config.oracle) ? - append ["DISPLAY_PROOF=true", "PROOF_MODE=2"] - - fun z3_on_first_or_last_line solver_name lines = - let - fun junk l = - if String.isPrefix "WARNING: Out of allocated virtual memory" l - then raise Old_SMT_Failure.SMT Old_SMT_Failure.Out_Of_Memory - else - String.isPrefix "WARNING" l orelse - String.isPrefix "ERROR" l orelse - forall Symbol.is_ascii_blank (Symbol.explode l) - val lines = filter_out junk lines - fun outcome split = - the_default ("", []) (try split lines) - |>> outcome_of "unsat" "sat" "unknown" solver_name - in - (* Starting with version 4.0, Z3 puts the outcome on the first line of the - output rather than on the last line. 
*) - outcome (fn lines => (hd lines, tl lines)) - handle Old_SMT_Failure.SMT _ => outcome (swap o split_last) - end - - fun select_class ctxt = - if Config.get ctxt z3_with_extensions then Old_Z3_Interface.smtlib_z3C - else Old_SMTLIB_Interface.smtlibC -in - -val z3: Old_SMT_Solver.solver_config = { - name = "z3", - class = select_class, - avail = make_avail "Z3", - command = z3_make_command "Z3", - options = z3_options, - default_max_relevant = 350 (* FUDGE *), - supports_filter = true, - outcome = z3_on_first_or_last_line, - cex_parser = SOME Old_Z3_Model.parse_counterex, - reconstruct = SOME Old_Z3_Proof_Reconstruction.reconstruct } - -end - - -(* overall setup *) - -val setup = - Old_SMT_Solver.add_solver cvc3 #> - Old_SMT_Solver.add_solver yices #> - Old_SMT_Solver.add_solver z3 - -end diff --git a/src/test-gen/Old_SMT/old_smt_solver.ML b/src/test-gen/Old_SMT/old_smt_solver.ML deleted file mode 100644 index da7b8e4..0000000 --- a/src/test-gen/Old_SMT/old_smt_solver.ML +++ /dev/null @@ -1,374 +0,0 @@ -(* Title: HOL/Library/Old_SMT/old_smt_solver.ML - Author: Sascha Boehme, TU Muenchen - -SMT solvers registry and SMT tactic. -*) - -signature OLD_SMT_SOLVER = -sig - (*configuration*) - datatype outcome = Unsat | Sat | Unknown - type solver_config = { - name: string, - class: Proof.context -> Old_SMT_Utils.class, - avail: unit -> bool, - command: unit -> string list, - options: Proof.context -> string list, - default_max_relevant: int, - supports_filter: bool, - outcome: string -> string list -> outcome * string list, - cex_parser: (Proof.context -> Old_SMT_Translate.recon -> string list -> - term list * term list) option, - reconstruct: (Proof.context -> Old_SMT_Translate.recon -> string list -> - int list * thm) option } - - (*registry*) - val add_solver: solver_config -> theory -> theory - val solver_name_of: Proof.context -> string - val available_solvers_of: Proof.context -> string list - val apply_solver: Proof.context -> (int * (int option * thm)) list -> - int list * thm - val default_max_relevant: Proof.context -> string -> int - - (*filter*) - type 'a smt_filter_data = - ('a * thm) list * ((int * thm) list * Proof.context) - val smt_filter_preprocess: Proof.context -> thm list -> thm -> - ('a * (int option * thm)) list -> int -> 'a smt_filter_data - val smt_filter_apply: Time.time -> 'a smt_filter_data -> - {outcome: Old_SMT_Failure.failure option, used_facts: ('a * thm) list} - - (*tactic*) - val smt_tac: Proof.context -> thm list -> int -> tactic - val smt_tac': Proof.context -> thm list -> int -> tactic -end - -structure Old_SMT_Solver: OLD_SMT_SOLVER = -struct - - -(* interface to external solvers *) - -local - -fun make_cmd command options problem_path proof_path = - space_implode " " - ("(exec 2>&1;" :: map Bash.string (command () @ options) @ - [File.bash_path problem_path, ")", ">", File.bash_path proof_path]) - -fun trace_and ctxt msg f x = - let val _ = Old_SMT_Config.trace_msg ctxt (fn () => msg) () - in f x end - -fun run ctxt name mk_cmd input = - (case Old_SMT_Config.certificates_of ctxt of - NONE => - if not (Old_SMT_Config.is_available ctxt name) then - error ("The SMT solver " ^ quote name ^ " is not installed.") - else if Config.get ctxt Old_SMT_Config.debug_files = "" then - trace_and ctxt ("Invoking SMT solver " ^ quote name ^ " ...") - (Cache_IO.run mk_cmd) input - else - let - val base_path = Path.explode (Config.get ctxt Old_SMT_Config.debug_files) - val in_path = Path.ext "smt_in" base_path - val out_path = Path.ext "smt_out" base_path - in 
Cache_IO.raw_run mk_cmd input in_path out_path end - | SOME certs => - (case Cache_IO.lookup certs input of - (NONE, key) => - if Config.get ctxt Old_SMT_Config.read_only_certificates then - error ("Bad certificate cache: missing certificate") - else - Cache_IO.run_and_cache certs key mk_cmd input - | (SOME output, _) => - trace_and ctxt ("Using cached certificate from " ^ - Path.print (Cache_IO.cache_path_of certs) ^ " ...") - I output)) - -fun run_solver ctxt name mk_cmd input = - let - fun pretty tag ls = Pretty.string_of (Pretty.big_list tag - (map Pretty.str ls)) - - val _ = Old_SMT_Config.trace_msg ctxt (pretty "Problem:" o split_lines) input - - val {redirected_output=res, output=err, return_code} = - Old_SMT_Config.with_timeout ctxt (run ctxt name mk_cmd) input - val _ = Old_SMT_Config.trace_msg ctxt (pretty "Solver:") err - - val ls = fst (take_suffix (equal "") res) - val _ = Old_SMT_Config.trace_msg ctxt (pretty "Result:") ls - - val _ = return_code <> 0 andalso - raise Old_SMT_Failure.SMT (Old_SMT_Failure.Abnormal_Termination return_code) - in ls end - -fun trace_assms ctxt = - Old_SMT_Config.trace_msg ctxt (Pretty.string_of o - Pretty.big_list "Assertions:" o map (Thm.pretty_thm ctxt o snd)) - -fun trace_recon_data ({context=ctxt, typs, terms, ...} : Old_SMT_Translate.recon) = - let - fun pretty_eq n p = Pretty.block [Pretty.str n, Pretty.str " = ", p] - fun p_typ (n, T) = pretty_eq n (Syntax.pretty_typ ctxt T) - fun p_term (n, t) = pretty_eq n (Syntax.pretty_term ctxt t) - in - Old_SMT_Config.trace_msg ctxt (fn () => - Pretty.string_of (Pretty.big_list "Names:" [ - Pretty.big_list "sorts:" (map p_typ (Symtab.dest typs)), - Pretty.big_list "functions:" (map p_term (Symtab.dest terms))])) () - end - -in - -fun invoke name command ithms ctxt = - let - val options = Old_SMT_Config.solver_options_of ctxt - val comments = ("solver: " ^ name) :: - ("timeout: " ^ string_of_real (Config.get ctxt Old_SMT_Config.timeout)) :: - ("random seed: " ^ - string_of_int (Config.get ctxt Old_SMT_Config.random_seed)) :: - "arguments:" :: options - - val (str, recon as {context=ctxt', ...}) = - ithms - |> tap (trace_assms ctxt) - |> Old_SMT_Translate.translate ctxt comments - ||> tap trace_recon_data - in (run_solver ctxt' name (make_cmd command options) str, recon) end - -end - - -(* configuration *) - -datatype outcome = Unsat | Sat | Unknown - -type solver_config = { - name: string, - class: Proof.context -> Old_SMT_Utils.class, - avail: unit -> bool, - command: unit -> string list, - options: Proof.context -> string list, - default_max_relevant: int, - supports_filter: bool, - outcome: string -> string list -> outcome * string list, - cex_parser: (Proof.context -> Old_SMT_Translate.recon -> string list -> - term list * term list) option, - reconstruct: (Proof.context -> Old_SMT_Translate.recon -> string list -> - int list * thm) option } - - -(* registry *) - -type solver_info = { - command: unit -> string list, - default_max_relevant: int, - supports_filter: bool, - reconstruct: Proof.context -> string list * Old_SMT_Translate.recon -> - int list * thm } - -structure Solvers = Generic_Data -( - type T = solver_info Symtab.table - val empty = Symtab.empty - val extend = I - fun merge data = Symtab.merge (K true) data -) - -local - fun finish outcome cex_parser reconstruct ocl outer_ctxt - (output, (recon as {context=ctxt, ...} : Old_SMT_Translate.recon)) = - (case outcome output of - (Unsat, ls) => - if not (Config.get ctxt Old_SMT_Config.oracle) andalso is_some reconstruct - then the 
reconstruct outer_ctxt recon ls - else ([], ocl ()) - | (result, ls) => - let - val (ts, us) = - (case cex_parser of SOME f => f ctxt recon ls | _ => ([], [])) - in - raise Old_SMT_Failure.SMT (Old_SMT_Failure.Counterexample { - is_real_cex = (result = Sat), - free_constraints = ts, - const_defs = us}) - end) - - val cfalse = Thm.cterm_of @{context} (@{const Trueprop} $ @{const False}) -in - -fun add_solver cfg = - let - val {name, class, avail, command, options, default_max_relevant, - supports_filter, outcome, cex_parser, reconstruct} = cfg - - fun core_oracle () = cfalse - - fun solver ocl = { - command = command, - default_max_relevant = default_max_relevant, - supports_filter = supports_filter, - reconstruct = finish (outcome name) cex_parser reconstruct ocl } - - val info = {name=name, class=class, avail=avail, options=options} - in - Thm.add_oracle (Binding.name name, core_oracle) #-> (fn (_, ocl) => - Context.theory_map (Solvers.map (Symtab.update_new (name, solver ocl)))) #> - Context.theory_map (Old_SMT_Config.add_solver info) - end - -end - -fun get_info ctxt name = - the (Symtab.lookup (Solvers.get (Context.Proof ctxt)) name) - -val solver_name_of = Old_SMT_Config.solver_of - -val available_solvers_of = Old_SMT_Config.available_solvers_of - -fun name_and_info_of ctxt = - let val name = solver_name_of ctxt - in (name, get_info ctxt name) end - -fun gen_preprocess ctxt iwthms = Old_SMT_Normalize.normalize iwthms ctxt - -fun gen_apply (ithms, ctxt) = - let val (name, {command, reconstruct, ...}) = name_and_info_of ctxt - in - (ithms, ctxt) - |-> invoke name command - |> reconstruct ctxt - |>> distinct (op =) - end - -fun apply_solver ctxt = gen_apply o gen_preprocess ctxt - -val default_max_relevant = #default_max_relevant oo get_info - -val supports_filter = #supports_filter o snd o name_and_info_of - - -(* check well-sortedness *) - -val has_topsort = Term.exists_type (Term.exists_subtype (fn - TFree (_, []) => true - | TVar (_, []) => true - | _ => false)) - -(* without this test, we would run into problems when atomizing the rules: *) -fun check_topsort ctxt thm = - if has_topsort (Thm.prop_of thm) then - (Old_SMT_Normalize.drop_fact_warning ctxt thm; TrueI) - else - thm - -fun check_topsorts ctxt iwthms = map (apsnd (apsnd (check_topsort ctxt))) iwthms - - -(* filter *) - -val cnot = Thm.cterm_of @{context} @{const Not} - -fun mk_result outcome xrules = { outcome = outcome, used_facts = xrules } - -type 'a smt_filter_data = ('a * thm) list * ((int * thm) list * Proof.context) - -fun smt_filter_preprocess ctxt facts goal xwthms i = - let - val ctxt = - ctxt - |> Config.put Old_SMT_Config.oracle false - |> Config.put Old_SMT_Config.filter_only_facts true - - val ({context=ctxt', prems, concl, ...}, _) = Subgoal.focus ctxt i NONE goal - fun negate ct = Thm.dest_comb ct ||> Thm.apply cnot |-> Thm.apply - val cprop = - (case try negate (Thm.rhs_of (Old_SMT_Normalize.atomize_conv ctxt' concl)) of - SOME ct => ct - | NONE => raise Old_SMT_Failure.SMT (Old_SMT_Failure.Other_Failure ( - "goal is not a HOL term"))) - in - map snd xwthms - |> map_index I - |> append (map (pair ~1 o pair NONE) (Thm.assume cprop :: prems @ facts)) - |> check_topsorts ctxt' - |> gen_preprocess ctxt' - |> pair (map (apsnd snd) xwthms) - end - -fun smt_filter_apply time_limit (xthms, (ithms, ctxt)) = - let - val ctxt' = - ctxt - |> Config.put Old_SMT_Config.timeout (Time.toReal time_limit) - - fun filter_thms false = K xthms - | filter_thms true = map_filter (try (nth xthms)) o fst - in - (ithms, ctxt') - |> 
gen_apply - |> filter_thms (supports_filter ctxt') - |> mk_result NONE - end - handle Old_SMT_Failure.SMT fail => mk_result (SOME fail) [] - - -(* SMT tactic *) - -local - fun trace_assumptions ctxt iwthms idxs = - let - val wthms = - idxs - |> filter (fn i => i >= 0) - |> map_filter (AList.lookup (op =) iwthms) - in - if Config.get ctxt Old_SMT_Config.trace_used_facts andalso length wthms > 0 - then - tracing (Pretty.string_of (Pretty.big_list "SMT used facts:" - (map (Thm.pretty_thm ctxt o snd) wthms))) - else () - end - - fun solve ctxt iwthms = - iwthms - |> check_topsorts ctxt - |> apply_solver ctxt - |>> trace_assumptions ctxt iwthms - |> snd - - fun str_of ctxt fail = - Old_SMT_Failure.string_of_failure ctxt fail - |> prefix ("Solver " ^ Old_SMT_Config.solver_of ctxt ^ ": ") - - fun safe_solve ctxt iwthms = SOME (solve ctxt iwthms) - handle - Old_SMT_Failure.SMT (fail as Old_SMT_Failure.Counterexample _) => - (Old_SMT_Config.verbose_msg ctxt (str_of ctxt) fail; NONE) - | Old_SMT_Failure.SMT (fail as Old_SMT_Failure.Time_Out) => - error ("SMT: Solver " ^ quote (Old_SMT_Config.solver_of ctxt) ^ ": " ^ - Old_SMT_Failure.string_of_failure ctxt fail ^ " (setting the " ^ - "configuration option " ^ quote (Config.name_of Old_SMT_Config.timeout) ^ " might help)") - | Old_SMT_Failure.SMT fail => error (str_of ctxt fail) - - fun tag_rules thms = map_index (apsnd (pair NONE)) thms - fun tag_prems thms = map (pair ~1 o pair NONE) thms - - fun resolve ctxt (SOME thm) = resolve_tac ctxt [thm] 1 - | resolve _ NONE = no_tac - - fun tac prove ctxt rules = - CONVERSION (Old_SMT_Normalize.atomize_conv ctxt) - THEN' resolve_tac ctxt @{thms ccontr} - THEN' SUBPROOF (fn {context = ctxt', prems, ...} => - resolve ctxt' (prove ctxt' (tag_rules rules @ tag_prems prems))) ctxt -in - -val smt_tac = tac safe_solve -val smt_tac' = tac (SOME oo solve) - -end - -end diff --git a/src/test-gen/Old_SMT/old_smt_translate.ML b/src/test-gen/Old_SMT/old_smt_translate.ML deleted file mode 100644 index ab4a2a2..0000000 --- a/src/test-gen/Old_SMT/old_smt_translate.ML +++ /dev/null @@ -1,589 +0,0 @@ -(* Title: HOL/Library/Old_SMT/old_smt_translate.ML - Author: Sascha Boehme, TU Muenchen - -Translate theorems into an SMT intermediate format and serialize them. 
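(* Illustrative sketch (not from the deleted sources): a cut-down standalone
   Standard ML version of the intermediate term structure declared below (only
   the variable and application cases), with a toy serializer to SMT-LIB-style
   S-expressions; the "?v" variable naming is made up. *)
datatype sterm =
    SVar of int
  | SApp of string * sterm list;

fun serialize (SVar i) = "?v" ^ Int.toString i
  | serialize (SApp (f, [])) = f
  | serialize (SApp (f, ts)) =
      "(" ^ f ^ " " ^ String.concatWith " " (map serialize ts) ^ ")";

(* serialize (SApp ("=", [SApp ("f", [SVar 0]), SVar 0])) evaluates to
   "(= (f ?v0) ?v0)" *)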
-*) - -signature OLD_SMT_TRANSLATE = -sig - (*intermediate term structure*) - datatype squant = SForall | SExists - datatype 'a spattern = SPat of 'a list | SNoPat of 'a list - datatype sterm = - SVar of int | - SApp of string * sterm list | - SLet of string * sterm * sterm | - SQua of squant * string list * sterm spattern list * int option * sterm - - (*translation configuration*) - type prefixes = {sort_prefix: string, func_prefix: string} - type sign = { - header: string list, - sorts: string list, - dtyps: (string * (string * (string * string) list) list) list list, - funcs: (string * (string list * string)) list } - type config = { - prefixes: prefixes, - header: term list -> string list, - is_fol: bool, - has_datatypes: bool, - serialize: string list -> sign -> sterm list -> string } - type recon = { - context: Proof.context, - typs: typ Symtab.table, - terms: term Symtab.table, - rewrite_rules: thm list, - assms: (int * thm) list } - - (*translation*) - val add_config: Old_SMT_Utils.class * (Proof.context -> config) -> - Context.generic -> Context.generic - val translate: Proof.context -> string list -> (int * thm) list -> - string * recon -end - -structure Old_SMT_Translate: OLD_SMT_TRANSLATE = -struct - - -(* intermediate term structure *) - -datatype squant = SForall | SExists - -datatype 'a spattern = SPat of 'a list | SNoPat of 'a list - -datatype sterm = - SVar of int | - SApp of string * sterm list | - SLet of string * sterm * sterm | - SQua of squant * string list * sterm spattern list * int option * sterm - - - -(* translation configuration *) - -type prefixes = {sort_prefix: string, func_prefix: string} - -type sign = { - header: string list, - sorts: string list, - dtyps: (string * (string * (string * string) list) list) list list, - funcs: (string * (string list * string)) list } - -type config = { - prefixes: prefixes, - header: term list -> string list, - is_fol: bool, - has_datatypes: bool, - serialize: string list -> sign -> sterm list -> string } - -type recon = { - context: Proof.context, - typs: typ Symtab.table, - terms: term Symtab.table, - rewrite_rules: thm list, - assms: (int * thm) list } - - - -(* translation context *) - -fun make_tr_context {sort_prefix, func_prefix} = - (sort_prefix, 1, Typtab.empty, func_prefix, 1, Termtab.empty) - -fun string_of_index pre i = pre ^ string_of_int i - -fun add_typ T proper (cx as (sp, Tidx, typs, fp, idx, terms)) = - (case Typtab.lookup typs T of - SOME (n, _) => (n, cx) - | NONE => - let - val n = string_of_index sp Tidx - val typs' = Typtab.update (T, (n, proper)) typs - in (n, (sp, Tidx+1, typs', fp, idx, terms)) end) - -fun add_fun t sort (cx as (sp, Tidx, typs, fp, idx, terms)) = - (case Termtab.lookup terms t of - SOME (n, _) => (n, cx) - | NONE => - let - val n = string_of_index fp idx - val terms' = Termtab.update (t, (n, sort)) terms - in (n, (sp, Tidx, typs, fp, idx+1, terms')) end) - -fun sign_of header dtyps (_, _, typs, _, _, terms) = { - header = header, - sorts = Typtab.fold (fn (_, (n, true)) => cons n | _ => I) typs [], - dtyps = dtyps, - funcs = Termtab.fold (fn (_, (n, SOME ss)) => cons (n,ss) | _ => I) terms []} - -fun recon_of ctxt rules thms ithms (_, _, typs, _, _, terms) = - let - fun add_typ (T, (n, _)) = Symtab.update (n, T) - val typs' = Typtab.fold add_typ typs Symtab.empty - - fun add_fun (t, (n, _)) = Symtab.update (n, t) - val terms' = Termtab.fold add_fun terms Symtab.empty - - val assms = map (pair ~1) thms @ ithms - in - {context=ctxt, typs=typs', terms=terms', rewrite_rules=rules, 
assms=assms} - end - - - -(* preprocessing *) - -(** datatype declarations **) - -fun collect_datatypes_and_records (tr_context, ctxt) ts = - let - val (declss, ctxt') = - fold (Term.fold_types Old_SMT_Datatypes.add_decls) ts ([], ctxt) - - fun is_decl_typ T = exists (exists (equal T o fst)) declss - - fun add_typ' T proper = - (case Old_SMT_Builtin.dest_builtin_typ ctxt' T of - SOME n => pair n - | NONE => add_typ T proper) - - fun tr_select sel = - let val T = Term.range_type (Term.fastype_of sel) - in add_fun sel NONE ##>> add_typ' T (not (is_decl_typ T)) end - fun tr_constr (constr, selects) = - add_fun constr NONE ##>> fold_map tr_select selects - fun tr_typ (T, cases) = add_typ' T false ##>> fold_map tr_constr cases - val (declss', tr_context') = fold_map (fold_map tr_typ) declss tr_context - - fun add (constr, selects) = - Termtab.update (constr, length selects) #> - fold (Termtab.update o rpair 1) selects - val funcs = fold (fold (fold add o snd)) declss Termtab.empty - in ((funcs, declss', tr_context', ctxt'), ts) end - (* FIXME: also return necessary datatype and record theorems *) - - -(** eta-expand quantifiers, let expressions and built-ins *) - -local - fun eta f T t = Abs (Name.uu, T, f (Term.incr_boundvars 1 t $ Bound 0)) - - fun exp f T = eta f (Term.domain_type (Term.domain_type T)) - - fun exp2 T q = - let val U = Term.domain_type T - in Abs (Name.uu, U, q $ eta I (Term.domain_type U) (Bound 0)) end - - fun exp2' T l = - let val (U1, U2) = Term.dest_funT T ||> Term.domain_type - in Abs (Name.uu, U1, eta I U2 (l $ Bound 0)) end - - fun expf k i T t = - let val Ts = drop i (fst (Old_SMT_Utils.dest_funT k T)) - in - Term.incr_boundvars (length Ts) t - |> fold_rev (fn i => fn u => u $ Bound i) (0 upto length Ts - 1) - |> fold_rev (fn T => fn u => Abs (Name.uu, T, u)) Ts - end -in - -fun eta_expand ctxt is_fol funcs = - let - fun exp_func t T ts = - (case Termtab.lookup funcs t of - SOME k => - Term.list_comb (t, ts) - |> k <> length ts ? 
expf k (length ts) T - | NONE => Term.list_comb (t, ts)) - - fun expand ((q as Const (@{const_name All}, _)) $ Abs a) = q $ abs_expand a - | expand ((q as Const (@{const_name All}, T)) $ t) = q $ exp expand T t - | expand (q as Const (@{const_name All}, T)) = exp2 T q - | expand ((q as Const (@{const_name Ex}, _)) $ Abs a) = q $ abs_expand a - | expand ((q as Const (@{const_name Ex}, T)) $ t) = q $ exp expand T t - | expand (q as Const (@{const_name Ex}, T)) = exp2 T q - | expand ((l as Const (@{const_name Let}, _)) $ t $ Abs a) = - if is_fol then expand (Term.betapply (Abs a, t)) - else l $ expand t $ abs_expand a - | expand ((l as Const (@{const_name Let}, T)) $ t $ u) = - if is_fol then expand (u $ t) - else l $ expand t $ exp expand (Term.range_type T) u - | expand ((l as Const (@{const_name Let}, T)) $ t) = - if is_fol then - let val U = Term.domain_type (Term.range_type T) - in Abs (Name.uu, U, Bound 0 $ Term.incr_boundvars 1 t) end - else exp2 T (l $ expand t) - | expand (l as Const (@{const_name Let}, T)) = - if is_fol then - let val U = Term.domain_type (Term.range_type T) - in - Abs (Name.uu, Term.domain_type T, Abs (Name.uu, U, - Bound 0 $ Bound 1)) - end - else exp2' T l - | expand t = - (case Term.strip_comb t of - (u as Const (c as (_, T)), ts) => - (case Old_SMT_Builtin.dest_builtin ctxt c ts of - SOME (_, k, us, mk) => - if k = length us then mk (map expand us) - else if k < length us then - chop k (map expand us) |>> mk |> Term.list_comb - else expf k (length ts) T (mk (map expand us)) - | NONE => exp_func u T (map expand ts)) - | (u as Free (_, T), ts) => exp_func u T (map expand ts) - | (Abs a, ts) => Term.list_comb (abs_expand a, map expand ts) - | (u, ts) => Term.list_comb (u, map expand ts)) - - and abs_expand (n, T, t) = Abs (n, T, expand t) - - in map expand end - -end - - -(** introduce explicit applications **) - -local - (* - Make application explicit for functions with varying number of arguments. 
- *) - - fun add t i = apfst (Termtab.map_default (t, i) (Integer.min i)) - fun add_type T = apsnd (Typtab.update (T, ())) - - fun min_arities t = - (case Term.strip_comb t of - (u as Const _, ts) => add u (length ts) #> fold min_arities ts - | (u as Free _, ts) => add u (length ts) #> fold min_arities ts - | (Abs (_, T, u), ts) => add_type T #> min_arities u #> fold min_arities ts - | (_, ts) => fold min_arities ts) - - fun minimize types t i = - let - fun find_min j [] _ = j - | find_min j (U :: Us) T = - if Typtab.defined types T then j - else find_min (j + 1) Us (U --> T) - - val (Ts, T) = Term.strip_type (Term.type_of t) - in find_min 0 (take i (rev Ts)) T end - - fun app u (t, T) = - (Const (@{const_name fun_app}, T --> T) $ t $ u, Term.range_type T) - - fun apply i t T ts = - let - val (ts1, ts2) = chop i ts - val (_, U) = Old_SMT_Utils.dest_funT i T - in fst (fold app ts2 (Term.list_comb (t, ts1), U)) end -in - -fun intro_explicit_application ctxt funcs ts = - let - val (arities, types) = fold min_arities ts (Termtab.empty, Typtab.empty) - val arities' = Termtab.map (minimize types) arities - - fun app_func t T ts = - if is_some (Termtab.lookup funcs t) then Term.list_comb (t, ts) - else apply (the (Termtab.lookup arities' t)) t T ts - - fun in_list T f t = HOLogic.mk_list T (map f (HOLogic.dest_list t)) - - fun traverse Ts t = - (case Term.strip_comb t of - (q as Const (@{const_name All}, _), [Abs (x, T, u)]) => - q $ Abs (x, T, in_trigger (T :: Ts) u) - | (q as Const (@{const_name Ex}, _), [Abs (x, T, u)]) => - q $ Abs (x, T, in_trigger (T :: Ts) u) - | (q as Const (@{const_name Let}, _), [u1, u2 as Abs _]) => - q $ traverse Ts u1 $ traverse Ts u2 - | (u as Const (c as (_, T)), ts) => - (case Old_SMT_Builtin.dest_builtin ctxt c ts of - SOME (_, k, us, mk) => - let - val (ts1, ts2) = chop k (map (traverse Ts) us) - val U = Term.strip_type T |>> snd o chop k |> (op --->) - in apply 0 (mk ts1) U ts2 end - | NONE => app_func u T (map (traverse Ts) ts)) - | (u as Free (_, T), ts) => app_func u T (map (traverse Ts) ts) - | (u as Bound i, ts) => apply 0 u (nth Ts i) (map (traverse Ts) ts) - | (Abs (n, T, u), ts) => traverses Ts (Abs (n, T, traverse (T::Ts) u)) ts - | (u, ts) => traverses Ts u ts) - and in_trigger Ts ((c as @{const trigger}) $ p $ t) = - c $ in_pats Ts p $ in_weight Ts t - | in_trigger Ts t = in_weight Ts t - and in_pats Ts ps = - in_list @{typ "pattern list"} - (in_list @{typ pattern} (in_pat Ts)) ps - and in_pat Ts ((p as Const (@{const_name pat}, _)) $ t) = - p $ traverse Ts t - | in_pat Ts ((p as Const (@{const_name nopat}, _)) $ t) = - p $ traverse Ts t - | in_pat _ t = raise TERM ("bad pattern", [t]) - and in_weight Ts ((c as @{const weight}) $ w $ t) = - c $ w $ traverse Ts t - | in_weight Ts t = traverse Ts t - and traverses Ts t ts = Term.list_comb (t, map (traverse Ts) ts) - in map (traverse []) ts end - -val fun_app_eq = mk_meta_eq @{thm fun_app_def} - -end - - -(** map HOL formulas to FOL formulas (i.e., separate formulas froms terms) **) - -local - val term_bool = @{lemma "term_true ~= term_false" - by (simp add: term_true_def term_false_def)} - - val is_quant = member (op =) [@{const_name All}, @{const_name Ex}] - - val fol_rules = [ - Let_def, - mk_meta_eq @{thm term_true_def}, - mk_meta_eq @{thm term_false_def}, - @{lemma "P = True == P" by (rule eq_reflection) simp}, - @{lemma "if P then True else False == P" by (rule eq_reflection) simp}] - - fun as_term t = @{const HOL.eq (bool)} $ t $ @{const term_true} - - exception BAD_PATTERN of unit - - fun 
wrap_in_if pat t = - if pat then - raise BAD_PATTERN () - else - @{const If (bool)} $ t $ @{const term_true} $ @{const term_false} - - fun is_builtin_conn_or_pred ctxt c ts = - is_some (Old_SMT_Builtin.dest_builtin_conn ctxt c ts) orelse - is_some (Old_SMT_Builtin.dest_builtin_pred ctxt c ts) - - fun builtin b ctxt c ts = - (case (Const c, ts) of - (@{const HOL.eq (bool)}, [t, u]) => - if t = @{const term_true} orelse u = @{const term_true} then - Old_SMT_Builtin.dest_builtin_eq ctxt t u - else b ctxt c ts - | _ => b ctxt c ts) -in - -fun folify ctxt = - let - fun in_list T f t = HOLogic.mk_list T (map_filter f (HOLogic.dest_list t)) - - fun in_term pat t = - (case Term.strip_comb t of - (@{const True}, []) => @{const term_true} - | (@{const False}, []) => @{const term_false} - | (u as Const (@{const_name If}, _), [t1, t2, t3]) => - if pat then raise BAD_PATTERN () - else u $ in_form t1 $ in_term pat t2 $ in_term pat t3 - | (Const (c as (n, _)), ts) => - if is_builtin_conn_or_pred ctxt c ts then wrap_in_if pat (in_form t) - else if is_quant n then wrap_in_if pat (in_form t) - else Term.list_comb (Const c, map (in_term pat) ts) - | (Free c, ts) => Term.list_comb (Free c, map (in_term pat) ts) - | _ => t) - - and in_weight ((c as @{const weight}) $ w $ t) = c $ w $ in_form t - | in_weight t = in_form t - - and in_pat ((p as Const (@{const_name pat}, _)) $ t) = - p $ in_term true t - | in_pat ((p as Const (@{const_name nopat}, _)) $ t) = - p $ in_term true t - | in_pat t = raise TERM ("bad pattern", [t]) - - and in_pats ps = - in_list @{typ "pattern list"} - (SOME o in_list @{typ pattern} (try in_pat)) ps - - and in_trigger ((c as @{const trigger}) $ p $ t) = - c $ in_pats p $ in_weight t - | in_trigger t = in_weight t - - and in_form t = - (case Term.strip_comb t of - (q as Const (qn, _), [Abs (n, T, u)]) => - if is_quant qn then q $ Abs (n, T, in_trigger u) - else as_term (in_term false t) - | (Const c, ts) => - (case Old_SMT_Builtin.dest_builtin_conn ctxt c ts of - SOME (_, _, us, mk) => mk (map in_form us) - | NONE => - (case Old_SMT_Builtin.dest_builtin_pred ctxt c ts of - SOME (_, _, us, mk) => mk (map (in_term false) us) - | NONE => as_term (in_term false t))) - | _ => as_term (in_term false t)) - in - map in_form #> - cons (Old_SMT_Utils.prop_of term_bool) #> - pair (fol_rules, [term_bool], builtin) - end - -end - - -(* translation into intermediate format *) - -(** utility functions **) - -val quantifier = (fn - @{const_name All} => SOME SForall - | @{const_name Ex} => SOME SExists - | _ => NONE) - -fun group_quant qname Ts (t as Const (q, _) $ Abs (_, T, u)) = - if q = qname then group_quant qname (T :: Ts) u else (Ts, t) - | group_quant _ Ts t = (Ts, t) - -fun dest_weight (@{const weight} $ w $ t) = - (SOME (snd (HOLogic.dest_number w)), t) - | dest_weight t = (NONE, t) - -fun dest_pat (Const (@{const_name pat}, _) $ t) = (t, true) - | dest_pat (Const (@{const_name nopat}, _) $ t) = (t, false) - | dest_pat t = raise TERM ("bad pattern", [t]) - -fun dest_pats [] = I - | dest_pats ts = - (case map dest_pat ts |> split_list ||> distinct (op =) of - (ps, [true]) => cons (SPat ps) - | (ps, [false]) => cons (SNoPat ps) - | _ => raise TERM ("bad multi-pattern", ts)) - -fun dest_trigger (@{const trigger} $ tl $ t) = - (rev (fold (dest_pats o HOLogic.dest_list) (HOLogic.dest_list tl) []), t) - | dest_trigger t = ([], t) - -fun dest_quant qn T t = quantifier qn |> Option.map (fn q => - let - val (Ts, u) = group_quant qn [T] t - val (ps, p) = dest_trigger u - val (w, b) = dest_weight p - in 
(q, rev Ts, ps, w, b) end) - -fun fold_map_pat f (SPat ts) = fold_map f ts #>> SPat - | fold_map_pat f (SNoPat ts) = fold_map f ts #>> SNoPat - - -(** translation from Isabelle terms into SMT intermediate terms **) - -fun intermediate header dtyps builtin ctxt ts trx = - let - fun transT (T as TFree _) = add_typ T true - | transT (T as TVar _) = (fn _ => raise TYPE ("bad SMT type", [T], [])) - | transT (T as Type _) = - (case Old_SMT_Builtin.dest_builtin_typ ctxt T of - SOME n => pair n - | NONE => add_typ T true) - - fun app n ts = SApp (n, ts) - - fun trans t = - (case Term.strip_comb t of - (Const (qn, _), [Abs (_, T, t1)]) => - (case dest_quant qn T t1 of - SOME (q, Ts, ps, w, b) => - fold_map transT Ts ##>> fold_map (fold_map_pat trans) ps ##>> - trans b #>> (fn ((Ts', ps'), b') => SQua (q, Ts', ps', w, b')) - | NONE => raise TERM ("unsupported quantifier", [t])) - | (Const (@{const_name Let}, _), [t1, Abs (_, T, t2)]) => - transT T ##>> trans t1 ##>> trans t2 #>> - (fn ((U, u1), u2) => SLet (U, u1, u2)) - | (u as Const (c as (_, T)), ts) => - (case builtin ctxt c ts of - SOME (n, _, us, _) => fold_map trans us #>> app n - | NONE => transs u T ts) - | (u as Free (_, T), ts) => transs u T ts - | (Bound i, []) => pair (SVar i) - | _ => raise TERM ("bad SMT term", [t])) - - and transs t T ts = - let val (Us, U) = Old_SMT_Utils.dest_funT (length ts) T - in - fold_map transT Us ##>> transT U #-> (fn Up => - add_fun t (SOME Up) ##>> fold_map trans ts #>> SApp) - end - - val (us, trx') = fold_map trans ts trx - in ((sign_of (header ts) dtyps trx', us), trx') end - - - -(* translation *) - -structure Configs = Generic_Data -( - type T = (Proof.context -> config) Old_SMT_Utils.dict - val empty = [] - val extend = I - fun merge data = Old_SMT_Utils.dict_merge fst data -) - -fun add_config (cs, cfg) = Configs.map (Old_SMT_Utils.dict_update (cs, cfg)) - -fun get_config ctxt = - let val cs = Old_SMT_Config.solver_class_of ctxt - in - (case Old_SMT_Utils.dict_get (Configs.get (Context.Proof ctxt)) cs of - SOME cfg => cfg ctxt - | NONE => error ("SMT: no translation configuration found " ^ - "for solver class " ^ quote (Old_SMT_Utils.string_of_class cs))) - end - -fun translate ctxt comments ithms = - let - val {prefixes, is_fol, header, has_datatypes, serialize} = get_config ctxt - - val with_datatypes = - has_datatypes andalso Config.get ctxt Old_SMT_Config.datatypes - - fun no_dtyps (tr_context, ctxt) ts = - ((Termtab.empty, [], tr_context, ctxt), ts) - - val ts1 = map (Envir.beta_eta_contract o Old_SMT_Utils.prop_of o snd) ithms - - val ((funcs, dtyps, tr_context, ctxt1), ts2) = - ((make_tr_context prefixes, ctxt), ts1) - |-> (if with_datatypes then collect_datatypes_and_records else no_dtyps) - - fun is_binder (Const (@{const_name Let}, _) $ _) = true - | is_binder t = Lambda_Lifting.is_quantifier t - - fun mk_trigger ((q as Const (@{const_name All}, _)) $ Abs (n, T, t)) = - q $ Abs (n, T, mk_trigger t) - | mk_trigger (eq as (Const (@{const_name HOL.eq}, T) $ lhs $ _)) = - Term.domain_type T --> @{typ pattern} - |> (fn T => Const (@{const_name pat}, T) $ lhs) - |> HOLogic.mk_list @{typ pattern} o single - |> HOLogic.mk_list @{typ "pattern list"} o single - |> (fn t => @{const trigger} $ t $ eq) - | mk_trigger t = t - - val (ctxt2, ts3) = - ts2 - |> eta_expand ctxt1 is_fol funcs - |> rpair ctxt1 - |-> Lambda_Lifting.lift_lambdas NONE is_binder - |-> (fn (ts', defs) => fn ctxt' => - map mk_trigger defs @ ts' - |> intro_explicit_application ctxt' funcs - |> pair ctxt') - - val ((rewrite_rules, 
extra_thms, builtin), ts4) = - (if is_fol then folify ctxt2 else pair ([], [], I)) ts3 - - val rewrite_rules' = fun_app_eq :: rewrite_rules - in - (ts4, tr_context) - |-> intermediate header dtyps (builtin Old_SMT_Builtin.dest_builtin) ctxt2 - |>> uncurry (serialize comments) - ||> recon_of ctxt2 rewrite_rules' extra_thms ithms - end - -end diff --git a/src/test-gen/Old_SMT/old_smt_utils.ML b/src/test-gen/Old_SMT/old_smt_utils.ML deleted file mode 100644 index 8603f1a..0000000 --- a/src/test-gen/Old_SMT/old_smt_utils.ML +++ /dev/null @@ -1,221 +0,0 @@ -(* Title: HOL/Library/Old_SMT/old_smt_utils.ML - Author: Sascha Boehme, TU Muenchen - -General utility functions. -*) - -signature OLD_SMT_UTILS = -sig - (*basic combinators*) - val repeat: ('a -> 'a option) -> 'a -> 'a - val repeat_yield: ('a -> 'b -> ('a * 'b) option) -> 'a -> 'b -> 'a * 'b - - (*class dictionaries*) - type class = string list - val basicC: class - val string_of_class: class -> string - type 'a dict = (class * 'a) Ord_List.T - val dict_map_default: class * 'a -> ('a -> 'a) -> 'a dict -> 'a dict - val dict_update: class * 'a -> 'a dict -> 'a dict - val dict_merge: ('a * 'a -> 'a) -> 'a dict * 'a dict -> 'a dict - val dict_lookup: 'a dict -> class -> 'a list - val dict_get: 'a dict -> class -> 'a option - - (*types*) - val dest_funT: int -> typ -> typ list * typ - - (*terms*) - val dest_conj: term -> term * term - val dest_disj: term -> term * term - val under_quant: (term -> 'a) -> term -> 'a - val is_number: term -> bool - - (*patterns and instantiations*) - val mk_const_pat: theory -> string -> (ctyp -> 'a) -> 'a * cterm - val destT1: ctyp -> ctyp - val destT2: ctyp -> ctyp - val instTs: ctyp list -> ctyp list * cterm -> cterm - val instT: ctyp -> ctyp * cterm -> cterm - val instT': cterm -> ctyp * cterm -> cterm - - (*certified terms*) - val dest_cabs: cterm -> Proof.context -> cterm * Proof.context - val dest_all_cabs: cterm -> Proof.context -> cterm * Proof.context - val dest_cbinder: cterm -> Proof.context -> cterm * Proof.context - val dest_all_cbinders: cterm -> Proof.context -> cterm * Proof.context - val mk_cprop: cterm -> cterm - val dest_cprop: cterm -> cterm - val mk_cequals: cterm -> cterm -> cterm - val term_of: cterm -> term - val prop_of: thm -> term - - (*conversions*) - val if_conv: (term -> bool) -> conv -> conv -> conv - val if_true_conv: (term -> bool) -> conv -> conv - val if_exists_conv: (term -> bool) -> conv -> conv - val binders_conv: (Proof.context -> conv) -> Proof.context -> conv - val under_quant_conv: (Proof.context * cterm list -> conv) -> - Proof.context -> conv - val prop_conv: conv -> conv -end - -structure Old_SMT_Utils: OLD_SMT_UTILS = -struct - -(* basic combinators *) - -fun repeat f = - let fun rep x = (case f x of SOME y => rep y | NONE => x) - in rep end - -fun repeat_yield f = - let fun rep x y = (case f x y of SOME (x', y') => rep x' y' | NONE => (x, y)) - in rep end - - -(* class dictionaries *) - -type class = string list - -val basicC = [] - -fun string_of_class [] = "basic" - | string_of_class cs = "basic." ^ space_implode "." 
cs - -type 'a dict = (class * 'a) Ord_List.T - -fun class_ord ((cs1, _), (cs2, _)) = - rev_order (list_ord fast_string_ord (cs1, cs2)) - -fun dict_insert (cs, x) d = - if AList.defined (op =) d cs then d - else Ord_List.insert class_ord (cs, x) d - -fun dict_map_default (cs, x) f = - dict_insert (cs, x) #> AList.map_entry (op =) cs f - -fun dict_update (e as (_, x)) = dict_map_default e (K x) - -fun dict_merge val_merge = sort class_ord o AList.join (op =) (K val_merge) - -fun dict_lookup d cs = - let fun match (cs', x) = if is_prefix (op =) cs' cs then SOME x else NONE - in map_filter match d end - -fun dict_get d cs = - (case AList.lookup (op =) d cs of - NONE => (case cs of [] => NONE | _ => dict_get d (take (length cs - 1) cs)) - | SOME x => SOME x) - - -(* types *) - -val dest_funT = - let - fun dest Ts 0 T = (rev Ts, T) - | dest Ts i (Type ("fun", [T, U])) = dest (T::Ts) (i-1) U - | dest _ _ T = raise TYPE ("not a function type", [T], []) - in dest [] end - - -(* terms *) - -fun dest_conj (@{const HOL.conj} $ t $ u) = (t, u) - | dest_conj t = raise TERM ("not a conjunction", [t]) - -fun dest_disj (@{const HOL.disj} $ t $ u) = (t, u) - | dest_disj t = raise TERM ("not a disjunction", [t]) - -fun under_quant f t = - (case t of - Const (@{const_name All}, _) $ Abs (_, _, u) => under_quant f u - | Const (@{const_name Ex}, _) $ Abs (_, _, u) => under_quant f u - | _ => f t) - -val is_number = - let - fun is_num env (Const (@{const_name If}, _) $ _ $ t $ u) = - is_num env t andalso is_num env u - | is_num env (Const (@{const_name Let}, _) $ t $ Abs (_, _, u)) = - is_num (t :: env) u - | is_num env (Bound i) = i < length env andalso is_num env (nth env i) - | is_num _ t = can HOLogic.dest_number t - in is_num [] end - - -(* patterns and instantiations *) - -fun mk_const_pat thy name destT = - let val cpat = Thm.global_cterm_of thy (Const (name, Sign.the_const_type thy name)) - in (destT (Thm.ctyp_of_cterm cpat), cpat) end - -val destT1 = hd o Thm.dest_ctyp -val destT2 = hd o tl o Thm.dest_ctyp - -fun instTs cUs (cTs, ct) = Thm.instantiate_cterm (map (dest_TVar o Thm.typ_of) cTs ~~ cUs, []) ct -fun instT cU (cT, ct) = instTs [cU] ([cT], ct) -fun instT' ct = instT (Thm.ctyp_of_cterm ct) - - -(* certified terms *) - -fun dest_cabs ct ctxt = - (case Thm.term_of ct of - Abs _ => - let val (n, ctxt') = yield_singleton Variable.variant_fixes Name.uu ctxt - in (snd (Thm.dest_abs (SOME n) ct), ctxt') end - | _ => raise CTERM ("no abstraction", [ct])) - -val dest_all_cabs = repeat_yield (try o dest_cabs) - -fun dest_cbinder ct ctxt = - (case Thm.term_of ct of - Const _ $ Abs _ => dest_cabs (Thm.dest_arg ct) ctxt - | _ => raise CTERM ("not a binder", [ct])) - -val dest_all_cbinders = repeat_yield (try o dest_cbinder) - -val mk_cprop = Thm.apply (Thm.cterm_of @{context} @{const Trueprop}) - -fun dest_cprop ct = - (case Thm.term_of ct of - @{const Trueprop} $ _ => Thm.dest_arg ct - | _ => raise CTERM ("not a property", [ct])) - -val equals = mk_const_pat @{theory} @{const_name Pure.eq} destT1 -fun mk_cequals ct cu = Thm.mk_binop (instT' ct equals) ct cu - -val dest_prop = (fn @{const Trueprop} $ t => t | t => t) -fun term_of ct = dest_prop (Thm.term_of ct) -fun prop_of thm = dest_prop (Thm.prop_of thm) - - -(* conversions *) - -fun if_conv pred cv1 cv2 ct = if pred (Thm.term_of ct) then cv1 ct else cv2 ct - -fun if_true_conv pred cv = if_conv pred cv Conv.all_conv - -fun if_exists_conv pred = if_true_conv (Term.exists_subterm pred) - -fun binders_conv cv ctxt = - Conv.binder_conv (binders_conv cv o snd) 
ctxt else_conv cv ctxt - -fun under_quant_conv cv ctxt = - let - fun quant_conv inside ctxt cvs ct = - (case Thm.term_of ct of - Const (@{const_name All}, _) $ Abs _ => - Conv.binder_conv (under_conv cvs) ctxt - | Const (@{const_name Ex}, _) $ Abs _ => - Conv.binder_conv (under_conv cvs) ctxt - | _ => if inside then cv (ctxt, cvs) else Conv.all_conv) ct - and under_conv cvs (cv, ctxt) = quant_conv true ctxt (cv :: cvs) - in quant_conv false ctxt [] end - -fun prop_conv cv ct = - (case Thm.term_of ct of - @{const Trueprop} $ _ => Conv.arg_conv cv ct - | _ => raise CTERM ("not a property", [ct])) - -end diff --git a/src/test-gen/Old_SMT/old_smt_word.ML b/src/test-gen/Old_SMT/old_smt_word.ML deleted file mode 100644 index 4303aba..0000000 --- a/src/test-gen/Old_SMT/old_smt_word.ML +++ /dev/null @@ -1,146 +0,0 @@ -(* Title: HOL/Library/Old_SMT/old_smt_word.ML - Author: Sascha Boehme, TU Muenchen - -SMT setup for words. -*) - -structure Old_SMT_Word: sig end = -struct - -open Word_Lib - -(* SMT-LIB logic *) - -fun smtlib_logic ts = - if exists (Term.exists_type (Term.exists_subtype is_wordT)) ts - then SOME "QF_AUFBV" - else NONE - - -(* SMT-LIB builtins *) - -local - val smtlibC = Old_SMTLIB_Interface.smtlibC - - val wordT = @{typ "'a::len word"} - - fun index1 n i = n ^ "[" ^ string_of_int i ^ "]" - fun index2 n i j = n ^ "[" ^ string_of_int i ^ ":" ^ string_of_int j ^ "]" - - fun word_typ (Type (@{type_name word}, [T])) = - Option.map (index1 "BitVec") (try dest_binT T) - | word_typ _ = NONE - - fun word_num (Type (@{type_name word}, [T])) i = - Option.map (index1 ("bv" ^ string_of_int i)) (try dest_binT T) - | word_num _ _ = NONE - - fun if_fixed pred m n T ts = - let val (Us, U) = Term.strip_type T - in - if pred (U, Us) then - SOME (n, length Us, ts, Term.list_comb o pair (Const (m, T))) - else NONE - end - - fun if_fixed_all m = if_fixed (forall (can dest_wordT) o (op ::)) m - fun if_fixed_args m = if_fixed (forall (can dest_wordT) o snd) m - - fun add_word_fun f (t, n) = - let val (m, _) = Term.dest_Const t - in Old_SMT_Builtin.add_builtin_fun smtlibC (Term.dest_Const t, K (f m n)) end - - fun hd2 xs = hd (tl xs) - - fun mk_nat i = @{const nat} $ HOLogic.mk_number @{typ nat} i - - fun dest_nat (@{const nat} $ n) = snd (HOLogic.dest_number n) - | dest_nat t = raise TERM ("not a natural number", [t]) - - fun mk_shift c [t, u] = Const c $ t $ mk_nat (snd (HOLogic.dest_number u)) - | mk_shift c ts = raise TERM ("bad arguments", Const c :: ts) - - fun shift m n T ts = - let val U = Term.domain_type T - in - (case (can dest_wordT U, try (dest_nat o hd2) ts) of - (true, SOME i) => - SOME (n, 2, [hd ts, HOLogic.mk_number U i], mk_shift (m, T)) - | _ => NONE) (* FIXME: also support non-numerical shifts *) - end - - fun mk_extract c i ts = Term.list_comb (Const c, mk_nat i :: ts) - - fun extract m n T ts = - let val U = Term.range_type (Term.range_type T) - in - (case (try (dest_nat o hd) ts, try dest_wordT U) of - (SOME lb, SOME i) => - SOME (index2 n (i + lb - 1) lb, 1, tl ts, mk_extract (m, T) lb) - | _ => NONE) - end - - fun mk_extend c ts = Term.list_comb (Const c, ts) - - fun extend m n T ts = - let val (U1, U2) = Term.dest_funT T - in - (case (try dest_wordT U1, try dest_wordT U2) of - (SOME i, SOME j) => - if j-i >= 0 then SOME (index1 n (j-i), 1, ts, mk_extend (m, T)) - else NONE - | _ => NONE) - end - - fun mk_rotate c i ts = Term.list_comb (Const c, mk_nat i :: ts) - - fun rotate m n T ts = - let val U = Term.domain_type (Term.range_type T) - in - (case (can dest_wordT U, try (dest_nat 
o hd) ts) of - (true, SOME i) => SOME (index1 n i, 1, tl ts, mk_rotate (m, T) i) - | _ => NONE) - end -in - -val setup_builtins = - Old_SMT_Builtin.add_builtin_typ smtlibC (wordT, word_typ, word_num) #> - fold (add_word_fun if_fixed_all) [ - (@{term "uminus :: 'a::len word => _"}, "bvneg"), - (@{term "plus :: 'a::len word => _"}, "bvadd"), - (@{term "minus :: 'a::len word => _"}, "bvsub"), - (@{term "times :: 'a::len word => _"}, "bvmul"), - (@{term "bitNOT :: 'a::len word => _"}, "bvnot"), - (@{term "bitAND :: 'a::len word => _"}, "bvand"), - (@{term "bitOR :: 'a::len word => _"}, "bvor"), - (@{term "bitXOR :: 'a::len word => _"}, "bvxor"), - (@{term "word_cat :: 'a::len word => _"}, "concat") ] #> - fold (add_word_fun shift) [ - (@{term "shiftl :: 'a::len word => _ "}, "bvshl"), - (@{term "shiftr :: 'a::len word => _"}, "bvlshr"), - (@{term "sshiftr :: 'a::len word => _"}, "bvashr") ] #> - add_word_fun extract - (@{term "slice :: _ => 'a::len word => _"}, "extract") #> - fold (add_word_fun extend) [ - (@{term "ucast :: 'a::len word => _"}, "zero_extend"), - (@{term "scast :: 'a::len word => _"}, "sign_extend") ] #> - fold (add_word_fun rotate) [ - (@{term word_rotl}, "rotate_left"), - (@{term word_rotr}, "rotate_right") ] #> - fold (add_word_fun if_fixed_args) [ - (@{term "less :: 'a::len word => _"}, "bvult"), - (@{term "less_eq :: 'a::len word => _"}, "bvule"), - (@{term word_sless}, "bvslt"), - (@{term word_sle}, "bvsle") ] - -end - - -(* setup *) - -val _ = - Theory.setup - (Context.theory_map - (Old_SMTLIB_Interface.add_logic (20, smtlib_logic) #> setup_builtins)) - -end diff --git a/src/test-gen/Old_SMT/old_smtlib_interface.ML b/src/test-gen/Old_SMT/old_smtlib_interface.ML deleted file mode 100644 index dc00faa..0000000 --- a/src/test-gen/Old_SMT/old_smtlib_interface.ML +++ /dev/null @@ -1,161 +0,0 @@ -(* Title: HOL/Library/Old_SMT/old_smtlib_interface.ML - Author: Sascha Boehme, TU Muenchen - -Interface to SMT solvers based on the SMT-LIB format. 
-*) - -signature OLD_SMTLIB_INTERFACE = -sig - val smtlibC: Old_SMT_Utils.class - val add_logic: int * (term list -> string option) -> Context.generic -> - Context.generic - val translate_config: Proof.context -> Old_SMT_Translate.config - val setup: theory -> theory -end - -structure Old_SMTLIB_Interface: OLD_SMTLIB_INTERFACE = -struct - - -val smtlibC = ["smtlib"] - - -(* builtins *) - -local - fun int_num _ i = SOME (string_of_int i) - - fun is_linear [t] = Old_SMT_Utils.is_number t - | is_linear [t, u] = Old_SMT_Utils.is_number t orelse Old_SMT_Utils.is_number u - | is_linear _ = false - - fun times _ _ ts = - let val mk = Term.list_comb o pair @{const times (int)} - in if is_linear ts then SOME ("*", 2, ts, mk) else NONE end -in - -val setup_builtins = - Old_SMT_Builtin.add_builtin_typ smtlibC (@{typ int}, K (SOME "Int"), int_num) #> - fold (Old_SMT_Builtin.add_builtin_fun' smtlibC) [ - (@{const True}, "true"), - (@{const False}, "false"), - (@{const Not}, "not"), - (@{const HOL.conj}, "and"), - (@{const HOL.disj}, "or"), - (@{const HOL.implies}, "implies"), - (@{const HOL.eq (bool)}, "iff"), - (@{const HOL.eq ('a)}, "="), - (@{const If (bool)}, "if_then_else"), - (@{const If ('a)}, "ite"), - (@{const less (int)}, "<"), - (@{const less_eq (int)}, "<="), - (@{const uminus (int)}, "~"), - (@{const plus (int)}, "+"), - (@{const minus (int)}, "-") ] #> - Old_SMT_Builtin.add_builtin_fun smtlibC - (Term.dest_Const @{const times (int)}, times) - -end - - -(* serialization *) - -(** header **) - -fun fst_int_ord ((i1, _), (i2, _)) = int_ord (i1, i2) - -structure Logics = Generic_Data -( - type T = (int * (term list -> string option)) list - val empty = [] - val extend = I - fun merge data = Ord_List.merge fst_int_ord data -) - -fun add_logic pf = Logics.map (Ord_List.insert fst_int_ord pf) - -fun choose_logic ctxt ts = - let - fun choose [] = "AUFLIA" - | choose ((_, f) :: fs) = (case f ts of SOME s => s | NONE => choose fs) - in [":logic " ^ choose (Logics.get (Context.Proof ctxt))] end - - -(** serialization **) - -val add = Buffer.add -fun sep f = add " " #> f -fun enclose l r f = sep (add l #> f #> add r) -val par = enclose "(" ")" -fun app n f = (fn [] => sep (add n) | xs => par (add n #> fold f xs)) -fun line f = f #> add "\n" - -fun var i = add "?v" #> add (string_of_int i) - -fun sterm l (Old_SMT_Translate.SVar i) = sep (var (l - i - 1)) - | sterm l (Old_SMT_Translate.SApp (n, ts)) = app n (sterm l) ts - | sterm _ (Old_SMT_Translate.SLet _) = - raise Fail "SMT-LIB: unsupported let expression" - | sterm l (Old_SMT_Translate.SQua (q, ss, ps, w, t)) = - let - fun quant Old_SMT_Translate.SForall = add "forall" - | quant Old_SMT_Translate.SExists = add "exists" - val vs = map_index (apfst (Integer.add l)) ss - fun var_decl (i, s) = par (var i #> sep (add s)) - val sub = sterm (l + length ss) - fun pat kind ts = sep (add kind #> enclose "{" " }" (fold sub ts)) - fun pats (Old_SMT_Translate.SPat ts) = pat ":pat" ts - | pats (Old_SMT_Translate.SNoPat ts) = pat ":nopat" ts - fun weight NONE = I - | weight (SOME i) = - sep (add ":weight { " #> add (string_of_int i) #> add " }") - in - par (quant q #> fold var_decl vs #> sub t #> fold pats ps #> weight w) - end - -fun ssort sorts = sort fast_string_ord sorts -fun fsort funcs = sort (prod_ord fast_string_ord (K EQUAL)) funcs - -fun sdatatypes decls = - let - fun con (n, []) = sep (add n) - | con (n, sels) = par (add n #> - fold (fn (n, s) => par (add n #> sep (add s))) sels) - fun dtyp (n, decl) = add n #> fold con decl - in line (add ":datatypes 
" #> par (fold (par o dtyp) decls)) end - -fun serialize comments {header, sorts, dtyps, funcs} ts = - Buffer.empty - |> line (add "(benchmark Isabelle") - |> line (add ":status unknown") - |> fold (line o add) header - |> length sorts > 0 ? - line (add ":extrasorts" #> par (fold (sep o add) (ssort sorts))) - |> fold sdatatypes dtyps - |> length funcs > 0 ? ( - line (add ":extrafuns" #> add " (") #> - fold (fn (f, (ss, s)) => - line (sep (app f (sep o add) (ss @ [s])))) (fsort funcs) #> - line (add ")")) - |> fold (fn t => line (add ":assumption" #> sterm 0 t)) ts - |> line (add ":formula true)") - |> fold (fn str => line (add "; " #> add str)) comments - |> Buffer.content - - -(* interface *) - -fun translate_config ctxt = { - prefixes = { - sort_prefix = "S", - func_prefix = "f"}, - header = choose_logic ctxt, - is_fol = true, - has_datatypes = false, - serialize = serialize} - -val setup = Context.theory_map ( - setup_builtins #> - Old_SMT_Translate.add_config (smtlibC, translate_config)) - -end diff --git a/src/test-gen/Old_SMT/old_z3_interface.ML b/src/test-gen/Old_SMT/old_z3_interface.ML deleted file mode 100644 index ec9f3d6..0000000 --- a/src/test-gen/Old_SMT/old_z3_interface.ML +++ /dev/null @@ -1,239 +0,0 @@ -(* Title: HOL/Library/Old_SMT/old_z3_interface.ML - Author: Sascha Boehme, TU Muenchen - -Interface to Z3 based on a relaxed version of SMT-LIB. -*) - -signature OLD_Z3_INTERFACE = -sig - val smtlib_z3C: Old_SMT_Utils.class - val setup: theory -> theory - - datatype sym = Sym of string * sym list - type mk_builtins = { - mk_builtin_typ: sym -> typ option, - mk_builtin_num: theory -> int -> typ -> cterm option, - mk_builtin_fun: theory -> sym -> cterm list -> cterm option } - val add_mk_builtins: mk_builtins -> Context.generic -> Context.generic - val mk_builtin_typ: Proof.context -> sym -> typ option - val mk_builtin_num: Proof.context -> int -> typ -> cterm option - val mk_builtin_fun: Proof.context -> sym -> cterm list -> cterm option - - val is_builtin_theory_term: Proof.context -> term -> bool -end - -structure Old_Z3_Interface: OLD_Z3_INTERFACE = -struct - -val smtlib_z3C = Old_SMTLIB_Interface.smtlibC @ ["z3"] - - - -(* interface *) - -local - fun translate_config ctxt = - let - val {prefixes, header, is_fol, serialize, ...} = - Old_SMTLIB_Interface.translate_config ctxt - in - {prefixes=prefixes, header=header, is_fol=is_fol, serialize=serialize, - has_datatypes=true} - end - - fun is_div_mod @{const divide (int)} = true - | is_div_mod @{const modulo (int)} = true - | is_div_mod _ = false - - val div_by_z3div = @{lemma - "ALL k l. k div l = ( - if k = 0 | l = 0 then 0 - else if (0 < k & 0 < l) | (k < 0 & 0 < l) then z3div k l - else z3div (-k) (-l))" - by (simp add: z3div_def)} - - val mod_by_z3mod = @{lemma - "ALL k l. 
k mod l = ( - if l = 0 then k - else if k = 0 then 0 - else if (0 < k & 0 < l) | (k < 0 & 0 < l) then z3mod k l - else - z3mod (-k) (-l))" - by (simp add: z3mod_def)} - - val have_int_div_mod = - exists (Term.exists_subterm is_div_mod o Thm.prop_of) - - fun add_div_mod _ (thms, extra_thms) = - if have_int_div_mod thms orelse have_int_div_mod extra_thms then - (thms, div_by_z3div :: mod_by_z3mod :: extra_thms) - else (thms, extra_thms) - - val setup_builtins = - Old_SMT_Builtin.add_builtin_fun' smtlib_z3C (@{const times (int)}, "*") #> - Old_SMT_Builtin.add_builtin_fun' smtlib_z3C (@{const z3div}, "div") #> - Old_SMT_Builtin.add_builtin_fun' smtlib_z3C (@{const z3mod}, "mod") -in - -val setup = Context.theory_map ( - setup_builtins #> - Old_SMT_Normalize.add_extra_norm (smtlib_z3C, add_div_mod) #> - Old_SMT_Translate.add_config (smtlib_z3C, translate_config)) - -end - - - -(* constructors *) - -datatype sym = Sym of string * sym list - - -(** additional constructors **) - -type mk_builtins = { - mk_builtin_typ: sym -> typ option, - mk_builtin_num: theory -> int -> typ -> cterm option, - mk_builtin_fun: theory -> sym -> cterm list -> cterm option } - -fun chained _ [] = NONE - | chained f (b :: bs) = (case f b of SOME y => SOME y | NONE => chained f bs) - -fun chained_mk_builtin_typ bs sym = - chained (fn {mk_builtin_typ=mk, ...} : mk_builtins => mk sym) bs - -fun chained_mk_builtin_num ctxt bs i T = - let val thy = Proof_Context.theory_of ctxt - in chained (fn {mk_builtin_num=mk, ...} : mk_builtins => mk thy i T) bs end - -fun chained_mk_builtin_fun ctxt bs s cts = - let val thy = Proof_Context.theory_of ctxt - in chained (fn {mk_builtin_fun=mk, ...} : mk_builtins => mk thy s cts) bs end - -fun fst_int_ord ((i1, _), (i2, _)) = int_ord (i1, i2) - -structure Mk_Builtins = Generic_Data -( - type T = (int * mk_builtins) list - val empty = [] - val extend = I - fun merge data = Ord_List.merge fst_int_ord data -) - -fun add_mk_builtins mk = - Mk_Builtins.map (Ord_List.insert fst_int_ord (serial (), mk)) - -fun get_mk_builtins ctxt = map snd (Mk_Builtins.get (Context.Proof ctxt)) - - -(** basic and additional constructors **) - -fun mk_builtin_typ _ (Sym ("Bool", _)) = SOME @{typ bool} - | mk_builtin_typ _ (Sym ("Int", _)) = SOME @{typ int} - | mk_builtin_typ _ (Sym ("bool", _)) = SOME @{typ bool} (*FIXME: legacy*) - | mk_builtin_typ _ (Sym ("int", _)) = SOME @{typ int} (*FIXME: legacy*) - | mk_builtin_typ ctxt sym = chained_mk_builtin_typ (get_mk_builtins ctxt) sym - -fun mk_builtin_num _ i @{typ int} = SOME (Numeral.mk_cnumber @{ctyp int} i) - | mk_builtin_num ctxt i T = - chained_mk_builtin_num ctxt (get_mk_builtins ctxt) i T - -val mk_true = Thm.cterm_of @{context} (@{const Not} $ @{const False}) -val mk_false = Thm.cterm_of @{context} @{const False} -val mk_not = Thm.apply (Thm.cterm_of @{context} @{const Not}) -val mk_implies = Thm.mk_binop (Thm.cterm_of @{context} @{const HOL.implies}) -val mk_iff = Thm.mk_binop (Thm.cterm_of @{context} @{const HOL.eq (bool)}) -val conj = Thm.cterm_of @{context} @{const HOL.conj} -val disj = Thm.cterm_of @{context} @{const HOL.disj} - -fun mk_nary _ cu [] = cu - | mk_nary ct _ cts = uncurry (fold_rev (Thm.mk_binop ct)) (split_last cts) - -val eq = Old_SMT_Utils.mk_const_pat @{theory} @{const_name HOL.eq} Old_SMT_Utils.destT1 -fun mk_eq ct cu = Thm.mk_binop (Old_SMT_Utils.instT' ct eq) ct cu - -val if_term = - Old_SMT_Utils.mk_const_pat @{theory} @{const_name If} - (Old_SMT_Utils.destT1 o Old_SMT_Utils.destT2) -fun mk_if cc ct cu = - Thm.mk_binop (Thm.apply 
(Old_SMT_Utils.instT' ct if_term) cc) ct cu - -val nil_term = - Old_SMT_Utils.mk_const_pat @{theory} @{const_name Nil} Old_SMT_Utils.destT1 -val cons_term = - Old_SMT_Utils.mk_const_pat @{theory} @{const_name Cons} Old_SMT_Utils.destT1 -fun mk_list cT cts = - fold_rev (Thm.mk_binop (Old_SMT_Utils.instT cT cons_term)) cts - (Old_SMT_Utils.instT cT nil_term) - -val distinct = Old_SMT_Utils.mk_const_pat @{theory} @{const_name distinct} - (Old_SMT_Utils.destT1 o Old_SMT_Utils.destT1) -fun mk_distinct [] = mk_true - | mk_distinct (cts as (ct :: _)) = - Thm.apply (Old_SMT_Utils.instT' ct distinct) - (mk_list (Thm.ctyp_of_cterm ct) cts) - -val access = - Old_SMT_Utils.mk_const_pat @{theory} @{const_name fun_app} Old_SMT_Utils.destT1 -fun mk_access array = Thm.apply (Old_SMT_Utils.instT' array access) array - -val update = Old_SMT_Utils.mk_const_pat @{theory} @{const_name fun_upd} - (Thm.dest_ctyp o Old_SMT_Utils.destT1) -fun mk_update array index value = - let val cTs = Thm.dest_ctyp (Thm.ctyp_of_cterm array) - in - Thm.apply (Thm.mk_binop (Old_SMT_Utils.instTs cTs update) array index) value - end - -val mk_uminus = Thm.apply (Thm.cterm_of @{context} @{const uminus (int)}) -val add = Thm.cterm_of @{context} @{const plus (int)} -val int0 = Numeral.mk_cnumber @{ctyp int} 0 -val mk_sub = Thm.mk_binop (Thm.cterm_of @{context} @{const minus (int)}) -val mk_mul = Thm.mk_binop (Thm.cterm_of @{context} @{const times (int)}) -val mk_div = Thm.mk_binop (Thm.cterm_of @{context} @{const z3div}) -val mk_mod = Thm.mk_binop (Thm.cterm_of @{context} @{const z3mod}) -val mk_lt = Thm.mk_binop (Thm.cterm_of @{context} @{const less (int)}) -val mk_le = Thm.mk_binop (Thm.cterm_of @{context} @{const less_eq (int)}) - -fun mk_builtin_fun ctxt sym cts = - (case (sym, cts) of - (Sym ("true", _), []) => SOME mk_true - | (Sym ("false", _), []) => SOME mk_false - | (Sym ("not", _), [ct]) => SOME (mk_not ct) - | (Sym ("and", _), _) => SOME (mk_nary conj mk_true cts) - | (Sym ("or", _), _) => SOME (mk_nary disj mk_false cts) - | (Sym ("implies", _), [ct, cu]) => SOME (mk_implies ct cu) - | (Sym ("iff", _), [ct, cu]) => SOME (mk_iff ct cu) - | (Sym ("~", _), [ct, cu]) => SOME (mk_iff ct cu) - | (Sym ("xor", _), [ct, cu]) => SOME (mk_not (mk_iff ct cu)) - | (Sym ("if", _), [ct1, ct2, ct3]) => SOME (mk_if ct1 ct2 ct3) - | (Sym ("ite", _), [ct1, ct2, ct3]) => SOME (mk_if ct1 ct2 ct3) (* FIXME: remove *) - | (Sym ("=", _), [ct, cu]) => SOME (mk_eq ct cu) - | (Sym ("distinct", _), _) => SOME (mk_distinct cts) - | (Sym ("select", _), [ca, ck]) => SOME (Thm.apply (mk_access ca) ck) - | (Sym ("store", _), [ca, ck, cv]) => SOME (mk_update ca ck cv) - | _ => - (case (sym, try (Thm.typ_of_cterm o hd) cts, cts) of - (Sym ("+", _), SOME @{typ int}, _) => SOME (mk_nary add int0 cts) - | (Sym ("-", _), SOME @{typ int}, [ct]) => SOME (mk_uminus ct) - | (Sym ("-", _), SOME @{typ int}, [ct, cu]) => SOME (mk_sub ct cu) - | (Sym ("*", _), SOME @{typ int}, [ct, cu]) => SOME (mk_mul ct cu) - | (Sym ("div", _), SOME @{typ int}, [ct, cu]) => SOME (mk_div ct cu) - | (Sym ("mod", _), SOME @{typ int}, [ct, cu]) => SOME (mk_mod ct cu) - | (Sym ("<", _), SOME @{typ int}, [ct, cu]) => SOME (mk_lt ct cu) - | (Sym ("<=", _), SOME @{typ int}, [ct, cu]) => SOME (mk_le ct cu) - | (Sym (">", _), SOME @{typ int}, [ct, cu]) => SOME (mk_lt cu ct) - | (Sym (">=", _), SOME @{typ int}, [ct, cu]) => SOME (mk_le cu ct) - | _ => chained_mk_builtin_fun ctxt (get_mk_builtins ctxt) sym cts)) - - - -(* abstraction *) - -fun is_builtin_theory_term ctxt t = - if 
Old_SMT_Builtin.is_builtin_num ctxt t then true - else - (case Term.strip_comb t of - (Const c, ts) => Old_SMT_Builtin.is_builtin_fun ctxt c ts - | _ => false) - -end diff --git a/src/test-gen/Old_SMT/old_z3_model.ML b/src/test-gen/Old_SMT/old_z3_model.ML deleted file mode 100644 index b61f104..0000000 --- a/src/test-gen/Old_SMT/old_z3_model.ML +++ /dev/null @@ -1,337 +0,0 @@ -(* Title: HOL/Library/Old_SMT/old_z3_model.ML - Author: Sascha Boehme and Philipp Meyer, TU Muenchen - -Parser for counterexamples generated by Z3. -*) - -signature OLD_Z3_MODEL = -sig - val parse_counterex: Proof.context -> Old_SMT_Translate.recon -> string list -> - term list * term list -end - -structure Old_Z3_Model: OLD_Z3_MODEL = -struct - - -(* counterexample expressions *) - -datatype expr = True | False | Number of int * int option | Value of int | - Array of array | App of string * expr list -and array = Fresh of expr | Store of (array * expr) * expr - - -(* parsing *) - -val space = Scan.many Symbol.is_ascii_blank -fun spaced p = p --| space -fun in_parens p = spaced (Scan.$$ "(") |-- p --| spaced (Scan.$$ ")") -fun in_braces p = spaced (Scan.$$ "{") |-- p --| spaced (Scan.$$ "}") - -val digit = (fn - "0" => SOME 0 | "1" => SOME 1 | "2" => SOME 2 | "3" => SOME 3 | - "4" => SOME 4 | "5" => SOME 5 | "6" => SOME 6 | "7" => SOME 7 | - "8" => SOME 8 | "9" => SOME 9 | _ => NONE) - -val nat_num = spaced (Scan.repeat1 (Scan.some digit) >> - (fn ds => fold (fn d => fn i => i * 10 + d) ds 0)) -val int_num = spaced (Scan.optional ($$ "-" >> K (fn i => ~i)) I :|-- - (fn sign => nat_num >> sign)) - -val is_char = Symbol.is_ascii_letter orf Symbol.is_ascii_digit orf - member (op =) (raw_explode "_+*-/%~=<>$&|?!.@^#") -val name = spaced (Scan.many1 is_char >> implode) - -fun $$$ s = spaced (Scan.this_string s) - -fun array_expr st = st |> in_parens ( - $$$ "const" |-- expr >> Fresh || - $$$ "store" |-- array_expr -- expr -- expr >> Store) - -and expr st = st |> ( - $$$ "true" >> K True || - $$$ "false" >> K False || - int_num -- Scan.option ($$$ "/" |-- int_num) >> Number || - $$$ "val!" 
|-- nat_num >> Value || - name >> (App o rpair []) || - array_expr >> Array || - in_parens (name -- Scan.repeat1 expr) >> App) - -fun args st = ($$$ "->" >> K [] || expr ::: args) st -val args_case = args -- expr -val else_case = $$$ "else" -- $$$ "->" |-- expr >> pair ([] : expr list) - -val func = - let fun cases st = (else_case >> single || args_case ::: cases) st - in in_braces cases end - -val cex = space |-- - Scan.repeat (name --| $$$ "->" -- (func || expr >> (single o pair []))) - -fun resolve terms ((n, k), cases) = - (case Symtab.lookup terms n of - NONE => NONE - | SOME t => SOME ((t, k), cases)) - -fun annotate _ (_, []) = NONE - | annotate terms (n, [([], c)]) = resolve terms ((n, 0), (c, [])) - | annotate _ (_, [_]) = NONE - | annotate terms (n, cases as (args, _) :: _) = - let val (cases', (_, else_case)) = split_last cases - in resolve terms ((n, length args), (else_case, cases')) end - -fun read_cex terms ls = - maps (cons "\n" o raw_explode) ls - |> try (fst o Scan.finite Symbol.stopper cex) - |> the_default [] - |> map_filter (annotate terms) - - -(* translation into terms *) - -fun max_value vs = - let - fun max_val_expr (Value i) = Integer.max i - | max_val_expr (App (_, es)) = fold max_val_expr es - | max_val_expr (Array a) = max_val_array a - | max_val_expr _ = I - - and max_val_array (Fresh e) = max_val_expr e - | max_val_array (Store ((a, e1), e2)) = - max_val_array a #> max_val_expr e1 #> max_val_expr e2 - - fun max_val (_, (ec, cs)) = - max_val_expr ec #> fold (fn (es, e) => fold max_val_expr (e :: es)) cs - - in fold max_val vs ~1 end - -fun with_context terms f vs = fst (fold_map f vs (terms, max_value vs + 1)) - -fun get_term n T es (cx as (terms, next_val)) = - (case Symtab.lookup terms n of - SOME t => ((t, es), cx) - | NONE => - let val t = Var (("skolem", next_val), T) - in ((t, []), (Symtab.update (n, t) terms, next_val + 1)) end) - -fun trans_expr _ True = pair @{const True} - | trans_expr _ False = pair @{const False} - | trans_expr T (Number (i, NONE)) = pair (HOLogic.mk_number T i) - | trans_expr T (Number (i, SOME j)) = - pair (Const (@{const_name divide}, [T, T] ---> T) $ - HOLogic.mk_number T i $ HOLogic.mk_number T j) - | trans_expr T (Value i) = pair (Var (("value", i), T)) - | trans_expr T (Array a) = trans_array T a - | trans_expr T (App (n, es)) = get_term n T es #-> (fn (t, es') => - let val Ts = fst (Old_SMT_Utils.dest_funT (length es') (Term.fastype_of t)) - in - fold_map (uncurry trans_expr) (Ts ~~ es') #>> Term.list_comb o pair t - end) - -and trans_array T a = - let val (dT, rT) = Term.dest_funT T - in - (case a of - Fresh e => trans_expr rT e #>> (fn t => Abs ("x", dT, t)) - | Store ((a', e1), e2) => - trans_array T a' ##>> trans_expr dT e1 ##>> trans_expr rT e2 #>> - (fn ((m, k), v) => - Const (@{const_name fun_upd}, [T, dT, rT] ---> T) $ m $ k $ v)) - end - -fun trans_pattern T ([], e) = trans_expr T e #>> pair [] - | trans_pattern T (arg :: args, e) = - trans_expr (Term.domain_type T) arg ##>> - trans_pattern (Term.range_type T) (args, e) #>> - (fn (arg', (args', e')) => (arg' :: args', e')) - -fun mk_fun_upd T U = Const (@{const_name fun_upd}, [T --> U, T, U, T] ---> U) - -fun mk_update ([], u) _ = u - | mk_update ([t], u) f = - uncurry mk_fun_upd (Term.dest_funT (Term.fastype_of f)) $ f $ t $ u - | mk_update (t :: ts, u) f = - let - val (dT, rT) = Term.dest_funT (Term.fastype_of f) - val (dT', rT') = Term.dest_funT rT - in - mk_fun_upd dT rT $ f $ t $ - mk_update (ts, u) (absdummy dT' (Const ("_", rT'))) - end - -fun mk_lambda Ts 
(t, pats) = - fold_rev absdummy Ts t |> fold mk_update pats - -fun translate ((t, k), (e, cs)) = - let - val T = Term.fastype_of t - val (Us, U) = Old_SMT_Utils.dest_funT k (Term.fastype_of t) - - fun mk_full_def u' pats = - pats - |> filter_out (fn (_, u) => u aconv u') - |> HOLogic.mk_eq o pair t o mk_lambda Us o pair u' - - fun mk_eq (us, u) = HOLogic.mk_eq (Term.list_comb (t, us), u) - fun mk_eqs u' [] = [HOLogic.mk_eq (t, u')] - | mk_eqs _ pats = map mk_eq pats - in - trans_expr U e ##>> - (if k = 0 then pair [] else fold_map (trans_pattern T) cs) #>> - (fn (u', pats) => (mk_eqs u' pats, mk_full_def u' pats)) - end - - -(* normalization *) - -fun partition_eqs f = - let - fun part t (xs, ts) = - (case try HOLogic.dest_eq t of - SOME (l, r) => (case f l r of SOME x => (x::xs, ts) | _ => (xs, t::ts)) - | NONE => (xs, t :: ts)) - in (fn ts => fold part ts ([], [])) end - -fun first_eq pred = - let - fun part _ [] = NONE - | part us (t :: ts) = - (case try (pred o HOLogic.dest_eq) t of - SOME (SOME lr) => SOME (lr, fold cons us ts) - | _ => part (t :: us) ts) - in (fn ts => part [] ts) end - -fun replace_vars tab = - let - fun repl v = the_default v (AList.lookup (op aconv) tab v) - fun replace (v as Var _) = repl v - | replace (v as Free _) = repl v - | replace t = t - in map (Term.map_aterms replace) end - -fun remove_int_nat_coercions (eqs, defs) = - let - fun mk_nat_num t i = - (case try HOLogic.dest_number i of - SOME (_, n) => SOME (t, HOLogic.mk_number @{typ nat} n) - | NONE => NONE) - fun nat_of (@{const of_nat (int)} $ (t as Var _)) i = mk_nat_num t i - | nat_of (@{const nat} $ i) (t as Var _) = mk_nat_num t i - | nat_of _ _ = NONE - val (nats, eqs') = partition_eqs nat_of eqs - - fun is_coercion t = - (case try HOLogic.dest_eq t of - SOME (@{const of_nat (int)}, _) => true - | SOME (@{const nat}, _) => true - | _ => false) - in apply2 (replace_vars nats) (eqs', filter_out is_coercion defs) end - -fun unfold_funapp (eqs, defs) = - let - fun unfold_app (Const (@{const_name fun_app}, _) $ f $ t) = f $ t - | unfold_app t = t - fun unfold_eq ((eq as Const (@{const_name HOL.eq}, _)) $ t $ u) = - eq $ unfold_app t $ u - | unfold_eq t = t - - fun is_fun_app t = - (case try HOLogic.dest_eq t of - SOME (Const (@{const_name fun_app}, _), _) => true - | _ => false) - - in (map unfold_eq eqs, filter_out is_fun_app defs) end - -val unfold_eqs = - let - val is_ground = not o Term.exists_subterm Term.is_Var - fun is_non_rec (v, t) = not (Term.exists_subterm (equal v) t) - - fun rewr_var (l as Var _, r) = if is_ground r then SOME (l, r) else NONE - | rewr_var (r, l as Var _) = if is_ground r then SOME (l, r) else NONE - | rewr_var _ = NONE - - fun rewr_free' e = if is_non_rec e then SOME e else NONE - fun rewr_free (e as (Free _, _)) = rewr_free' e - | rewr_free (e as (_, Free _)) = rewr_free' (swap e) - | rewr_free _ = NONE - - fun is_trivial (Const (@{const_name HOL.eq}, _) $ t $ u) = t aconv u - | is_trivial _ = false - - fun replace r = replace_vars [r] #> filter_out is_trivial - - fun unfold_vars (es, ds) = - (case first_eq rewr_var es of - SOME (lr, es') => unfold_vars (apply2 (replace lr) (es', ds)) - | NONE => (es, ds)) - - fun unfold_frees ues (es, ds) = - (case first_eq rewr_free es of - SOME (lr, es') => - apply2 (replace lr) (es', ds) - |> unfold_frees (HOLogic.mk_eq lr :: replace lr ues) - | NONE => (ues @ es, ds)) - - in unfold_vars #> unfold_frees [] end - -fun swap_free ((eq as Const (@{const_name HOL.eq}, _)) $ t $ (u as Free _)) = - eq $ u $ t - | swap_free t = t - -fun 
frees_for_vars ctxt (eqs, defs) = - let - fun fresh_free i T (cx as (frees, ctxt)) = - (case Inttab.lookup frees i of - SOME t => (t, cx) - | NONE => - let - val (n, ctxt') = yield_singleton Variable.variant_fixes "" ctxt - val t = Free (n, T) - in (t, (Inttab.update (i, t) frees, ctxt')) end) - - fun repl_var (Var ((_, i), T)) = fresh_free i T - | repl_var (t $ u) = repl_var t ##>> repl_var u #>> op $ - | repl_var (Abs (n, T, t)) = repl_var t #>> (fn t' => Abs (n, T, t')) - | repl_var t = pair t - in - (Inttab.empty, ctxt) - |> fold_map repl_var eqs - ||>> fold_map repl_var defs - |> fst - end - - -(* overall procedure *) - -val is_free_constraint = Term.exists_subterm (fn Free _ => true | _ => false) - -fun is_free_def (Const (@{const_name HOL.eq}, _) $ Free _ $ _) = true - | is_free_def _ = false - -fun defined tp = - try (apply2 (fst o HOLogic.dest_eq)) tp - |> the_default false o Option.map (op aconv) - -fun add_free_defs free_cs defs = - let val (free_defs, defs') = List.partition is_free_def defs - in (free_cs @ filter_out (member defined free_cs) free_defs, defs') end - -fun is_const_def (Const (@{const_name HOL.eq}, _) $ Const _ $ _) = true - | is_const_def _ = false - -fun parse_counterex ctxt ({terms, ...} : Old_SMT_Translate.recon) ls = - read_cex terms ls - |> with_context terms translate - |> apfst flat o split_list - |> remove_int_nat_coercions - |> unfold_funapp - |> unfold_eqs - |>> map swap_free - |>> filter is_free_constraint - |-> add_free_defs - |> frees_for_vars ctxt - ||> filter is_const_def - -end - diff --git a/src/test-gen/Old_SMT/old_z3_proof_literals.ML b/src/test-gen/Old_SMT/old_z3_proof_literals.ML deleted file mode 100644 index 89ce7d1..0000000 --- a/src/test-gen/Old_SMT/old_z3_proof_literals.ML +++ /dev/null @@ -1,363 +0,0 @@ -(* Title: HOL/Library/Old_SMT/old_z3_proof_literals.ML - Author: Sascha Boehme, TU Muenchen - -Proof tools related to conjunctions and disjunctions. 
-*) - -signature OLD_Z3_PROOF_LITERALS = -sig - (*literal table*) - type littab = thm Termtab.table - val make_littab: thm list -> littab - val insert_lit: thm -> littab -> littab - val delete_lit: thm -> littab -> littab - val lookup_lit: littab -> term -> thm option - val get_first_lit: (term -> bool) -> littab -> thm option - - (*rules*) - val true_thm: thm - val rewrite_true: thm - - (*properties*) - val is_conj: term -> bool - val is_disj: term -> bool - val exists_lit: bool -> (term -> bool) -> term -> bool - val negate: cterm -> cterm - - (*proof tools*) - val explode: bool -> bool -> bool -> term list -> thm -> thm list - val join: bool -> littab -> term -> thm - val prove_conj_disj_eq: cterm -> thm -end - -structure Old_Z3_Proof_Literals: OLD_Z3_PROOF_LITERALS = -struct - - - -(* literal table *) - -type littab = thm Termtab.table - -fun make_littab thms = - fold (Termtab.update o `Old_SMT_Utils.prop_of) thms Termtab.empty - -fun insert_lit thm = Termtab.update (`Old_SMT_Utils.prop_of thm) -fun delete_lit thm = Termtab.delete (Old_SMT_Utils.prop_of thm) -fun lookup_lit lits = Termtab.lookup lits -fun get_first_lit f = - Termtab.get_first (fn (t, thm) => if f t then SOME thm else NONE) - - - -(* rules *) - -val true_thm = @{lemma "~False" by simp} -val rewrite_true = @{lemma "True == ~ False" by simp} - - - -(* properties and term operations *) - -val is_neg = (fn @{const Not} $ _ => true | _ => false) -fun is_neg' f = (fn @{const Not} $ t => f t | _ => false) -val is_dneg = is_neg' is_neg -val is_conj = (fn @{const HOL.conj} $ _ $ _ => true | _ => false) -val is_disj = (fn @{const HOL.disj} $ _ $ _ => true | _ => false) - -fun dest_disj_term' f = (fn - @{const Not} $ (@{const HOL.disj} $ t $ u) => SOME (f t, f u) - | _ => NONE) - -val dest_conj_term = (fn @{const HOL.conj} $ t $ u => SOME (t, u) | _ => NONE) -val dest_disj_term = - dest_disj_term' (fn @{const Not} $ t => t | t => @{const Not} $ t) - -fun exists_lit is_conj P = - let - val dest = if is_conj then dest_conj_term else dest_disj_term - fun exists t = P t orelse - (case dest t of - SOME (t1, t2) => exists t1 orelse exists t2 - | NONE => false) - in exists end - -val negate = Thm.apply (Thm.cterm_of @{context} @{const Not}) - - - -(* proof tools *) - -(** explosion of conjunctions and disjunctions **) - -local - val precomp = Old_Z3_Proof_Tools.precompose2 - - fun destc ct = Thm.dest_binop (Thm.dest_arg ct) - val dest_conj1 = precomp destc @{thm conjunct1} - val dest_conj2 = precomp destc @{thm conjunct2} - fun dest_conj_rules t = - dest_conj_term t |> Option.map (K (dest_conj1, dest_conj2)) - - fun destd f ct = f (Thm.dest_binop (Thm.dest_arg (Thm.dest_arg ct))) - val dn1 = apfst Thm.dest_arg and dn2 = apsnd Thm.dest_arg - val dest_disj1 = precomp (destd I) @{lemma "~(P | Q) ==> ~P" by fast} - val dest_disj2 = precomp (destd dn1) @{lemma "~(~P | Q) ==> P" by fast} - val dest_disj3 = precomp (destd I) @{lemma "~(P | Q) ==> ~Q" by fast} - val dest_disj4 = precomp (destd dn2) @{lemma "~(P | ~Q) ==> Q" by fast} - - fun dest_disj_rules t = - (case dest_disj_term' is_neg t of - SOME (true, true) => SOME (dest_disj2, dest_disj4) - | SOME (true, false) => SOME (dest_disj2, dest_disj3) - | SOME (false, true) => SOME (dest_disj1, dest_disj4) - | SOME (false, false) => SOME (dest_disj1, dest_disj3) - | NONE => NONE) - - fun destn ct = [Thm.dest_arg (Thm.dest_arg (Thm.dest_arg ct))] - val dneg_rule = Old_Z3_Proof_Tools.precompose destn @{thm notnotD} -in - -(* - explode a term into literals and collect all rules to be able to 
deduce - particular literals afterwards -*) -fun explode_term is_conj = - let - val dest = if is_conj then dest_conj_term else dest_disj_term - val dest_rules = if is_conj then dest_conj_rules else dest_disj_rules - - fun add (t, rs) = Termtab.map_default (t, rs) - (fn rs' => if length rs' < length rs then rs' else rs) - - fun explode1 rules t = - (case dest t of - SOME (t1, t2) => - let val (rule1, rule2) = the (dest_rules t) - in - explode1 (rule1 :: rules) t1 #> - explode1 (rule2 :: rules) t2 #> - add (t, rev rules) - end - | NONE => add (t, rev rules)) - - fun explode0 (@{const Not} $ (@{const Not} $ t)) = - Termtab.make [(t, [dneg_rule])] - | explode0 t = explode1 [] t Termtab.empty - - in explode0 end - -(* - extract a literal by applying previously collected rules -*) -fun extract_lit thm rules = fold Old_Z3_Proof_Tools.compose rules thm - - -(* - explode a theorem into its literals -*) -fun explode is_conj full keep_intermediate stop_lits = - let - val dest_rules = if is_conj then dest_conj_rules else dest_disj_rules - val tab = fold (Termtab.update o rpair ()) stop_lits Termtab.empty - - fun explode1 thm = - if Termtab.defined tab (Old_SMT_Utils.prop_of thm) then cons thm - else - (case dest_rules (Old_SMT_Utils.prop_of thm) of - SOME (rule1, rule2) => - explode2 rule1 thm #> - explode2 rule2 thm #> - keep_intermediate ? cons thm - | NONE => cons thm) - - and explode2 dest_rule thm = - if full orelse - exists_lit is_conj (Termtab.defined tab) (Old_SMT_Utils.prop_of thm) - then explode1 (Old_Z3_Proof_Tools.compose dest_rule thm) - else cons (Old_Z3_Proof_Tools.compose dest_rule thm) - - fun explode0 thm = - if not is_conj andalso is_dneg (Old_SMT_Utils.prop_of thm) - then [Old_Z3_Proof_Tools.compose dneg_rule thm] - else explode1 thm [] - - in explode0 end - -end - - - -(** joining of literals to conjunctions or disjunctions **) - -local - fun on_cprem i f thm = f (Thm.cprem_of thm i) - fun on_cprop f thm = f (Thm.cprop_of thm) - fun precomp2 f g thm = (on_cprem 1 f thm, on_cprem 2 g thm, f, g, thm) - fun comp2 (cv1, cv2, f, g, rule) thm1 thm2 = - Thm.instantiate ([], - [(dest_Var (Thm.term_of cv1), on_cprop f thm1), - (dest_Var (Thm.term_of cv2), on_cprop g thm2)]) rule - |> Old_Z3_Proof_Tools.discharge thm1 |> Old_Z3_Proof_Tools.discharge thm2 - - fun d1 ct = Thm.dest_arg ct and d2 ct = Thm.dest_arg (Thm.dest_arg ct) - - val conj_rule = precomp2 d1 d1 @{thm conjI} - fun comp_conj ((_, thm1), (_, thm2)) = comp2 conj_rule thm1 thm2 - - val disj1 = precomp2 d2 d2 @{lemma "~P ==> ~Q ==> ~(P | Q)" by fast} - val disj2 = precomp2 d2 d1 @{lemma "~P ==> Q ==> ~(P | ~Q)" by fast} - val disj3 = precomp2 d1 d2 @{lemma "P ==> ~Q ==> ~(~P | Q)" by fast} - val disj4 = precomp2 d1 d1 @{lemma "P ==> Q ==> ~(~P | ~Q)" by fast} - - fun comp_disj ((false, thm1), (false, thm2)) = comp2 disj1 thm1 thm2 - | comp_disj ((false, thm1), (true, thm2)) = comp2 disj2 thm1 thm2 - | comp_disj ((true, thm1), (false, thm2)) = comp2 disj3 thm1 thm2 - | comp_disj ((true, thm1), (true, thm2)) = comp2 disj4 thm1 thm2 - - fun dest_conj (@{const HOL.conj} $ t $ u) = ((false, t), (false, u)) - | dest_conj t = raise TERM ("dest_conj", [t]) - - val neg = (fn @{const Not} $ t => (true, t) | t => (false, @{const Not} $ t)) - fun dest_disj (@{const Not} $ (@{const HOL.disj} $ t $ u)) = (neg t, neg u) - | dest_disj t = raise TERM ("dest_disj", [t]) - - val precomp = Old_Z3_Proof_Tools.precompose - val dnegE = precomp (single o d2 o d1) @{thm notnotD} - val dnegI = precomp (single o d1) @{lemma "P ==> ~~P" by fast} - fun 
as_dneg f t = f (@{const Not} $ (@{const Not} $ t)) - - val precomp2 = Old_Z3_Proof_Tools.precompose2 - fun dni f = apsnd f o Thm.dest_binop o f o d1 - val negIffE = precomp2 (dni d1) @{lemma "~(P = (~Q)) ==> Q = P" by fast} - val negIffI = precomp2 (dni I) @{lemma "P = Q ==> ~(Q = (~P))" by fast} - val iff_const = @{const HOL.eq (bool)} - fun as_negIff f (@{const HOL.eq (bool)} $ t $ u) = - f (@{const Not} $ (iff_const $ u $ (@{const Not} $ t))) - | as_negIff _ _ = NONE -in - -fun join is_conj littab t = - let - val comp = if is_conj then comp_conj else comp_disj - val dest = if is_conj then dest_conj else dest_disj - - val lookup = lookup_lit littab - - fun lookup_rule t = - (case t of - @{const Not} $ (@{const Not} $ t) => - (Old_Z3_Proof_Tools.compose dnegI, lookup t) - | @{const Not} $ (@{const HOL.eq (bool)} $ t $ (@{const Not} $ u)) => - (Old_Z3_Proof_Tools.compose negIffI, lookup (iff_const $ u $ t)) - | @{const Not} $ ((eq as Const (@{const_name HOL.eq}, _)) $ t $ u) => - let fun rewr lit = lit COMP @{thm not_sym} - in (rewr, lookup (@{const Not} $ (eq $ u $ t))) end - | _ => - (case as_dneg lookup t of - NONE => (Old_Z3_Proof_Tools.compose negIffE, as_negIff lookup t) - | x => (Old_Z3_Proof_Tools.compose dnegE, x))) - - fun join1 (s, t) = - (case lookup t of - SOME lit => (s, lit) - | NONE => - (case lookup_rule t of - (rewrite, SOME lit) => (s, rewrite lit) - | (_, NONE) => (s, comp (apply2 join1 (dest t))))) - - in snd (join1 (if is_conj then (false, t) else (true, t))) end - -end - - - -(** proving equality of conjunctions or disjunctions **) - -fun iff_intro thm1 thm2 = thm2 COMP (thm1 COMP @{thm iffI}) - -local - val cp1 = @{lemma "(~P) = (~Q) ==> P = Q" by simp} - val cp2 = @{lemma "(~P) = Q ==> P = (~Q)" by fastforce} - val cp3 = @{lemma "P = (~Q) ==> (~P) = Q" by simp} -in -fun contrapos1 prove (ct, cu) = prove (negate ct, negate cu) COMP cp1 -fun contrapos2 prove (ct, cu) = prove (negate ct, Thm.dest_arg cu) COMP cp2 -fun contrapos3 prove (ct, cu) = prove (Thm.dest_arg ct, negate cu) COMP cp3 -end - - -local - val contra_rule = @{lemma "P ==> ~P ==> False" by (rule notE)} - fun contra_left conj thm = - let - val rules = explode_term conj (Old_SMT_Utils.prop_of thm) - fun contra_lits (t, rs) = - (case t of - @{const Not} $ u => Termtab.lookup rules u |> Option.map (pair rs) - | _ => NONE) - in - (case Termtab.lookup rules @{const False} of - SOME rs => extract_lit thm rs - | NONE => - the (Termtab.get_first contra_lits rules) - |> apply2 (extract_lit thm) - |> (fn (nlit, plit) => nlit COMP (plit COMP contra_rule))) - end - - val falseE_v = dest_Var (Thm.term_of (Thm.dest_arg (Thm.dest_arg (Thm.cprop_of @{thm FalseE})))) - fun contra_right ct = Thm.instantiate ([], [(falseE_v, ct)]) @{thm FalseE} -in -fun contradict conj ct = - iff_intro (Old_Z3_Proof_Tools.under_assumption (contra_left conj) ct) - (contra_right ct) -end - - -local - fun prove_eq l r (cl, cr) = - let - fun explode' is_conj = explode is_conj true (l <> r) [] - fun make_tab is_conj thm = make_littab (true_thm :: explode' is_conj thm) - fun prove is_conj ct tab = join is_conj tab (Thm.term_of ct) - - val thm1 = Old_Z3_Proof_Tools.under_assumption (prove r cr o make_tab l) cl - val thm2 = Old_Z3_Proof_Tools.under_assumption (prove l cl o make_tab r) cr - in iff_intro thm1 thm2 end - - datatype conj_disj = CONJ | DISJ | NCON | NDIS - fun kind_of t = - if is_conj t then SOME CONJ - else if is_disj t then SOME DISJ - else if is_neg' is_conj t then SOME NCON - else if is_neg' is_disj t then SOME NDIS - else NONE 
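(* Illustrative sketch, not part of the deleted sources: the explode/join idea
   used above, restated over a toy propositional datatype instead of HOL
   cterms and thms. Every name below (lit_prop, lits_of_conj, conj_of_lits)
   is invented for this sketch. *)
datatype lit_prop =
    Atom of string
  | Neg  of lit_prop
  | Conj of lit_prop * lit_prop
  | Disj of lit_prop * lit_prop

(* "explode": flatten a nested conjunction into its list of literals *)
fun lits_of_conj (Conj (p, q)) = lits_of_conj p @ lits_of_conj q
  | lits_of_conj p = [p]

(* "join": rebuild a right-nested conjunction from a non-empty literal list *)
fun conj_of_lits [p] = p
  | conj_of_lits (p :: ps) = Conj (p, conj_of_lits ps)
  | conj_of_lits [] = raise Fail "conj_of_lits: empty literal list"

(* exploding P & (~Q & R) yields [P, ~Q, R], which joins back to P & (~Q & R) *)
val example_lits = lits_of_conj (Conj (Atom "P", Conj (Neg (Atom "Q"), Atom "R")))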
-in - -fun prove_conj_disj_eq ct = - let val cp as (cl, cr) = Thm.dest_binop (Thm.dest_arg ct) - in - (case (kind_of (Thm.term_of cl), Thm.term_of cr) of - (SOME CONJ, @{const False}) => contradict true cl - | (SOME DISJ, @{const Not} $ @{const False}) => - contrapos2 (contradict false o fst) cp - | (kl, _) => - (case (kl, kind_of (Thm.term_of cr)) of - (SOME CONJ, SOME CONJ) => prove_eq true true cp - | (SOME CONJ, SOME NDIS) => prove_eq true false cp - | (SOME CONJ, _) => prove_eq true true cp - | (SOME DISJ, SOME DISJ) => contrapos1 (prove_eq false false) cp - | (SOME DISJ, SOME NCON) => contrapos2 (prove_eq false true) cp - | (SOME DISJ, _) => contrapos1 (prove_eq false false) cp - | (SOME NCON, SOME NCON) => contrapos1 (prove_eq true true) cp - | (SOME NCON, SOME DISJ) => contrapos3 (prove_eq true false) cp - | (SOME NCON, NONE) => contrapos3 (prove_eq true false) cp - | (SOME NDIS, SOME NDIS) => prove_eq false false cp - | (SOME NDIS, SOME CONJ) => prove_eq false true cp - | (SOME NDIS, NONE) => prove_eq false true cp - | _ => raise CTERM ("prove_conj_disj_eq", [ct]))) - end - -end - -end diff --git a/src/test-gen/Old_SMT/old_z3_proof_methods.ML b/src/test-gen/Old_SMT/old_z3_proof_methods.ML deleted file mode 100644 index c27174d..0000000 --- a/src/test-gen/Old_SMT/old_z3_proof_methods.ML +++ /dev/null @@ -1,149 +0,0 @@ -(* Title: HOL/Library/Old_SMT/old_z3_proof_methods.ML - Author: Sascha Boehme, TU Muenchen - -Proof methods for Z3 proof reconstruction. -*) - -signature OLD_Z3_PROOF_METHODS = -sig - val prove_injectivity: Proof.context -> cterm -> thm - val prove_ite: Proof.context -> cterm -> thm -end - -structure Old_Z3_Proof_Methods: OLD_Z3_PROOF_METHODS = -struct - - -fun apply tac st = - (case Seq.pull (tac 1 st) of - NONE => raise THM ("tactic failed", 1, [st]) - | SOME (st', _) => st') - - - -(* if-then-else *) - -val pull_ite = mk_meta_eq - @{lemma "f (if P then x else y) = (if P then f x else f y)" by simp} - -fun pull_ites_conv ct = - (Conv.rewr_conv pull_ite then_conv - Conv.binop_conv (Conv.try_conv pull_ites_conv)) ct - -fun prove_ite ctxt = - Old_Z3_Proof_Tools.by_tac ctxt ( - CONVERSION (Conv.arg_conv (Conv.arg1_conv pull_ites_conv)) - THEN' resolve_tac ctxt @{thms refl}) - - - -(* injectivity *) - -local - -val B = @{typ bool} -fun mk_univ T = Const (@{const_name top}, HOLogic.mk_setT T) -fun mk_inj_on T U = - Const (@{const_name inj_on}, (T --> U) --> HOLogic.mk_setT T --> B) -fun mk_inv_into T U = - Const (@{const_name inv_into}, [HOLogic.mk_setT T, T --> U, U] ---> T) - -fun mk_inv_of ctxt ct = - let - val (dT, rT) = Term.dest_funT (Thm.typ_of_cterm ct) - val inv = Thm.cterm_of ctxt (mk_inv_into dT rT) - val univ = Thm.cterm_of ctxt (mk_univ dT) - in Thm.mk_binop inv univ ct end - -fun mk_inj_prop ctxt ct = - let - val (dT, rT) = Term.dest_funT (Thm.typ_of_cterm ct) - val inj = Thm.cterm_of ctxt (mk_inj_on dT rT) - val univ = Thm.cterm_of ctxt (mk_univ dT) - in Old_SMT_Utils.mk_cprop (Thm.mk_binop inj ct univ) end - - -val disjE = @{lemma "~P | Q ==> P ==> Q" by fast} - -fun prove_inj_prop ctxt def lhs = - let - val (ct, ctxt') = Old_SMT_Utils.dest_all_cabs (Thm.rhs_of def) ctxt - val rule = disjE OF [Object_Logic.rulify ctxt' (Thm.assume lhs)] - in - Goal.init (mk_inj_prop ctxt' (Thm.dest_arg ct)) - |> apply (resolve_tac ctxt' @{thms injI}) - |> apply (Tactic.solve_tac ctxt' [rule, rule RS @{thm sym}]) - |> Goal.norm_result ctxt' o Goal.finish ctxt' - |> singleton (Variable.export ctxt' ctxt) - end - -fun prove_rhs ctxt def lhs = - Old_Z3_Proof_Tools.by_tac ctxt 
( - CONVERSION (Conv.top_sweep_conv (K (Conv.rewr_conv def)) ctxt) - THEN' REPEAT_ALL_NEW (match_tac ctxt @{thms allI}) - THEN' resolve_tac ctxt [@{thm inv_f_f} OF [prove_inj_prop ctxt def lhs]]) - - -fun expand thm ct = - let - val cpat = Thm.dest_arg (Thm.rhs_of thm) - val (cl, cr) = Thm.dest_binop (Thm.dest_arg (Thm.dest_arg1 ct)) - val thm1 = Thm.instantiate (Thm.match (cpat, cl)) thm - val thm2 = Thm.instantiate (Thm.match (cpat, cr)) thm - in Conv.arg_conv (Conv.binop_conv (Conv.rewrs_conv [thm1, thm2])) ct end - -fun prove_lhs ctxt rhs = - let - val eq = Thm.symmetric (mk_meta_eq (Object_Logic.rulify ctxt (Thm.assume rhs))) - val conv = Old_SMT_Utils.binders_conv (K (expand eq)) ctxt - in - Old_Z3_Proof_Tools.by_tac ctxt ( - CONVERSION (Old_SMT_Utils.prop_conv conv) - THEN' Simplifier.simp_tac (put_simpset HOL_ss ctxt)) - end - - -fun mk_inv_def ctxt rhs = - let - val (ct, ctxt') = - Old_SMT_Utils.dest_all_cbinders (Old_SMT_Utils.dest_cprop rhs) ctxt - val (cl, cv) = Thm.dest_binop ct - val (cg, (cargs, cf)) = Drule.strip_comb cl ||> split_last - val cu = fold_rev Thm.lambda cargs (mk_inv_of ctxt' (Thm.lambda cv cf)) - in Thm.assume (Old_SMT_Utils.mk_cequals cg cu) end - -fun prove_inj_eq ctxt ct = - let - val (lhs, rhs) = - apply2 Old_SMT_Utils.mk_cprop (Thm.dest_binop (Old_SMT_Utils.dest_cprop ct)) - val lhs_thm = Thm.implies_intr rhs (prove_lhs ctxt rhs lhs) - val rhs_thm = - Thm.implies_intr lhs (prove_rhs ctxt (mk_inv_def ctxt rhs) lhs rhs) - in lhs_thm COMP (rhs_thm COMP @{thm iffI}) end - - -val swap_eq_thm = mk_meta_eq @{thm eq_commute} -val swap_disj_thm = mk_meta_eq @{thm disj_commute} - -fun swap_conv dest eq = - Old_SMT_Utils.if_true_conv ((op <) o apply2 Term.size_of_term o dest) - (Conv.rewr_conv eq) - -val swap_eq_conv = swap_conv HOLogic.dest_eq swap_eq_thm -val swap_disj_conv = swap_conv Old_SMT_Utils.dest_disj swap_disj_thm - -fun norm_conv ctxt = - swap_eq_conv then_conv - Conv.arg1_conv (Old_SMT_Utils.binders_conv (K swap_disj_conv) ctxt) then_conv - Conv.arg_conv (Old_SMT_Utils.binders_conv (K swap_eq_conv) ctxt) - -in - -fun prove_injectivity ctxt = - Old_Z3_Proof_Tools.by_tac ctxt ( - CONVERSION (Old_SMT_Utils.prop_conv (norm_conv ctxt)) - THEN' CSUBGOAL (uncurry (resolve_tac ctxt o single o prove_inj_eq ctxt))) - -end - -end diff --git a/src/test-gen/Old_SMT/old_z3_proof_parser.ML b/src/test-gen/Old_SMT/old_z3_proof_parser.ML deleted file mode 100644 index aa44b11..0000000 --- a/src/test-gen/Old_SMT/old_z3_proof_parser.ML +++ /dev/null @@ -1,446 +0,0 @@ -(* Title: HOL/Library/Old_SMT/old_z3_proof_parser.ML - Author: Sascha Boehme, TU Muenchen - -Parser for Z3 proofs. 
-*) - -signature OLD_Z3_PROOF_PARSER = -sig - (*proof rules*) - datatype rule = True_Axiom | Asserted | Goal | Modus_Ponens | Reflexivity | - Symmetry | Transitivity | Transitivity_Star | Monotonicity | Quant_Intro | - Distributivity | And_Elim | Not_Or_Elim | Rewrite | Rewrite_Star | - Pull_Quant | Pull_Quant_Star | Push_Quant | Elim_Unused_Vars | - Dest_Eq_Res | Quant_Inst | Hypothesis | Lemma | Unit_Resolution | - Iff_True | Iff_False | Commutativity | Def_Axiom | Intro_Def | Apply_Def | - Iff_Oeq | Nnf_Pos | Nnf_Neg | Nnf_Star | Cnf_Star | Skolemize | - Modus_Ponens_Oeq | Th_Lemma of string list - val string_of_rule: rule -> string - - (*proof parser*) - datatype proof_step = Proof_Step of { - rule: rule, - args: cterm list, - prems: int list, - prop: cterm } - val parse: Proof.context -> typ Symtab.table -> term Symtab.table -> - string list -> - (int * cterm) list * (int * proof_step) list * string list * Proof.context -end - -structure Old_Z3_Proof_Parser: OLD_Z3_PROOF_PARSER = -struct - - -(* proof rules *) - -datatype rule = True_Axiom | Asserted | Goal | Modus_Ponens | Reflexivity | - Symmetry | Transitivity | Transitivity_Star | Monotonicity | Quant_Intro | - Distributivity | And_Elim | Not_Or_Elim | Rewrite | Rewrite_Star | - Pull_Quant | Pull_Quant_Star | Push_Quant | Elim_Unused_Vars | Dest_Eq_Res | - Quant_Inst | Hypothesis | Lemma | Unit_Resolution | Iff_True | Iff_False | - Commutativity | Def_Axiom | Intro_Def | Apply_Def | Iff_Oeq | Nnf_Pos | - Nnf_Neg | Nnf_Star | Cnf_Star | Skolemize | Modus_Ponens_Oeq | - Th_Lemma of string list - -val rule_names = Symtab.make [ - ("true-axiom", True_Axiom), - ("asserted", Asserted), - ("goal", Goal), - ("mp", Modus_Ponens), - ("refl", Reflexivity), - ("symm", Symmetry), - ("trans", Transitivity), - ("trans*", Transitivity_Star), - ("monotonicity", Monotonicity), - ("quant-intro", Quant_Intro), - ("distributivity", Distributivity), - ("and-elim", And_Elim), - ("not-or-elim", Not_Or_Elim), - ("rewrite", Rewrite), - ("rewrite*", Rewrite_Star), - ("pull-quant", Pull_Quant), - ("pull-quant*", Pull_Quant_Star), - ("push-quant", Push_Quant), - ("elim-unused", Elim_Unused_Vars), - ("der", Dest_Eq_Res), - ("quant-inst", Quant_Inst), - ("hypothesis", Hypothesis), - ("lemma", Lemma), - ("unit-resolution", Unit_Resolution), - ("iff-true", Iff_True), - ("iff-false", Iff_False), - ("commutativity", Commutativity), - ("def-axiom", Def_Axiom), - ("intro-def", Intro_Def), - ("apply-def", Apply_Def), - ("iff~", Iff_Oeq), - ("nnf-pos", Nnf_Pos), - ("nnf-neg", Nnf_Neg), - ("nnf*", Nnf_Star), - ("cnf*", Cnf_Star), - ("sk", Skolemize), - ("mp~", Modus_Ponens_Oeq), - ("th-lemma", Th_Lemma [])] - -fun string_of_rule (Th_Lemma args) = space_implode " " ("th-lemma" :: args) - | string_of_rule r = - let fun eq_rule (s, r') = if r = r' then SOME s else NONE - in the (Symtab.get_first eq_rule rule_names) end - - - -(* certified terms and variables *) - -val (var_prefix, decl_prefix) = ("v", "sk") -(* - "decl_prefix" is for skolem constants (represented by free variables), - "var_prefix" is for pseudo-schematic variables (schematic with respect - to the Z3 proof, but represented by free variables). - - Both prefixes must be distinct to avoid name interferences. - More precisely, the naming of pseudo-schematic variables must be - context-independent modulo the current proof context to be able to - use fast inference kernel rules during proof reconstruction. 
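(* Illustrative sketch, not taken from the patch: the rule-name table above,
   shrunk to three rules and a plain association list instead of Isabelle's
   Symtab. The toy_rule type and all other names are invented here. *)
datatype toy_rule = ModusPonens | Rewrite | Skolemize

val toy_rule_names =
  [("mp", ModusPonens), ("rewrite", Rewrite), ("sk", Skolemize)]

(* parsing direction: a rule name read from the Z3 proof text -> constructor *)
fun rule_of_name n =
  Option.map #2 (List.find (fn (m, _) => m = n) toy_rule_names)

(* printing direction, as string_of_rule does: constructor -> rule name *)
fun name_of_rule r =
  (case List.find (fn (_, r') => r' = r) toy_rule_names of
     SOME (m, _) => m
   | NONE => "?")

(* rule_of_name "mp" = SOME ModusPonens; name_of_rule Rewrite = "rewrite" *)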
-*) - -fun mk_inst ctxt vars = - let - val max = fold (Integer.max o fst) vars 0 - val ns = fst (Variable.variant_fixes (replicate (max + 1) var_prefix) ctxt) - fun mk (i, v) = - (dest_Var (Thm.term_of v), Thm.cterm_of ctxt (Free (nth ns i, Thm.typ_of_cterm v))) - in map mk vars end - -fun close ctxt (ct, vars) = - let - val inst = mk_inst ctxt vars - val names = fold (Term.add_free_names o Thm.term_of o snd) inst [] - in (Thm.instantiate_cterm ([], inst) ct, names) end - - -fun mk_bound ctxt (i, T) = - let val ct = Thm.cterm_of ctxt (Var ((Name.uu, 0), T)) - in (ct, [(i, ct)]) end - -local - fun mk_quant1 ctxt q T (ct, vars) = - let - val cv = - (case AList.lookup (op =) vars 0 of - SOME cv => cv - | _ => Thm.cterm_of ctxt (Var ((Name.uu, Thm.maxidx_of_cterm ct + 1), T))) - fun dec (i, v) = if i = 0 then NONE else SOME (i-1, v) - val vars' = map_filter dec vars - in (Thm.apply (Old_SMT_Utils.instT' cv q) (Thm.lambda cv ct), vars') end - - fun quant name = - Old_SMT_Utils.mk_const_pat @{theory} name (Old_SMT_Utils.destT1 o Old_SMT_Utils.destT1) - val forall = quant @{const_name All} - val exists = quant @{const_name Ex} -in - -fun mk_quant is_forall ctxt = - fold_rev (mk_quant1 ctxt (if is_forall then forall else exists)) - -end - -local - fun prep (ct, vars) (maxidx, all_vars) = - let - val maxidx' = maxidx + Thm.maxidx_of_cterm ct + 1 - - fun part (i, cv) = - (case AList.lookup (op =) all_vars i of - SOME cu => apfst (if cu aconvc cv then I else cons (cv, cu)) - | NONE => - let val cv' = Thm.incr_indexes_cterm maxidx cv - in apfst (cons (cv, cv')) #> apsnd (cons (i, cv')) end) - - val (inst, vars') = - if null vars then ([], vars) - else fold part vars ([], []) - - in - (Thm.instantiate_cterm ([], map (apfst (dest_Var o Thm.term_of)) inst) ct, - (maxidx', vars' @ all_vars)) - end -in -fun mk_fun f ts = - let val (cts, (_, vars)) = fold_map prep ts (0, []) - in f cts |> Option.map (rpair vars) end -end - - - -(* proof parser *) - -datatype proof_step = Proof_Step of { - rule: rule, - args: cterm list, - prems: int list, - prop: cterm } - - -(** parser context **) - -val not_false = Thm.cterm_of @{context} (@{const Not} $ @{const False}) - -fun make_context ctxt typs terms = - let - val ctxt' = - ctxt - |> Symtab.fold (Variable.declare_typ o snd) typs - |> Symtab.fold (Variable.declare_term o snd) terms - - fun cert @{const True} = not_false - | cert t = Thm.cterm_of ctxt' t - - in (typs, Symtab.map (K cert) terms, Inttab.empty, [], [], ctxt') end - -fun fresh_name n (typs, terms, exprs, steps, vars, ctxt) = - let val (n', ctxt') = yield_singleton Variable.variant_fixes n ctxt - in (n', (typs, terms, exprs, steps, vars, ctxt')) end - -fun context_of (_, _, _, _, _, ctxt) = ctxt - -fun add_decl (n, T) (cx as (_, terms, _, _, _, _)) = - (case Symtab.lookup terms n of - SOME _ => cx - | NONE => cx |> fresh_name (decl_prefix ^ n) - |> (fn (m, (typs, terms, exprs, steps, vars, ctxt)) => - let - val upd = Symtab.update (n, Thm.cterm_of ctxt (Free (m, T))) - in (typs, upd terms, exprs, steps, vars, ctxt) end)) - -fun mk_typ (typs, _, _, _, _, ctxt) (s as Old_Z3_Interface.Sym (n, _)) = - (case Old_Z3_Interface.mk_builtin_typ ctxt s of - SOME T => SOME T - | NONE => Symtab.lookup typs n) - -fun mk_num (_, _, _, _, _, ctxt) (i, T) = - mk_fun (K (Old_Z3_Interface.mk_builtin_num ctxt i T)) [] - -fun mk_app (_, terms, _, _, _, ctxt) (s as Old_Z3_Interface.Sym (n, _), es) = - mk_fun (fn cts => - (case Old_Z3_Interface.mk_builtin_fun ctxt s cts of - SOME ct => SOME ct - | NONE => - Symtab.lookup terms n 
|> Option.map (Drule.list_comb o rpair cts))) es - -fun add_expr k t (typs, terms, exprs, steps, vars, ctxt) = - (typs, terms, Inttab.update (k, t) exprs, steps, vars, ctxt) - -fun lookup_expr (_, _, exprs, _, _, _) = Inttab.lookup exprs - -fun add_proof_step k ((r, args), prop) cx = - let - val (typs, terms, exprs, steps, vars, ctxt) = cx - val (ct, vs) = close ctxt prop - fun part (SOME e, _) (cts, ps) = (close ctxt e :: cts, ps) - | part (NONE, i) (cts, ps) = (cts, i :: ps) - val (args', prems) = fold (part o `(lookup_expr cx)) args ([], []) - val (cts, vss) = split_list args' - val step = Proof_Step {rule=r, args=rev cts, prems=rev prems, - prop = Old_SMT_Utils.mk_cprop ct} - val vars' = fold (union (op =)) (vs :: vss) vars - in (typs, terms, exprs, (k, step) :: steps, vars', ctxt) end - -fun finish (_, _, _, steps, vars, ctxt) = - let - fun coll (p as (k, Proof_Step {prems, rule, prop, ...})) (ars, ps, ids) = - (case rule of - Asserted => ((k, prop) :: ars, ps, ids) - | Goal => ((k, prop) :: ars, ps, ids) - | _ => - if Inttab.defined ids k then - (ars, p :: ps, fold (Inttab.update o rpair ()) prems ids) - else (ars, ps, ids)) - - val (ars, steps', _) = fold coll steps ([], [], Inttab.make [(~1, ())]) - in (ars, steps', vars, ctxt) end - - -(** core parser **) - -fun parse_exn line_no msg = raise Old_SMT_Failure.SMT (Old_SMT_Failure.Other_Failure - ("Z3 proof parser (line " ^ string_of_int line_no ^ "): " ^ msg)) - -fun scan_exn msg ((line_no, _), _) = parse_exn line_no msg - -fun with_info f cx = - (case f ((NONE, 1), cx) of - ((SOME _, _), cx') => cx' - | ((_, line_no), _) => parse_exn line_no "bad proof") - -fun parse_line _ _ (st as ((SOME _, _), _)) = st - | parse_line scan line ((_, line_no), cx) = - let val st = ((line_no, cx), raw_explode line) - in - (case Scan.catch (Scan.finite' Symbol.stopper (Scan.option scan)) st of - (SOME r, ((_, cx'), _)) => ((r, line_no+1), cx') - | (NONE, _) => parse_exn line_no ("bad proof line: " ^ quote line)) - end - -fun with_context f x ((line_no, cx), st) = - let val (y, cx') = f x cx - in (y, ((line_no, cx'), st)) end - - -fun lookup_context f x (st as ((_, cx), _)) = (f cx x, st) - - -(** parser combinators and parsers for basic entities **) - -fun $$ s = Scan.lift (Scan.$$ s) -fun this s = Scan.lift (Scan.this_string s) -val is_blank = Symbol.is_ascii_blank -fun blank st = Scan.lift (Scan.many1 is_blank) st -fun sep scan = blank |-- scan -fun seps scan = Scan.repeat (sep scan) -fun seps1 scan = Scan.repeat1 (sep scan) -fun seps_by scan_sep scan = scan ::: Scan.repeat (scan_sep |-- scan) - -val lpar = "(" and rpar = ")" -val lbra = "[" and rbra = "]" -fun par scan = $$ lpar |-- scan --| $$ rpar -fun bra scan = $$ lbra |-- scan --| $$ rbra - -val digit = (fn - "0" => SOME 0 | "1" => SOME 1 | "2" => SOME 2 | "3" => SOME 3 | - "4" => SOME 4 | "5" => SOME 5 | "6" => SOME 6 | "7" => SOME 7 | - "8" => SOME 8 | "9" => SOME 9 | _ => NONE) - -fun digits st = (Scan.lift (Scan.many1 Symbol.is_ascii_digit) >> implode) st - -fun nat_num st = (Scan.lift (Scan.repeat1 (Scan.some digit)) >> (fn ds => - fold (fn d => fn i => i * 10 + d) ds 0)) st - -fun int_num st = (Scan.optional ($$ "-" >> K (fn i => ~i)) I :|-- - (fn sign => nat_num >> sign)) st - -val is_char = Symbol.is_ascii_letter orf Symbol.is_ascii_digit orf - member (op =) (raw_explode "_+*-/%~=<>$&|?!.@^#") - -fun name st = (Scan.lift (Scan.many1 is_char) >> implode) st - -fun sym st = (name -- - Scan.optional (bra (seps_by ($$ ":") sym)) [] >> Old_Z3_Interface.Sym) st - -fun id st = ($$ "#" |-- 
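(* Illustrative sketch, not from the patch: the scanner combinators above
   ($$, nat_num, id), restated as plain functions on char lists. All names
   are invented; node_id mimics reading a "#<nat>" proof-node reference. *)
fun expect c [] = NONE
  | expect c (c' :: cs) = if c = c' then SOME (c, cs) else NONE

fun digit [] = NONE
  | digit (c :: cs) =
      if Char.isDigit c then SOME (Char.ord c - Char.ord #"0", cs) else NONE

(* one or more repetitions of a sub-scanner *)
fun many1 scan cs =
  (case scan cs of
     NONE => NONE
   | SOME (x, cs') =>
       (case many1 scan cs' of
          NONE => SOME ([x], cs')
        | SOME (xs, cs'') => SOME (x :: xs, cs'')))

(* "#" followed by decimal digits, folded into an int as nat_num does *)
fun node_id cs =
  (case expect #"#" cs of
     NONE => NONE
   | SOME (_, cs') =>
       Option.map (fn (ds, rest) =>
           (List.foldl (fn (d, i) => i * 10 + d) 0 ds, rest))
         (many1 digit cs'))

(* node_id (String.explode "#42)") evaluates to SOME (42, [#")"]) *)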
nat_num) st - - -(** parsers for various parts of Z3 proofs **) - -fun sort st = Scan.first [ - this "array" |-- bra (sort --| $$ ":" -- sort) >> (op -->), - par (this "->" |-- seps1 sort) >> ((op --->) o split_last), - sym :|-- (fn s as Old_Z3_Interface.Sym (n, _) => lookup_context mk_typ s :|-- (fn - SOME T => Scan.succeed T - | NONE => scan_exn ("unknown sort: " ^ quote n)))] st - -fun bound st = (par (this ":var" |-- sep nat_num -- sep sort) :|-- - lookup_context (mk_bound o context_of)) st - -fun numb (n as (i, _)) = lookup_context mk_num n :|-- (fn - SOME n' => Scan.succeed n' - | NONE => scan_exn ("unknown number: " ^ quote (string_of_int i))) - -fun appl (app as (Old_Z3_Interface.Sym (n, _), _)) = - lookup_context mk_app app :|-- (fn - SOME app' => Scan.succeed app' - | NONE => scan_exn ("unknown function symbol: " ^ quote n)) - -fun bv_size st = (digits >> (fn sz => - Old_Z3_Interface.Sym ("bv", [Old_Z3_Interface.Sym (sz, [])]))) st - -fun bv_number_sort st = (bv_size :|-- lookup_context mk_typ :|-- (fn - SOME cT => Scan.succeed cT - | NONE => scan_exn ("unknown sort: " ^ quote "bv"))) st - -fun bv_number st = - (this "bv" |-- bra (nat_num --| $$ ":" -- bv_number_sort) :|-- numb) st - -fun frac_number st = ( - int_num --| $$ "/" -- int_num --| this "::" -- sort :|-- (fn ((i, j), T) => - numb (i, T) -- numb (j, T) :|-- (fn (n, m) => - appl (Old_Z3_Interface.Sym ("/", []), [n, m])))) st - -fun plain_number st = (int_num --| this "::" -- sort :|-- numb) st - -fun number st = Scan.first [bv_number, frac_number, plain_number] st - -fun constant st = ((sym >> rpair []) :|-- appl) st - -fun expr_id st = (id :|-- (fn i => lookup_context lookup_expr i :|-- (fn - SOME e => Scan.succeed e - | NONE => scan_exn ("unknown term id: " ^ quote (string_of_int i))))) st - -fun arg st = Scan.first [expr_id, number, constant] st - -fun application st = par ((sym -- Scan.repeat1 (sep arg)) :|-- appl) st - -fun variables st = par (this "vars" |-- seps1 (par (name |-- sep sort))) st - -fun pats st = seps (par ((this ":pat" || this ":nopat") |-- seps1 id)) st - -val ctrue = Thm.cterm_of @{context} @{const True} - -fun pattern st = par (this "pattern" |-- Scan.repeat1 (sep arg) >> - (the o mk_fun (K (SOME ctrue)))) st - -fun quant_kind st = st |> ( - this "forall" >> K (mk_quant true o context_of) || - this "exists" >> K (mk_quant false o context_of)) - -fun quantifier st = - (par (quant_kind -- sep variables --| pats -- sep arg) :|-- - lookup_context (fn cx => fn ((mk_q, Ts), body) => mk_q cx Ts body)) st - -fun expr k = - Scan.first [bound, quantifier, pattern, application, number, constant] :|-- - with_context (pair NONE oo add_expr k) - -val rule_arg = id - (* if this is modified, then 'th_lemma_arg' needs reviewing *) - -fun th_lemma_arg st = Scan.unless (sep rule_arg >> K "" || $$ rbra) (sep name) st - -fun rule_name st = ((name >> `(Symtab.lookup rule_names)) :|-- (fn - (SOME (Th_Lemma _), _) => Scan.repeat th_lemma_arg >> Th_Lemma - | (SOME r, _) => Scan.succeed r - | (NONE, n) => scan_exn ("unknown proof rule: " ^ quote n))) st - -fun rule f k = - bra (rule_name -- seps id) --| $$ ":" -- sep arg #-> - with_context (pair (f k) oo add_proof_step k) - -fun decl st = (this "decl" |-- sep name --| sep (this "::") -- sep sort :|-- - with_context (pair NONE oo add_decl)) st - -fun def st = (id --| sep (this ":=")) st - -fun node st = st |> ( - decl || - def :|-- (fn k => sep (expr k) || sep (rule (K NONE) k)) || - rule SOME ~1) - - -(** overall parser **) - -(* - Currently, terms are parsed bottom-up 
(i.e., along with parsing the proof - text line by line), but proofs are reconstructed top-down (i.e. by an - in-order top-down traversal of the proof tree/graph). The latter approach - was taken because some proof texts comprise irrelevant proof steps which - will thus not be reconstructed. This approach might also be beneficial - for constructing terms, but it would also increase the complexity of the - (otherwise rather modular) code. -*) - -fun parse ctxt typs terms proof_text = - make_context ctxt typs terms - |> with_info (fold (parse_line node) proof_text) - |> finish - -end diff --git a/src/test-gen/Old_SMT/old_z3_proof_reconstruction.ML b/src/test-gen/Old_SMT/old_z3_proof_reconstruction.ML deleted file mode 100644 index e2302cd..0000000 --- a/src/test-gen/Old_SMT/old_z3_proof_reconstruction.ML +++ /dev/null @@ -1,891 +0,0 @@ -(* Title: HOL/Library/Old_SMT/old_z3_proof_reconstruction.ML - Author: Sascha Boehme, TU Muenchen - -Proof reconstruction for proofs found by Z3. -*) - -signature OLD_Z3_PROOF_RECONSTRUCTION = -sig - val add_z3_rule: thm -> Context.generic -> Context.generic - val reconstruct: Proof.context -> Old_SMT_Translate.recon -> string list -> int list * thm -end - -structure Old_Z3_Proof_Reconstruction: OLD_Z3_PROOF_RECONSTRUCTION = -struct - - -fun z3_exn msg = raise Old_SMT_Failure.SMT (Old_SMT_Failure.Other_Failure - ("Z3 proof reconstruction: " ^ msg)) - - - -(* net of schematic rules *) - -local - val description = "declaration of Z3 proof rules" - - val eq = Thm.eq_thm - - structure Old_Z3_Rules = Generic_Data - ( - type T = thm Net.net - val empty = Net.empty - val extend = I - val merge = Net.merge eq - ) - - fun prep context = - `Thm.prop_of o rewrite_rule (Context.proof_of context) [Old_Z3_Proof_Literals.rewrite_true] - - fun ins thm context = - context |> Old_Z3_Rules.map (fn net => Net.insert_term eq (prep context thm) net handle Net.INSERT => net) - fun rem thm context = - context |> Old_Z3_Rules.map (fn net => Net.delete_term eq (prep context thm) net handle Net.DELETE => net) - - val add = Thm.declaration_attribute ins - val del = Thm.declaration_attribute rem -in - -val add_z3_rule = ins - -fun by_schematic_rule ctxt ct = - the (Old_Z3_Proof_Tools.net_instance (Old_Z3_Rules.get (Context.Proof ctxt)) ct) - -val _ = Theory.setup - (Attrib.setup @{binding old_z3_rule} (Attrib.add_del add del) description #> - Global_Theory.add_thms_dynamic (@{binding old_z3_rule}, Net.content o Old_Z3_Rules.get)) - -end - - - -(* proof tools *) - -fun named ctxt name prover ct = - let val _ = Old_SMT_Config.trace_msg ctxt I ("Z3: trying " ^ name ^ " ...") - in prover ct end - -fun NAMED ctxt name tac i st = - let val _ = Old_SMT_Config.trace_msg ctxt I ("Z3: trying " ^ name ^ " ...") - in tac i st end - -fun pretty_goal ctxt thms t = - [Pretty.block [Pretty.str "proposition: ", Syntax.pretty_term ctxt t]] - |> not (null thms) ? 
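(* Illustrative sketch, not from the patch: the point made in the comment on
   bottom-up parsing versus top-down reconstruction above - only proof steps
   reachable from the root need to be reconstructed, so irrelevant steps in
   the proof text are skipped. Steps are modelled as (id, premise ids) pairs;
   all names here are invented. *)
fun premises_of steps i =
  (case List.find (fn (j, _) => j = i) steps of
     SOME (_, ps) => ps
   | NONE => [])

fun reachable steps root =
  let
    fun visit i seen =
      if List.exists (fn j => j = i) seen then seen
      else List.foldl (fn (p, s) => visit p s) (i :: seen) (premises_of steps i)
  in
    visit root []
  end

(* With steps [(1, []), (2, [1]), (3, []), (4, [2, 1])] and root 4,
   reachable returns 1, 2 and 4 but not the unused step 3. *)
val needed = reachable [(1, []), (2, [1]), (3, []), (4, [2, 1])] 4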
cons (Pretty.big_list "assumptions:" - (map (Thm.pretty_thm ctxt) thms)) - -fun try_apply ctxt thms = - let - fun try_apply_err ct = Pretty.string_of (Pretty.chunks [ - Pretty.big_list ("Z3 found a proof," ^ - " but proof reconstruction failed at the following subgoal:") - (pretty_goal ctxt thms (Thm.term_of ct)), - Pretty.str ("Declaring a rule as [old_z3_rule] might solve this problem.")]) - - fun apply [] ct = error (try_apply_err ct) - | apply (prover :: provers) ct = - (case try prover ct of - SOME thm => (Old_SMT_Config.trace_msg ctxt I "Z3: succeeded"; thm) - | NONE => apply provers ct) - - fun schematic_label full = "schematic rules" |> full ? suffix " (full)" - fun schematic ctxt full ct = - ct - |> full ? fold_rev (curry Drule.mk_implies o Thm.cprop_of) thms - |> named ctxt (schematic_label full) (by_schematic_rule ctxt) - |> fold Thm.elim_implies thms - - in apply o cons (schematic ctxt false) o cons (schematic ctxt true) end - -local - val rewr_if = - @{lemma "(if P then Q1 else Q2) = ((P --> Q1) & (~P --> Q2))" by simp} -in - -fun HOL_fast_tac ctxt = - Classical.fast_tac (put_claset HOL_cs ctxt) - -fun simp_fast_tac ctxt = - Simplifier.simp_tac (put_simpset HOL_ss ctxt addsimps [rewr_if]) - THEN_ALL_NEW HOL_fast_tac ctxt - -end - - - -(* theorems and proofs *) - -(** theorem incarnations **) - -datatype theorem = - Thm of thm | (* theorem without special features *) - MetaEq of thm | (* meta equality "t == s" *) - Literals of thm * Old_Z3_Proof_Literals.littab - (* "P1 & ... & Pn" and table of all literals P1, ..., Pn *) - -fun thm_of (Thm thm) = thm - | thm_of (MetaEq thm) = thm COMP @{thm meta_eq_to_obj_eq} - | thm_of (Literals (thm, _)) = thm - -fun meta_eq_of (MetaEq thm) = thm - | meta_eq_of p = mk_meta_eq (thm_of p) - -fun literals_of (Literals (_, lits)) = lits - | literals_of p = Old_Z3_Proof_Literals.make_littab [thm_of p] - - - -(** core proof rules **) - -(* assumption *) - -local - val remove_trigger = mk_meta_eq @{thm trigger_def} - val remove_weight = mk_meta_eq @{thm weight_def} - val remove_fun_app = mk_meta_eq @{thm fun_app_def} - - fun rewrite_conv _ [] = Conv.all_conv - | rewrite_conv ctxt eqs = Simplifier.full_rewrite (empty_simpset ctxt addsimps eqs) - - val prep_rules = [@{thm Let_def}, remove_trigger, remove_weight, - remove_fun_app, Old_Z3_Proof_Literals.rewrite_true] - - fun rewrite _ [] = I - | rewrite ctxt eqs = Conv.fconv_rule (rewrite_conv ctxt eqs) - - fun lookup_assm assms_net ct = - Old_Z3_Proof_Tools.net_instances assms_net ct - |> map (fn ithm as (_, thm) => (ithm, Thm.cprop_of thm aconvc ct)) -in - -fun add_asserted outer_ctxt rewrite_rules assms asserted ctxt = - let - val eqs = map (rewrite ctxt [Old_Z3_Proof_Literals.rewrite_true]) rewrite_rules - val eqs' = union Thm.eq_thm eqs prep_rules - - val assms_net = - assms - |> map (apsnd (rewrite ctxt eqs')) - |> map (apsnd (Conv.fconv_rule Thm.eta_conversion)) - |> Old_Z3_Proof_Tools.thm_net_of snd - - fun revert_conv ctxt = rewrite_conv ctxt eqs' then_conv Thm.eta_conversion - - fun assume thm ctxt = - let - val ct = Thm.cprem_of thm 1 - val (thm', ctxt') = yield_singleton Assumption.add_assumes ct ctxt - in (Thm.implies_elim thm thm', ctxt') end - - fun add1 idx thm1 ((i, th), exact) ((is, thms), (ctxt, ptab)) = - let - val (thm, ctxt') = - if exact then (Thm.implies_elim thm1 th, ctxt) - else assume thm1 ctxt - val thms' = if exact then thms else th :: thms - in - ((insert (op =) i is, thms'), - (ctxt', Inttab.update (idx, Thm thm) ptab)) - end - - fun add (idx, ct) (cx as ((is, thms), 
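(* Illustrative sketch, not from the patch: the try_apply pattern above - run a
   list of candidate provers on a goal and keep the first result, raising an
   error only if every prover fails. The goal/result types and all names are
   invented for this sketch. *)
type goal = string
type result = string

fun first_prover [] (g : goal) : result =
      raise Fail ("no prover succeeded on: " ^ g)
  | first_prover (prove :: rest) g =
      (case prove g of
         SOME r => r                     (* first success wins *)
       | NONE => first_prover rest g)    (* otherwise fall through *)

(* first_prover [fn _ => NONE, fn g => SOME ("by simp: " ^ g)] "G" = "by simp: G" *)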
(ctxt, ptab))) = - let - val thm1 = - Thm.trivial ct - |> Conv.fconv_rule (Conv.arg1_conv (revert_conv outer_ctxt)) - val thm2 = singleton (Variable.export ctxt outer_ctxt) thm1 - in - (case lookup_assm assms_net (Thm.cprem_of thm2 1) of - [] => - let val (thm, ctxt') = assume thm1 ctxt - in ((is, thms), (ctxt', Inttab.update (idx, Thm thm) ptab)) end - | ithms => fold (add1 idx thm1) ithms cx) - end - in fold add asserted (([], []), (ctxt, Inttab.empty)) end - -end - - -(* P = Q ==> P ==> Q or P --> Q ==> P ==> Q *) -local - val precomp = Old_Z3_Proof_Tools.precompose2 - val comp = Old_Z3_Proof_Tools.compose - - val meta_iffD1 = @{lemma "P == Q ==> P ==> (Q::bool)" by simp} - val meta_iffD1_c = precomp Thm.dest_binop meta_iffD1 - - val iffD1_c = precomp (Thm.dest_binop o Thm.dest_arg) @{thm iffD1} - val mp_c = precomp (Thm.dest_binop o Thm.dest_arg) @{thm mp} -in -fun mp (MetaEq thm) p = Thm (Thm.implies_elim (comp meta_iffD1_c thm) p) - | mp p_q p = - let - val pq = thm_of p_q - val thm = comp iffD1_c pq handle THM _ => comp mp_c pq - in Thm (Thm.implies_elim thm p) end -end - - -(* and_elim: P1 & ... & Pn ==> Pi *) -(* not_or_elim: ~(P1 | ... | Pn) ==> ~Pi *) -local - fun is_sublit conj t = Old_Z3_Proof_Literals.exists_lit conj (fn u => u aconv t) - - fun derive conj t lits idx ptab = - let - val lit = the (Old_Z3_Proof_Literals.get_first_lit (is_sublit conj t) lits) - val ls = Old_Z3_Proof_Literals.explode conj false false [t] lit - val lits' = fold Old_Z3_Proof_Literals.insert_lit ls - (Old_Z3_Proof_Literals.delete_lit lit lits) - - fun upd thm = Literals (thm_of thm, lits') - val ptab' = Inttab.map_entry idx upd ptab - in (the (Old_Z3_Proof_Literals.lookup_lit lits' t), ptab') end - - fun lit_elim conj (p, idx) ct ptab = - let val lits = literals_of p - in - (case Old_Z3_Proof_Literals.lookup_lit lits (Old_SMT_Utils.term_of ct) of - SOME lit => (Thm lit, ptab) - | NONE => apfst Thm (derive conj (Old_SMT_Utils.term_of ct) lits idx ptab)) - end -in -val and_elim = lit_elim true -val not_or_elim = lit_elim false -end - - -(* P1, ..., Pn |- False ==> |- ~P1 | ... 
| ~Pn *) -local - fun step lit thm = - Thm.implies_elim (Thm.implies_intr (Thm.cprop_of lit) thm) lit - val explode_disj = Old_Z3_Proof_Literals.explode false false false - fun intro hyps thm th = fold step (explode_disj hyps th) thm - - fun dest_ccontr ct = [Thm.dest_arg (Thm.dest_arg (Thm.dest_arg1 ct))] - val ccontr = Old_Z3_Proof_Tools.precompose dest_ccontr @{thm ccontr} -in -fun lemma thm ct = - let - val cu = Old_Z3_Proof_Literals.negate (Thm.dest_arg ct) - val hyps = map_filter (try HOLogic.dest_Trueprop) (Thm.hyps_of thm) - val th = Old_Z3_Proof_Tools.under_assumption (intro hyps thm) cu - in Thm (Old_Z3_Proof_Tools.compose ccontr th) end -end - - -(* \/{P1, ..., Pn, Q1, ..., Qn}, ~P1, ..., ~Pn ==> \/{Q1, ..., Qn} *) -local - val explode_disj = Old_Z3_Proof_Literals.explode false true false - val join_disj = Old_Z3_Proof_Literals.join false - fun unit thm thms th = - let - val t = @{const Not} $ Old_SMT_Utils.prop_of thm - val ts = map Old_SMT_Utils.prop_of thms - in - join_disj (Old_Z3_Proof_Literals.make_littab (thms @ explode_disj ts th)) t - end - - fun dest_arg2 ct = Thm.dest_arg (Thm.dest_arg ct) - fun dest ct = apply2 dest_arg2 (Thm.dest_binop ct) - val contrapos = - Old_Z3_Proof_Tools.precompose2 dest @{lemma "(~P ==> ~Q) ==> Q ==> P" by fast} -in -fun unit_resolution thm thms ct = - Old_Z3_Proof_Literals.negate (Thm.dest_arg ct) - |> Old_Z3_Proof_Tools.under_assumption (unit thm thms) - |> Thm o Old_Z3_Proof_Tools.discharge thm o Old_Z3_Proof_Tools.compose contrapos -end - - -(* P ==> P == True or P ==> P == False *) -local - val iff1 = @{lemma "P ==> P == (~ False)" by simp} - val iff2 = @{lemma "~P ==> P == False" by simp} -in -fun iff_true thm = MetaEq (thm COMP iff1) -fun iff_false thm = MetaEq (thm COMP iff2) -end - - -(* distributivity of | over & *) -fun distributivity ctxt = Thm o try_apply ctxt [] [ - named ctxt "fast" (Old_Z3_Proof_Tools.by_tac ctxt (HOL_fast_tac ctxt))] - (* FIXME: not very well tested *) - - -(* Tseitin-like axioms *) -local - val disjI1 = @{lemma "(P ==> Q) ==> ~P | Q" by fast} - val disjI2 = @{lemma "(~P ==> Q) ==> P | Q" by fast} - val disjI3 = @{lemma "(~Q ==> P) ==> P | Q" by fast} - val disjI4 = @{lemma "(Q ==> P) ==> P | ~Q" by fast} - - fun prove' conj1 conj2 ct2 thm = - let - val littab = - Old_Z3_Proof_Literals.explode conj1 true (conj1 <> conj2) [] thm - |> cons Old_Z3_Proof_Literals.true_thm - |> Old_Z3_Proof_Literals.make_littab - in Old_Z3_Proof_Literals.join conj2 littab (Thm.term_of ct2) end - - fun prove rule (ct1, conj1) (ct2, conj2) = - Old_Z3_Proof_Tools.under_assumption (prove' conj1 conj2 ct2) ct1 COMP rule - - fun prove_def_axiom ct = - let val (ct1, ct2) = Thm.dest_binop (Thm.dest_arg ct) - in - (case Thm.term_of ct1 of - @{const Not} $ (@{const HOL.conj} $ _ $ _) => - prove disjI1 (Thm.dest_arg ct1, true) (ct2, true) - | @{const HOL.conj} $ _ $ _ => - prove disjI3 (Old_Z3_Proof_Literals.negate ct2, false) (ct1, true) - | @{const Not} $ (@{const HOL.disj} $ _ $ _) => - prove disjI3 (Old_Z3_Proof_Literals.negate ct2, false) (ct1, false) - | @{const HOL.disj} $ _ $ _ => - prove disjI2 (Old_Z3_Proof_Literals.negate ct1, false) (ct2, true) - | Const (@{const_name distinct}, _) $ _ => - let - fun dis_conv cv = Conv.arg_conv (Conv.arg1_conv cv) - val unfold_dis_conv = dis_conv Old_Z3_Proof_Tools.unfold_distinct_conv - fun prv cu = - let val (cu1, cu2) = Thm.dest_binop (Thm.dest_arg cu) - in prove disjI4 (Thm.dest_arg cu2, true) (cu1, true) end - in Old_Z3_Proof_Tools.with_conv unfold_dis_conv prv ct end - | @{const Not} $ 
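(* Illustrative sketch, not from the patch: the unit-resolution step above,
   restated on clauses given as lists of literals, where a literal is a
   (positive?, atom name) pair. All names are invented for this sketch. *)
type literal = bool * string            (* (true, "P") is P, (false, "P") is ~P *)

fun negate_lit ((pos, a) : literal) = (not pos, a)

(* from P1 | ... | Pn | Q1 | ... | Qm and the unit facts ~P1, ..., ~Pn,
   keep exactly the literals that are not refuted by one of the units *)
fun unit_resolve (units : literal list) (clause : literal list) =
  List.filter (fn l => not (List.exists (fn u => u = negate_lit l) units)) clause

(* unit_resolve [(false, "P")] [(true, "P"), (true, "Q")] = [(true, "Q")] *)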
(Const (@{const_name distinct}, _) $ _) => - let - fun dis_conv cv = Conv.arg_conv (Conv.arg1_conv (Conv.arg_conv cv)) - val unfold_dis_conv = dis_conv Old_Z3_Proof_Tools.unfold_distinct_conv - fun prv cu = - let val (cu1, cu2) = Thm.dest_binop (Thm.dest_arg cu) - in prove disjI1 (Thm.dest_arg cu1, true) (cu2, true) end - in Old_Z3_Proof_Tools.with_conv unfold_dis_conv prv ct end - | _ => raise CTERM ("prove_def_axiom", [ct])) - end -in -fun def_axiom ctxt = Thm o try_apply ctxt [] [ - named ctxt "conj/disj/distinct" prove_def_axiom, - Old_Z3_Proof_Tools.by_abstraction 0 (true, false) ctxt [] (fn ctxt' => - named ctxt' "simp+fast" (Old_Z3_Proof_Tools.by_tac ctxt (simp_fast_tac ctxt')))] -end - - -(* local definitions *) -local - val intro_rules = [ - @{lemma "n == P ==> (~n | P) & (n | ~P)" by simp}, - @{lemma "n == (if P then s else t) ==> (~P | n = s) & (P | n = t)" - by simp}, - @{lemma "n == P ==> n = P" by (rule meta_eq_to_obj_eq)} ] - - val apply_rules = [ - @{lemma "(~n | P) & (n | ~P) ==> P == n" by (atomize(full)) fast}, - @{lemma "(~P | n = s) & (P | n = t) ==> (if P then s else t) == n" - by (atomize(full)) fastforce} ] - - val inst_rule = Old_Z3_Proof_Tools.match_instantiate Thm.dest_arg - - fun apply_rule ct = - (case get_first (try (inst_rule ct)) intro_rules of - SOME thm => thm - | NONE => raise CTERM ("intro_def", [ct])) -in -fun intro_def ct = Old_Z3_Proof_Tools.make_hyp_def (apply_rule ct) #>> Thm - -fun apply_def thm = - get_first (try (fn rule => MetaEq (thm COMP rule))) apply_rules - |> the_default (Thm thm) -end - - -(* negation normal form *) -local - val quant_rules1 = ([ - @{lemma "(!!x. P x == Q) ==> ALL x. P x == Q" by simp}, - @{lemma "(!!x. P x == Q) ==> EX x. P x == Q" by simp}], [ - @{lemma "(!!x. P x == Q x) ==> ALL x. P x == ALL x. Q x" by simp}, - @{lemma "(!!x. P x == Q x) ==> EX x. P x == EX x. Q x" by simp}]) - - val quant_rules2 = ([ - @{lemma "(!!x. ~P x == Q) ==> ~(ALL x. P x) == Q" by simp}, - @{lemma "(!!x. ~P x == Q) ==> ~(EX x. P x) == Q" by simp}], [ - @{lemma "(!!x. ~P x == Q x) ==> ~(ALL x. P x) == EX x. Q x" by simp}, - @{lemma "(!!x. ~P x == Q x) ==> ~(EX x. P x) == ALL x. 
Q x" by simp}]) - - fun nnf_quant_tac ctxt thm (qs as (qs1, qs2)) i st = ( - resolve_tac ctxt [thm] ORELSE' - (match_tac ctxt qs1 THEN' nnf_quant_tac ctxt thm qs) ORELSE' - (match_tac ctxt qs2 THEN' nnf_quant_tac ctxt thm qs)) i st - - fun nnf_quant_tac_varified ctxt vars eq = - nnf_quant_tac ctxt (Old_Z3_Proof_Tools.varify vars eq) - - fun nnf_quant ctxt vars qs p ct = - Old_Z3_Proof_Tools.as_meta_eq ct - |> Old_Z3_Proof_Tools.by_tac ctxt (nnf_quant_tac_varified ctxt vars (meta_eq_of p) qs) - - fun prove_nnf ctxt = try_apply ctxt [] [ - named ctxt "conj/disj" Old_Z3_Proof_Literals.prove_conj_disj_eq, - Old_Z3_Proof_Tools.by_abstraction 0 (true, false) ctxt [] (fn ctxt' => - named ctxt' "simp+fast" (Old_Z3_Proof_Tools.by_tac ctxt' (simp_fast_tac ctxt')))] -in -fun nnf ctxt vars ps ct = - (case Old_SMT_Utils.term_of ct of - _ $ (l as Const _ $ Abs _) $ (r as Const _ $ Abs _) => - if l aconv r - then MetaEq (Thm.reflexive (Thm.dest_arg (Thm.dest_arg ct))) - else MetaEq (nnf_quant ctxt vars quant_rules1 (hd ps) ct) - | _ $ (@{const Not} $ (Const _ $ Abs _)) $ (Const _ $ Abs _) => - MetaEq (nnf_quant ctxt vars quant_rules2 (hd ps) ct) - | _ => - let - val nnf_rewr_conv = Conv.arg_conv (Conv.arg_conv - (Old_Z3_Proof_Tools.unfold_eqs ctxt - (map (Thm.symmetric o meta_eq_of) ps))) - in Thm (Old_Z3_Proof_Tools.with_conv nnf_rewr_conv (prove_nnf ctxt) ct) end) -end - - - -(** equality proof rules **) - -(* |- t = t *) -fun refl ct = MetaEq (Thm.reflexive (Thm.dest_arg (Thm.dest_arg ct))) - - -(* s = t ==> t = s *) -local - val symm_rule = @{lemma "s = t ==> t == s" by simp} -in -fun symm (MetaEq thm) = MetaEq (Thm.symmetric thm) - | symm p = MetaEq (thm_of p COMP symm_rule) -end - - -(* s = t ==> t = u ==> s = u *) -local - val trans1 = @{lemma "s == t ==> t = u ==> s == u" by simp} - val trans2 = @{lemma "s = t ==> t == u ==> s == u" by simp} - val trans3 = @{lemma "s = t ==> t = u ==> s == u" by simp} -in -fun trans (MetaEq thm1) (MetaEq thm2) = MetaEq (Thm.transitive thm1 thm2) - | trans (MetaEq thm) q = MetaEq (thm_of q COMP (thm COMP trans1)) - | trans p (MetaEq thm) = MetaEq (thm COMP (thm_of p COMP trans2)) - | trans p q = MetaEq (thm_of q COMP (thm_of p COMP trans3)) -end - - -(* t1 = s1 ==> ... ==> tn = sn ==> f t1 ... tn = f s1 .. 
sn - (reflexive antecendents are droppped) *) -local - exception MONO - - fun prove_refl (ct, _) = Thm.reflexive ct - fun prove_comb f g cp = - let val ((ct1, ct2), (cu1, cu2)) = apply2 Thm.dest_comb cp - in Thm.combination (f (ct1, cu1)) (g (ct2, cu2)) end - fun prove_arg f = prove_comb prove_refl f - - fun prove f cp = prove_comb (prove f) f cp handle CTERM _ => prove_refl cp - - fun prove_nary is_comb f = - let - fun prove (cp as (ct, _)) = f cp handle MONO => - if is_comb (Thm.term_of ct) - then prove_comb (prove_arg prove) prove cp - else prove_refl cp - in prove end - - fun prove_list f n cp = - if n = 0 then prove_refl cp - else prove_comb (prove_arg f) (prove_list f (n-1)) cp - - fun with_length f (cp as (cl, _)) = - f (length (HOLogic.dest_list (Thm.term_of cl))) cp - - fun prove_distinct f = prove_arg (with_length (prove_list f)) - - fun prove_eq exn lookup cp = - (case lookup (Logic.mk_equals (apply2 Thm.term_of cp)) of - SOME eq => eq - | NONE => if exn then raise MONO else prove_refl cp) - - val prove_exn = prove_eq true - and prove_safe = prove_eq false - - fun mono f (cp as (cl, _)) = - (case Term.head_of (Thm.term_of cl) of - @{const HOL.conj} => prove_nary Old_Z3_Proof_Literals.is_conj (prove_exn f) - | @{const HOL.disj} => prove_nary Old_Z3_Proof_Literals.is_disj (prove_exn f) - | Const (@{const_name distinct}, _) => prove_distinct (prove_safe f) - | _ => prove (prove_safe f)) cp -in -fun monotonicity eqs ct = - let - fun and_symmetric (t, thm) = [(t, thm), (t, Thm.symmetric thm)] - val teqs = maps (and_symmetric o `Thm.prop_of o meta_eq_of) eqs - val lookup = AList.lookup (op aconv) teqs - val cp = Thm.dest_binop (Thm.dest_arg ct) - in MetaEq (prove_exn lookup cp handle MONO => mono lookup cp) end -end - - -(* |- f a b = f b a (where f is equality) *) -local - val rule = @{lemma "a = b == b = a" by (atomize(full)) (rule eq_commute)} -in -fun commutativity ct = - MetaEq (Old_Z3_Proof_Tools.match_instantiate I - (Old_Z3_Proof_Tools.as_meta_eq ct) rule) -end - - - -(** quantifier proof rules **) - -(* P ?x = Q ?x ==> (ALL x. P x) = (ALL x. Q x) - P ?x = Q ?x ==> (EX x. P x) = (EX x. Q x) *) -local - val rules = [ - @{lemma "(!!x. P x == Q x) ==> (ALL x. P x) == (ALL x. Q x)" by simp}, - @{lemma "(!!x. P x == Q x) ==> (EX x. P x) == (EX x. Q x)" by simp}] -in -fun quant_intro ctxt vars p ct = - let - val thm = meta_eq_of p - val rules' = Old_Z3_Proof_Tools.varify vars thm :: rules - val cu = Old_Z3_Proof_Tools.as_meta_eq ct - val tac = REPEAT_ALL_NEW (match_tac ctxt rules') - in MetaEq (Old_Z3_Proof_Tools.by_tac ctxt tac cu) end -end - - -(* |- ((ALL x. P x) | Q) = (ALL x. P x | Q) *) -fun pull_quant ctxt = Thm o try_apply ctxt [] [ - named ctxt "fast" (Old_Z3_Proof_Tools.by_tac ctxt (HOL_fast_tac ctxt))] - (* FIXME: not very well tested *) - - -(* |- (ALL x. P x & Q x) = ((ALL x. P x) & (ALL x. Q x)) *) -fun push_quant ctxt = Thm o try_apply ctxt [] [ - named ctxt "fast" (Old_Z3_Proof_Tools.by_tac ctxt (HOL_fast_tac ctxt))] - (* FIXME: not very well tested *) - - -(* |- (ALL x1 ... xn y1 ... yn. P x1 ... xn) = (ALL x1 ... xn. P x1 ... xn) *) -local - val elim_all = @{lemma "P = Q ==> (ALL x. P) = Q" by fast} - val elim_ex = @{lemma "P = Q ==> (EX x. 
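(* Illustrative sketch, not from the patch: the congruence idea behind
   monotonicity above - given equations for some arguments, f t1 ... tn equals
   f s1 ... sn, and arguments without an equation are simply kept (the
   reflexive case). The toy_tm datatype and all names are invented here. *)
datatype toy_tm = V of string | App of string * toy_tm list

(* known argument equations, oriented left-to-right *)
val arg_eqs : (toy_tm * toy_tm) list = [(V "t1", V "s1")]

fun rhs_for t =
  (case List.find (fn (l, _) => l = t) arg_eqs of
     SOME (_, r) => r
   | NONE => t)                          (* no equation: keep the argument *)

(* rewrite one application argument-wise, as monotonicity does *)
fun mono (App (f, ts)) = App (f, map rhs_for ts)
  | mono t = t

(* mono (App ("f", [V "t1", V "x"])) = App ("f", [V "s1", V "x"]) *)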
P) = Q" by fast} - - fun elim_unused_tac ctxt i st = ( - match_tac ctxt [@{thm refl}] - ORELSE' (match_tac ctxt [elim_all, elim_ex] THEN' elim_unused_tac ctxt) - ORELSE' ( - match_tac ctxt [@{thm iff_allI}, @{thm iff_exI}] - THEN' elim_unused_tac ctxt)) i st -in - -fun elim_unused_vars ctxt = Thm o Old_Z3_Proof_Tools.by_tac ctxt (elim_unused_tac ctxt) - -end - - -(* |- (ALL x1 ... xn. ~(x1 = t1 & ... xn = tn) | P x1 ... xn) = P t1 ... tn *) -fun dest_eq_res ctxt = Thm o try_apply ctxt [] [ - named ctxt "fast" (Old_Z3_Proof_Tools.by_tac ctxt (HOL_fast_tac ctxt))] - (* FIXME: not very well tested *) - - -(* |- ~(ALL x1...xn. P x1...xn) | P a1...an *) -local - val rule = @{lemma "~ P x | Q ==> ~(ALL x. P x) | Q" by fast} -in -fun quant_inst ctxt = Thm o Old_Z3_Proof_Tools.by_tac ctxt ( - REPEAT_ALL_NEW (match_tac ctxt [rule]) - THEN' resolve_tac ctxt @{thms excluded_middle}) -end - - -(* |- (EX x. P x) = P c |- ~(ALL x. P x) = ~ P c *) -local - val forall = - Old_SMT_Utils.mk_const_pat @{theory} @{const_name Pure.all} - (Old_SMT_Utils.destT1 o Old_SMT_Utils.destT1) - fun mk_forall cv ct = - Thm.apply (Old_SMT_Utils.instT' cv forall) (Thm.lambda cv ct) - - fun get_vars f mk pred ctxt t = - Term.fold_aterms f t [] - |> map_filter (fn v => - if pred v then SOME (Thm.cterm_of ctxt (mk v)) else NONE) - - fun close vars f ct ctxt = - let - val frees_of = get_vars Term.add_frees Free (member (op =) vars o fst) - val vs = frees_of ctxt (Thm.term_of ct) - val (thm, ctxt') = f (fold_rev mk_forall vs ct) ctxt - val vars_of = get_vars Term.add_vars Var (K true) ctxt' - in - (Thm.instantiate ([], map (dest_Var o Thm.term_of) (vars_of (Thm.prop_of thm)) ~~ vs) thm, - ctxt') - end - - val sk_rules = @{lemma - "c = (SOME x. P x) ==> (EX x. P x) = P c" - "c = (SOME x. ~P x) ==> (~(ALL x. P x)) = (~P c)" - by (metis someI_ex)+} -in - -fun skolemize vars = - apfst Thm oo close vars (yield_singleton Assumption.add_assumes) - -fun discharge_sk_tac ctxt i st = ( - resolve_tac ctxt @{thms trans} i - THEN resolve_tac ctxt sk_rules i - THEN (resolve_tac ctxt @{thms refl} ORELSE' discharge_sk_tac ctxt) (i+1) - THEN resolve_tac ctxt @{thms refl} i) st - -end - - - -(** theory proof rules **) - -(* theory lemmas: linear arithmetic, arrays *) -fun th_lemma ctxt simpset thms = Thm o try_apply ctxt thms [ - Old_Z3_Proof_Tools.by_abstraction 0 (false, true) ctxt thms (fn ctxt' => - Old_Z3_Proof_Tools.by_tac ctxt' ( - NAMED ctxt' "arith" (Arith_Data.arith_tac ctxt') - ORELSE' NAMED ctxt' "simp+arith" ( - Simplifier.asm_full_simp_tac (put_simpset simpset ctxt') - THEN_ALL_NEW Arith_Data.arith_tac ctxt')))] - - -(* rewriting: prove equalities: - * ACI of conjunction/disjunction - * contradiction, excluded middle - * logical rewriting rules (for negation, implication, equivalence, - distinct) - * normal forms for polynoms (integer/real arithmetic) - * quantifier elimination over linear arithmetic - * ... ? 
**) -local - fun spec_meta_eq_of thm = - (case try (fn th => th RS @{thm spec}) thm of - SOME thm' => spec_meta_eq_of thm' - | NONE => mk_meta_eq thm) - - fun prep (Thm thm) = spec_meta_eq_of thm - | prep (MetaEq thm) = thm - | prep (Literals (thm, _)) = spec_meta_eq_of thm - - fun unfold_conv ctxt ths = - Conv.arg_conv (Conv.binop_conv (Old_Z3_Proof_Tools.unfold_eqs ctxt - (map prep ths))) - - fun with_conv _ [] prv = prv - | with_conv ctxt ths prv = - Old_Z3_Proof_Tools.with_conv (unfold_conv ctxt ths) prv - - val unfold_conv = - Conv.arg_conv (Conv.binop_conv - (Conv.try_conv Old_Z3_Proof_Tools.unfold_distinct_conv)) - val prove_conj_disj_eq = - Old_Z3_Proof_Tools.with_conv unfold_conv Old_Z3_Proof_Literals.prove_conj_disj_eq - - fun declare_hyps ctxt thm = - (thm, snd (Assumption.add_assumes (Thm.chyps_of thm) ctxt)) -in - -val abstraction_depth = 3 - (* - This value was chosen large enough to potentially catch exceptions, - yet small enough to not cause too much harm. The value might be - increased in the future, if reconstructing 'rewrite' fails on problems - that get too much abstracted to be reconstructable. - *) - -fun rewrite simpset ths ct ctxt = - apfst Thm (declare_hyps ctxt (with_conv ctxt ths (try_apply ctxt [] [ - named ctxt "conj/disj/distinct" prove_conj_disj_eq, - named ctxt "pull-ite" Old_Z3_Proof_Methods.prove_ite ctxt, - Old_Z3_Proof_Tools.by_abstraction 0 (true, false) ctxt [] (fn ctxt' => - Old_Z3_Proof_Tools.by_tac ctxt' ( - NAMED ctxt' "simp (logic)" (Simplifier.simp_tac (put_simpset simpset ctxt')) - THEN_ALL_NEW NAMED ctxt' "fast (logic)" (fast_tac ctxt'))), - Old_Z3_Proof_Tools.by_abstraction 0 (false, true) ctxt [] (fn ctxt' => - Old_Z3_Proof_Tools.by_tac ctxt' ( - (resolve_tac ctxt' @{thms iff_allI} ORELSE' K all_tac) - THEN' NAMED ctxt' "simp (theory)" (Simplifier.simp_tac (put_simpset simpset ctxt')) - THEN_ALL_NEW ( - NAMED ctxt' "fast (theory)" (HOL_fast_tac ctxt') - ORELSE' NAMED ctxt' "arith (theory)" (Arith_Data.arith_tac ctxt')))), - Old_Z3_Proof_Tools.by_abstraction 0 (true, true) ctxt [] (fn ctxt' => - Old_Z3_Proof_Tools.by_tac ctxt' ( - (resolve_tac ctxt' @{thms iff_allI} ORELSE' K all_tac) - THEN' NAMED ctxt' "simp (full)" (Simplifier.simp_tac (put_simpset simpset ctxt')) - THEN_ALL_NEW ( - NAMED ctxt' "fast (full)" (HOL_fast_tac ctxt') - ORELSE' NAMED ctxt' "arith (full)" (Arith_Data.arith_tac ctxt')))), - named ctxt "injectivity" (Old_Z3_Proof_Methods.prove_injectivity ctxt), - Old_Z3_Proof_Tools.by_abstraction abstraction_depth (true, true) ctxt [] - (fn ctxt' => - Old_Z3_Proof_Tools.by_tac ctxt' ( - (resolve_tac ctxt' @{thms iff_allI} ORELSE' K all_tac) - THEN' NAMED ctxt' "simp (deepen)" (Simplifier.simp_tac (put_simpset simpset ctxt')) - THEN_ALL_NEW ( - NAMED ctxt' "fast (deepen)" (HOL_fast_tac ctxt') - ORELSE' NAMED ctxt' "arith (deepen)" (Arith_Data.arith_tac - ctxt'))))]) ct)) - -end - - - -(* proof reconstruction *) - -(** tracing and checking **) - -fun trace_before ctxt idx = Old_SMT_Config.trace_msg ctxt (fn r => - "Z3: #" ^ string_of_int idx ^ ": " ^ Old_Z3_Proof_Parser.string_of_rule r) - -fun check_after idx r ps ct (p, (ctxt, _)) = - if not (Config.get ctxt Old_SMT_Config.trace) then () - else - let val thm = thm_of p |> tap (Thm.join_proofs o single) - in - if (Thm.cprop_of thm) aconvc ct then () - else - z3_exn (Pretty.string_of (Pretty.big_list - ("proof step failed: " ^ quote (Old_Z3_Proof_Parser.string_of_rule r) ^ - " (#" ^ string_of_int idx ^ ")") - (pretty_goal ctxt (map (thm_of o fst) ps) (Thm.prop_of thm) @ - 
[Pretty.block [Pretty.str "expected: ", - Syntax.pretty_term ctxt (Thm.term_of ct)]]))) - end - - -(** overall reconstruction procedure **) - -local - fun not_supported r = raise Fail ("Z3: proof rule not implemented: " ^ - quote (Old_Z3_Proof_Parser.string_of_rule r)) - - fun prove_step simpset vars r ps ct (cxp as (cx, ptab)) = - (case (r, ps) of - (* core rules *) - (Old_Z3_Proof_Parser.True_Axiom, _) => (Thm Old_Z3_Proof_Literals.true_thm, cxp) - | (Old_Z3_Proof_Parser.Asserted, _) => raise Fail "bad assertion" - | (Old_Z3_Proof_Parser.Goal, _) => raise Fail "bad assertion" - | (Old_Z3_Proof_Parser.Modus_Ponens, [(p, _), (q, _)]) => - (mp q (thm_of p), cxp) - | (Old_Z3_Proof_Parser.Modus_Ponens_Oeq, [(p, _), (q, _)]) => - (mp q (thm_of p), cxp) - | (Old_Z3_Proof_Parser.And_Elim, [(p, i)]) => - and_elim (p, i) ct ptab ||> pair cx - | (Old_Z3_Proof_Parser.Not_Or_Elim, [(p, i)]) => - not_or_elim (p, i) ct ptab ||> pair cx - | (Old_Z3_Proof_Parser.Hypothesis, _) => (Thm (Thm.assume ct), cxp) - | (Old_Z3_Proof_Parser.Lemma, [(p, _)]) => (lemma (thm_of p) ct, cxp) - | (Old_Z3_Proof_Parser.Unit_Resolution, (p, _) :: ps) => - (unit_resolution (thm_of p) (map (thm_of o fst) ps) ct, cxp) - | (Old_Z3_Proof_Parser.Iff_True, [(p, _)]) => (iff_true (thm_of p), cxp) - | (Old_Z3_Proof_Parser.Iff_False, [(p, _)]) => (iff_false (thm_of p), cxp) - | (Old_Z3_Proof_Parser.Distributivity, _) => (distributivity cx ct, cxp) - | (Old_Z3_Proof_Parser.Def_Axiom, _) => (def_axiom cx ct, cxp) - | (Old_Z3_Proof_Parser.Intro_Def, _) => intro_def ct cx ||> rpair ptab - | (Old_Z3_Proof_Parser.Apply_Def, [(p, _)]) => (apply_def (thm_of p), cxp) - | (Old_Z3_Proof_Parser.Iff_Oeq, [(p, _)]) => (p, cxp) - | (Old_Z3_Proof_Parser.Nnf_Pos, _) => (nnf cx vars (map fst ps) ct, cxp) - | (Old_Z3_Proof_Parser.Nnf_Neg, _) => (nnf cx vars (map fst ps) ct, cxp) - - (* equality rules *) - | (Old_Z3_Proof_Parser.Reflexivity, _) => (refl ct, cxp) - | (Old_Z3_Proof_Parser.Symmetry, [(p, _)]) => (symm p, cxp) - | (Old_Z3_Proof_Parser.Transitivity, [(p, _), (q, _)]) => (trans p q, cxp) - | (Old_Z3_Proof_Parser.Monotonicity, _) => (monotonicity (map fst ps) ct, cxp) - | (Old_Z3_Proof_Parser.Commutativity, _) => (commutativity ct, cxp) - - (* quantifier rules *) - | (Old_Z3_Proof_Parser.Quant_Intro, [(p, _)]) => (quant_intro cx vars p ct, cxp) - | (Old_Z3_Proof_Parser.Pull_Quant, _) => (pull_quant cx ct, cxp) - | (Old_Z3_Proof_Parser.Push_Quant, _) => (push_quant cx ct, cxp) - | (Old_Z3_Proof_Parser.Elim_Unused_Vars, _) => (elim_unused_vars cx ct, cxp) - | (Old_Z3_Proof_Parser.Dest_Eq_Res, _) => (dest_eq_res cx ct, cxp) - | (Old_Z3_Proof_Parser.Quant_Inst, _) => (quant_inst cx ct, cxp) - | (Old_Z3_Proof_Parser.Skolemize, _) => skolemize vars ct cx ||> rpair ptab - - (* theory rules *) - | (Old_Z3_Proof_Parser.Th_Lemma _, _) => (* FIXME: use arguments *) - (th_lemma cx simpset (map (thm_of o fst) ps) ct, cxp) - | (Old_Z3_Proof_Parser.Rewrite, _) => rewrite simpset [] ct cx ||> rpair ptab - | (Old_Z3_Proof_Parser.Rewrite_Star, ps) => - rewrite simpset (map fst ps) ct cx ||> rpair ptab - - | (Old_Z3_Proof_Parser.Nnf_Star, _) => not_supported r - | (Old_Z3_Proof_Parser.Cnf_Star, _) => not_supported r - | (Old_Z3_Proof_Parser.Transitivity_Star, _) => not_supported r - | (Old_Z3_Proof_Parser.Pull_Quant_Star, _) => not_supported r - - | _ => raise Fail ("Z3: proof rule " ^ - quote (Old_Z3_Proof_Parser.string_of_rule r) ^ - " has an unexpected number of arguments.")) - - fun lookup_proof ptab idx = - (case Inttab.lookup ptab idx of - SOME p => (p, 
idx) - | NONE => z3_exn ("unknown proof id: " ^ quote (string_of_int idx))) - - fun prove simpset vars (idx, step) (_, cxp as (ctxt, ptab)) = - let - val Old_Z3_Proof_Parser.Proof_Step {rule=r, prems, prop, ...} = step - val ps = map (lookup_proof ptab) prems - val _ = trace_before ctxt idx r - val (thm, (ctxt', ptab')) = - cxp - |> prove_step simpset vars r ps prop - |> tap (check_after idx r ps prop) - in (thm, (ctxt', Inttab.update (idx, thm) ptab')) end - - fun make_discharge_rules rules = rules @ [@{thm allI}, @{thm refl}, - @{thm reflexive}, Old_Z3_Proof_Literals.true_thm] - - fun discharge_assms_tac ctxt rules = - REPEAT (HEADGOAL (resolve_tac ctxt rules ORELSE' SOLVED' (discharge_sk_tac ctxt))) - - fun discharge_assms ctxt rules thm = - if Thm.nprems_of thm = 0 then Goal.norm_result ctxt thm - else - (case Seq.pull (discharge_assms_tac ctxt rules thm) of - SOME (thm', _) => Goal.norm_result ctxt thm' - | NONE => raise THM ("failed to discharge premise", 1, [thm])) - - fun discharge rules outer_ctxt (p, (inner_ctxt, _)) = - thm_of p - |> singleton (Proof_Context.export inner_ctxt outer_ctxt) - |> discharge_assms outer_ctxt (make_discharge_rules rules) -in - -fun reconstruct outer_ctxt recon output = - let - val {context=ctxt, typs, terms, rewrite_rules, assms} = recon - val (asserted, steps, vars, ctxt1) = - Old_Z3_Proof_Parser.parse ctxt typs terms output - - val simpset = - Old_Z3_Proof_Tools.make_simpset ctxt1 (Named_Theorems.get ctxt1 @{named_theorems old_z3_simp}) - - val ((is, rules), cxp as (ctxt2, _)) = - add_asserted outer_ctxt rewrite_rules assms asserted ctxt1 - in - if Config.get ctxt2 Old_SMT_Config.filter_only_facts then (is, @{thm TrueI}) - else - (Thm @{thm TrueI}, cxp) - |> fold (prove simpset vars) steps - |> discharge rules outer_ctxt - |> pair [] - end - -end - -end diff --git a/src/test-gen/Old_SMT/old_z3_proof_tools.ML b/src/test-gen/Old_SMT/old_z3_proof_tools.ML deleted file mode 100644 index 8fc65ba..0000000 --- a/src/test-gen/Old_SMT/old_z3_proof_tools.ML +++ /dev/null @@ -1,374 +0,0 @@ -(* Title: HOL/Library/Old_SMT/old_z3_proof_tools.ML - Author: Sascha Boehme, TU Muenchen - -Helper functions required for Z3 proof reconstruction. 
-*) - -signature OLD_Z3_PROOF_TOOLS = -sig - (*modifying terms*) - val as_meta_eq: cterm -> cterm - - (*theorem nets*) - val thm_net_of: ('a -> thm) -> 'a list -> 'a Net.net - val net_instances: (int * thm) Net.net -> cterm -> (int * thm) list - val net_instance: thm Net.net -> cterm -> thm option - - (*proof combinators*) - val under_assumption: (thm -> thm) -> cterm -> thm - val with_conv: conv -> (cterm -> thm) -> cterm -> thm - val discharge: thm -> thm -> thm - val varify: string list -> thm -> thm - val unfold_eqs: Proof.context -> thm list -> conv - val match_instantiate: (cterm -> cterm) -> cterm -> thm -> thm - val by_tac: Proof.context -> (int -> tactic) -> cterm -> thm - val make_hyp_def: thm -> Proof.context -> thm * Proof.context - val by_abstraction: int -> bool * bool -> Proof.context -> thm list -> - (Proof.context -> cterm -> thm) -> cterm -> thm - - (*a faster COMP*) - type compose_data = cterm list * (cterm -> cterm list) * thm - val precompose: (cterm -> cterm list) -> thm -> compose_data - val precompose2: (cterm -> cterm * cterm) -> thm -> compose_data - val compose: compose_data -> thm -> thm - - (*unfolding of 'distinct'*) - val unfold_distinct_conv: conv - - (*simpset*) - val add_simproc: Simplifier.simproc -> Context.generic -> Context.generic - val make_simpset: Proof.context -> thm list -> simpset -end - -structure Old_Z3_Proof_Tools: OLD_Z3_PROOF_TOOLS = -struct - - - -(* modifying terms *) - -fun as_meta_eq ct = - uncurry Old_SMT_Utils.mk_cequals (Thm.dest_binop (Old_SMT_Utils.dest_cprop ct)) - - - -(* theorem nets *) - -fun thm_net_of f xthms = - let fun insert xthm = Net.insert_term (K false) (Thm.prop_of (f xthm), xthm) - in fold insert xthms Net.empty end - -fun maybe_instantiate ct thm = - try Thm.first_order_match (Thm.cprop_of thm, ct) - |> Option.map (fn inst => Thm.instantiate inst thm) - -local - fun instances_from_net match f net ct = - let - val lookup = if match then Net.match_term else Net.unify_term - val xthms = lookup net (Thm.term_of ct) - fun select ct = map_filter (f (maybe_instantiate ct)) xthms - fun select' ct = - let val thm = Thm.trivial ct - in map_filter (f (try (fn rule => rule COMP thm))) xthms end - in (case select ct of [] => select' ct | xthms' => xthms') end -in - -fun net_instances net = - instances_from_net false (fn f => fn (i, thm) => Option.map (pair i) (f thm)) - net - -fun net_instance net = try hd o instances_from_net true I net - -end - - - -(* proof combinators *) - -fun under_assumption f ct = - let val ct' = Old_SMT_Utils.mk_cprop ct - in Thm.implies_intr ct' (f (Thm.assume ct')) end - -fun with_conv conv prove ct = - let val eq = Thm.symmetric (conv ct) - in Thm.equal_elim eq (prove (Thm.lhs_of eq)) end - -fun discharge p pq = Thm.implies_elim pq p - -fun varify vars = Drule.generalize ([], vars) - -fun unfold_eqs _ [] = Conv.all_conv - | unfold_eqs ctxt eqs = - Conv.top_sweep_conv (K (Conv.rewrs_conv eqs)) ctxt - -fun match_instantiate f ct thm = - Thm.instantiate (Thm.match (f (Thm.cprop_of thm), ct)) thm - -fun by_tac ctxt tac ct = Goal.norm_result ctxt (Goal.prove_internal ctxt [] ct (K (tac 1))) - -(* - |- c x == t x ==> P (c x) - --------------------------- - c == t |- P (c x) -*) -fun make_hyp_def thm ctxt = - let - val (lhs, rhs) = Thm.dest_binop (Thm.cprem_of thm 1) - val (cf, cvs) = Drule.strip_comb lhs - val eq = Old_SMT_Utils.mk_cequals cf (fold_rev Thm.lambda cvs rhs) - fun apply cv th = - Thm.combination th (Thm.reflexive cv) - |> Conv.fconv_rule (Conv.arg_conv (Thm.beta_conversion false)) - in - 
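(* Illustrative sketch, not from the patch: the theorem-net idea above,
   approximated by indexing entries under the head symbol of their (toy)
   proposition so that lookup only scans plausible candidates instead of the
   whole collection. The net_tm datatype and all names are invented here. *)
datatype net_tm = NV of string | NApp of string * net_tm list

fun head_of (NApp (f, _)) = f
  | head_of (NV x) = x

(* "insert": file an entry under the head symbol of its proposition *)
fun net_insert (prop, entry) net = (head_of prop, (prop, entry)) :: net

(* "lookup": return only the stored entries whose key matches the query head *)
fun net_matches net query =
  List.mapPartial
    (fn (key, e) => if key = head_of query then SOME e else NONE) net

(* net_matches (net_insert (NApp ("le", [NV "x", NV "y"]), "thm1") [])
               (NApp ("le", [NV "a", NV "b"]))
   = [(NApp ("le", [NV "x", NV "y"]), "thm1")] *)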
yield_singleton Assumption.add_assumes eq ctxt - |>> Thm.implies_elim thm o fold apply cvs - end - - - -(* abstraction *) - -local - -fun abs_context ctxt = (ctxt, Termtab.empty, 1, false) - -fun context_of (ctxt, _, _, _) = ctxt - -fun replace (_, (cv, ct)) = Thm.forall_elim ct o Thm.forall_intr cv - -fun abs_instantiate (_, tab, _, beta_norm) = - fold replace (Termtab.dest tab) #> - beta_norm ? Conv.fconv_rule (Thm.beta_conversion true) - -fun lambda_abstract cvs t = - let - val frees = map Free (Term.add_frees t []) - val cvs' = filter (fn cv => member (op aconv) frees (Thm.term_of cv)) cvs - val vs = map (Term.dest_Free o Thm.term_of) cvs' - in (fold_rev absfree vs t, cvs') end - -fun fresh_abstraction (_, cvs) ct (cx as (ctxt, tab, idx, beta_norm)) = - let val (t, cvs') = lambda_abstract cvs (Thm.term_of ct) - in - (case Termtab.lookup tab t of - SOME (cv, _) => (Drule.list_comb (cv, cvs'), cx) - | NONE => - let - val (n, ctxt') = yield_singleton Variable.variant_fixes "x" ctxt - val cv = Thm.cterm_of ctxt' - (Free (n, map Thm.typ_of_cterm cvs' ---> Thm.typ_of_cterm ct)) - val cu = Drule.list_comb (cv, cvs') - val e = (t, (cv, fold_rev Thm.lambda cvs' ct)) - val beta_norm' = beta_norm orelse not (null cvs') - in (cu, (ctxt', Termtab.update e tab, idx + 1, beta_norm')) end) - end - -fun abs_comb f g dcvs ct = - let val (cf, cu) = Thm.dest_comb ct - in f dcvs cf ##>> g dcvs cu #>> uncurry Thm.apply end - -fun abs_arg f = abs_comb (K pair) f - -fun abs_args f dcvs ct = - (case Thm.term_of ct of - _ $ _ => abs_comb (abs_args f) f dcvs ct - | _ => pair ct) - -fun abs_list f g dcvs ct = - (case Thm.term_of ct of - Const (@{const_name Nil}, _) => pair ct - | Const (@{const_name Cons}, _) $ _ $ _ => - abs_comb (abs_arg f) (abs_list f g) dcvs ct - | _ => g dcvs ct) - -fun abs_abs f (depth, cvs) ct = - let val (cv, cu) = Thm.dest_abs NONE ct - in f (depth, cv :: cvs) cu #>> Thm.lambda cv end - -val is_atomic = - (fn Free _ => true | Var _ => true | Bound _ => true | _ => false) - -fun abstract depth (ext_logic, with_theories) = - let - fun abstr1 cvs ct = abs_arg abstr cvs ct - and abstr2 cvs ct = abs_comb abstr1 abstr cvs ct - and abstr3 cvs ct = abs_comb abstr2 abstr cvs ct - and abstr_abs cvs ct = abs_arg (abs_abs abstr) cvs ct - - and abstr (dcvs as (d, cvs)) ct = - (case Thm.term_of ct of - @{const Trueprop} $ _ => abstr1 dcvs ct - | @{const Pure.imp} $ _ $ _ => abstr2 dcvs ct - | @{const True} => pair ct - | @{const False} => pair ct - | @{const Not} $ _ => abstr1 dcvs ct - | @{const HOL.conj} $ _ $ _ => abstr2 dcvs ct - | @{const HOL.disj} $ _ $ _ => abstr2 dcvs ct - | @{const HOL.implies} $ _ $ _ => abstr2 dcvs ct - | Const (@{const_name HOL.eq}, _) $ _ $ _ => abstr2 dcvs ct - | Const (@{const_name distinct}, _) $ _ => - if ext_logic then abs_arg (abs_list abstr fresh_abstraction) dcvs ct - else fresh_abstraction dcvs ct - | Const (@{const_name If}, _) $ _ $ _ $ _ => - if ext_logic then abstr3 dcvs ct else fresh_abstraction dcvs ct - | Const (@{const_name All}, _) $ _ => - if ext_logic then abstr_abs dcvs ct else fresh_abstraction dcvs ct - | Const (@{const_name Ex}, _) $ _ => - if ext_logic then abstr_abs dcvs ct else fresh_abstraction dcvs ct - | t => (fn cx => - if is_atomic t orelse can HOLogic.dest_number t then (ct, cx) - else if with_theories andalso - Old_Z3_Interface.is_builtin_theory_term (context_of cx) t - then abs_args abstr dcvs ct cx - else if d = 0 then fresh_abstraction dcvs ct cx - else - (case Term.strip_comb t of - (Const _, _) => abs_args abstr (d-1, cvs) ct cx - | 
(Free _, _) => abs_args abstr (d-1, cvs) ct cx - | _ => fresh_abstraction dcvs ct cx))) - in abstr (depth, []) end - -val cimp = Thm.cterm_of @{context} @{const Pure.imp} - -fun deepen depth f x = - if depth = 0 then f depth x - else (case try (f depth) x of SOME y => y | NONE => deepen (depth - 1) f x) - -fun with_prems depth thms f ct = - fold_rev (Thm.mk_binop cimp o Thm.cprop_of) thms ct - |> deepen depth f - |> fold (fn prem => fn th => Thm.implies_elim th prem) thms - -in - -fun by_abstraction depth mode ctxt thms prove = - with_prems depth thms (fn d => fn ct => - let val (cu, cx) = abstract d mode ct (abs_context ctxt) - in abs_instantiate cx (prove (context_of cx) cu) end) - -end - - - -(* a faster COMP *) - -type compose_data = cterm list * (cterm -> cterm list) * thm - -fun list2 (x, y) = [x, y] - -fun precompose f rule : compose_data = (f (Thm.cprem_of rule 1), f, rule) -fun precompose2 f rule : compose_data = precompose (list2 o f) rule - -fun compose (cvs, f, rule) thm = - discharge thm (Thm.instantiate ([], map (dest_Var o Thm.term_of) cvs ~~ f (Thm.cprop_of thm)) rule) - - - -(* unfolding of 'distinct' *) - -local - val set1 = @{lemma "x ~: set [] == ~False" by simp} - val set2 = @{lemma "x ~: set [x] == False" by simp} - val set3 = @{lemma "x ~: set [y] == x ~= y" by simp} - val set4 = @{lemma "x ~: set (x # ys) == False" by simp} - val set5 = @{lemma "x ~: set (y # ys) == x ~= y & x ~: set ys" by simp} - - fun set_conv ct = - (Conv.rewrs_conv [set1, set2, set3, set4] else_conv - (Conv.rewr_conv set5 then_conv Conv.arg_conv set_conv)) ct - - val dist1 = @{lemma "distinct [] == ~False" by (simp add: distinct_def)} - val dist2 = @{lemma "distinct [x] == ~False" by (simp add: distinct_def)} - val dist3 = @{lemma "distinct (x # xs) == x ~: set xs & distinct xs" - by (simp add: distinct_def)} - - fun binop_conv cv1 cv2 = Conv.combination_conv (Conv.arg_conv cv1) cv2 -in -fun unfold_distinct_conv ct = - (Conv.rewrs_conv [dist1, dist2] else_conv - (Conv.rewr_conv dist3 then_conv binop_conv set_conv unfold_distinct_conv)) ct -end - - - -(* simpset *) - -local - val antisym_le1 = mk_meta_eq @{thm order_class.antisym_conv} - val antisym_le2 = mk_meta_eq @{thm linorder_class.antisym_conv2} - val antisym_less1 = mk_meta_eq @{thm linorder_class.antisym_conv1} - val antisym_less2 = mk_meta_eq @{thm linorder_class.antisym_conv3} - - fun eq_prop t thm = HOLogic.mk_Trueprop t aconv Thm.prop_of thm - fun dest_binop ((c as Const _) $ t $ u) = (c, t, u) - | dest_binop t = raise TERM ("dest_binop", [t]) - - fun prove_antisym_le ctxt ct = - let - val (le, r, s) = dest_binop (Thm.term_of ct) - val less = Const (@{const_name less}, Term.fastype_of le) - val prems = Simplifier.prems_of ctxt - in - (case find_first (eq_prop (le $ s $ r)) prems of - NONE => - find_first (eq_prop (HOLogic.mk_not (less $ r $ s))) prems - |> Option.map (fn thm => thm RS antisym_less1) - | SOME thm => SOME (thm RS antisym_le1)) - end - handle THM _ => NONE - - fun prove_antisym_less ctxt ct = - let - val (less, r, s) = dest_binop (HOLogic.dest_not (Thm.term_of ct)) - val le = Const (@{const_name less_eq}, Term.fastype_of less) - val prems = Simplifier.prems_of ctxt - in - (case find_first (eq_prop (le $ r $ s)) prems of - NONE => - find_first (eq_prop (HOLogic.mk_not (less $ s $ r))) prems - |> Option.map (fn thm => thm RS antisym_less2) - | SOME thm => SOME (thm RS antisym_le2)) - end - handle THM _ => NONE - - val basic_simpset = - simpset_of (put_simpset HOL_ss @{context} - addsimps @{thms field_simps} - addsimps 
[@{thm times_divide_eq_right}, @{thm times_divide_eq_left}] - addsimps @{thms arith_special} addsimps @{thms arith_simps} - addsimps @{thms rel_simps} - addsimps @{thms array_rules} - addsimps @{thms term_true_def} addsimps @{thms term_false_def} - addsimps @{thms z3div_def} addsimps @{thms z3mod_def} - addsimprocs [@{simproc numeral_divmod}] - addsimprocs [ - Simplifier.make_simproc @{context} "fast_int_arith" - {lhss = [@{term "(m::int) < n"}, @{term "(m::int) \ n"}, @{term "(m::int) = n"}], - proc = K Lin_Arith.simproc}, - Simplifier.make_simproc @{context} "antisym_le" - {lhss = [@{term "(x::'a::order) \ y"}], - proc = K prove_antisym_le}, - Simplifier.make_simproc @{context} "antisym_less" - {lhss = [@{term "\ (x::'a::linorder) < y"}], - proc = K prove_antisym_less}]) - - structure Simpset = Generic_Data - ( - type T = simpset - val empty = basic_simpset - val extend = I - val merge = Simplifier.merge_ss - ) -in - -fun add_simproc simproc context = - Simpset.map (simpset_map (Context.proof_of context) - (fn ctxt => ctxt addsimprocs [simproc])) context - -fun make_simpset ctxt rules = - simpset_of (put_simpset (Simpset.get (Context.Proof ctxt)) ctxt addsimps rules) - -end - -end diff --git a/src/test-gen/SMT/conj_disj_perm.ML b/src/test-gen/SMT/conj_disj_perm.ML deleted file mode 100644 index 30b85d6..0000000 --- a/src/test-gen/SMT/conj_disj_perm.ML +++ /dev/null @@ -1,127 +0,0 @@ -(* Title: HOL/Tools/SMT/conj_disj_perm.ML - Author: Sascha Boehme, TU Muenchen - -Tactic to prove equivalence of permutations of conjunctions and disjunctions. -*) - -signature CONJ_DISJ_PERM = -sig - val conj_disj_perm_tac: Proof.context -> int -> tactic -end - -structure Conj_Disj_Perm: CONJ_DISJ_PERM = -struct - -fun with_assumption ct f = - let val ct' = Thm.apply @{cterm HOL.Trueprop} ct - in Thm.implies_intr ct' (f (Thm.assume ct')) end - -fun eq_from_impls thm1 thm2 = thm2 INCR_COMP (thm1 INCR_COMP @{thm iffI}) - -fun add_lit thm = Termtab.update (HOLogic.dest_Trueprop (Thm.prop_of thm), thm) - -val ndisj1_rule = @{lemma "~(P | Q) ==> ~P" by auto} -val ndisj2_rule = @{lemma "~(P | Q) ==> ~Q" by auto} - -fun explode_thm thm = - (case HOLogic.dest_Trueprop (Thm.prop_of thm) of - @{const HOL.conj} $ _ $ _ => explode_conj_thm @{thm conjunct1} @{thm conjunct2} thm - | @{const HOL.Not} $ (@{const HOL.disj} $ _ $ _) => explode_conj_thm ndisj1_rule ndisj2_rule thm - | @{const HOL.Not} $ (@{const HOL.Not} $ _) => explode_thm (thm RS @{thm notnotD}) - | _ => add_lit thm) - -and explode_conj_thm rule1 rule2 thm lits = - explode_thm (thm RS rule1) (explode_thm (thm RS rule2) (add_lit thm lits)) - -val not_false_rule = @{lemma "~False" by auto} -fun explode thm = explode_thm thm (add_lit not_false_rule (add_lit @{thm TrueI} Termtab.empty)) - -fun find_dual_lit lits (@{const HOL.Not} $ t, thm) = Termtab.lookup lits t |> Option.map (pair thm) - | find_dual_lit _ _ = NONE - -fun find_dual_lits lits = Termtab.get_first (find_dual_lit lits) lits - -val not_not_rule = @{lemma "P ==> ~~P" by auto} -val ndisj_rule = @{lemma "~P ==> ~Q ==> ~(P | Q)" by auto} - -fun join lits t = - (case Termtab.lookup lits t of - SOME thm => thm - | NONE => join_term lits t) - -and join_term lits (@{const HOL.conj} $ t $ u) = @{thm conjI} OF (map (join lits) [t, u]) - | join_term lits (@{const HOL.Not} $ (@{const HOL.disj} $ t $ u)) = - ndisj_rule OF (map (join lits o HOLogic.mk_not) [t, u]) - | join_term lits (@{const HOL.Not} $ (@{const HOL.Not} $ t)) = join lits t RS not_not_rule - | join_term _ t = raise TERM ("join_term", [t]) - -fun 
prove_conj_disj_imp ct cu = with_assumption ct (fn thm => join (explode thm) (Thm.term_of cu)) - -fun prove_conj_disj_eq (clhs, crhs) = - let - val thm1 = prove_conj_disj_imp clhs crhs - val thm2 = prove_conj_disj_imp crhs clhs - in eq_from_impls thm1 thm2 end - -val not_not_false_rule = @{lemma "~~False ==> P" by auto} -val not_true_rule = @{lemma "~True ==> P" by auto} - -fun prove_any_imp ct = - (case Thm.term_of ct of - @{const HOL.False} => @{thm FalseE} - | @{const HOL.Not} $ (@{const HOL.Not} $ @{const HOL.False}) => not_not_false_rule - | @{const HOL.Not} $ @{const HOL.True} => not_true_rule - | _ => raise CTERM ("prove_any_imp", [ct])) - -fun prove_contradiction_imp ct = - with_assumption ct (fn thm => - let val lits = explode thm - in - (case Termtab.lookup lits @{const HOL.False} of - SOME thm' => thm' RS @{thm FalseE} - | NONE => - (case Termtab.lookup lits (@{const HOL.Not} $ @{const HOL.True}) of - SOME thm' => thm' RS not_true_rule - | NONE => - (case find_dual_lits lits of - SOME (not_lit_thm, lit_thm) => @{thm notE} OF [not_lit_thm, lit_thm] - | NONE => raise CTERM ("prove_contradiction", [ct])))) - end) - -fun prove_contradiction_eq to_right (clhs, crhs) = - let - val thm1 = if to_right then prove_contradiction_imp clhs else prove_any_imp clhs - val thm2 = if to_right then prove_any_imp crhs else prove_contradiction_imp crhs - in eq_from_impls thm1 thm2 end - -val contrapos_rule = @{lemma "(~P) = (~Q) ==> P = Q" by auto} -fun contrapos prove cp = contrapos_rule OF [prove (apply2 (Thm.apply @{cterm HOL.Not}) cp)] - -datatype kind = True | False | Conj | Disj | Other - -fun choose t _ _ _ @{const HOL.True} = t - | choose _ f _ _ @{const HOL.False} = f - | choose _ _ c _ (@{const HOL.conj} $ _ $ _) = c - | choose _ _ _ d (@{const HOL.disj} $ _ $ _) = d - | choose _ _ _ _ _ = Other - -fun kind_of (@{const HOL.Not} $ t) = choose False True Disj Conj t - | kind_of t = choose True False Conj Disj t - -fun prove_conj_disj_perm ct cp = - (case apply2 (kind_of o Thm.term_of) cp of - (Conj, Conj) => prove_conj_disj_eq cp - | (Disj, Disj) => contrapos prove_conj_disj_eq cp - | (Conj, False) => prove_contradiction_eq true cp - | (False, Conj) => prove_contradiction_eq false cp - | (Disj, True) => contrapos (prove_contradiction_eq true) cp - | (True, Disj) => contrapos (prove_contradiction_eq false) cp - | _ => raise CTERM ("prove_conj_disj_perm", [ct])) - -fun conj_disj_perm_tac ctxt = CSUBGOAL (fn (ct, i) => - (case Thm.term_of ct of - @{const HOL.Trueprop} $ (@{const HOL.eq(bool)} $ _ $ _) => - resolve_tac ctxt [prove_conj_disj_perm ct (Thm.dest_binop (Thm.dest_arg ct))] i - | _ => no_tac)) - -end diff --git a/src/test-gen/SMT/cvc4_interface.ML b/src/test-gen/SMT/cvc4_interface.ML deleted file mode 100644 index 68cad31..0000000 --- a/src/test-gen/SMT/cvc4_interface.ML +++ /dev/null @@ -1,31 +0,0 @@ -(* Title: HOL/Tools/SMT/cvc4_interface.ML - Author: Jasmin Blanchette, TU Muenchen - -Interface to CVC4 based on an extended version of SMT-LIB. 
-*) - -signature CVC4_INTERFACE = -sig - val smtlib_cvc4C: SMT_Util.class -end; - -structure CVC4_Interface: CVC4_INTERFACE = -struct - -val smtlib_cvc4C = SMTLIB_Interface.smtlibC @ ["cvc4"] - - -(* interface *) - -local - fun translate_config ctxt = - {logic = K "(set-logic ALL_SUPPORTED)\n", fp_kinds = [BNF_Util.Least_FP, BNF_Util.Greatest_FP], - serialize = #serialize (SMTLIB_Interface.translate_config ctxt)} -in - -val _ = - Theory.setup (Context.theory_map (SMT_Translate.add_config (smtlib_cvc4C, translate_config))) - -end - -end; diff --git a/src/test-gen/SMT/cvc4_proof_parse.ML b/src/test-gen/SMT/cvc4_proof_parse.ML deleted file mode 100644 index 2807164..0000000 --- a/src/test-gen/SMT/cvc4_proof_parse.ML +++ /dev/null @@ -1,46 +0,0 @@ -(* Title: HOL/Tools/SMT/cvc4_proof_parse.ML - Author: Jasmin Blanchette, TU Muenchen - -CVC4 proof (actually, unsat core) parsing. -*) - -signature CVC4_PROOF_PARSE = -sig - val parse_proof: SMT_Translate.replay_data -> - ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> - SMT_Solver.parsed_proof -end; - -structure CVC4_Proof_Parse: CVC4_PROOF_PARSE = -struct - -fun parse_proof ({ll_defs, assms, ...} : SMT_Translate.replay_data) xfacts prems _ output = - if exists (String.isPrefix "(error \"This build of CVC4 doesn't have proof support") output then - {outcome = NONE, fact_ids = NONE, atp_proof = K []} - else - let - val num_ll_defs = length ll_defs - - val id_of_index = Integer.add num_ll_defs - val index_of_id = Integer.add (~ num_ll_defs) - - val used_assert_ids = map_filter (try SMTLIB_Interface.assert_index_of_name) output - val used_assm_js = - map_filter (fn id => let val i = index_of_id id in if i >= 0 then SOME i else NONE end) - used_assert_ids - - val conjecture_i = 0 - val prems_i = conjecture_i + 1 - val num_prems = length prems - val facts_i = prems_i + num_prems - - val fact_ids' = - map_filter (fn j => - let val (i, _) = nth assms j in - try (apsnd (nth xfacts)) (id_of_index j, i - facts_i) - end) used_assm_js - in - {outcome = NONE, fact_ids = SOME fact_ids', atp_proof = K []} - end - -end; diff --git a/src/test-gen/SMT/smt_builtin.ML b/src/test-gen/SMT/smt_builtin.ML deleted file mode 100644 index a5955c7..0000000 --- a/src/test-gen/SMT/smt_builtin.ML +++ /dev/null @@ -1,222 +0,0 @@ -(* Title: HOL/Tools/SMT/smt_builtin.ML - Author: Sascha Boehme, TU Muenchen - -Tables of types and terms directly supported by SMT solvers. 
-*) - -signature SMT_BUILTIN = -sig - (*for experiments*) - val filter_builtins: (typ -> bool) -> Proof.context -> Proof.context - - (*built-in types*) - val add_builtin_typ: SMT_Util.class -> - typ * (typ -> string option) * (typ -> int -> string option) -> Context.generic -> - Context.generic - val add_builtin_typ_ext: typ * (typ -> bool) -> Context.generic -> - Context.generic - val dest_builtin_typ: Proof.context -> typ -> string option - val is_builtin_typ_ext: Proof.context -> typ -> bool - - (*built-in numbers*) - val dest_builtin_num: Proof.context -> term -> (string * typ) option - val is_builtin_num: Proof.context -> term -> bool - val is_builtin_num_ext: Proof.context -> term -> bool - - (*built-in functions*) - type 'a bfun = Proof.context -> typ -> term list -> 'a - type bfunr = string * int * term list * (term list -> term) - val add_builtin_fun: SMT_Util.class -> (string * typ) * bfunr option bfun -> Context.generic -> - Context.generic - val add_builtin_fun': SMT_Util.class -> term * string -> Context.generic -> Context.generic - val add_builtin_fun_ext: (string * typ) * term list bfun -> Context.generic -> Context.generic - val add_builtin_fun_ext': string * typ -> Context.generic -> Context.generic - val add_builtin_fun_ext'': string -> Context.generic -> Context.generic - val dest_builtin_fun: Proof.context -> string * typ -> term list -> bfunr option - val dest_builtin_eq: Proof.context -> term -> term -> bfunr option - val dest_builtin_pred: Proof.context -> string * typ -> term list -> bfunr option - val dest_builtin_conn: Proof.context -> string * typ -> term list -> bfunr option - val dest_builtin: Proof.context -> string * typ -> term list -> bfunr option - val dest_builtin_ext: Proof.context -> string * typ -> term list -> term list option - val is_builtin_fun: Proof.context -> string * typ -> term list -> bool - val is_builtin_fun_ext: Proof.context -> string * typ -> term list -> bool -end; - -structure SMT_Builtin: SMT_BUILTIN = -struct - - -(* built-in tables *) - -datatype ('a, 'b) kind = Ext of 'a | Int of 'b - -type ('a, 'b) ttab = ((typ * ('a, 'b) kind) Ord_List.T) SMT_Util.dict - -fun typ_ord ((T, _), (U, _)) = - let - fun tord (TVar _, Type _) = GREATER - | tord (Type _, TVar _) = LESS - | tord (Type (n, Ts), Type (m, Us)) = - if n = m then list_ord tord (Ts, Us) - else Term_Ord.typ_ord (T, U) - | tord TU = Term_Ord.typ_ord TU - in tord (T, U) end - -fun insert_ttab cs T f = - SMT_Util.dict_map_default (cs, []) - (Ord_List.insert typ_ord (perhaps (try Logic.varifyT_global) T, f)) - -fun merge_ttab ttabp = SMT_Util.dict_merge (Ord_List.merge typ_ord) ttabp - -fun lookup_ttab ctxt ttab T = - let fun match (U, _) = Sign.typ_instance (Proof_Context.theory_of ctxt) (T, U) - in - get_first (find_first match) (SMT_Util.dict_lookup ttab (SMT_Config.solver_class_of ctxt)) - end - -type ('a, 'b) btab = ('a, 'b) ttab Symtab.table - -fun insert_btab cs n T f = - Symtab.map_default (n, []) (insert_ttab cs T f) - -fun merge_btab btabp = Symtab.join (K merge_ttab) btabp - -fun lookup_btab ctxt btab (n, T) = - (case Symtab.lookup btab n of - NONE => NONE - | SOME ttab => lookup_ttab ctxt ttab T) - -type 'a bfun = Proof.context -> typ -> term list -> 'a - -type bfunr = string * int * term list * (term list -> term) - -structure Builtins = Generic_Data -( - type T = - (typ -> bool, (typ -> string option) * (typ -> int -> string option)) ttab * - (term list bfun, bfunr option bfun) btab - val empty = ([], Symtab.empty) - val extend = I - fun merge ((t1, b1), (t2, b2)) = 
(merge_ttab (t1, t2), merge_btab (b1, b2)) -) - -fun filter_ttab keep_T = map (apsnd (filter (keep_T o fst))) - -fun filter_builtins keep_T = - Context.proof_map (Builtins.map (fn (ttab, btab) => - (filter_ttab keep_T ttab, Symtab.map (K (filter_ttab keep_T)) btab))) - - -(* built-in types *) - -fun add_builtin_typ cs (T, f, g) = - Builtins.map (apfst (insert_ttab cs T (Int (f, g)))) - -fun add_builtin_typ_ext (T, f) = Builtins.map (apfst (insert_ttab SMT_Util.basicC T (Ext f))) - -fun lookup_builtin_typ ctxt = - lookup_ttab ctxt (fst (Builtins.get (Context.Proof ctxt))) - -fun dest_builtin_typ ctxt T = - (case lookup_builtin_typ ctxt T of - SOME (_, Int (f, _)) => f T - | _ => NONE) - -fun is_builtin_typ_ext ctxt T = - (case lookup_builtin_typ ctxt T of - SOME (_, Int (f, _)) => is_some (f T) - | SOME (_, Ext f) => f T - | NONE => false) - - -(* built-in numbers *) - -fun dest_builtin_num ctxt t = - (case try HOLogic.dest_number t of - NONE => NONE - | SOME (T, i) => - if i < 0 then NONE else - (case lookup_builtin_typ ctxt T of - SOME (_, Int (_, g)) => g T i |> Option.map (rpair T) - | _ => NONE)) - -val is_builtin_num = is_some oo dest_builtin_num - -fun is_builtin_num_ext ctxt t = - (case try HOLogic.dest_number t of - NONE => false - | SOME (T, _) => is_builtin_typ_ext ctxt T) - - -(* built-in functions *) - -fun add_builtin_fun cs ((n, T), f) = - Builtins.map (apsnd (insert_btab cs n T (Int f))) - -fun add_builtin_fun' cs (t, n) = - let - val c as (m, T) = Term.dest_Const t - fun app U ts = Term.list_comb (Const (m, U), ts) - fun bfun _ U ts = SOME (n, length (Term.binder_types T), ts, app U) - in add_builtin_fun cs (c, bfun) end - -fun add_builtin_fun_ext ((n, T), f) = - Builtins.map (apsnd (insert_btab SMT_Util.basicC n T (Ext f))) - -fun add_builtin_fun_ext' c = add_builtin_fun_ext (c, fn _ => fn _ => I) - -fun add_builtin_fun_ext'' n context = - let val thy = Context.theory_of context - in add_builtin_fun_ext' (n, Sign.the_const_type thy n) context end - -fun lookup_builtin_fun ctxt = - lookup_btab ctxt (snd (Builtins.get (Context.Proof ctxt))) - -fun dest_builtin_fun ctxt (c as (_, T)) ts = - (case lookup_builtin_fun ctxt c of - SOME (_, Int f) => f ctxt T ts - | _ => NONE) - -fun dest_builtin_eq ctxt t u = - let - val aT = TFree (Name.aT, @{sort type}) - val c = (@{const_name HOL.eq}, aT --> aT --> @{typ bool}) - fun mk ts = Term.list_comb (HOLogic.eq_const (Term.fastype_of (hd ts)), ts) - in - dest_builtin_fun ctxt c [] - |> Option.map (fn (n, i, _, _) => (n, i, [t, u], mk)) - end - -fun special_builtin_fun pred ctxt (c as (_, T)) ts = - if pred (Term.body_type T, Term.binder_types T) then - dest_builtin_fun ctxt c ts - else NONE - -fun dest_builtin_pred ctxt = special_builtin_fun (equal @{typ bool} o fst) ctxt - -fun dest_builtin_conn ctxt = - special_builtin_fun (forall (equal @{typ bool}) o (op ::)) ctxt - -fun dest_builtin ctxt c ts = - let val t = Term.list_comb (Const c, ts) - in - (case dest_builtin_num ctxt t of - SOME (n, _) => SOME (n, 0, [], K t) - | NONE => dest_builtin_fun ctxt c ts) - end - -fun dest_builtin_fun_ext ctxt (c as (_, T)) ts = - (case lookup_builtin_fun ctxt c of - SOME (_, Int f) => f ctxt T ts |> Option.map (fn (_, _, us, _) => us) - | SOME (_, Ext f) => SOME (f ctxt T ts) - | NONE => NONE) - -fun dest_builtin_ext ctxt c ts = - if is_builtin_num_ext ctxt (Term.list_comb (Const c, ts)) then SOME [] - else dest_builtin_fun_ext ctxt c ts - -fun is_builtin_fun ctxt c ts = is_some (dest_builtin_fun ctxt c ts) - -fun is_builtin_fun_ext ctxt c ts = 
is_some (dest_builtin_fun_ext ctxt c ts) - -end; diff --git a/src/test-gen/SMT/smt_config.ML b/src/test-gen/SMT/smt_config.ML deleted file mode 100644 index 8b8d029..0000000 --- a/src/test-gen/SMT/smt_config.ML +++ /dev/null @@ -1,265 +0,0 @@ -(* Title: HOL/Tools/SMT/smt_config.ML - Author: Sascha Boehme, TU Muenchen - -Configuration options and diagnostic tools for SMT. -*) - -signature SMT_CONFIG = -sig - (*solver*) - type solver_info = { - name: string, - class: Proof.context -> SMT_Util.class, - avail: unit -> bool, - options: Proof.context -> string list } - val add_solver: solver_info -> Context.generic -> Context.generic - val set_solver_options: string * string -> Context.generic -> Context.generic - val is_available: Proof.context -> string -> bool - val available_solvers_of: Proof.context -> string list - val select_solver: string -> Context.generic -> Context.generic - val solver_of: Proof.context -> string - val solver_class_of: Proof.context -> SMT_Util.class - val solver_options_of: Proof.context -> string list - - (*options*) - val oracle: bool Config.T - val timeout: real Config.T - val reconstruction_step_timeout: real Config.T - val random_seed: int Config.T - val read_only_certificates: bool Config.T - val verbose: bool Config.T - val trace: bool Config.T - val statistics: bool Config.T - val monomorph_limit: int Config.T - val monomorph_instances: int Config.T - val infer_triggers: bool Config.T - val debug_files: string Config.T - val sat_solver: string Config.T - - (*tools*) - val with_time_limit: Proof.context -> real Config.T -> ('a -> 'b) -> 'a -> 'b - val with_timeout: Proof.context -> ('a -> 'b) -> 'a -> 'b - - (*diagnostics*) - val trace_msg: Proof.context -> ('a -> string) -> 'a -> unit - val verbose_msg: Proof.context -> ('a -> string) -> 'a -> unit - val statistics_msg: Proof.context -> ('a -> string) -> 'a -> unit - - (*certificates*) - val select_certificates: string -> Context.generic -> Context.generic - val certificates_of: Proof.context -> Cache_IO.cache option - - (*setup*) - val print_setup: Proof.context -> unit -end; - -structure SMT_Config: SMT_CONFIG = -struct - -(* solver *) - -type solver_info = { - name: string, - class: Proof.context -> SMT_Util.class, - avail: unit -> bool, - options: Proof.context -> string list} - -type data = { - solvers: (solver_info * string list) Symtab.table, - solver: string option, - certs: Cache_IO.cache option} - -fun mk_data solvers solver certs: data = {solvers=solvers, solver=solver, certs=certs} - -val empty_data = mk_data Symtab.empty NONE NONE - -fun solvers_of ({solvers, ...}: data) = solvers -fun solver_of ({solver, ...}: data) = solver -fun certs_of ({certs, ...}: data) = certs - -fun map_solvers f ({solvers, solver, certs}: data) = mk_data (f solvers) solver certs -fun map_solver f ({solvers, solver, certs}: data) = mk_data solvers (f solver) certs -fun put_certs c ({solvers, solver, ...}: data) = mk_data solvers solver c - -fun merge_data ({solvers=ss1,solver=s1,certs=c1}: data, {solvers=ss2,solver=s2,certs=c2}: data) = - mk_data (Symtab.merge (K true) (ss1, ss2)) (merge_options (s1, s2)) (merge_options (c1, c2)) - -structure Data = Generic_Data -( - type T = data - val empty = empty_data - val extend = I - val merge = merge_data -) - -fun set_solver_options (name, options) = - let val opts = String.tokens (Symbol.is_ascii_blank o str) options - in Data.map (map_solvers (Symtab.map_entry name (apsnd (K opts)))) end - -fun add_solver (info as {name, ...} : solver_info) context = - if Symtab.defined 
(solvers_of (Data.get context)) name then - error ("Solver already registered: " ^ quote name) - else - context - |> Data.map (map_solvers (Symtab.update (name, (info, [])))) - |> Context.map_theory (Attrib.setup (Binding.name (name ^ "_options")) - (Scan.lift (@{keyword "="} |-- Args.name) >> - (Thm.declaration_attribute o K o set_solver_options o pair name)) - ("additional command line options for SMT solver " ^ quote name)) - -fun all_solvers_of ctxt = Symtab.keys (solvers_of (Data.get (Context.Proof ctxt))) - -fun solver_name_of ctxt = solver_of (Data.get (Context.Proof ctxt)) - -fun is_available ctxt name = - (case Symtab.lookup (solvers_of (Data.get (Context.Proof ctxt))) name of - SOME ({avail, ...}, _) => avail () - | NONE => false) - -fun available_solvers_of ctxt = - filter (is_available ctxt) (all_solvers_of ctxt) - -fun warn_solver (Context.Proof ctxt) name = - if Context_Position.is_visible ctxt then - warning ("The SMT solver " ^ quote name ^ " is not installed") - else () - | warn_solver _ _ = () - -fun select_solver name context = - let - val ctxt = Context.proof_of context - val upd = Data.map (map_solver (K (SOME name))) - in - if not (member (op =) (all_solvers_of ctxt) name) then - error ("Trying to select unknown solver: " ^ quote name) - else if not (is_available ctxt name) then - (warn_solver context name; upd context) - else upd context - end - -fun no_solver_err () = error "No SMT solver selected" - -fun solver_of ctxt = - (case solver_name_of ctxt of - SOME name => name - | NONE => no_solver_err ()) - -fun solver_info_of default select ctxt = - (case solver_name_of ctxt of - NONE => default () - | SOME name => select (Symtab.lookup (solvers_of (Data.get (Context.Proof ctxt))) name)) - -fun solver_class_of ctxt = - let fun class_of ({class, ...}: solver_info, _) = class ctxt - in solver_info_of no_solver_err (class_of o the) ctxt end - -fun solver_options_of ctxt = - let - fun all_options NONE = [] - | all_options (SOME ({options, ...} : solver_info, opts)) = - opts @ options ctxt - in solver_info_of (K []) all_options ctxt end - -val setup_solver = - Attrib.setup @{binding smt_solver} - (Scan.lift (@{keyword "="} |-- Args.name) >> - (Thm.declaration_attribute o K o select_solver)) - "SMT solver configuration" - - -(* options *) - -val oracle = Attrib.setup_config_bool @{binding smt_oracle} (K true) -val timeout = Attrib.setup_config_real @{binding smt_timeout} (K 30.0) -val reconstruction_step_timeout = Attrib.setup_config_real @{binding smt_reconstruction_step_timeout} (K 10.0) -val random_seed = Attrib.setup_config_int @{binding smt_random_seed} (K 1) -val read_only_certificates = Attrib.setup_config_bool @{binding smt_read_only_certificates} (K false) -val verbose = Attrib.setup_config_bool @{binding smt_verbose} (K true) -val trace = Attrib.setup_config_bool @{binding smt_trace} (K false) -val statistics = Attrib.setup_config_bool @{binding smt_statistics} (K false) -val monomorph_limit = Attrib.setup_config_int @{binding smt_monomorph_limit} (K 10) -val monomorph_instances = Attrib.setup_config_int @{binding smt_monomorph_instances} (K 500) -val infer_triggers = Attrib.setup_config_bool @{binding smt_infer_triggers} (K false) -val debug_files = Attrib.setup_config_string @{binding smt_debug_files} (K "") -val sat_solver = Attrib.setup_config_string @{binding smt_sat_solver} (K "cdclite") - - -(* diagnostics *) - -fun cond_trace flag f x = if flag then tracing ("SMT: " ^ f x) else () - -fun verbose_msg ctxt = cond_trace (Config.get ctxt verbose) -fun trace_msg 
ctxt = cond_trace (Config.get ctxt trace) -fun statistics_msg ctxt = cond_trace (Config.get ctxt statistics) - - -(* tools *) - -fun with_time_limit ctxt timeout_config f x = - Timeout.apply (seconds (Config.get ctxt timeout_config)) f x - handle Timeout.TIMEOUT _ => raise SMT_Failure.SMT SMT_Failure.Time_Out - -fun with_timeout ctxt = with_time_limit ctxt timeout - - -(* certificates *) - -val certificates_of = certs_of o Data.get o Context.Proof - -val get_certificates_path = Option.map (Cache_IO.cache_path_of) o certificates_of - -fun select_certificates name context = context |> Data.map (put_certs ( - if name = "" then NONE - else - Path.explode name - |> Path.append (Resources.master_directory (Context.theory_of context)) - |> SOME o Cache_IO.unsynchronized_init)) - -val setup_certificates = - Attrib.setup @{binding smt_certificates} - (Scan.lift (@{keyword "="} |-- Args.name) >> - (Thm.declaration_attribute o K o select_certificates)) - "SMT certificates configuration" - - -(* setup *) - -val _ = Theory.setup ( - setup_solver #> - setup_certificates) - -fun print_setup ctxt = - let - fun string_of_bool b = if b then "true" else "false" - - val names = available_solvers_of ctxt - val ns = if null names then ["(none)"] else sort_strings names - val n = the_default "(none)" (solver_name_of ctxt) - val opts = solver_options_of ctxt - - val t = string_of_real (Config.get ctxt timeout) - - val certs_filename = - (case get_certificates_path ctxt of - SOME path => Path.print path - | NONE => "(disabled)") - in - Pretty.writeln (Pretty.big_list "SMT setup:" [ - Pretty.str ("Current SMT solver: " ^ n), - Pretty.str ("Current SMT solver options: " ^ space_implode " " opts), - Pretty.str_list "Available SMT solvers: " "" ns, - Pretty.str ("Current timeout: " ^ t ^ " seconds"), - Pretty.str ("With proofs: " ^ - string_of_bool (not (Config.get ctxt oracle))), - Pretty.str ("Certificates cache: " ^ certs_filename), - Pretty.str ("Fixed certificates: " ^ - string_of_bool (Config.get ctxt read_only_certificates))]) - end - -val _ = - Outer_Syntax.command @{command_keyword smt_status} - "show the available SMT solvers, the currently selected SMT solver, \ - \and the values of SMT configuration options" - (Scan.succeed (Toplevel.keep (print_setup o Toplevel.context_of))) - -end; diff --git a/src/test-gen/SMT/smt_datatypes.ML b/src/test-gen/SMT/smt_datatypes.ML deleted file mode 100644 index 2467cab..0000000 --- a/src/test-gen/SMT/smt_datatypes.ML +++ /dev/null @@ -1,152 +0,0 @@ -(* Title: HOL/Tools/SMT/smt_datatypes.ML - Author: Sascha Boehme, TU Muenchen - -Collector functions for common type declarations and their representation -as (co)algebraic datatypes. 
-*) - -signature SMT_DATATYPES = -sig - val add_decls: BNF_Util.fp_kind list -> typ -> - (BNF_Util.fp_kind * (typ * (term * term list) list)) list list * Proof.context -> - (BNF_Util.fp_kind * (typ * (term * term list) list)) list list * Proof.context -end; - -structure SMT_Datatypes: SMT_DATATYPES = -struct - -fun mk_selectors T Ts sels = - if null sels then - Variable.variant_fixes (replicate (length Ts) "select") - #>> map2 (fn U => fn n => Free (n, T --> U)) Ts - else - pair sels - - -(* free constructor type declarations *) - -fun get_ctr_sugar_decl ({ctrs = ctrs0, selss = selss0, ...} : Ctr_Sugar.ctr_sugar) T Ts ctxt = - let - val selss = map (map (Ctr_Sugar.mk_disc_or_sel Ts)) selss0 - val ctrs = map (Ctr_Sugar.mk_ctr Ts) ctrs0 - - fun mk_constr ctr sels = - mk_selectors T (binder_types (fastype_of ctr)) sels #>> pair ctr - - val selss' = - (if has_duplicates (op aconv) (flat selss) orelse - exists (exists (can (dest_funT o range_type o fastype_of))) selss then - [] - else - selss) - |> Ctr_Sugar_Util.pad_list [] (length ctrs) - in - @{fold_map 2} mk_constr ctrs selss' ctxt - |>> (pair T #> single) - end - - -(* typedef declarations *) - -fun get_typedef_decl (({Abs_name, Rep_name, abs_type, rep_type, ...}, {Abs_inverse, ...}) - : Typedef.info) T Ts = - if can (curry (op RS) @{thm UNIV_I}) Abs_inverse then - let - val env = snd (Term.dest_Type abs_type) ~~ Ts - val instT = Term.map_atyps (perhaps (AList.lookup (op =) env)) - - val constr = Const (Abs_name, instT (rep_type --> abs_type)) - val select = Const (Rep_name, instT (abs_type --> rep_type)) - in [(T, [(constr, [select])])] end - else - [] - - -(* collection of declarations *) - -val extN = "_ext" (* cf. "HOL/Tools/typedef.ML" *) - -fun get_decls fps T n Ts ctxt = - let - fun maybe_typedef () = - (case Typedef.get_info ctxt n of - [] => ([], ctxt) - | info :: _ => (map (pair (hd fps)) (get_typedef_decl info T Ts), ctxt)) - in - (case BNF_FP_Def_Sugar.fp_sugar_of ctxt n of - SOME {fp, fp_res = {Ts = fp_Ts, ...}, fp_ctr_sugar = {ctr_sugar, ...}, ...} => - if member (op =) fps fp then - let - val ns = map (fst o dest_Type) fp_Ts - val mutual_fp_sugars = map_filter (BNF_FP_Def_Sugar.fp_sugar_of ctxt) ns - val Xs = map #X mutual_fp_sugars - val ctrXs_Tsss = map (#ctrXs_Tss o #fp_ctr_sugar) mutual_fp_sugars - - (* Datatypes nested through datatypes and codatatypes nested through codatatypes are - allowed. So are mutually (co)recursive (co)datatypes. *) - fun is_same_fp s = - (case BNF_FP_Def_Sugar.fp_sugar_of ctxt s of - SOME {fp = fp', ...} => fp' = fp - | NONE => false) - fun is_homogenously_nested_co_recursive (Type (s, Ts)) = - forall (if is_same_fp s then is_homogenously_nested_co_recursive - else not o BNF_FP_Rec_Sugar_Util.exists_subtype_in Xs) Ts - | is_homogenously_nested_co_recursive _ = true - - val Type (_, As) :: _ = fp_Ts - val substAs = Term.typ_subst_atomic (As ~~ Ts); - in - (* TODO/FIXME: The "bool" check is there to work around a CVC4 bug - (http://church.cims.nyu.edu/bugzilla3/show_bug.cgi?id=597). It should be removed once - the bug is fixed. 
*) - if forall (forall (forall (is_homogenously_nested_co_recursive))) ctrXs_Tsss andalso - forall (forall (forall (curry (op <>) @{typ bool}))) - (map (map (map substAs)) ctrXs_Tsss) then - get_ctr_sugar_decl ctr_sugar T Ts ctxt |>> map (pair fp) - else - maybe_typedef () - end - else - ([], ctxt) - | NONE => - if String.isSuffix extN n then - (* for records (FIXME: hack) *) - (case Ctr_Sugar.ctr_sugar_of ctxt n of - SOME ctr_sugar => - get_ctr_sugar_decl ctr_sugar T Ts ctxt |>> map (pair (hd fps)) - | NONE => maybe_typedef ()) - else - maybe_typedef ()) - end - -fun add_decls fps T (declss, ctxt) = - let - fun declared T = exists (exists (equal T o fst o snd)) - fun declared' T = exists (exists (equal T o fst o snd) o snd) - fun depends ds = exists (member (op =) (map (fst o snd) ds)) - - fun add (TFree _) = I - | add (TVar _) = I - | add (T as Type (@{type_name fun}, _)) = - fold add (Term.body_type T :: Term.binder_types T) - | add @{typ bool} = I - | add (T as Type (n, Ts)) = (fn (dss, ctxt1) => - if declared T declss orelse declared' T dss then - (dss, ctxt1) - else if SMT_Builtin.is_builtin_typ_ext ctxt1 T then - (dss, ctxt1) - else - (case get_decls fps T n Ts ctxt1 of - ([], _) => (dss, ctxt1) - | (ds, ctxt2) => - let - val constrTs = maps (map (snd o Term.dest_Const o fst) o snd o snd) ds - val Us = fold (union (op =) o Term.binder_types) constrTs [] - - fun ins [] = [(Us, ds)] - | ins ((Uds as (Us', _)) :: Udss) = - if depends ds Us' then (Us, ds) :: Uds :: Udss else Uds :: ins Udss - in fold add Us (ins dss, ctxt2) end)) - in add T ([], ctxt) |>> append declss o map snd end - -end; diff --git a/src/test-gen/SMT/smt_failure.ML b/src/test-gen/SMT/smt_failure.ML deleted file mode 100644 index ba892ae..0000000 --- a/src/test-gen/SMT/smt_failure.ML +++ /dev/null @@ -1,40 +0,0 @@ -(* Title: HOL/Tools/SMT/smt_failure.ML - Author: Sascha Boehme, TU Muenchen - -Failures and exception of SMT. -*) - -signature SMT_FAILURE = -sig - datatype failure = - Counterexample of bool | - Time_Out | - Out_Of_Memory | - Abnormal_Termination of int | - Other_Failure of string - val string_of_failure: failure -> string - exception SMT of failure -end; - -structure SMT_Failure: SMT_FAILURE = -struct - -datatype failure = - Counterexample of bool | - Time_Out | - Out_Of_Memory | - Abnormal_Termination of int | - Other_Failure of string - -fun string_of_failure (Counterexample genuine) = - if genuine then "Counterexample found (possibly spurious)" - else "Potential counterexample found" - | string_of_failure Time_Out = "Timed out" - | string_of_failure Out_Of_Memory = "Ran out of memory" - | string_of_failure (Abnormal_Termination err) = - "Solver terminated abnormally with error code " ^ string_of_int err - | string_of_failure (Other_Failure msg) = msg - -exception SMT of failure - -end; diff --git a/src/test-gen/SMT/smt_normalize.ML b/src/test-gen/SMT/smt_normalize.ML deleted file mode 100644 index 98e820b..0000000 --- a/src/test-gen/SMT/smt_normalize.ML +++ /dev/null @@ -1,444 +0,0 @@ -(* Title: HOL/Tools/SMT/smt_normalize.ML - Author: Sascha Boehme, TU Muenchen - -Normalization steps on theorems required by SMT solvers. 
-*) - -signature SMT_NORMALIZE = -sig - val drop_fact_warning: Proof.context -> thm -> unit - val atomize_conv: Proof.context -> conv - - val special_quant_table: (string * thm) list - val case_bool_entry: string * thm - val abs_min_max_table: (string * thm) list - - type extra_norm = Proof.context -> thm list * thm list -> thm list * thm list - val add_extra_norm: SMT_Util.class * extra_norm -> Context.generic -> Context.generic - val normalize: Proof.context -> thm list -> (int * thm) list -end; - -structure SMT_Normalize: SMT_NORMALIZE = -struct - -fun drop_fact_warning ctxt = - SMT_Config.verbose_msg ctxt (prefix "Warning: dropping assumption: " o - Thm.string_of_thm ctxt) - - -(* general theorem normalizations *) - -(** instantiate elimination rules **) - -local - val (cpfalse, cfalse) = `SMT_Util.mk_cprop (Thm.cterm_of @{context} @{const False}) - - fun inst f ct thm = - let val cv = f (Drule.strip_imp_concl (Thm.cprop_of thm)) - in Thm.instantiate ([], [(dest_Var (Thm.term_of cv), ct)]) thm end -in - -fun instantiate_elim thm = - (case Thm.concl_of thm of - @{const Trueprop} $ Var (_, @{typ bool}) => inst Thm.dest_arg cfalse thm - | Var _ => inst I cpfalse thm - | _ => thm) - -end - - -(** normalize definitions **) - -fun norm_def thm = - (case Thm.prop_of thm of - @{const Trueprop} $ (Const (@{const_name HOL.eq}, _) $ _ $ Abs _) => - norm_def (thm RS @{thm fun_cong}) - | Const (@{const_name Pure.eq}, _) $ _ $ Abs _ => norm_def (thm RS @{thm meta_eq_to_obj_eq}) - | _ => thm) - - -(** atomization **) - -fun atomize_conv ctxt ct = - (case Thm.term_of ct of - @{const Pure.imp} $ _ $ _ => - Conv.binop_conv (atomize_conv ctxt) then_conv Conv.rewr_conv @{thm atomize_imp} - | Const (@{const_name Pure.eq}, _) $ _ $ _ => - Conv.binop_conv (atomize_conv ctxt) then_conv Conv.rewr_conv @{thm atomize_eq} - | Const (@{const_name Pure.all}, _) $ Abs _ => - Conv.binder_conv (atomize_conv o snd) ctxt then_conv Conv.rewr_conv @{thm atomize_all} - | _ => Conv.all_conv) ct - handle CTERM _ => Conv.all_conv ct - -val setup_atomize = - fold SMT_Builtin.add_builtin_fun_ext'' [@{const_name Pure.imp}, @{const_name Pure.eq}, - @{const_name Pure.all}, @{const_name Trueprop}] - - -(** unfold special quantifiers **) - -val special_quant_table = [ - (@{const_name Ex1}, @{thm Ex1_def_raw}), - (@{const_name Ball}, @{thm Ball_def_raw}), - (@{const_name Bex}, @{thm Bex_def_raw})] - -local - fun special_quant (Const (n, _)) = AList.lookup (op =) special_quant_table n - | special_quant _ = NONE - - fun special_quant_conv _ ct = - (case special_quant (Thm.term_of ct) of - SOME thm => Conv.rewr_conv thm - | NONE => Conv.all_conv) ct -in - -fun unfold_special_quants_conv ctxt = - SMT_Util.if_exists_conv (is_some o special_quant) (Conv.top_conv special_quant_conv ctxt) - -val setup_unfolded_quants = fold (SMT_Builtin.add_builtin_fun_ext'' o fst) special_quant_table - -end - - -(** trigger inference **) - -local - (*** check trigger syntax ***) - - fun dest_trigger (Const (@{const_name pat}, _) $ _) = SOME true - | dest_trigger (Const (@{const_name nopat}, _) $ _) = SOME false - | dest_trigger _ = NONE - - fun eq_list [] = false - | eq_list (b :: bs) = forall (equal b) bs - - fun proper_trigger t = - t - |> these o try SMT_Util.dest_symb_list - |> map (map_filter dest_trigger o these o try SMT_Util.dest_symb_list) - |> (fn [] => false | bss => forall eq_list bss) - - fun proper_quant inside f t = - (case t of - Const (@{const_name All}, _) $ Abs (_, _, u) => proper_quant true f u - | Const (@{const_name Ex}, _) $ Abs (_, 
_, u) => proper_quant true f u - | @{const trigger} $ p $ u => - (if inside then f p else false) andalso proper_quant false f u - | Abs (_, _, u) => proper_quant false f u - | u1 $ u2 => proper_quant false f u1 andalso proper_quant false f u2 - | _ => true) - - fun check_trigger_error ctxt t = - error ("SMT triggers must only occur under quantifier and multipatterns " ^ - "must have the same kind: " ^ Syntax.string_of_term ctxt t) - - fun check_trigger_conv ctxt ct = - if proper_quant false proper_trigger (SMT_Util.term_of ct) then Conv.all_conv ct - else check_trigger_error ctxt (Thm.term_of ct) - - - (*** infer simple triggers ***) - - fun dest_cond_eq ct = - (case Thm.term_of ct of - Const (@{const_name HOL.eq}, _) $ _ $ _ => Thm.dest_binop ct - | @{const HOL.implies} $ _ $ _ => dest_cond_eq (Thm.dest_arg ct) - | _ => raise CTERM ("no equation", [ct])) - - fun get_constrs thy (Type (n, _)) = these (BNF_LFP_Compat.get_constrs thy n) - | get_constrs _ _ = [] - - fun is_constr thy (n, T) = - let fun match (m, U) = m = n andalso Sign.typ_instance thy (T, U) - in can (the o find_first match o get_constrs thy o Term.body_type) T end - - fun is_constr_pat thy t = - (case Term.strip_comb t of - (Free _, []) => true - | (Const c, ts) => is_constr thy c andalso forall (is_constr_pat thy) ts - | _ => false) - - fun is_simp_lhs ctxt t = - (case Term.strip_comb t of - (Const c, ts as _ :: _) => - not (SMT_Builtin.is_builtin_fun_ext ctxt c ts) andalso - forall (is_constr_pat (Proof_Context.theory_of ctxt)) ts - | _ => false) - - fun has_all_vars vs t = - subset (op aconv) (vs, map Free (Term.add_frees t [])) - - fun minimal_pats vs ct = - if has_all_vars vs (Thm.term_of ct) then - (case Thm.term_of ct of - _ $ _ => - (case apply2 (minimal_pats vs) (Thm.dest_comb ct) of - ([], []) => [[ct]] - | (ctss, ctss') => union (eq_set (op aconvc)) ctss ctss') - | _ => []) - else [] - - fun proper_mpat _ _ _ [] = false - | proper_mpat thy gen u cts = - let - val tps = (op ~~) (`gen (map Thm.term_of cts)) - fun some_match u = tps |> exists (fn (t', t) => - Pattern.matches thy (t', u) andalso not (t aconv u)) - in not (Term.exists_subterm some_match u) end - - val pat = SMT_Util.mk_const_pat @{theory} @{const_name pat} SMT_Util.destT1 - fun mk_pat ct = Thm.apply (SMT_Util.instT' ct pat) ct - - fun mk_clist T = - apply2 (Thm.cterm_of @{context}) (SMT_Util.symb_cons_const T, SMT_Util.symb_nil_const T) - fun mk_list (ccons, cnil) f cts = fold_rev (Thm.mk_binop ccons o f) cts cnil - val mk_pat_list = mk_list (mk_clist @{typ pattern}) - val mk_mpat_list = mk_list (mk_clist @{typ "pattern symb_list"}) - fun mk_trigger ctss = mk_mpat_list (mk_pat_list mk_pat) ctss - - val trigger_eq = mk_meta_eq @{lemma "p = trigger t p" by (simp add: trigger_def)} - - fun insert_trigger_conv [] ct = Conv.all_conv ct - | insert_trigger_conv ctss ct = - let - val (ctr, cp) = Thm.dest_binop (Thm.rhs_of trigger_eq) ||> rpair ct - val inst = map (apfst (dest_Var o Thm.term_of)) [cp, (ctr, mk_trigger ctss)] - in Thm.instantiate ([], inst) trigger_eq end - - fun infer_trigger_eq_conv outer_ctxt (ctxt, cvs) ct = - let - val (lhs, rhs) = dest_cond_eq ct - - val vs = map Thm.term_of cvs - val thy = Proof_Context.theory_of ctxt - - fun get_mpats ct = - if is_simp_lhs ctxt (Thm.term_of ct) then minimal_pats vs ct - else [] - val gen = Variable.export_terms ctxt outer_ctxt - val filter_mpats = filter (proper_mpat thy gen (Thm.term_of rhs)) - - in insert_trigger_conv (filter_mpats (get_mpats lhs)) ct end - - fun has_trigger (@{const trigger} $ _ $ _) 
= true - | has_trigger _ = false - - fun try_trigger_conv cv ct = - if SMT_Util.under_quant has_trigger (SMT_Util.term_of ct) then Conv.all_conv ct - else Conv.try_conv cv ct - - fun infer_trigger_conv ctxt = - if Config.get ctxt SMT_Config.infer_triggers then - try_trigger_conv (SMT_Util.under_quant_conv (infer_trigger_eq_conv ctxt) ctxt) - else Conv.all_conv -in - -fun trigger_conv ctxt = - SMT_Util.prop_conv (check_trigger_conv ctxt then_conv infer_trigger_conv ctxt) - -val setup_trigger = - fold SMT_Builtin.add_builtin_fun_ext'' - [@{const_name pat}, @{const_name nopat}, @{const_name trigger}] - -end - - -(** combined general normalizations **) - -fun gen_normalize1_conv ctxt = - atomize_conv ctxt then_conv - unfold_special_quants_conv ctxt then_conv - Thm.beta_conversion true then_conv - trigger_conv ctxt - -fun gen_normalize1 ctxt = - instantiate_elim #> - norm_def #> - Conv.fconv_rule (Thm.beta_conversion true then_conv Thm.eta_conversion) #> - Drule.forall_intr_vars #> - Conv.fconv_rule (gen_normalize1_conv ctxt) #> - (* Z3 4.3.1 silently normalizes "P --> Q --> R" to "P & Q --> R" *) - Raw_Simplifier.rewrite_rule ctxt @{thms HOL.imp_conjL[symmetric, THEN eq_reflection]} - -fun gen_norm1_safe ctxt (i, thm) = - (case try (gen_normalize1 ctxt) thm of - SOME thm' => SOME (i, thm') - | NONE => (drop_fact_warning ctxt thm; NONE)) - -fun gen_normalize ctxt iwthms = map_filter (gen_norm1_safe ctxt) iwthms - - -(* unfolding of definitions and theory-specific rewritings *) - -fun expand_head_conv cv ct = - (case Thm.term_of ct of - _ $ _ => - Conv.fun_conv (expand_head_conv cv) then_conv - Conv.try_conv (Thm.beta_conversion false) - | _ => cv) ct - - -(** rewrite bool case expressions as if expressions **) - -val case_bool_entry = (@{const_name "bool.case_bool"}, @{thm case_bool_if}) - -local - fun is_case_bool (Const (@{const_name "bool.case_bool"}, _)) = true - | is_case_bool _ = false - - fun unfold_conv _ = - SMT_Util.if_true_conv (is_case_bool o Term.head_of) - (expand_head_conv (Conv.rewr_conv @{thm case_bool_if})) -in - -fun rewrite_case_bool_conv ctxt = - SMT_Util.if_exists_conv is_case_bool (Conv.top_conv unfold_conv ctxt) - -val setup_case_bool = SMT_Builtin.add_builtin_fun_ext'' @{const_name "bool.case_bool"} - -end - - -(** unfold abs, min and max **) - -val abs_min_max_table = [ - (@{const_name min}, @{thm min_def_raw}), - (@{const_name max}, @{thm max_def_raw}), - (@{const_name abs}, @{thm abs_if_raw})] - -local - fun abs_min_max ctxt (Const (n, Type (@{type_name fun}, [T, _]))) = - (case AList.lookup (op =) abs_min_max_table n of - NONE => NONE - | SOME thm => if SMT_Builtin.is_builtin_typ_ext ctxt T then SOME thm else NONE) - | abs_min_max _ _ = NONE - - fun unfold_amm_conv ctxt ct = - (case abs_min_max ctxt (Term.head_of (Thm.term_of ct)) of - SOME thm => expand_head_conv (Conv.rewr_conv thm) - | NONE => Conv.all_conv) ct -in - -fun unfold_abs_min_max_conv ctxt = - SMT_Util.if_exists_conv (is_some o abs_min_max ctxt) (Conv.top_conv unfold_amm_conv ctxt) - -val setup_abs_min_max = fold (SMT_Builtin.add_builtin_fun_ext'' o fst) abs_min_max_table - -end - - -(** normalize numerals **) - -local - (* - rewrite Numeral1 into 1 - rewrite - 0 into 0 - *) - - fun is_irregular_number (Const (@{const_name numeral}, _) $ Const (@{const_name num.One}, _)) = - true - | is_irregular_number (Const (@{const_name uminus}, _) $ Const (@{const_name Groups.zero}, _)) = - true - | is_irregular_number _ = false - - fun is_strange_number ctxt t = is_irregular_number t andalso 
SMT_Builtin.is_builtin_num ctxt t - - val proper_num_ss = - simpset_of (put_simpset HOL_ss @{context} addsimps @{thms Num.numeral_One minus_zero}) - - fun norm_num_conv ctxt = - SMT_Util.if_conv (is_strange_number ctxt) (Simplifier.rewrite (put_simpset proper_num_ss ctxt)) - Conv.no_conv -in - -fun normalize_numerals_conv ctxt = - SMT_Util.if_exists_conv (is_strange_number ctxt) (Conv.top_sweep_conv norm_num_conv ctxt) - -end - - -(** combined unfoldings and rewritings **) - -fun unfold_conv ctxt = - rewrite_case_bool_conv ctxt then_conv - unfold_abs_min_max_conv ctxt then_conv - Thm.beta_conversion true - -fun unfold_polymorph ctxt = map (apsnd (Conv.fconv_rule (unfold_conv ctxt))) -fun unfold_monomorph ctxt = map (apsnd (Conv.fconv_rule (normalize_numerals_conv ctxt))) - - -(* overall normalization *) - -fun burrow_ids f ithms = - let - val (is, thms) = split_list ithms - val (thms', extra_thms) = f thms - in (is ~~ thms') @ map (pair ~1) extra_thms end - -type extra_norm = Proof.context -> thm list * thm list -> thm list * thm list - -structure Extra_Norms = Generic_Data -( - type T = extra_norm SMT_Util.dict - val empty = [] - val extend = I - fun merge data = SMT_Util.dict_merge fst data -) - -fun add_extra_norm (cs, norm) = Extra_Norms.map (SMT_Util.dict_update (cs, norm)) - -fun apply_extra_norms ctxt ithms = - let - val cs = SMT_Config.solver_class_of ctxt - val es = SMT_Util.dict_lookup (Extra_Norms.get (Context.Proof ctxt)) cs - in burrow_ids (fold (fn e => e ctxt) es o rpair []) ithms end - -local - val ignored = member (op =) [@{const_name All}, @{const_name Ex}, - @{const_name Let}, @{const_name If}, @{const_name HOL.eq}] - - val schematic_consts_of = - let - fun collect (@{const trigger} $ p $ t) = collect_trigger p #> collect t - | collect (t $ u) = collect t #> collect u - | collect (Abs (_, _, t)) = collect t - | collect (t as Const (n, _)) = - if not (ignored n) then Monomorph.add_schematic_consts_of t else I - | collect _ = I - and collect_trigger t = - let val dest = these o try SMT_Util.dest_symb_list - in fold (fold collect_pat o dest) (dest t) end - and collect_pat (Const (@{const_name pat}, _) $ t) = collect t - | collect_pat (Const (@{const_name nopat}, _) $ t) = collect t - | collect_pat _ = I - in (fn t => collect t Symtab.empty) end -in - -fun monomorph ctxt xthms = - let val (xs, thms) = split_list xthms - in - map (pair 1) thms - |> Monomorph.monomorph schematic_consts_of ctxt - |> maps (uncurry (map o pair)) o map2 pair xs o map (map snd) - end - -end - -fun normalize ctxt wthms = - wthms - |> map_index I - |> gen_normalize ctxt - |> unfold_polymorph ctxt - |> monomorph ctxt - |> unfold_monomorph ctxt - |> apply_extra_norms ctxt - -val _ = Theory.setup (Context.theory_map ( - setup_atomize #> - setup_unfolded_quants #> - setup_trigger #> - setup_case_bool #> - setup_abs_min_max)) - -end; diff --git a/src/test-gen/SMT/smt_real.ML b/src/test-gen/SMT/smt_real.ML deleted file mode 100644 index 8e08d4c..0000000 --- a/src/test-gen/SMT/smt_real.ML +++ /dev/null @@ -1,115 +0,0 @@ -(* Title: HOL/Tools/SMT/smt_real.ML - Author: Sascha Boehme, TU Muenchen - -SMT setup for reals. 
-*) - -structure SMT_Real: sig end = -struct - - -(* SMT-LIB logic *) - -fun smtlib_logic ts = - if exists (Term.exists_type (Term.exists_subtype (equal @{typ real}))) ts - then SOME "AUFLIRA" - else NONE - - -(* SMT-LIB and Z3 built-ins *) - -local - fun real_num _ i = SOME (string_of_int i ^ ".0") - - fun is_linear [t] = SMT_Util.is_number t - | is_linear [t, u] = SMT_Util.is_number t orelse SMT_Util.is_number u - | is_linear _ = false - - fun mk_times ts = Term.list_comb (@{const times (real)}, ts) - - fun times _ _ ts = if is_linear ts then SOME ("*", 2, ts, mk_times) else NONE -in - -val setup_builtins = - SMT_Builtin.add_builtin_typ SMTLIB_Interface.smtlibC - (@{typ real}, K (SOME "Real"), real_num) #> - fold (SMT_Builtin.add_builtin_fun' SMTLIB_Interface.smtlibC) [ - (@{const less (real)}, "<"), - (@{const less_eq (real)}, "<="), - (@{const uminus (real)}, "-"), - (@{const plus (real)}, "+"), - (@{const minus (real)}, "-") ] #> - SMT_Builtin.add_builtin_fun SMTLIB_Interface.smtlibC - (Term.dest_Const @{const times (real)}, times) #> - SMT_Builtin.add_builtin_fun' Z3_Interface.smtlib_z3C - (@{const times (real)}, "*") #> - SMT_Builtin.add_builtin_fun' Z3_Interface.smtlib_z3C - (@{const divide (real)}, "/") - -end - - -(* Z3 constructors *) - -local - fun z3_mk_builtin_typ (Z3_Interface.Sym ("Real", _)) = SOME @{typ real} - | z3_mk_builtin_typ (Z3_Interface.Sym ("real", _)) = SOME @{typ real} - (*FIXME: delete*) - | z3_mk_builtin_typ _ = NONE - - fun z3_mk_builtin_num _ i T = - if T = @{typ real} then SOME (Numeral.mk_cnumber @{ctyp real} i) - else NONE - - fun mk_nary _ cu [] = cu - | mk_nary ct _ cts = uncurry (fold_rev (Thm.mk_binop ct)) (split_last cts) - - val mk_uminus = Thm.apply (Thm.cterm_of @{context} @{const uminus (real)}) - val add = Thm.cterm_of @{context} @{const plus (real)} - val real0 = Numeral.mk_cnumber @{ctyp real} 0 - val mk_sub = Thm.mk_binop (Thm.cterm_of @{context} @{const minus (real)}) - val mk_mul = Thm.mk_binop (Thm.cterm_of @{context} @{const times (real)}) - val mk_div = Thm.mk_binop (Thm.cterm_of @{context} @{const divide (real)}) - val mk_lt = Thm.mk_binop (Thm.cterm_of @{context} @{const less (real)}) - val mk_le = Thm.mk_binop (Thm.cterm_of @{context} @{const less_eq (real)}) - - fun z3_mk_builtin_fun (Z3_Interface.Sym ("-", _)) [ct] = SOME (mk_uminus ct) - | z3_mk_builtin_fun (Z3_Interface.Sym ("+", _)) cts = SOME (mk_nary add real0 cts) - | z3_mk_builtin_fun (Z3_Interface.Sym ("-", _)) [ct, cu] = SOME (mk_sub ct cu) - | z3_mk_builtin_fun (Z3_Interface.Sym ("*", _)) [ct, cu] = SOME (mk_mul ct cu) - | z3_mk_builtin_fun (Z3_Interface.Sym ("/", _)) [ct, cu] = SOME (mk_div ct cu) - | z3_mk_builtin_fun (Z3_Interface.Sym ("<", _)) [ct, cu] = SOME (mk_lt ct cu) - | z3_mk_builtin_fun (Z3_Interface.Sym ("<=", _)) [ct, cu] = SOME (mk_le ct cu) - | z3_mk_builtin_fun (Z3_Interface.Sym (">", _)) [ct, cu] = SOME (mk_lt cu ct) - | z3_mk_builtin_fun (Z3_Interface.Sym (">=", _)) [ct, cu] = SOME (mk_le cu ct) - | z3_mk_builtin_fun _ _ = NONE -in - -val z3_mk_builtins = { - mk_builtin_typ = z3_mk_builtin_typ, - mk_builtin_num = z3_mk_builtin_num, - mk_builtin_fun = (fn _ => fn sym => fn cts => - (case try (Thm.typ_of_cterm o hd) cts of - SOME @{typ real} => z3_mk_builtin_fun sym cts - | _ => NONE)) } - -end - - -(* Z3 proof replay *) - -val real_linarith_proc = - Simplifier.make_simproc @{context} "fast_real_arith" - {lhss = [@{term "(m::real) < n"}, @{term "(m::real) \ n"}, @{term "(m::real) = n"}], - proc = K Lin_Arith.simproc} - - -(* setup *) - -val _ = 
Theory.setup (Context.theory_map ( - SMTLIB_Interface.add_logic (10, smtlib_logic) #> - setup_builtins #> - Z3_Interface.add_mk_builtins z3_mk_builtins #> - Z3_Replay_Util.add_simproc real_linarith_proc)) - -end; diff --git a/src/test-gen/SMT/smt_solver.ML b/src/test-gen/SMT/smt_solver.ML deleted file mode 100644 index 7ff85dd..0000000 --- a/src/test-gen/SMT/smt_solver.ML +++ /dev/null @@ -1,307 +0,0 @@ -(* Title: HOL/Tools/SMT/smt_solver.ML - Author: Sascha Boehme, TU Muenchen - -SMT solvers registry and SMT tactic. -*) - -signature SMT_SOLVER = -sig - (*configuration*) - datatype outcome = Unsat | Sat | Unknown - - type parsed_proof = - {outcome: SMT_Failure.failure option, - fact_ids: (int * ((string * ATP_Problem_Generate.stature) * thm)) list option, - atp_proof: unit -> (term, string) ATP_Proof.atp_step list} - - type solver_config = - {name: string, - class: Proof.context -> SMT_Util.class, - avail: unit -> bool, - command: unit -> string list, - options: Proof.context -> string list, - smt_options: (string * string) list, - default_max_relevant: int, - outcome: string -> string list -> outcome * string list, - parse_proof: (Proof.context -> SMT_Translate.replay_data -> - ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> - parsed_proof) option, - replay: (Proof.context -> SMT_Translate.replay_data -> string list -> thm) option} - - (*registry*) - val add_solver: solver_config -> theory -> theory - val default_max_relevant: Proof.context -> string -> int - - (*filter*) - val smt_filter: Proof.context -> thm -> ((string * ATP_Problem_Generate.stature) * thm) list -> - int -> Time.time -> parsed_proof - - (*tactic*) - val smt_tac: Proof.context -> thm list -> int -> tactic - val smt_tac': Proof.context -> thm list -> int -> tactic -end; - -structure SMT_Solver: SMT_SOLVER = -struct - -(* interface to external solvers *) - -local - -fun make_command command options problem_path proof_path = - "(exec 2>&1;" :: map Bash.string (command () @ options) @ - [File.bash_path problem_path, ")", ">", File.bash_path proof_path] - |> space_implode " " - -fun with_trace ctxt msg f x = - let val _ = SMT_Config.trace_msg ctxt (fn () => msg) () - in f x end - -fun run ctxt name mk_cmd input = - (case SMT_Config.certificates_of ctxt of - NONE => - if not (SMT_Config.is_available ctxt name) then - error ("The SMT solver " ^ quote name ^ " is not installed") - else if Config.get ctxt SMT_Config.debug_files = "" then - with_trace ctxt ("Invoking SMT solver " ^ quote name ^ " ...") (Cache_IO.run mk_cmd) input - else - let - val base_path = Path.explode (Config.get ctxt SMT_Config.debug_files) - val in_path = Path.ext "smt_in" base_path - val out_path = Path.ext "smt_out" base_path - in Cache_IO.raw_run mk_cmd input in_path out_path end - | SOME certs => - (case Cache_IO.lookup certs input of - (NONE, key) => - if Config.get ctxt SMT_Config.read_only_certificates then - error ("Bad certificate cache: missing certificate") - else - Cache_IO.run_and_cache certs key mk_cmd input - | (SOME output, _) => - with_trace ctxt ("Using cached certificate from " ^ - Path.print (Cache_IO.cache_path_of certs) ^ " ...") I output)) - -(* Z3 returns 1 if "get-proof" or "get-model" fails. veriT returns 255. 
*) -val normal_return_codes = [0, 1, 255] - -fun run_solver ctxt name mk_cmd input = - let - fun pretty tag lines = Pretty.string_of (Pretty.big_list tag (map Pretty.str lines)) - - val _ = SMT_Config.trace_msg ctxt (pretty "Problem:" o split_lines) input - - val {redirected_output = res, output = err, return_code} = - SMT_Config.with_timeout ctxt (run ctxt name mk_cmd) input - val _ = SMT_Config.trace_msg ctxt (pretty "Solver:") err - - val output = fst (take_suffix (equal "") res) - val _ = SMT_Config.trace_msg ctxt (pretty "Result:") output - - val _ = member (op =) normal_return_codes return_code orelse - raise SMT_Failure.SMT (SMT_Failure.Abnormal_Termination return_code) - in output end - -fun trace_assms ctxt = - SMT_Config.trace_msg ctxt (Pretty.string_of o - Pretty.big_list "Assertions:" o map (Thm.pretty_thm ctxt o snd)) - -fun trace_replay_data ({context = ctxt, typs, terms, ...} : SMT_Translate.replay_data) = - let - fun pretty_eq n p = Pretty.block [Pretty.str n, Pretty.str " = ", p] - fun p_typ (n, T) = pretty_eq n (Syntax.pretty_typ ctxt T) - fun p_term (n, t) = pretty_eq n (Syntax.pretty_term ctxt t) - in - SMT_Config.trace_msg ctxt (fn () => - Pretty.string_of (Pretty.big_list "Names:" [ - Pretty.big_list "sorts:" (map p_typ (Symtab.dest typs)), - Pretty.big_list "functions:" (map p_term (Symtab.dest terms))])) () - end - -in - -fun invoke name command smt_options ithms ctxt = - let - val options = SMT_Config.solver_options_of ctxt - val comments = [space_implode " " options] - - val (str, replay_data as {context = ctxt', ...}) = - ithms - |> tap (trace_assms ctxt) - |> SMT_Translate.translate ctxt smt_options comments - ||> tap trace_replay_data - in (run_solver ctxt' name (make_command command options) str, replay_data) end - -end - - -(* configuration *) - -datatype outcome = Unsat | Sat | Unknown - -type parsed_proof = - {outcome: SMT_Failure.failure option, - fact_ids: (int * ((string * ATP_Problem_Generate.stature) * thm)) list option, - atp_proof: unit -> (term, string) ATP_Proof.atp_step list} - -type solver_config = - {name: string, - class: Proof.context -> SMT_Util.class, - avail: unit -> bool, - command: unit -> string list, - options: Proof.context -> string list, - smt_options: (string * string) list, - default_max_relevant: int, - outcome: string -> string list -> outcome * string list, - parse_proof: (Proof.context -> SMT_Translate.replay_data -> - ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> - parsed_proof) option, - replay: (Proof.context -> SMT_Translate.replay_data -> string list -> thm) option} - - -(* check well-sortedness *) - -val has_topsort = Term.exists_type (Term.exists_subtype (fn - TFree (_, []) => true - | TVar (_, []) => true - | _ => false)) - -(* top sorts cause problems with atomization *) -fun check_topsort ctxt thm = - if has_topsort (Thm.prop_of thm) then (SMT_Normalize.drop_fact_warning ctxt thm; TrueI) else thm - - -(* registry *) - -type solver_info = { - command: unit -> string list, - smt_options: (string * string) list, - default_max_relevant: int, - parse_proof: Proof.context -> SMT_Translate.replay_data -> - ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> - parsed_proof, - replay: Proof.context -> SMT_Translate.replay_data -> string list -> thm} - -structure Solvers = Generic_Data -( - type T = solver_info Symtab.table - val empty = Symtab.empty - val extend = I - fun merge data = Symtab.merge (K true) data -) - -local - fun parse_proof 
outcome parse_proof0 outer_ctxt replay_data xfacts prems concl output = - (case outcome output of - (Unsat, lines) => - (case parse_proof0 of - SOME pp => pp outer_ctxt replay_data xfacts prems concl lines - | NONE => {outcome = NONE, fact_ids = NONE, atp_proof = K []}) - | (result, _) => raise SMT_Failure.SMT (SMT_Failure.Counterexample (result = Sat))) - - fun replay outcome replay0 oracle outer_ctxt - (replay_data as {context = ctxt, ...} : SMT_Translate.replay_data) output = - (case outcome output of - (Unsat, lines) => - if not (Config.get ctxt SMT_Config.oracle) andalso is_some replay0 - then the replay0 outer_ctxt replay_data lines - else oracle () - | (result, _) => raise SMT_Failure.SMT (SMT_Failure.Counterexample (result = Sat))) - - val cfalse = Thm.cterm_of @{context} @{prop False} -in - -fun add_solver ({name, class, avail, command, options, smt_options, default_max_relevant, outcome, - parse_proof = parse_proof0, replay = replay0} : solver_config) = - let - fun solver oracle = { - command = command, - smt_options = smt_options, - default_max_relevant = default_max_relevant, - parse_proof = parse_proof (outcome name) parse_proof0, - replay = replay (outcome name) replay0 oracle} - - val info = {name = name, class = class, avail = avail, options = options} - in - Thm.add_oracle (Binding.name name, K cfalse) #-> (fn (_, oracle) => - Context.theory_map (Solvers.map (Symtab.update_new (name, solver oracle)))) #> - Context.theory_map (SMT_Config.add_solver info) - end - -end - -fun get_info ctxt name = the (Symtab.lookup (Solvers.get (Context.Proof ctxt)) name) - -fun name_and_info_of ctxt = - let val name = SMT_Config.solver_of ctxt - in (name, get_info ctxt name) end - -val default_max_relevant = #default_max_relevant oo get_info - -fun apply_solver_and_replay ctxt thms0 = - let - val thms = map (check_topsort ctxt) thms0 - val (name, {command, smt_options, replay, ...}) = name_and_info_of ctxt - val (output, replay_data) = - invoke name command smt_options (SMT_Normalize.normalize ctxt thms) ctxt - in replay ctxt replay_data output end - - -(* filter *) - -fun smt_filter ctxt0 goal xfacts i time_limit = - let - val ctxt = ctxt0 |> Config.put SMT_Config.timeout (Time.toReal time_limit) - - val ({context = ctxt, prems, concl, ...}, _) = Subgoal.focus ctxt i NONE goal - fun negate ct = Thm.dest_comb ct ||> Thm.apply @{cterm Not} |-> Thm.apply - val cprop = - (case try negate (Thm.rhs_of (SMT_Normalize.atomize_conv ctxt concl)) of - SOME ct => ct - | NONE => raise SMT_Failure.SMT (SMT_Failure.Other_Failure "cannot atomize goal")) - - val conjecture = Thm.assume cprop - val facts = map snd xfacts - val thms = conjecture :: prems @ facts - val thms' = map (check_topsort ctxt) thms - - val (name, {command, smt_options, parse_proof, ...}) = name_and_info_of ctxt - val (output, replay_data) = - invoke name command smt_options (SMT_Normalize.normalize ctxt thms') ctxt - in - parse_proof ctxt replay_data xfacts (map Thm.prop_of prems) (Thm.term_of concl) output - end - handle SMT_Failure.SMT fail => {outcome = SOME fail, fact_ids = NONE, atp_proof = K []} - - -(* SMT tactic *) - -local - fun str_of ctxt fail = - "Solver " ^ SMT_Config.solver_of ctxt ^ ": " ^ SMT_Failure.string_of_failure fail - - fun safe_solve ctxt facts = SOME (apply_solver_and_replay ctxt facts) - handle - SMT_Failure.SMT (fail as SMT_Failure.Counterexample _) => - (SMT_Config.verbose_msg ctxt (str_of ctxt) fail; NONE) - | SMT_Failure.SMT (fail as SMT_Failure.Time_Out) => - error ("SMT: Solver " ^ quote 
(SMT_Config.solver_of ctxt) ^ ": " ^ - SMT_Failure.string_of_failure fail ^ " (setting the " ^ - "configuration option " ^ quote (Config.name_of SMT_Config.timeout) ^ " might help)") - | SMT_Failure.SMT fail => error (str_of ctxt fail) - - fun resolve ctxt (SOME thm) = resolve_tac ctxt [thm] 1 - | resolve _ NONE = no_tac - - fun tac prove ctxt rules = - CONVERSION (SMT_Normalize.atomize_conv ctxt) - THEN' resolve_tac ctxt @{thms ccontr} - THEN' SUBPROOF (fn {context = ctxt', prems, ...} => - resolve ctxt' (prove ctxt' (rules @ prems))) ctxt -in - -val smt_tac = tac safe_solve -val smt_tac' = tac (SOME oo apply_solver_and_replay) - -end - -end; diff --git a/src/test-gen/SMT/smt_systems.ML b/src/test-gen/SMT/smt_systems.ML deleted file mode 100644 index b7581cb..0000000 --- a/src/test-gen/SMT/smt_systems.ML +++ /dev/null @@ -1,154 +0,0 @@ -(* Title: HOL/Tools/SMT/smt_systems.ML - Author: Sascha Boehme, TU Muenchen - -Setup SMT solvers. -*) - -signature SMT_SYSTEMS = -sig - val cvc4_extensions: bool Config.T - val z3_extensions: bool Config.T -end; - -structure SMT_Systems: SMT_SYSTEMS = -struct - -(* helper functions *) - -fun make_avail name () = getenv (name ^ "_SOLVER") <> "" - -fun make_command name () = [getenv (name ^ "_SOLVER")] - -fun outcome_of unsat sat unknown solver_name line = - if String.isPrefix unsat line then SMT_Solver.Unsat - else if String.isPrefix sat line then SMT_Solver.Sat - else if String.isPrefix unknown line then SMT_Solver.Unknown - else raise SMT_Failure.SMT (SMT_Failure.Other_Failure ("Solver " ^ quote solver_name ^ - " failed -- enable tracing using the " ^ quote (Config.name_of SMT_Config.trace) ^ - " option for details")) - -fun is_blank_or_error_line "" = true - | is_blank_or_error_line s = String.isPrefix "(error " s - -fun on_first_line test_outcome solver_name lines = - let - val split_first = (fn [] => ("", []) | l :: ls => (l, ls)) - val (l, ls) = split_first (snd (take_prefix is_blank_or_error_line lines)) - in (test_outcome solver_name l, ls) end - -fun on_first_non_unsupported_line test_outcome solver_name lines = - on_first_line test_outcome solver_name (filter (curry (op <>) "unsupported") lines) - -(* CVC3 *) - -local - fun cvc3_options ctxt = [ - "-seed", string_of_int (Config.get ctxt SMT_Config.random_seed), - "-lang", "smt2", - "-timeout", string_of_int (Real.ceil (Config.get ctxt SMT_Config.timeout))] -in - -val cvc3: SMT_Solver.solver_config = { - name = "cvc3", - class = K SMTLIB_Interface.smtlibC, - avail = make_avail "CVC3", - command = make_command "CVC3", - options = cvc3_options, - smt_options = [], - default_max_relevant = 400 (* FUDGE *), - outcome = on_first_line (outcome_of "unsat" "sat" "unknown"), - parse_proof = NONE, - replay = NONE } - -end - -(* CVC4 *) - -val cvc4_extensions = Attrib.setup_config_bool @{binding cvc4_extensions} (K false) - -local - fun cvc4_options ctxt = [ - "--no-statistics", - "--random-seed=" ^ string_of_int (Config.get ctxt SMT_Config.random_seed), - "--lang=smt2", - "--continued-execution", - "--tlimit", string_of_int (Real.ceil (1000.0 * Config.get ctxt SMT_Config.timeout))] - - fun select_class ctxt = - if Config.get ctxt cvc4_extensions then CVC4_Interface.smtlib_cvc4C - else SMTLIB_Interface.smtlibC -in - -val cvc4: SMT_Solver.solver_config = { - name = "cvc4", - class = select_class, - avail = make_avail "CVC4", - command = make_command "CVC4", - options = cvc4_options, - smt_options = [(":produce-unsat-cores", "true")], - default_max_relevant = 400 (* FUDGE *), - outcome = on_first_line 
(outcome_of "unsat" "sat" "unknown"), - parse_proof = SOME (K CVC4_Proof_Parse.parse_proof), - replay = NONE } - -end - -(* veriT *) - -val veriT: SMT_Solver.solver_config = { - name = "verit", - class = K SMTLIB_Interface.smtlibC, - avail = make_avail "VERIT", - command = make_command "VERIT", - options = (fn ctxt => [ - "--proof-version=1", - "--proof-prune", - "--proof-merge", - "--disable-print-success", - "--disable-banner", - "--max-time=" ^ string_of_int (Real.ceil (Config.get ctxt SMT_Config.timeout))]), - smt_options = [(":produce-proofs", "true")], - default_max_relevant = 200 (* FUDGE *), - outcome = on_first_non_unsupported_line (outcome_of "unsat" "sat" "unknown"), - parse_proof = SOME (K VeriT_Proof_Parse.parse_proof), - replay = NONE } - -(* Z3 *) - -val z3_extensions = Attrib.setup_config_bool @{binding z3_extensions} (K false) - -local - fun z3_options ctxt = - ["smt.random_seed=" ^ string_of_int (Config.get ctxt SMT_Config.random_seed), - "smt.refine_inj_axioms=false", - "-T:" ^ string_of_int (Real.ceil (Config.get ctxt SMT_Config.timeout)), - "-smt2"] - - fun select_class ctxt = - if Config.get ctxt z3_extensions then Z3_Interface.smtlib_z3C else SMTLIB_Interface.smtlibC -in - -val z3: SMT_Solver.solver_config = { - name = "z3", - class = select_class, - avail = make_avail "Z3", - command = make_command "Z3", - options = z3_options, - smt_options = [(":produce-proofs", "true")], - default_max_relevant = 350 (* FUDGE *), - outcome = on_first_line (outcome_of "unsat" "sat" "unknown"), - parse_proof = SOME Z3_Replay.parse_proof, - replay = SOME Z3_Replay.replay } - -end - - -(* overall setup *) - -val _ = Theory.setup ( - SMT_Solver.add_solver cvc3 #> - SMT_Solver.add_solver cvc4 #> - SMT_Solver.add_solver veriT #> - SMT_Solver.add_solver z3) - -end; diff --git a/src/test-gen/SMT/smt_translate.ML b/src/test-gen/SMT/smt_translate.ML deleted file mode 100644 index 9e9bb6a..0000000 --- a/src/test-gen/SMT/smt_translate.ML +++ /dev/null @@ -1,527 +0,0 @@ -(* Title: HOL/Tools/SMT/smt_translate.ML - Author: Sascha Boehme, TU Muenchen - -Translate theorems into an SMT intermediate format and serialize them. 
-*) - -signature SMT_TRANSLATE = -sig - (*intermediate term structure*) - datatype squant = SForall | SExists - datatype 'a spattern = SPat of 'a list | SNoPat of 'a list - datatype sterm = - SVar of int | - SApp of string * sterm list | - SLet of string * sterm * sterm | - SQua of squant * string list * sterm spattern list * sterm - - (*translation configuration*) - type sign = { - logic: string, - sorts: string list, - dtyps: (BNF_Util.fp_kind * (string * (string * (string * string) list) list)) list, - funcs: (string * (string list * string)) list } - type config = { - logic: term list -> string, - fp_kinds: BNF_Util.fp_kind list, - serialize: (string * string) list -> string list -> sign -> sterm list -> string } - type replay_data = { - context: Proof.context, - typs: typ Symtab.table, - terms: term Symtab.table, - ll_defs: term list, - rewrite_rules: thm list, - assms: (int * thm) list } - - (*translation*) - val add_config: SMT_Util.class * (Proof.context -> config) -> Context.generic -> Context.generic - val translate: Proof.context -> (string * string) list -> string list -> (int * thm) list -> - string * replay_data -end; - -structure SMT_Translate: SMT_TRANSLATE = -struct - - -(* intermediate term structure *) - -datatype squant = SForall | SExists - -datatype 'a spattern = SPat of 'a list | SNoPat of 'a list - -datatype sterm = - SVar of int | - SApp of string * sterm list | - SLet of string * sterm * sterm | - SQua of squant * string list * sterm spattern list * sterm - - -(* translation configuration *) - -type sign = { - logic: string, - sorts: string list, - dtyps: (BNF_Util.fp_kind * (string * (string * (string * string) list) list)) list, - funcs: (string * (string list * string)) list } - -type config = { - logic: term list -> string, - fp_kinds: BNF_Util.fp_kind list, - serialize: (string * string) list -> string list -> sign -> sterm list -> string } - -type replay_data = { - context: Proof.context, - typs: typ Symtab.table, - terms: term Symtab.table, - ll_defs: term list, - rewrite_rules: thm list, - assms: (int * thm) list } - - -(* translation context *) - -fun add_components_of_typ (Type (s, Ts)) = - cons (Long_Name.base_name s) #> fold_rev add_components_of_typ Ts - | add_components_of_typ (TFree (s, _)) = cons (perhaps (try (unprefix "'")) s) - | add_components_of_typ _ = I; - -fun suggested_name_of_typ T = space_implode "_" (add_components_of_typ T []); - -fun suggested_name_of_term (Const (s, _)) = Long_Name.base_name s - | suggested_name_of_term (Free (s, _)) = s - | suggested_name_of_term _ = Name.uu - -val empty_tr_context = (Name.context, Typtab.empty, Termtab.empty) -val safe_suffix = "$" - -fun add_typ T proper (cx as (names, typs, terms)) = - (case Typtab.lookup typs T of - SOME (name, _) => (name, cx) - | NONE => - let - val sugg = Name.desymbolize (SOME true) (suggested_name_of_typ T) ^ safe_suffix - val (name, names') = Name.variant sugg names - val typs' = Typtab.update (T, (name, proper)) typs - in (name, (names', typs', terms)) end) - -fun add_fun t sort (cx as (names, typs, terms)) = - (case Termtab.lookup terms t of - SOME (name, _) => (name, cx) - | NONE => - let - val sugg = Name.desymbolize (SOME false) (suggested_name_of_term t) ^ safe_suffix - val (name, names') = Name.variant sugg names - val terms' = Termtab.update (t, (name, sort)) terms - in (name, (names', typs, terms')) end) - -fun sign_of logic dtyps (_, typs, terms) = { - logic = logic, - sorts = Typtab.fold (fn (_, (n, true)) => cons n | _ => I) typs [], - dtyps = dtyps, - funcs = 
Termtab.fold (fn (_, (n, SOME ss)) => cons (n,ss) | _ => I) terms []} - -fun replay_data_of ctxt ll_defs rules assms (_, typs, terms) = - let - fun add_typ (T, (n, _)) = Symtab.update (n, T) - val typs' = Typtab.fold add_typ typs Symtab.empty - - fun add_fun (t, (n, _)) = Symtab.update (n, t) - val terms' = Termtab.fold add_fun terms Symtab.empty - in - {context = ctxt, typs = typs', terms = terms', ll_defs = ll_defs, rewrite_rules = rules, - assms = assms} - end - - -(* preprocessing *) - -(** (co)datatype declarations **) - -fun collect_co_datatypes fp_kinds (tr_context, ctxt) ts = - let - val (fp_decls, ctxt') = - ([], ctxt) - |> fold (Term.fold_types (SMT_Datatypes.add_decls fp_kinds)) ts - |>> flat - - fun is_decl_typ T = exists (equal T o fst o snd) fp_decls - - fun add_typ' T proper = - (case SMT_Builtin.dest_builtin_typ ctxt' T of - SOME n => pair n - | NONE => add_typ T proper) - - fun tr_select sel = - let val T = Term.range_type (Term.fastype_of sel) - in add_fun sel NONE ##>> add_typ' T (not (is_decl_typ T)) end - fun tr_constr (constr, selects) = - add_fun constr NONE ##>> fold_map tr_select selects - fun tr_typ (fp, (T, cases)) = - add_typ' T false ##>> fold_map tr_constr cases #>> pair fp - - val (fp_decls', tr_context') = fold_map tr_typ fp_decls tr_context - - fun add (constr, selects) = - Termtab.update (constr, length selects) #> - fold (Termtab.update o rpair 1) selects - - val funcs = fold (fold add o snd o snd) fp_decls Termtab.empty - - in ((funcs, fp_decls', tr_context', ctxt'), ts) end - (* FIXME: also return necessary (co)datatype theorems *) - - -(** eta-expand quantifiers, let expressions and built-ins *) - -local - fun eta f T t = Abs (Name.uu, T, f (Term.incr_boundvars 1 t $ Bound 0)) - - fun exp f T = eta f (Term.domain_type (Term.domain_type T)) - - fun exp2 T q = - let val U = Term.domain_type T - in Abs (Name.uu, U, q $ eta I (Term.domain_type U) (Bound 0)) end - - fun expf k i T t = - let val Ts = drop i (fst (SMT_Util.dest_funT k T)) - in - Term.incr_boundvars (length Ts) t - |> fold_rev (fn i => fn u => u $ Bound i) (0 upto length Ts - 1) - |> fold_rev (fn T => fn u => Abs (Name.uu, T, u)) Ts - end -in - -fun eta_expand ctxt funcs = - let - fun exp_func t T ts = - (case Termtab.lookup funcs t of - SOME k => Term.list_comb (t, ts) |> k <> length ts ? 
expf k (length ts) T - | NONE => Term.list_comb (t, ts)) - - fun expand ((q as Const (@{const_name All}, _)) $ Abs a) = q $ abs_expand a - | expand ((q as Const (@{const_name All}, T)) $ t) = q $ exp expand T t - | expand (q as Const (@{const_name All}, T)) = exp2 T q - | expand ((q as Const (@{const_name Ex}, _)) $ Abs a) = q $ abs_expand a - | expand ((q as Const (@{const_name Ex}, T)) $ t) = q $ exp expand T t - | expand (q as Const (@{const_name Ex}, T)) = exp2 T q - | expand (Const (@{const_name Let}, _) $ t $ u) = expand (Term.betapply (u, t)) - | expand (Const (@{const_name Let}, T) $ t) = - let val U = Term.domain_type (Term.range_type T) - in Abs (Name.uu, U, Bound 0 $ Term.incr_boundvars 1 t) end - | expand (Const (@{const_name Let}, T)) = - let val U = Term.domain_type (Term.range_type T) - in Abs (Name.uu, Term.domain_type T, Abs (Name.uu, U, Bound 0 $ Bound 1)) end - | expand t = - (case Term.strip_comb t of - (u as Const (c as (_, T)), ts) => - (case SMT_Builtin.dest_builtin ctxt c ts of - SOME (_, k, us, mk) => - if k = length us then mk (map expand us) - else if k < length us then chop k (map expand us) |>> mk |> Term.list_comb - else expf k (length ts) T (mk (map expand us)) - | NONE => exp_func u T (map expand ts)) - | (u as Free (_, T), ts) => exp_func u T (map expand ts) - | (Abs a, ts) => Term.list_comb (abs_expand a, map expand ts) - | (u, ts) => Term.list_comb (u, map expand ts)) - - and abs_expand (n, T, t) = Abs (n, T, expand t) - - in map expand end - -end - - -(** introduce explicit applications **) - -local - (* - Make application explicit for functions with varying number of arguments. - *) - - fun add t i = apfst (Termtab.map_default (t, i) (Integer.min i)) - fun add_type T = apsnd (Typtab.update (T, ())) - - fun min_arities t = - (case Term.strip_comb t of - (u as Const _, ts) => add u (length ts) #> fold min_arities ts - | (u as Free _, ts) => add u (length ts) #> fold min_arities ts - | (Abs (_, T, u), ts) => (can dest_funT T ? 
add_type T) #> min_arities u #> fold min_arities ts - | (_, ts) => fold min_arities ts) - - fun minimize types t i = - let - fun find_min j [] _ = j - | find_min j (U :: Us) T = - if Typtab.defined types T then j else find_min (j + 1) Us (U --> T) - - val (Ts, T) = Term.strip_type (Term.type_of t) - in find_min 0 (take i (rev Ts)) T end - - fun app u (t, T) = (Const (@{const_name fun_app}, T --> T) $ t $ u, Term.range_type T) - - fun apply i t T ts = - let - val (ts1, ts2) = chop i ts - val (_, U) = SMT_Util.dest_funT i T - in fst (fold app ts2 (Term.list_comb (t, ts1), U)) end -in - -fun intro_explicit_application ctxt funcs ts = - let - val (arities, types) = fold min_arities ts (Termtab.empty, Typtab.empty) - val arities' = Termtab.map (minimize types) arities (* FIXME: highly suspicious *) - - fun app_func t T ts = - if is_some (Termtab.lookup funcs t) then Term.list_comb (t, ts) - else apply (the (Termtab.lookup arities' t)) t T ts - - fun in_list T f t = SMT_Util.mk_symb_list T (map f (SMT_Util.dest_symb_list t)) - - fun traverse Ts t = - (case Term.strip_comb t of - (q as Const (@{const_name All}, _), [Abs (x, T, u)]) => - q $ Abs (x, T, in_trigger (T :: Ts) u) - | (q as Const (@{const_name Ex}, _), [Abs (x, T, u)]) => - q $ Abs (x, T, in_trigger (T :: Ts) u) - | (q as Const (@{const_name Let}, _), [u1, u2 as Abs _]) => - q $ traverse Ts u1 $ traverse Ts u2 - | (u as Const (c as (_, T)), ts) => - (case SMT_Builtin.dest_builtin ctxt c ts of - SOME (_, k, us, mk) => - let - val (ts1, ts2) = chop k (map (traverse Ts) us) - val U = Term.strip_type T |>> snd o chop k |> (op --->) - in apply 0 (mk ts1) U ts2 end - | NONE => app_func u T (map (traverse Ts) ts)) - | (u as Free (_, T), ts) => app_func u T (map (traverse Ts) ts) - | (u as Bound i, ts) => apply 0 u (nth Ts i) (map (traverse Ts) ts) - | (Abs (n, T, u), ts) => traverses Ts (Abs (n, T, traverse (T::Ts) u)) ts - | (u, ts) => traverses Ts u ts) - and in_trigger Ts ((c as @{const trigger}) $ p $ t) = c $ in_pats Ts p $ traverse Ts t - | in_trigger Ts t = traverse Ts t - and in_pats Ts ps = - in_list @{typ "pattern symb_list"} (in_list @{typ pattern} (in_pat Ts)) ps - and in_pat Ts ((p as Const (@{const_name pat}, _)) $ t) = p $ traverse Ts t - | in_pat Ts ((p as Const (@{const_name nopat}, _)) $ t) = p $ traverse Ts t - | in_pat _ t = raise TERM ("bad pattern", [t]) - and traverses Ts t ts = Term.list_comb (t, map (traverse Ts) ts) - in map (traverse []) ts end - -val fun_app_eq = mk_meta_eq @{thm fun_app_def} - -end - - -(** map HOL formulas to FOL formulas (i.e., separate formulas froms terms) **) - -local - val is_quant = member (op =) [@{const_name All}, @{const_name Ex}] - - val fol_rules = [ - Let_def, - @{lemma "P = True == P" by (rule eq_reflection) simp}] - - exception BAD_PATTERN of unit - - fun is_builtin_conn_or_pred ctxt c ts = - is_some (SMT_Builtin.dest_builtin_conn ctxt c ts) orelse - is_some (SMT_Builtin.dest_builtin_pred ctxt c ts) -in - -fun folify ctxt = - let - fun in_list T f t = SMT_Util.mk_symb_list T (map_filter f (SMT_Util.dest_symb_list t)) - - fun in_term pat t = - (case Term.strip_comb t of - (@{const True}, []) => t - | (@{const False}, []) => t - | (u as Const (@{const_name If}, _), [t1, t2, t3]) => - if pat then raise BAD_PATTERN () else u $ in_form t1 $ in_term pat t2 $ in_term pat t3 - | (Const (c as (n, _)), ts) => - if is_builtin_conn_or_pred ctxt c ts orelse is_quant n then - if pat then raise BAD_PATTERN () else in_form t - else - Term.list_comb (Const c, map (in_term pat) ts) - | (Free c, ts) 
=> Term.list_comb (Free c, map (in_term pat) ts) - | _ => t) - - and in_pat ((p as Const (@{const_name pat}, _)) $ t) = - p $ in_term true t - | in_pat ((p as Const (@{const_name nopat}, _)) $ t) = - p $ in_term true t - | in_pat t = raise TERM ("bad pattern", [t]) - - and in_pats ps = - in_list @{typ "pattern symb_list"} (SOME o in_list @{typ pattern} (try in_pat)) ps - - and in_trigger ((c as @{const trigger}) $ p $ t) = c $ in_pats p $ in_form t - | in_trigger t = in_form t - - and in_form t = - (case Term.strip_comb t of - (q as Const (qn, _), [Abs (n, T, u)]) => - if is_quant qn then q $ Abs (n, T, in_trigger u) - else in_term false t - | (Const c, ts) => - (case SMT_Builtin.dest_builtin_conn ctxt c ts of - SOME (_, _, us, mk) => mk (map in_form us) - | NONE => - (case SMT_Builtin.dest_builtin_pred ctxt c ts of - SOME (_, _, us, mk) => mk (map (in_term false) us) - | NONE => in_term false t)) - | _ => in_term false t) - in - map in_form #> - pair (fol_rules, I) - end - -end - - -(* translation into intermediate format *) - -(** utility functions **) - -val quantifier = (fn - @{const_name All} => SOME SForall - | @{const_name Ex} => SOME SExists - | _ => NONE) - -fun group_quant qname Ts (t as Const (q, _) $ Abs (_, T, u)) = - if q = qname then group_quant qname (T :: Ts) u else (Ts, t) - | group_quant _ Ts t = (Ts, t) - -fun dest_pat (Const (@{const_name pat}, _) $ t) = (t, true) - | dest_pat (Const (@{const_name nopat}, _) $ t) = (t, false) - | dest_pat t = raise TERM ("bad pattern", [t]) - -fun dest_pats [] = I - | dest_pats ts = - (case map dest_pat ts |> split_list ||> distinct (op =) of - (ps, [true]) => cons (SPat ps) - | (ps, [false]) => cons (SNoPat ps) - | _ => raise TERM ("bad multi-pattern", ts)) - -fun dest_trigger (@{const trigger} $ tl $ t) = - (rev (fold (dest_pats o SMT_Util.dest_symb_list) (SMT_Util.dest_symb_list tl) []), t) - | dest_trigger t = ([], t) - -fun dest_quant qn T t = quantifier qn |> Option.map (fn q => - let - val (Ts, u) = group_quant qn [T] t - val (ps, p) = dest_trigger u - in (q, rev Ts, ps, p) end) - -fun fold_map_pat f (SPat ts) = fold_map f ts #>> SPat - | fold_map_pat f (SNoPat ts) = fold_map f ts #>> SNoPat - - -(** translation from Isabelle terms into SMT intermediate terms **) - -fun intermediate logic dtyps builtin ctxt ts trx = - let - fun transT (T as TFree _) = add_typ T true - | transT (T as TVar _) = (fn _ => raise TYPE ("bad SMT type", [T], [])) - | transT (T as Type _) = - (case SMT_Builtin.dest_builtin_typ ctxt T of - SOME n => pair n - | NONE => add_typ T true) - - fun app n ts = SApp (n, ts) - - fun trans t = - (case Term.strip_comb t of - (Const (qn, _), [Abs (_, T, t1)]) => - (case dest_quant qn T t1 of - SOME (q, Ts, ps, b) => - fold_map transT Ts ##>> fold_map (fold_map_pat trans) ps ##>> - trans b #>> (fn ((Ts', ps'), b') => SQua (q, Ts', ps', b')) - | NONE => raise TERM ("unsupported quantifier", [t])) - | (Const (@{const_name Let}, _), [t1, Abs (_, T, t2)]) => - transT T ##>> trans t1 ##>> trans t2 #>> (fn ((U, u1), u2) => SLet (U, u1, u2)) - | (u as Const (c as (_, T)), ts) => - (case builtin ctxt c ts of - SOME (n, _, us, _) => fold_map trans us #>> app n - | NONE => transs u T ts) - | (u as Free (_, T), ts) => transs u T ts - | (Bound i, []) => pair (SVar i) - | _ => raise TERM ("bad SMT term", [t])) - - and transs t T ts = - let val (Us, U) = SMT_Util.dest_funT (length ts) T - in - fold_map transT Us ##>> transT U #-> (fn Up => - add_fun t (SOME Up) ##>> fold_map trans ts #>> SApp) - end - - val (us, trx') = fold_map 
trans ts trx - in ((sign_of (logic ts) dtyps trx', us), trx') end - - -(* translation *) - -structure Configs = Generic_Data -( - type T = (Proof.context -> config) SMT_Util.dict - val empty = [] - val extend = I - fun merge data = SMT_Util.dict_merge fst data -) - -fun add_config (cs, cfg) = Configs.map (SMT_Util.dict_update (cs, cfg)) - -fun get_config ctxt = - let val cs = SMT_Config.solver_class_of ctxt - in - (case SMT_Util.dict_get (Configs.get (Context.Proof ctxt)) cs of - SOME cfg => cfg ctxt - | NONE => error ("SMT: no translation configuration found " ^ - "for solver class " ^ quote (SMT_Util.string_of_class cs))) - end - -fun translate ctxt smt_options comments ithms = - let - val {logic, fp_kinds, serialize} = get_config ctxt - - fun no_dtyps (tr_context, ctxt) ts = - ((Termtab.empty, [], tr_context, ctxt), ts) - - val ts1 = map (Envir.beta_eta_contract o SMT_Util.prop_of o snd) ithms - - val ((funcs, dtyps, tr_context, ctxt1), ts2) = - ((empty_tr_context, ctxt), ts1) - |-> (if null fp_kinds then no_dtyps else collect_co_datatypes fp_kinds) - - fun is_binder (Const (@{const_name Let}, _) $ _) = true - | is_binder t = Lambda_Lifting.is_quantifier t - - fun mk_trigger ((q as Const (@{const_name All}, _)) $ Abs (n, T, t)) = - q $ Abs (n, T, mk_trigger t) - | mk_trigger (eq as (Const (@{const_name HOL.eq}, T) $ lhs $ _)) = - Term.domain_type T --> @{typ pattern} - |> (fn T => Const (@{const_name pat}, T) $ lhs) - |> SMT_Util.mk_symb_list @{typ pattern} o single - |> SMT_Util.mk_symb_list @{typ "pattern symb_list"} o single - |> (fn t => @{const trigger} $ t $ eq) - | mk_trigger t = t - - val (ctxt2, (ts3, ll_defs)) = - ts2 - |> eta_expand ctxt1 funcs - |> rpair ctxt1 - |-> Lambda_Lifting.lift_lambdas NONE is_binder - |-> (fn (ts', ll_defs) => fn ctxt' => - (ctxt', (intro_explicit_application ctxt' funcs (map mk_trigger ll_defs @ ts'), ll_defs))) - - val ((rewrite_rules, builtin), ts4) = folify ctxt2 ts3 - |>> apfst (cons fun_app_eq) -val _ = dtyps : (BNF_Util.fp_kind * (string * (string * (string * string) list) list)) list (*###*) - in - (ts4, tr_context) - |-> intermediate logic dtyps (builtin SMT_Builtin.dest_builtin) ctxt2 - |>> uncurry (serialize smt_options comments) - ||> replay_data_of ctxt2 ll_defs rewrite_rules ithms - end - -end; diff --git a/src/test-gen/SMT/smt_util.ML b/src/test-gen/SMT/smt_util.ML deleted file mode 100644 index 387c204..0000000 --- a/src/test-gen/SMT/smt_util.ML +++ /dev/null @@ -1,240 +0,0 @@ -(* Title: HOL/Tools/SMT/smt_util.ML - Author: Sascha Boehme, TU Muenchen - -General utility functions. 
-*) - -signature SMT_UTIL = -sig - (*basic combinators*) - val repeat: ('a -> 'a option) -> 'a -> 'a - val repeat_yield: ('a -> 'b -> ('a * 'b) option) -> 'a -> 'b -> 'a * 'b - - (*class dictionaries*) - type class = string list - val basicC: class - val string_of_class: class -> string - type 'a dict = (class * 'a) Ord_List.T - val dict_map_default: class * 'a -> ('a -> 'a) -> 'a dict -> 'a dict - val dict_update: class * 'a -> 'a dict -> 'a dict - val dict_merge: ('a * 'a -> 'a) -> 'a dict * 'a dict -> 'a dict - val dict_lookup: 'a dict -> class -> 'a list - val dict_get: 'a dict -> class -> 'a option - - (*types*) - val dest_funT: int -> typ -> typ list * typ - - (*terms*) - val dest_conj: term -> term * term - val dest_disj: term -> term * term - val under_quant: (term -> 'a) -> term -> 'a - val is_number: term -> bool - - (*symbolic lists*) - val symb_nil_const: typ -> term - val symb_cons_const: typ -> term - val mk_symb_list: typ -> term list -> term - val dest_symb_list: term -> term list - - (*patterns and instantiations*) - val mk_const_pat: theory -> string -> (ctyp -> 'a) -> 'a * cterm - val destT1: ctyp -> ctyp - val destT2: ctyp -> ctyp - val instTs: ctyp list -> ctyp list * cterm -> cterm - val instT: ctyp -> ctyp * cterm -> cterm - val instT': cterm -> ctyp * cterm -> cterm - - (*certified terms*) - val dest_cabs: cterm -> Proof.context -> cterm * Proof.context - val dest_all_cabs: cterm -> Proof.context -> cterm * Proof.context - val dest_cbinder: cterm -> Proof.context -> cterm * Proof.context - val dest_all_cbinders: cterm -> Proof.context -> cterm * Proof.context - val mk_cprop: cterm -> cterm - val dest_cprop: cterm -> cterm - val mk_cequals: cterm -> cterm -> cterm - val term_of: cterm -> term - val prop_of: thm -> term - - (*conversions*) - val if_conv: (term -> bool) -> conv -> conv -> conv - val if_true_conv: (term -> bool) -> conv -> conv - val if_exists_conv: (term -> bool) -> conv -> conv - val binders_conv: (Proof.context -> conv) -> Proof.context -> conv - val under_quant_conv: (Proof.context * cterm list -> conv) -> - Proof.context -> conv - val prop_conv: conv -> conv -end; - -structure SMT_Util: SMT_UTIL = -struct - -(* basic combinators *) - -fun repeat f = - let fun rep x = (case f x of SOME y => rep y | NONE => x) - in rep end - -fun repeat_yield f = - let fun rep x y = (case f x y of SOME (x', y') => rep x' y' | NONE => (x, y)) - in rep end - - -(* class dictionaries *) - -type class = string list - -val basicC = [] - -fun string_of_class [] = "basic" - | string_of_class cs = "basic." ^ space_implode "." 
cs - -type 'a dict = (class * 'a) Ord_List.T - -fun class_ord ((cs1, _), (cs2, _)) = - rev_order (list_ord fast_string_ord (cs1, cs2)) - -fun dict_insert (cs, x) d = - if AList.defined (op =) d cs then d - else Ord_List.insert class_ord (cs, x) d - -fun dict_map_default (cs, x) f = - dict_insert (cs, x) #> AList.map_entry (op =) cs f - -fun dict_update (e as (_, x)) = dict_map_default e (K x) - -fun dict_merge val_merge = sort class_ord o AList.join (op =) (K val_merge) - -fun dict_lookup d cs = - let fun match (cs', x) = if is_prefix (op =) cs' cs then SOME x else NONE - in map_filter match d end - -fun dict_get d cs = - (case AList.lookup (op =) d cs of - NONE => (case cs of [] => NONE | _ => dict_get d (take (length cs - 1) cs)) - | SOME x => SOME x) - - -(* types *) - -val dest_funT = - let - fun dest Ts 0 T = (rev Ts, T) - | dest Ts i (Type ("fun", [T, U])) = dest (T::Ts) (i-1) U - | dest _ _ T = raise TYPE ("not a function type", [T], []) - in dest [] end - - -(* terms *) - -fun dest_conj (@{const HOL.conj} $ t $ u) = (t, u) - | dest_conj t = raise TERM ("not a conjunction", [t]) - -fun dest_disj (@{const HOL.disj} $ t $ u) = (t, u) - | dest_disj t = raise TERM ("not a disjunction", [t]) - -fun under_quant f t = - (case t of - Const (@{const_name All}, _) $ Abs (_, _, u) => under_quant f u - | Const (@{const_name Ex}, _) $ Abs (_, _, u) => under_quant f u - | _ => f t) - -val is_number = - let - fun is_num env (Const (@{const_name Let}, _) $ t $ Abs (_, _, u)) = is_num (t :: env) u - | is_num env (Bound i) = i < length env andalso is_num env (nth env i) - | is_num _ t = can HOLogic.dest_number t - in is_num [] end - - -(* symbolic lists *) - -fun symb_listT T = Type (@{type_name symb_list}, [T]) - -fun symb_nil_const T = Const (@{const_name Symb_Nil}, symb_listT T) - -fun symb_cons_const T = - let val listT = symb_listT T in Const (@{const_name Symb_Cons}, T --> listT --> listT) end - -fun mk_symb_list T ts = - fold_rev (fn t => fn u => symb_cons_const T $ t $ u) ts (symb_nil_const T) - -fun dest_symb_list (Const (@{const_name Symb_Nil}, _)) = [] - | dest_symb_list (Const (@{const_name Symb_Cons}, _) $ t $ u) = t :: dest_symb_list u - - -(* patterns and instantiations *) - -fun mk_const_pat thy name destT = - let val cpat = Thm.global_cterm_of thy (Const (name, Sign.the_const_type thy name)) - in (destT (Thm.ctyp_of_cterm cpat), cpat) end - -val destT1 = hd o Thm.dest_ctyp -val destT2 = hd o tl o Thm.dest_ctyp - -fun instTs cUs (cTs, ct) = Thm.instantiate_cterm (map (dest_TVar o Thm.typ_of) cTs ~~ cUs, []) ct -fun instT cU (cT, ct) = instTs [cU] ([cT], ct) -fun instT' ct = instT (Thm.ctyp_of_cterm ct) - - -(* certified terms *) - -fun dest_cabs ct ctxt = - (case Thm.term_of ct of - Abs _ => - let val (n, ctxt') = yield_singleton Variable.variant_fixes Name.uu ctxt - in (snd (Thm.dest_abs (SOME n) ct), ctxt') end - | _ => raise CTERM ("no abstraction", [ct])) - -val dest_all_cabs = repeat_yield (try o dest_cabs) - -fun dest_cbinder ct ctxt = - (case Thm.term_of ct of - Const _ $ Abs _ => dest_cabs (Thm.dest_arg ct) ctxt - | _ => raise CTERM ("not a binder", [ct])) - -val dest_all_cbinders = repeat_yield (try o dest_cbinder) - -val mk_cprop = Thm.apply (Thm.cterm_of @{context} @{const Trueprop}) - -fun dest_cprop ct = - (case Thm.term_of ct of - @{const Trueprop} $ _ => Thm.dest_arg ct - | _ => raise CTERM ("not a property", [ct])) - -val equals = mk_const_pat @{theory} @{const_name Pure.eq} destT1 -fun mk_cequals ct cu = Thm.mk_binop (instT' ct equals) ct cu - -val dest_prop = (fn 
@{const Trueprop} $ t => t | t => t) -fun term_of ct = dest_prop (Thm.term_of ct) -fun prop_of thm = dest_prop (Thm.prop_of thm) - - -(* conversions *) - -fun if_conv pred cv1 cv2 ct = if pred (Thm.term_of ct) then cv1 ct else cv2 ct - -fun if_true_conv pred cv = if_conv pred cv Conv.all_conv - -fun if_exists_conv pred = if_true_conv (Term.exists_subterm pred) - -fun binders_conv cv ctxt = - Conv.binder_conv (binders_conv cv o snd) ctxt else_conv cv ctxt - -fun under_quant_conv cv ctxt = - let - fun quant_conv inside ctxt cvs ct = - (case Thm.term_of ct of - Const (@{const_name All}, _) $ Abs _ => - Conv.binder_conv (under_conv cvs) ctxt - | Const (@{const_name Ex}, _) $ Abs _ => - Conv.binder_conv (under_conv cvs) ctxt - | _ => if inside then cv (ctxt, cvs) else Conv.all_conv) ct - and under_conv cvs (cv, ctxt) = quant_conv true ctxt (cv :: cvs) - in quant_conv false ctxt [] end - -fun prop_conv cv ct = - (case Thm.term_of ct of - @{const Trueprop} $ _ => Conv.arg_conv cv ct - | _ => raise CTERM ("not a property", [ct])) - -end; diff --git a/src/test-gen/SMT/smtlib.ML b/src/test-gen/SMT/smtlib.ML deleted file mode 100644 index e20b0ba..0000000 --- a/src/test-gen/SMT/smtlib.ML +++ /dev/null @@ -1,191 +0,0 @@ -(* Title: HOL/Tools/SMT/smtlib.ML - Author: Sascha Boehme, TU Muenchen - -Parsing and generating SMT-LIB 2. -*) - -signature SMTLIB = -sig - exception PARSE of int * string - datatype tree = - Num of int | - Dec of int * int | - Str of string | - Sym of string | - Key of string | - S of tree list - val parse: string list -> tree - val pretty_tree: tree -> Pretty.T - val str_of: tree -> string -end; - -structure SMTLIB: SMTLIB = -struct - -(* data structures *) - -exception PARSE of int * string - -datatype tree = - Num of int | - Dec of int * int | - Str of string | - Sym of string | - Key of string | - S of tree list - -datatype unfinished = None | String of string | Symbol of string - - -(* utilities *) - -fun read_raw pred l cs = - (case take_prefix pred cs of - ([], []) => raise PARSE (l, "empty token") - | ([], c :: _) => raise PARSE (l, "unexpected character " ^ quote c) - | x => x) - - -(* numerals and decimals *) - -fun int_of cs = fst (read_int cs) - -fun read_num l cs = - (case read_raw Symbol.is_ascii_digit l cs of - (cs1, "." 
:: cs') => - let val (cs2, cs'') = read_raw Symbol.is_ascii_digit l cs' - in (Dec (int_of cs1, int_of cs2), cs'') end - | (cs1, cs2) => (Num (int_of cs1), cs2)) - - -(* binary numbers *) - -fun is_bin c = (c = "0" orelse c = "1") - -fun read_bin l cs = read_raw is_bin l cs |>> Num o fst o read_radix_int 2 - - -(* hex numbers *) - -val is_hex = member (op =) (raw_explode "0123456789abcdefABCDEF") - -fun within c1 c2 c = (ord c1 <= ord c andalso ord c <= ord c2) - -fun unhex i [] = i - | unhex i (c :: cs) = - if within "0" "9" c then unhex (i * 16 + (ord c - ord "0")) cs - else if within "a" "f" c then unhex (i * 16 + (ord c - ord "a" + 10)) cs - else if within "A" "F" c then unhex (i * 16 + (ord c - ord "A" + 10)) cs - else raise Fail ("bad hex character " ^ quote c) - -fun read_hex l cs = read_raw is_hex l cs |>> Num o unhex 0 - - -(* symbols *) - -val symbol_chars = raw_explode "~!@$%^&*_+=<>.?/-" - -fun is_sym c = - Symbol.is_ascii_letter c orelse - Symbol.is_ascii_digit c orelse - member (op =) symbol_chars c - -fun read_sym f l cs = read_raw is_sym l cs |>> f o implode - - -(* quoted tokens *) - -fun read_quoted stop (escape, replacement) cs = - let - fun read _ [] = NONE - | read rs (cs as (c :: cs')) = - if is_prefix (op =) stop cs then - SOME (implode (rev rs), drop (length stop) cs) - else if not (null escape) andalso is_prefix (op =) escape cs then - read (replacement :: rs) (drop (length escape) cs) - else read (c :: rs) cs' - in read [] cs end - -fun read_string cs = read_quoted ["\\", "\""] (["\\", "\\"], "\\") cs -fun read_symbol cs = read_quoted ["|"] ([], "") cs - - -(* core parser *) - -fun read _ [] rest tss = (rest, tss) - | read l ("(" :: cs) None tss = read l cs None ([] :: tss) - | read l (")" :: cs) None (ts1 :: ts2 :: tss) = - read l cs None ((S (rev ts1) :: ts2) :: tss) - | read l ("#" :: "x" :: cs) None (ts :: tss) = - token read_hex l cs ts tss - | read l ("#" :: "b" :: cs) None (ts :: tss) = - token read_bin l cs ts tss - | read l (":" :: cs) None (ts :: tss) = - token (read_sym Key) l cs ts tss - | read l ("\"" :: cs) None (ts :: tss) = - quoted read_string String Str l "" cs ts tss - | read l ("|" :: cs) None (ts :: tss) = - quoted read_symbol Symbol Sym l "" cs ts tss - | read l ((c as "!") :: cs) None (ts :: tss) = - token (fn _ => pair (Sym c)) l cs ts tss - | read l (c :: cs) None (ts :: tss) = - if Symbol.is_ascii_blank c then read l cs None (ts :: tss) - else if Symbol.is_digit c then token read_num l (c :: cs) ts tss - else token (read_sym Sym) l (c :: cs) ts tss - | read l cs (String s) (ts :: tss) = - quoted read_string String Str l s cs ts tss - | read l cs (Symbol s) (ts :: tss) = - quoted read_symbol Symbol Sym l s cs ts tss - | read l _ _ [] = raise PARSE (l, "bad parser state") - -and token f l cs ts tss = - let val (t, cs') = f l cs - in read l cs' None ((t :: ts) :: tss) end - -and quoted r f g l s cs ts tss = - (case r cs of - NONE => (f (s ^ implode cs), ts :: tss) - | SOME (s', cs') => read l cs' None ((g (s ^ s') :: ts) :: tss)) - - - -(* overall parser *) - -fun read_line l line = read l (raw_explode line) - -fun add_line line (l, (None, tss)) = - if size line = 0 orelse nth_string line 0 = ";" then (l + 1, (None, tss)) - else (l + 1, read_line l line None tss) - | add_line line (l, (unfinished, tss)) = - (l + 1, read_line l line unfinished tss) - -fun finish (_, (None, [[t]])) = t - | finish (l, _) = raise PARSE (l, "bad nesting") - -fun parse lines = finish (fold add_line lines (1, (None, [[]]))) - - -(* pretty printer *) - -fun 
pretty_tree (Num i) = Pretty.str (string_of_int i) - | pretty_tree (Dec (i, j)) = - Pretty.str (string_of_int i ^ "." ^ string_of_int j) - | pretty_tree (Str s) = - raw_explode s - |> maps (fn "\"" => ["\\", "\""] | "\\" => ["\\", "\\"] | c => [c]) - |> implode - |> enclose "\"" "\"" - |> Pretty.str - | pretty_tree (Sym s) = - if String.isPrefix "(" s (* for bit vector functions *) orelse - forall is_sym (raw_explode s) then - Pretty.str s - else - Pretty.str ("|" ^ s ^ "|") - | pretty_tree (Key s) = Pretty.str (":" ^ s) - | pretty_tree (S trees) = - Pretty.enclose "(" ")" (Pretty.separate "" (map pretty_tree trees)) - -val str_of = Pretty.unformatted_string_of o pretty_tree - -end; diff --git a/src/test-gen/SMT/smtlib_interface.ML b/src/test-gen/SMT/smtlib_interface.ML deleted file mode 100644 index 37ffb50..0000000 --- a/src/test-gen/SMT/smtlib_interface.ML +++ /dev/null @@ -1,171 +0,0 @@ -(* Title: HOL/Tools/SMT/smtlib_interface.ML - Author: Sascha Boehme, TU Muenchen - Author: Jasmin Blanchette, TU Muenchen - -Interface to SMT solvers based on the SMT-LIB 2 format. -*) - -signature SMTLIB_INTERFACE = -sig - val smtlibC: SMT_Util.class - val add_logic: int * (term list -> string option) -> Context.generic -> Context.generic - val translate_config: Proof.context -> SMT_Translate.config - val assert_name_of_index: int -> string - val assert_index_of_name: string -> int - val assert_prefix : string -end; - -structure SMTLIB_Interface: SMTLIB_INTERFACE = -struct - -val smtlibC = ["smtlib"] - - -(* builtins *) - -local - fun int_num _ i = SOME (string_of_int i) - - fun is_linear [t] = SMT_Util.is_number t - | is_linear [t, u] = SMT_Util.is_number t orelse SMT_Util.is_number u - | is_linear _ = false - - fun times _ _ ts = - let val mk = Term.list_comb o pair @{const times (int)} - in if is_linear ts then SOME ("*", 2, ts, mk) else NONE end -in - -val setup_builtins = - fold (SMT_Builtin.add_builtin_typ smtlibC) [ - (@{typ bool}, K (SOME "Bool"), K (K NONE)), - (@{typ int}, K (SOME "Int"), int_num)] #> - fold (SMT_Builtin.add_builtin_fun' smtlibC) [ - (@{const True}, "true"), - (@{const False}, "false"), - (@{const Not}, "not"), - (@{const HOL.conj}, "and"), - (@{const HOL.disj}, "or"), - (@{const HOL.implies}, "=>"), - (@{const HOL.eq ('a)}, "="), - (@{const If ('a)}, "ite"), - (@{const less (int)}, "<"), - (@{const less_eq (int)}, "<="), - (@{const uminus (int)}, "-"), - (@{const plus (int)}, "+"), - (@{const minus (int)}, "-")] #> - SMT_Builtin.add_builtin_fun smtlibC - (Term.dest_Const @{const times (int)}, times) - -end - - -(* serialization *) - -(** logic **) - -fun fst_int_ord ((i1, _), (i2, _)) = int_ord (i1, i2) - -structure Logics = Generic_Data -( - type T = (int * (term list -> string option)) list - val empty = [] - val extend = I - fun merge data = Ord_List.merge fst_int_ord data -) - -fun add_logic pf = Logics.map (Ord_List.insert fst_int_ord pf) - -fun choose_logic ctxt ts = - let - fun choose [] = "AUFLIA" - | choose ((_, f) :: fs) = (case f ts of SOME s => s | NONE => choose fs) - in - (case choose (Logics.get (Context.Proof ctxt)) of - "" => "" (* for default Z3 logic, a subset of everything *) - | logic => "(set-logic " ^ logic ^ ")\n") - end - - -(** serialization **) - -fun var i = "?v" ^ string_of_int i - -fun tree_of_sterm l (SMT_Translate.SVar i) = SMTLIB.Sym (var (l - i - 1)) - | tree_of_sterm _ (SMT_Translate.SApp (n, [])) = SMTLIB.Sym n - | tree_of_sterm l (SMT_Translate.SApp (n, ts)) = - SMTLIB.S (SMTLIB.Sym n :: map (tree_of_sterm l) ts) - | tree_of_sterm _ 
(SMT_Translate.SLet _) = - raise Fail "SMT-LIB: unsupported let expression" - | tree_of_sterm l (SMT_Translate.SQua (q, ss, pats, t)) = - let - val l' = l + length ss - - fun quant_name SMT_Translate.SForall = "forall" - | quant_name SMT_Translate.SExists = "exists" - - fun gen_trees_of_pat keyword ps = - [SMTLIB.Key keyword, SMTLIB.S (map (tree_of_sterm l') ps)] - fun trees_of_pat (SMT_Translate.SPat ps) = gen_trees_of_pat "pattern" ps - | trees_of_pat (SMT_Translate.SNoPat ps) = gen_trees_of_pat "no-pattern" ps - fun tree_of_pats [] t = t - | tree_of_pats pats t = SMTLIB.S (SMTLIB.Sym "!" :: t :: maps trees_of_pat pats) - - val vs = map_index (fn (i, ty) => - SMTLIB.S [SMTLIB.Sym (var (l + i)), SMTLIB.Sym ty]) ss - - val body = t - |> tree_of_sterm l' - |> tree_of_pats pats - in - SMTLIB.S [SMTLIB.Sym (quant_name q), SMTLIB.S vs, body] - end - - -fun sctrarg (sel, typ) = "(" ^ sel ^ " " ^ typ ^ ")" -fun sctr (name, args) = enclose "(" ")" (space_implode " " (name :: map sctrarg args)) -fun sdatatype (name, ctrs) = enclose "(" ")" (space_implode " " (name :: map sctr ctrs)) - -fun string_of_fun (f, (ss, s)) = f ^ " (" ^ space_implode " " ss ^ ") " ^ s - -fun named_sterm s t = SMTLIB.S [SMTLIB.Sym "!", t, SMTLIB.Key "named", SMTLIB.Sym s] - -val assert_prefix = "a" - -fun assert_name_of_index i = assert_prefix ^ string_of_int i -fun assert_index_of_name s = the_default ~1 (Int.fromString (unprefix assert_prefix s)) - -fun sdtyp (fp, dtyps) = - Buffer.add (enclose ("(declare-" ^ BNF_FP_Util.co_prefix fp ^ "datatypes () (") "))\n" - (space_implode "\n " (map sdatatype dtyps))) - -fun serialize smt_options comments {logic, sorts, dtyps, funcs} ts = - let - val unsat_core = member (op =) smt_options (":produce-unsat-cores", "true") - in - Buffer.empty - |> fold (Buffer.add o enclose "; " "\n") comments - |> fold (fn (k, v) => Buffer.add ("(set-option " ^ k ^ " " ^ v ^ ")\n")) smt_options - |> Buffer.add logic - |> fold (Buffer.add o enclose "(declare-sort " " 0)\n") (sort fast_string_ord sorts) - |> fold sdtyp (AList.coalesce (op =) dtyps) - |> fold (Buffer.add o enclose "(declare-fun " ")\n" o string_of_fun) - (sort (fast_string_ord o apply2 fst) funcs) - |> fold (fn (i, t) => Buffer.add (enclose "(assert " ")\n" - (SMTLIB.str_of (named_sterm (assert_name_of_index i) (tree_of_sterm 0 t))))) (map_index I ts) - |> Buffer.add "(check-sat)\n" - |> Buffer.add (if unsat_core then "(get-unsat-core)\n" else "(get-proof)\n") - |> Buffer.content - end - -(* interface *) - -fun translate_config ctxt = { - logic = choose_logic ctxt, - fp_kinds = [], - serialize = serialize} - -val _ = Theory.setup (Context.theory_map - (setup_builtins #> - SMT_Translate.add_config (smtlibC, translate_config))) - -end; diff --git a/src/test-gen/SMT/smtlib_isar.ML b/src/test-gen/SMT/smtlib_isar.ML deleted file mode 100644 index 2f7ae04..0000000 --- a/src/test-gen/SMT/smtlib_isar.ML +++ /dev/null @@ -1,75 +0,0 @@ -(* Title: HOL/Tools/SMT/smtlib_isar.ML - Author: Jasmin Blanchette, TU Muenchen - Author: Mathias Fleury, ENS Rennes - -General tools for Isar proof reconstruction. 
-*) - -signature SMTLIB_ISAR = -sig - val unlift_term: term list -> term -> term - val postprocess_step_conclusion: Proof.context -> thm list -> term list -> term -> term - val normalizing_prems : Proof.context -> term -> (string * string list) list - val distinguish_conjecture_and_hypothesis : ''a list -> ''b -> ''b -> ''b list -> - (''a * term) list -> term list -> term -> (ATP_Problem.atp_formula_role * term) option - val unskolemize_names: Proof.context -> term -> term -end; - -structure SMTLIB_Isar: SMTLIB_ISAR = -struct - -open ATP_Util -open ATP_Problem -open ATP_Proof_Reconstruct - -fun unlift_term ll_defs = - let - val lifted = map (ATP_Util.extract_lambda_def dest_Free o ATP_Util.hol_open_form I) ll_defs - - fun un_free (t as Free (s, _)) = - (case AList.lookup (op =) lifted s of - SOME t => un_term t - | NONE => t) - | un_free t = t - and un_term t = map_aterms un_free t - in un_term end - -(* Remove the "__" suffix for newly introduced variables (Skolems). It is not clear why "__" is - generated also for abstraction variables, but this is repaired here. *) -fun unskolemize_names ctxt = - Term.map_abs_vars (perhaps (try Name.dest_skolem)) - #> Term.map_aterms (perhaps (try (fn Free (s, T) => - Free (s |> not (Variable.is_fixed ctxt s) ? Name.dest_skolem, T)))) - -fun postprocess_step_conclusion ctxt rewrite_rules ll_defs = - let val thy = Proof_Context.theory_of ctxt in - Raw_Simplifier.rewrite_term thy rewrite_rules [] - #> Object_Logic.atomize_term ctxt - #> not (null ll_defs) ? unlift_term ll_defs - #> simplify_bool - #> unskolemize_names ctxt - #> HOLogic.mk_Trueprop - end - -fun normalizing_prems ctxt concl0 = - SMT_Normalize.case_bool_entry :: SMT_Normalize.special_quant_table @ - SMT_Normalize.abs_min_max_table - |> map_filter (fn (c, th) => - if exists_Const (curry (op =) c o fst) concl0 then - let val s = short_thm_name ctxt th in SOME (s, [s]) end - else - NONE) - -fun distinguish_conjecture_and_hypothesis ss id conjecture_id prem_ids fact_helper_ts hyp_ts - concl_t = - (case ss of - [s] => SOME (Axiom, the (AList.lookup (op =) fact_helper_ts s)) - | _ => - if id = conjecture_id then - SOME (Conjecture, concl_t) - else - (case find_index (curry (op =) id) prem_ids of - ~1 => NONE (* lambda-lifting definition *) - | i => SOME (Hypothesis, close_form (nth hyp_ts i)))) - -end; diff --git a/src/test-gen/SMT/smtlib_proof.ML b/src/test-gen/SMT/smtlib_proof.ML deleted file mode 100644 index 909b7a5..0000000 --- a/src/test-gen/SMT/smtlib_proof.ML +++ /dev/null @@ -1,298 +0,0 @@ -(* Title: HOL/Tools/SMT/smtlib_proof.ML - Author: Sascha Boehme, TU Muenchen - Author: Mathias Fleury, ENS Rennes - Author: Jasmin Blanchette, TU Muenchen - -SMT-LIB-2-style proofs: parsing and abstract syntax tree. 
-*) - -signature SMTLIB_PROOF = -sig - datatype 'b shared = Tree of SMTLIB.tree | Term of term | Proof of 'b | None - type ('a, 'b) context - - val mk_context: Proof.context -> int -> 'b shared Symtab.table -> typ Symtab.table -> - term Symtab.table -> 'a -> ('a, 'b) context - val empty_context: Proof.context -> typ Symtab.table -> term Symtab.table -> ('a list, 'b) context - val ctxt_of: ('a, 'b) context -> Proof.context - val lookup_binding: ('a, 'b) context -> string -> 'b shared - val update_binding: string * 'b shared -> ('a, 'b) context -> ('a, 'b) context - val with_bindings: (string * 'b shared) list -> (('a, 'b) context -> 'c * ('d, 'b) context) -> - ('a, 'b) context -> 'c * ('d, 'b) context - val next_id: ('a, 'b) context -> int * ('a, 'b) context - val with_fresh_names: (('a list, 'b) context -> - term * ((string * (string * typ)) list, 'b) context) -> ('c, 'b) context -> (term * string list) - - (*type and term parsers*) - type type_parser = SMTLIB.tree * typ list -> typ option - type term_parser = SMTLIB.tree * term list -> term option - val add_type_parser: type_parser -> Context.generic -> Context.generic - val add_term_parser: term_parser -> Context.generic -> Context.generic - - exception SMTLIB_PARSE of string * SMTLIB.tree - - val declare_fun: string -> typ -> ((string * typ) list, 'a) context -> - ((string * typ) list, 'a) context - val dest_binding: SMTLIB.tree -> string * 'a shared - val type_of: ('a, 'b) context -> SMTLIB.tree -> typ - val term_of: SMTLIB.tree -> ((string * (string * typ)) list, 'a) context -> - term * ((string * (string * typ)) list, 'a) context -end; - -structure SMTLIB_Proof: SMTLIB_PROOF = -struct - -(* proof parser context *) - -datatype 'b shared = Tree of SMTLIB.tree | Term of term | Proof of 'b | None - -type ('a, 'b) context = { - ctxt: Proof.context, - id: int, - syms: 'b shared Symtab.table, - typs: typ Symtab.table, - funs: term Symtab.table, - extra: 'a} - -fun mk_context ctxt id syms typs funs extra: ('a, 'b) context = - {ctxt = ctxt, id = id, syms = syms, typs = typs, funs = funs, extra = extra} - -fun empty_context ctxt typs funs = mk_context ctxt 1 Symtab.empty typs funs [] - -fun ctxt_of ({ctxt, ...}: ('a, 'b) context) = ctxt - -fun lookup_binding ({syms, ...}: ('a, 'b) context) = - the_default None o Symtab.lookup syms - -fun map_syms f ({ctxt, id, syms, typs, funs, extra}: ('a, 'b) context) = - mk_context ctxt id (f syms) typs funs extra - -fun update_binding b = map_syms (Symtab.update b) - -fun with_bindings bs f cx = - let val bs' = map (lookup_binding cx o fst) bs - in - cx - |> fold update_binding bs - |> f - ||> fold2 (fn (name, _) => update_binding o pair name) bs bs' - end - -fun next_id ({ctxt, id, syms, typs, funs, extra}: ('a, 'b) context) = - (id, mk_context ctxt (id + 1) syms typs funs extra) - -fun with_fresh_names f ({ctxt, id, syms, typs, funs, ...}: ('a, 'b) context) = - let - fun bind (_, v as (_, T)) t = Logic.all_const T $ Term.absfree v t - - val needs_inferT = equal Term.dummyT orf Term.is_TVar - val needs_infer = Term.exists_type (Term.exists_subtype needs_inferT) - fun infer_types ctxt = - singleton (Type_Infer_Context.infer_types ctxt) #> - singleton (Proof_Context.standard_term_check_finish ctxt) - fun infer ctxt t = if needs_infer t then infer_types ctxt t else t - - val (t, {ctxt = ctxt', extra = names, ...}: ((string * (string * typ)) list, 'b) context) = - f (mk_context ctxt id syms typs funs []) - val t' = infer ctxt' (fold_rev bind names (HOLogic.mk_Trueprop t)) - in - (t', map fst names) - end - 
-fun lookup_typ ({typs, ...}: ('a, 'b) context) = Symtab.lookup typs -fun lookup_fun ({funs, ...}: ('a, 'b) context) = Symtab.lookup funs - - -(* core type and term parser *) - -fun core_type_parser (SMTLIB.Sym "Bool", []) = SOME @{typ HOL.bool} - | core_type_parser (SMTLIB.Sym "Int", []) = SOME @{typ Int.int} - | core_type_parser _ = NONE - -fun mk_unary n t = - let val T = fastype_of t - in Const (n, T --> T) $ t end - -fun mk_binary' n T U t1 t2 = Const (n, [T, T] ---> U) $ t1 $ t2 - -fun mk_binary n t1 t2 = - let val T = fastype_of t1 - in mk_binary' n T T t1 t2 end - -fun mk_rassoc f t ts = - let val us = rev (t :: ts) - in fold f (tl us) (hd us) end - -fun mk_lassoc f t ts = fold (fn u1 => fn u2 => f u2 u1) ts t - -fun mk_lassoc' n = mk_lassoc (mk_binary n) - -fun mk_binary_pred n S t1 t2 = - let - val T1 = fastype_of t1 - val T2 = fastype_of t2 - val T = - if T1 <> Term.dummyT then T1 - else if T2 <> Term.dummyT then T2 - else TVar (("?a", serial ()), S) - in mk_binary' n T @{typ HOL.bool} t1 t2 end - -fun mk_less t1 t2 = mk_binary_pred @{const_name ord_class.less} @{sort linorder} t1 t2 -fun mk_less_eq t1 t2 = mk_binary_pred @{const_name ord_class.less_eq} @{sort linorder} t1 t2 - -fun core_term_parser (SMTLIB.Sym "true", _) = SOME @{const HOL.True} - | core_term_parser (SMTLIB.Sym "false", _) = SOME @{const HOL.False} - | core_term_parser (SMTLIB.Sym "not", [t]) = SOME (HOLogic.mk_not t) - | core_term_parser (SMTLIB.Sym "and", t :: ts) = SOME (mk_rassoc (curry HOLogic.mk_conj) t ts) - | core_term_parser (SMTLIB.Sym "or", t :: ts) = SOME (mk_rassoc (curry HOLogic.mk_disj) t ts) - | core_term_parser (SMTLIB.Sym "=>", [t1, t2]) = SOME (HOLogic.mk_imp (t1, t2)) - | core_term_parser (SMTLIB.Sym "implies", [t1, t2]) = SOME (HOLogic.mk_imp (t1, t2)) - | core_term_parser (SMTLIB.Sym "=", [t1, t2]) = SOME (HOLogic.mk_eq (t1, t2)) - | core_term_parser (SMTLIB.Sym "~", [t1, t2]) = SOME (HOLogic.mk_eq (t1, t2)) - | core_term_parser (SMTLIB.Sym "ite", [t1, t2, t3]) = - let - val T = fastype_of t2 - val c = Const (@{const_name HOL.If}, [@{typ HOL.bool}, T, T] ---> T) - in SOME (c $ t1 $ t2 $ t3) end - | core_term_parser (SMTLIB.Num i, []) = SOME (HOLogic.mk_number @{typ Int.int} i) - | core_term_parser (SMTLIB.Sym "-", [t]) = SOME (mk_unary @{const_name uminus_class.uminus} t) - | core_term_parser (SMTLIB.Sym "~", [t]) = SOME (mk_unary @{const_name uminus_class.uminus} t) - | core_term_parser (SMTLIB.Sym "+", t :: ts) = - SOME (mk_lassoc' @{const_name plus_class.plus} t ts) - | core_term_parser (SMTLIB.Sym "-", t :: ts) = - SOME (mk_lassoc' @{const_name minus_class.minus} t ts) - | core_term_parser (SMTLIB.Sym "*", t :: ts) = - SOME (mk_lassoc' @{const_name times_class.times} t ts) - | core_term_parser (SMTLIB.Sym "div", [t1, t2]) = SOME (mk_binary @{const_name z3div} t1 t2) - | core_term_parser (SMTLIB.Sym "mod", [t1, t2]) = SOME (mk_binary @{const_name z3mod} t1 t2) - | core_term_parser (SMTLIB.Sym "<", [t1, t2]) = SOME (mk_less t1 t2) - | core_term_parser (SMTLIB.Sym ">", [t1, t2]) = SOME (mk_less t2 t1) - | core_term_parser (SMTLIB.Sym "<=", [t1, t2]) = SOME (mk_less_eq t1 t2) - | core_term_parser (SMTLIB.Sym ">=", [t1, t2]) = SOME (mk_less_eq t2 t1) - | core_term_parser _ = NONE - - -(* custom type and term parsers *) - -type type_parser = SMTLIB.tree * typ list -> typ option - -type term_parser = SMTLIB.tree * term list -> term option - -fun id_ord ((id1, _), (id2, _)) = int_ord (id1, id2) - -structure Parsers = Generic_Data -( - type T = (int * type_parser) list * (int * term_parser) 
list - val empty : T = ([(serial (), core_type_parser)], [(serial (), core_term_parser)]) - val extend = I - fun merge ((tys1, ts1), (tys2, ts2)) = - (Ord_List.merge id_ord (tys1, tys2), Ord_List.merge id_ord (ts1, ts2)) -) - -fun add_type_parser type_parser = - Parsers.map (apfst (Ord_List.insert id_ord (serial (), type_parser))) - -fun add_term_parser term_parser = - Parsers.map (apsnd (Ord_List.insert id_ord (serial (), term_parser))) - -fun get_type_parsers ctxt = map snd (fst (Parsers.get (Context.Proof ctxt))) -fun get_term_parsers ctxt = map snd (snd (Parsers.get (Context.Proof ctxt))) - -fun apply_parsers parsers x = - let - fun apply [] = NONE - | apply (parser :: parsers) = - (case parser x of - SOME y => SOME y - | NONE => apply parsers) - in apply parsers end - - -(* type and term parsing *) - -exception SMTLIB_PARSE of string * SMTLIB.tree - -val desymbolize = Name.desymbolize (SOME false) o perhaps (try (unprefix "?")) - -fun fresh_fun add name n T ({ctxt, id, syms, typs, funs, extra}: ('a, 'b) context) = - let - val (n', ctxt') = yield_singleton Variable.variant_fixes n ctxt - val t = Free (n', T) - val funs' = Symtab.update (name, t) funs - in (t, mk_context ctxt' id syms typs funs' (add (n', T) extra)) end - -fun declare_fun name = snd oo fresh_fun cons name (desymbolize name) -fun declare_free name = fresh_fun (cons o pair name) name (desymbolize name) - -fun parse_type cx ty Ts = - (case apply_parsers (get_type_parsers (ctxt_of cx)) (ty, Ts) of - SOME T => T - | NONE => - (case ty of - SMTLIB.Sym name => - (case lookup_typ cx name of - SOME T => T - | NONE => raise SMTLIB_PARSE ("unknown SMT type", ty)) - | _ => raise SMTLIB_PARSE ("bad SMT type format", ty))) - -fun parse_term t ts cx = - (case apply_parsers (get_term_parsers (ctxt_of cx)) (t, ts) of - SOME u => (u, cx) - | NONE => - (case t of - SMTLIB.Sym name => - (case lookup_fun cx name of - SOME u => (Term.list_comb (u, ts), cx) - | NONE => - if null ts then declare_free name Term.dummyT cx - else raise SMTLIB_PARSE ("bad SMT term", t)) - | _ => raise SMTLIB_PARSE ("bad SMT term format", t))) - -fun type_of cx ty = - (case try (parse_type cx ty) [] of - SOME T => T - | NONE => - (case ty of - SMTLIB.S (ty' :: tys) => parse_type cx ty' (map (type_of cx) tys) - | _ => raise SMTLIB_PARSE ("bad SMT type", ty))) - -fun dest_var cx (SMTLIB.S [SMTLIB.Sym name, ty]) = (name, (desymbolize name, type_of cx ty)) - | dest_var _ v = raise SMTLIB_PARSE ("bad SMT quantifier variable format", v) - -fun dest_body (SMTLIB.S (SMTLIB.Sym "!" :: body :: _)) = dest_body body - | dest_body body = body - -fun dest_binding (SMTLIB.S [SMTLIB.Sym name, t]) = (name, Tree t) - | dest_binding b = raise SMTLIB_PARSE ("bad SMT let binding format", b) - -fun term_of t cx = - (case t of - SMTLIB.S [SMTLIB.Sym "forall", SMTLIB.S vars, body] => quant HOLogic.mk_all vars body cx - | SMTLIB.S [SMTLIB.Sym "exists", SMTLIB.S vars, body] => quant HOLogic.mk_exists vars body cx - | SMTLIB.S [SMTLIB.Sym "let", SMTLIB.S bindings, body] => - with_bindings (map dest_binding bindings) (term_of body) cx - | SMTLIB.S (SMTLIB.Sym "!" 
:: t :: _) => term_of t cx - | SMTLIB.S (f :: args) => - cx - |> fold_map term_of args - |-> parse_term f - | SMTLIB.Sym name => - (case lookup_binding cx name of - Tree u => - cx - |> term_of u - |-> (fn u' => pair u' o update_binding (name, Term u')) - | Term u => (u, cx) - | None => parse_term t [] cx - | _ => raise SMTLIB_PARSE ("bad SMT term format", t)) - | _ => parse_term t [] cx) - -and quant q vars body cx = - let val vs = map (dest_var cx) vars - in - cx - |> with_bindings (map (apsnd (Term o Free)) vs) (term_of (dest_body body)) - |>> fold_rev (fn (_, (n, T)) => fn t => q (n, T, t)) vs - end - -end; diff --git a/src/test-gen/SMT/verit_isar.ML b/src/test-gen/SMT/verit_isar.ML deleted file mode 100644 index 28ee6d9..0000000 --- a/src/test-gen/SMT/verit_isar.ML +++ /dev/null @@ -1,60 +0,0 @@ -(* Title: HOL/Tools/SMT/verit_isar.ML - Author: Mathias Fleury, TU Muenchen - Author: Jasmin Blanchette, TU Muenchen - -VeriT proofs as generic ATP proofs for Isar proof reconstruction. -*) - -signature VERIT_ISAR = -sig - type ('a, 'b) atp_step = ('a, 'b) ATP_Proof.atp_step - val atp_proof_of_veriT_proof: Proof.context -> term list -> thm list -> term list -> term -> - (string * term) list -> int list -> int -> (int * string) list -> VeriT_Proof.veriT_step list -> - (term, string) ATP_Proof.atp_step list -end; - -structure VeriT_Isar: VERIT_ISAR = -struct - -open ATP_Util -open ATP_Problem -open ATP_Proof -open ATP_Proof_Reconstruct -open SMTLIB_Interface -open SMTLIB_Isar -open VeriT_Proof - -fun atp_proof_of_veriT_proof ctxt ll_defs rewrite_rules hyp_ts concl_t fact_helper_ts prem_ids - conjecture_id fact_helper_ids = - let - fun steps_of (VeriT_Proof.VeriT_Step {id, rule, prems, concl, ...}) = - let - val concl' = postprocess_step_conclusion ctxt rewrite_rules ll_defs concl - fun standard_step role = ((id, []), role, concl', rule, map (rpair []) prems) - in - if rule = veriT_input_rule then - let - val id_num = the (Int.fromString (unprefix assert_prefix id)) - val ss = the_list (AList.lookup (op =) fact_helper_ids id_num) - in - (case distinguish_conjecture_and_hypothesis ss id_num conjecture_id prem_ids - fact_helper_ts hyp_ts concl_t of - NONE => [] - | SOME (role0, concl00) => - let - val name0 = (id ^ "a", ss) - val concl0 = unskolemize_names ctxt concl00 - in - [(name0, role0, concl0, rule, []), - ((id, []), Plain, concl', veriT_rewrite_rule, - name0 :: normalizing_prems ctxt concl0)] - end) - end - else - [standard_step (if null prems then Lemma else Plain)] - end - in - maps steps_of - end - -end; diff --git a/src/test-gen/SMT/verit_proof.ML b/src/test-gen/SMT/verit_proof.ML deleted file mode 100644 index 1dab112..0000000 --- a/src/test-gen/SMT/verit_proof.ML +++ /dev/null @@ -1,324 +0,0 @@ -(* Title: HOL/Tools/SMT/verit_proof.ML - Author: Mathias Fleury, ENS Rennes - Author: Sascha Boehme, TU Muenchen - -VeriT proofs: parsing and abstract syntax tree. 
-*) - -signature VERIT_PROOF = -sig - (*proofs*) - datatype veriT_step = VeriT_Step of { - id: string, - rule: string, - prems: string list, - concl: term, - fixes: string list} - - (*proof parser*) - val parse: typ Symtab.table -> term Symtab.table -> string list -> - Proof.context -> veriT_step list * Proof.context - - val veriT_step_prefix : string - val veriT_input_rule: string - val veriT_la_generic_rule : string - val veriT_rewrite_rule : string - val veriT_simp_arith_rule : string - val veriT_tmp_ite_elim_rule : string - val veriT_tmp_skolemize_rule : string -end; - -structure VeriT_Proof: VERIT_PROOF = -struct - -open SMTLIB_Proof - -datatype veriT_node = VeriT_Node of { - id: string, - rule: string, - prems: string list, - concl: term, - bounds: string list} - -fun mk_node id rule prems concl bounds = - VeriT_Node {id = id, rule = rule, prems = prems, concl = concl, bounds = bounds} - -datatype veriT_step = VeriT_Step of { - id: string, - rule: string, - prems: string list, - concl: term, - fixes: string list} - -fun mk_step id rule prems concl fixes = - VeriT_Step {id = id, rule = rule, prems = prems, concl = concl, fixes = fixes} - -val veriT_step_prefix = ".c" -val veriT_input_rule = "input" -val veriT_la_generic_rule = "la_generic" -val veriT_rewrite_rule = "__rewrite" (* arbitrary *) -val veriT_simp_arith_rule = "simp_arith" -val veriT_tmp_alphaconv_rule = "tmp_alphaconv" -val veriT_tmp_ite_elim_rule = "tmp_ite_elim" -val veriT_tmp_skolemize_rule = "tmp_skolemize" - -(* proof parser *) - -fun node_of p cx = - ([], cx) - ||>> `(with_fresh_names (term_of p)) - |>> snd - -(*in order to get Z3-style quantification*) -fun repair_quantification (SMTLIB.S (SMTLIB.Sym "forall" :: l)) = - let val (quantified_vars, t) = split_last (map repair_quantification l) - in - SMTLIB.S (SMTLIB.Sym "forall" :: SMTLIB.S quantified_vars :: t :: []) - end - | repair_quantification (SMTLIB.S (SMTLIB.Sym "exists" :: l)) = - let val (quantified_vars, t) = split_last (map repair_quantification l) - in - SMTLIB.S (SMTLIB.Sym "exists" :: SMTLIB.S quantified_vars :: t :: []) - end - | repair_quantification (SMTLIB.S l) = SMTLIB.S (map repair_quantification l) - | repair_quantification x = x - -fun replace_bound_var_by_free_var (q $ Abs (var, ty, u)) free_var = - (case List.find (fn v => String.isPrefix v var) free_var of - NONE => q $ Abs (var, ty, replace_bound_var_by_free_var u free_var) - | SOME _ => replace_bound_var_by_free_var (Term.subst_bound (Free (var, ty), u)) free_var) - | replace_bound_var_by_free_var (u $ v) free_vars = replace_bound_var_by_free_var u free_vars $ - replace_bound_var_by_free_var v free_vars - | replace_bound_var_by_free_var u _ = u - -fun find_type_in_formula (Abs (v, T, u)) var_name = - if String.isPrefix var_name v then SOME T else find_type_in_formula u var_name - | find_type_in_formula (u $ v) var_name = - (case find_type_in_formula u var_name of - NONE => find_type_in_formula v var_name - | some_T => some_T) - | find_type_in_formula _ _ = NONE - -fun add_bound_variables_to_ctxt concl = - fold (update_binding o - (fn s => (s, Term (Free (s, the_default dummyT (find_type_in_formula concl s)))))) - -fun update_step_and_cx (node as VeriT_Node {id, rule, prems, concl, bounds}) cx = - if rule = veriT_tmp_ite_elim_rule then - (mk_node id rule prems concl bounds, add_bound_variables_to_ctxt concl bounds cx) - else if rule = veriT_tmp_skolemize_rule then - let val concl' = replace_bound_var_by_free_var concl bounds in - (mk_node id rule prems concl' [], add_bound_variables_to_ctxt 
concl bounds cx) - end - else - (node, cx) - -fun fix_subproof_steps ((((id_of_father_step, rule), prems), subproof), ((step_concl, bounds), - cx)) = - let - fun mk_prop_of_term concl = - concl |> fastype_of concl = @{typ bool} ? curry (op $) @{term Trueprop} - fun update_prems assumption_id prems = - map (fn prem => id_of_father_step ^ prem) - (filter_out (curry (op =) assumption_id) prems) - fun inline_assumption assumption assumption_id - (VeriT_Node {id, rule, prems, concl, bounds}) = - mk_node id rule (update_prems assumption_id prems) - (@{const Pure.imp} $ mk_prop_of_term assumption $ mk_prop_of_term concl) bounds - fun find_input_steps_and_inline [] last_step = ([], last_step) - | find_input_steps_and_inline (VeriT_Node {id = id', rule, prems, concl, bounds} :: steps) - last_step = - if rule = veriT_input_rule then - find_input_steps_and_inline (map (inline_assumption concl id') steps) last_step - else - apfst (cons (mk_node (id_of_father_step ^ id') rule prems concl bounds)) - (find_input_steps_and_inline steps (id_of_father_step ^ id')) - val (subproof', last_step_id) = find_input_steps_and_inline subproof "" - val prems' = - if last_step_id = "" then - prems - else - (case prems of - NONE => SOME [last_step_id] - | SOME l => SOME (last_step_id :: l)) - in - (subproof', (((((id_of_father_step, rule), prems'), step_concl), bounds), cx)) - end - -(* -(set id rule :clauses(...) :args(..) :conclusion (...)). -or -(set id subproof (set ...) :conclusion (...)). -*) - -fun parse_proof_step cx = - let - fun rotate_pair (a, (b, c)) = ((a, b), c) - fun get_id (SMTLIB.S [SMTLIB.Sym "set", SMTLIB.Sym id, SMTLIB.S l]) = (id, l) - | get_id t = raise Fail ("unrecognized VeriT proof " ^ @{make_string} t) - fun parse_rule (SMTLIB.Sym rule :: l) = (rule, l) - fun parse_source (SMTLIB.Key "clauses" :: SMTLIB.S source ::l) = - (SOME (map (fn (SMTLIB.Sym id) => id) source), l) - | parse_source l = (NONE, l) - fun parse_subproof cx id_of_father_step ((subproof_step as SMTLIB.S (SMTLIB.Sym "set" :: _)) :: l) = - let val (subproof_steps, cx') = parse_proof_step cx subproof_step in - apfst (apfst (curry (op @) subproof_steps)) (parse_subproof cx' id_of_father_step l) - end - | parse_subproof cx _ l = (([], cx), l) - fun skip_args (SMTLIB.Key "args" :: SMTLIB.S _ :: l) = l - | skip_args l = l - fun parse_conclusion (SMTLIB.Key "conclusion" :: SMTLIB.S concl :: []) = concl - fun make_or_from_clausification l = - foldl1 (fn ((concl1, bounds1), (concl2, bounds2)) => - (HOLogic.mk_disj (perhaps (try HOLogic.dest_Trueprop) concl1, - perhaps (try HOLogic.dest_Trueprop) concl2), bounds1 @ bounds2)) l - fun to_node (((((id, rule), prems), concl), bounds), cx) = - (mk_node id rule (the_default [] prems) concl bounds, cx) - in - get_id - ##> parse_rule - #> rotate_pair - ##> parse_source - #> rotate_pair - ##> skip_args - #> (fn (((id, rule), prems), sub) => (((id, rule), prems), parse_subproof cx id sub)) - #> rotate_pair - ##> parse_conclusion - ##> map repair_quantification - #> (fn ((((id, rule), prems), (subproof, cx)), terms) => - (((((id, rule), prems), subproof), fold_map (fn t => fn cx => node_of t cx) terms cx))) - ##> apfst (fn [] => (@{const False}, []) | concls => make_or_from_clausification concls) - #> fix_subproof_steps - ##> to_node - #> (fn (subproof, (step, cx)) => (subproof @ [step], cx)) - #-> fold_map update_step_and_cx - end - -(*subproofs are written on multiple lines: SMTLIB can not parse then, because parentheses are -unbalanced on each line*) -fun seperate_into_steps lines = - let - fun 
count ("(" :: l) n = count l (n + 1) - | count (")" :: l) n = count l (n - 1) - | count (_ :: l) n = count l n - | count [] n = n - fun seperate (line :: l) actual_lines m = - let val n = count (raw_explode line) 0 in - if m + n = 0 then - [actual_lines ^ line] :: seperate l "" 0 - else - seperate l (actual_lines ^ line) (m + n) - end - | seperate [] _ 0 = [] - in - seperate lines "" 0 - end - -(* VeriT adds "@" before every variable. *) -fun remove_all_at (SMTLIB.Sym v :: l) = - SMTLIB.Sym (perhaps (try (unprefix "@")) v) :: remove_all_at l - | remove_all_at (SMTLIB.S l :: l') = SMTLIB.S (remove_all_at l) :: remove_all_at l' - | remove_all_at (SMTLIB.Key v :: l) = SMTLIB.Key v :: remove_all_at l - | remove_all_at (v :: l) = v :: remove_all_at l - | remove_all_at [] = [] - -fun find_in_which_step_defined var (VeriT_Node {id, bounds, ...} :: l) = - (case List.find (fn v => String.isPrefix v var) bounds of - NONE => find_in_which_step_defined var l - | SOME _ => id) - | find_in_which_step_defined var _ = raise Fail ("undefined " ^ var) - -(*Yes every case is possible: the introduced var is not on a special size of the equality sign.*) -fun find_ite_var_in_term (Const (@{const_name If}, _) $ _ $ - (Const (@{const_name HOL.eq}, _) $ Free (var1, _) $ Free (var2, _) ) $ - (Const (@{const_name HOL.eq}, _) $ Free (var3, _) $ Free (var4, _) )) = - let - fun get_number_of_ite_transformed_var var = - perhaps (try (unprefix "ite")) var - |> Int.fromString - fun is_equal_and_has_correct_substring var var' var'' = - if var = var' andalso String.isPrefix "ite" var then SOME var' - else if var = var'' andalso String.isPrefix "ite" var then SOME var'' else NONE - val var1_introduced_var = is_equal_and_has_correct_substring var1 var3 var4 - val var2_introduced_var = is_equal_and_has_correct_substring var3 var1 var2 - in - (case (var1_introduced_var, var2_introduced_var) of - (SOME a, SOME b) => - (*ill-generated case, might be possible when applying the rule to max a a. Only if the - variable have been introduced before. Probably an impossible edge case*) - (case (get_number_of_ite_transformed_var a, get_number_of_ite_transformed_var b) of - (SOME a, SOME b) => if a < b then var2_introduced_var else var1_introduced_var - (*Otherwise, it is a name clase between a parameter name and the introduced variable. 
- Or the name convention has been changed.*) - | (NONE, SOME _) => var2_introduced_var - | (SOME _, NONE) => var2_introduced_var) - | (_, SOME _) => var2_introduced_var - | (SOME _, _) => var1_introduced_var) - end - | find_ite_var_in_term (Const (@{const_name If}, _) $ _ $ - (Const (@{const_name HOL.eq}, _) $ Free (var, _) $ _ ) $ - (Const (@{const_name HOL.eq}, _) $ Free (var', _) $ _ )) = - if var = var' then SOME var else NONE - | find_ite_var_in_term (Const (@{const_name If}, _) $ _ $ - (Const (@{const_name HOL.eq}, _) $ _ $ Free (var, _)) $ - (Const (@{const_name HOL.eq}, _) $ _ $ Free (var', _))) = - if var = var' then SOME var else NONE - | find_ite_var_in_term (p $ q) = - (case find_ite_var_in_term p of - NONE => find_ite_var_in_term q - | x => x) - | find_ite_var_in_term (Abs (_, _, body)) = find_ite_var_in_term body - | find_ite_var_in_term _ = NONE - -fun correct_veriT_step steps (node as VeriT_Node {id, rule, prems, concl, bounds}) = - if rule = veriT_tmp_ite_elim_rule then - if bounds = [] then - (*if the introduced var has already been defined, adding the definition as a dependency*) - let - val new_prems = prems - |> (case find_ite_var_in_term concl of - NONE => I - | SOME var => cons (find_in_which_step_defined var steps)) - in - VeriT_Node {id = id, rule = rule, prems = new_prems, concl = concl, bounds = bounds} - end - else - (*some new variables are created*) - let val concl' = replace_bound_var_by_free_var concl bounds in - mk_node id rule prems concl' [] - end - else - node - -fun remove_alpha_conversion _ [] = [] - | remove_alpha_conversion replace_table (VeriT_Node {id, rule, prems, concl, bounds} :: steps) = - let - val correct_dependency = map (perhaps (Symtab.lookup replace_table)) - val find_predecessor = perhaps (Symtab.lookup replace_table) - in - if rule = veriT_tmp_alphaconv_rule then - remove_alpha_conversion (Symtab.update (id, find_predecessor (hd prems)) - replace_table) steps - else - VeriT_Node {id = id, rule = rule, prems = correct_dependency prems, - concl = concl, bounds = bounds} :: remove_alpha_conversion replace_table steps - end - -fun correct_veriT_steps steps = - steps - |> map (correct_veriT_step steps) - |> remove_alpha_conversion Symtab.empty - -fun parse typs funs lines ctxt = - let - val smtlib_lines_without_at = remove_all_at (map SMTLIB.parse (seperate_into_steps lines)) - val (u, env) = apfst flat (fold_map (fn l => fn cx => parse_proof_step cx l) - smtlib_lines_without_at (empty_context ctxt typs funs)) - val t = correct_veriT_steps u - fun node_to_step (VeriT_Node {id, rule, prems, concl, bounds, ...}) = - mk_step id rule prems concl bounds - in - (map node_to_step t, ctxt_of env) - end - -end; diff --git a/src/test-gen/SMT/verit_proof_parse.ML b/src/test-gen/SMT/verit_proof_parse.ML deleted file mode 100644 index cddc609..0000000 --- a/src/test-gen/SMT/verit_proof_parse.ML +++ /dev/null @@ -1,78 +0,0 @@ -(* Title: HOL/Tools/SMT/verit_proof_parse.ML - Author: Mathias Fleury, TU Muenchen - Author: Jasmin Blanchette, TU Muenchen - -VeriT proof parsing. 
-*) - -signature VERIT_PROOF_PARSE = -sig - type ('a, 'b) atp_step = ('a, 'b) ATP_Proof.atp_step - val parse_proof: SMT_Translate.replay_data -> - ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> - SMT_Solver.parsed_proof -end; - -structure VeriT_Proof_Parse: VERIT_PROOF_PARSE = -struct - -open ATP_Util -open ATP_Problem -open ATP_Proof -open ATP_Proof_Reconstruct -open VeriT_Isar -open VeriT_Proof - -fun add_used_asserts_in_step (VeriT_Proof.VeriT_Step {prems, ...}) = - union (op =) (map_filter (try SMTLIB_Interface.assert_index_of_name) prems) - -fun parse_proof - ({context = ctxt, typs, terms, ll_defs, rewrite_rules, assms} : SMT_Translate.replay_data) - xfacts prems concl output = - let - val num_ll_defs = length ll_defs - - val id_of_index = Integer.add num_ll_defs - val index_of_id = Integer.add (~ num_ll_defs) - - fun step_of_assume j (_, th) = - VeriT_Proof.VeriT_Step {id = SMTLIB_Interface.assert_name_of_index (id_of_index j), - rule = veriT_input_rule, prems = [], concl = Thm.prop_of th, fixes = []} - - val (actual_steps, _) = VeriT_Proof.parse typs terms output ctxt - val used_assert_ids = fold add_used_asserts_in_step actual_steps [] - val used_assm_js = - map_filter (fn id => let val i = index_of_id id in if i >= 0 then SOME i else NONE end) - used_assert_ids - val used_assms = map (nth assms) used_assm_js - val assm_steps = map2 step_of_assume used_assm_js used_assms - val steps = assm_steps @ actual_steps - - val conjecture_i = 0 - val prems_i = conjecture_i + 1 - val num_prems = length prems - val facts_i = prems_i + num_prems - val num_facts = length xfacts - val helpers_i = facts_i + num_facts - - val conjecture_id = id_of_index conjecture_i - val prem_ids = map id_of_index (prems_i upto prems_i + num_prems - 1) - val fact_ids' = - map_filter (fn j => - let val (i, _) = nth assms j in - try (apsnd (nth xfacts)) (id_of_index j, i - facts_i) - end) used_assm_js - val helper_ids' = filter (fn (i, _) => i >= helpers_i) used_assms - - val fact_helper_ts = - map (fn (_, th) => (ATP_Util.short_thm_name ctxt th, Thm.prop_of th)) helper_ids' @ - map (fn (_, ((s, _), th)) => (s, Thm.prop_of th)) fact_ids' - val fact_helper_ids' = - map (apsnd (ATP_Util.short_thm_name ctxt)) helper_ids' @ map (apsnd (fst o fst)) fact_ids' - in - {outcome = NONE, fact_ids = SOME fact_ids', - atp_proof = fn () => atp_proof_of_veriT_proof ctxt ll_defs rewrite_rules prems concl - fact_helper_ts prem_ids conjecture_id fact_helper_ids' steps} - end - -end; diff --git a/src/test-gen/SMT/z3_interface.ML b/src/test-gen/SMT/z3_interface.ML deleted file mode 100644 index 588458a..0000000 --- a/src/test-gen/SMT/z3_interface.ML +++ /dev/null @@ -1,192 +0,0 @@ -(* Title: HOL/Tools/SMT/z3_interface.ML - Author: Sascha Boehme, TU Muenchen - -Interface to Z3 based on a relaxed version of SMT-LIB. 
-*) - -signature Z3_INTERFACE = -sig - val smtlib_z3C: SMT_Util.class - - datatype sym = Sym of string * sym list - type mk_builtins = { - mk_builtin_typ: sym -> typ option, - mk_builtin_num: theory -> int -> typ -> cterm option, - mk_builtin_fun: theory -> sym -> cterm list -> cterm option } - val add_mk_builtins: mk_builtins -> Context.generic -> Context.generic - val mk_builtin_typ: Proof.context -> sym -> typ option - val mk_builtin_num: Proof.context -> int -> typ -> cterm option - val mk_builtin_fun: Proof.context -> sym -> cterm list -> cterm option - - val is_builtin_theory_term: Proof.context -> term -> bool -end; - -structure Z3_Interface: Z3_INTERFACE = -struct - -val smtlib_z3C = SMTLIB_Interface.smtlibC @ ["z3"] - - -(* interface *) - -local - fun translate_config ctxt = - {logic = K "", fp_kinds = [BNF_Util.Least_FP], - serialize = #serialize (SMTLIB_Interface.translate_config ctxt)} - - fun is_div_mod @{const divide (int)} = true - | is_div_mod @{const modulo (int)} = true - | is_div_mod _ = false - - val have_int_div_mod = exists (Term.exists_subterm is_div_mod o Thm.prop_of) - - fun add_div_mod _ (thms, extra_thms) = - if have_int_div_mod thms orelse have_int_div_mod extra_thms then - (thms, @{thms div_as_z3div mod_as_z3mod} @ extra_thms) - else (thms, extra_thms) - - val setup_builtins = - SMT_Builtin.add_builtin_fun' smtlib_z3C (@{const times (int)}, "*") #> - SMT_Builtin.add_builtin_fun' smtlib_z3C (@{const z3div}, "div") #> - SMT_Builtin.add_builtin_fun' smtlib_z3C (@{const z3mod}, "mod") -in - -val _ = Theory.setup (Context.theory_map ( - setup_builtins #> - SMT_Normalize.add_extra_norm (smtlib_z3C, add_div_mod) #> - SMT_Translate.add_config (smtlib_z3C, translate_config))) - -end - - -(* constructors *) - -datatype sym = Sym of string * sym list - - -(** additional constructors **) - -type mk_builtins = { - mk_builtin_typ: sym -> typ option, - mk_builtin_num: theory -> int -> typ -> cterm option, - mk_builtin_fun: theory -> sym -> cterm list -> cterm option } - -fun chained _ [] = NONE - | chained f (b :: bs) = (case f b of SOME y => SOME y | NONE => chained f bs) - -fun chained_mk_builtin_typ bs sym = - chained (fn {mk_builtin_typ=mk, ...} : mk_builtins => mk sym) bs - -fun chained_mk_builtin_num ctxt bs i T = - let val thy = Proof_Context.theory_of ctxt - in chained (fn {mk_builtin_num=mk, ...} : mk_builtins => mk thy i T) bs end - -fun chained_mk_builtin_fun ctxt bs s cts = - let val thy = Proof_Context.theory_of ctxt - in chained (fn {mk_builtin_fun=mk, ...} : mk_builtins => mk thy s cts) bs end - -fun fst_int_ord ((i1, _), (i2, _)) = int_ord (i1, i2) - -structure Mk_Builtins = Generic_Data -( - type T = (int * mk_builtins) list - val empty = [] - val extend = I - fun merge data = Ord_List.merge fst_int_ord data -) - -fun add_mk_builtins mk = Mk_Builtins.map (Ord_List.insert fst_int_ord (serial (), mk)) - -fun get_mk_builtins ctxt = map snd (Mk_Builtins.get (Context.Proof ctxt)) - - -(** basic and additional constructors **) - -fun mk_builtin_typ _ (Sym ("Bool", _)) = SOME @{typ bool} - | mk_builtin_typ _ (Sym ("Int", _)) = SOME @{typ int} - | mk_builtin_typ _ (Sym ("bool", _)) = SOME @{typ bool} (*FIXME: legacy*) - | mk_builtin_typ _ (Sym ("int", _)) = SOME @{typ int} (*FIXME: legacy*) - | mk_builtin_typ ctxt sym = chained_mk_builtin_typ (get_mk_builtins ctxt) sym - -fun mk_builtin_num _ i @{typ int} = SOME (Numeral.mk_cnumber @{ctyp int} i) - | mk_builtin_num ctxt i T = - chained_mk_builtin_num ctxt (get_mk_builtins ctxt) i T - -val mk_true = Thm.cterm_of 
@{context} (@{const Not} $ @{const False}) -val mk_false = Thm.cterm_of @{context} @{const False} -val mk_not = Thm.apply (Thm.cterm_of @{context} @{const Not}) -val mk_implies = Thm.mk_binop (Thm.cterm_of @{context} @{const HOL.implies}) -val mk_iff = Thm.mk_binop (Thm.cterm_of @{context} @{const HOL.eq (bool)}) -val conj = Thm.cterm_of @{context} @{const HOL.conj} -val disj = Thm.cterm_of @{context} @{const HOL.disj} - -fun mk_nary _ cu [] = cu - | mk_nary ct _ cts = uncurry (fold_rev (Thm.mk_binop ct)) (split_last cts) - -val eq = SMT_Util.mk_const_pat @{theory} @{const_name HOL.eq} SMT_Util.destT1 -fun mk_eq ct cu = Thm.mk_binop (SMT_Util.instT' ct eq) ct cu - -val if_term = - SMT_Util.mk_const_pat @{theory} @{const_name If} (SMT_Util.destT1 o SMT_Util.destT2) -fun mk_if cc ct = Thm.mk_binop (Thm.apply (SMT_Util.instT' ct if_term) cc) ct - -val access = SMT_Util.mk_const_pat @{theory} @{const_name fun_app} SMT_Util.destT1 -fun mk_access array = Thm.apply (SMT_Util.instT' array access) array - -val update = - SMT_Util.mk_const_pat @{theory} @{const_name fun_upd} (Thm.dest_ctyp o SMT_Util.destT1) -fun mk_update array index value = - let val cTs = Thm.dest_ctyp (Thm.ctyp_of_cterm array) - in Thm.apply (Thm.mk_binop (SMT_Util.instTs cTs update) array index) value end - -val mk_uminus = Thm.apply (Thm.cterm_of @{context} @{const uminus (int)}) -val add = Thm.cterm_of @{context} @{const plus (int)} -val int0 = Numeral.mk_cnumber @{ctyp int} 0 -val mk_sub = Thm.mk_binop (Thm.cterm_of @{context} @{const minus (int)}) -val mk_mul = Thm.mk_binop (Thm.cterm_of @{context} @{const times (int)}) -val mk_div = Thm.mk_binop (Thm.cterm_of @{context} @{const z3div}) -val mk_mod = Thm.mk_binop (Thm.cterm_of @{context} @{const z3mod}) -val mk_lt = Thm.mk_binop (Thm.cterm_of @{context} @{const less (int)}) -val mk_le = Thm.mk_binop (Thm.cterm_of @{context} @{const less_eq (int)}) - -fun mk_builtin_fun ctxt sym cts = - (case (sym, cts) of - (Sym ("true", _), []) => SOME mk_true - | (Sym ("false", _), []) => SOME mk_false - | (Sym ("not", _), [ct]) => SOME (mk_not ct) - | (Sym ("and", _), _) => SOME (mk_nary conj mk_true cts) - | (Sym ("or", _), _) => SOME (mk_nary disj mk_false cts) - | (Sym ("implies", _), [ct, cu]) => SOME (mk_implies ct cu) - | (Sym ("iff", _), [ct, cu]) => SOME (mk_iff ct cu) - | (Sym ("~", _), [ct, cu]) => SOME (mk_iff ct cu) - | (Sym ("xor", _), [ct, cu]) => SOME (mk_not (mk_iff ct cu)) - | (Sym ("if", _), [ct1, ct2, ct3]) => SOME (mk_if ct1 ct2 ct3) - | (Sym ("ite", _), [ct1, ct2, ct3]) => SOME (mk_if ct1 ct2 ct3) (* FIXME: remove *) - | (Sym ("=", _), [ct, cu]) => SOME (mk_eq ct cu) - | (Sym ("select", _), [ca, ck]) => SOME (Thm.apply (mk_access ca) ck) - | (Sym ("store", _), [ca, ck, cv]) => SOME (mk_update ca ck cv) - | _ => - (case (sym, try (Thm.typ_of_cterm o hd) cts, cts) of - (Sym ("+", _), SOME @{typ int}, _) => SOME (mk_nary add int0 cts) - | (Sym ("-", _), SOME @{typ int}, [ct]) => SOME (mk_uminus ct) - | (Sym ("-", _), SOME @{typ int}, [ct, cu]) => SOME (mk_sub ct cu) - | (Sym ("*", _), SOME @{typ int}, [ct, cu]) => SOME (mk_mul ct cu) - | (Sym ("div", _), SOME @{typ int}, [ct, cu]) => SOME (mk_div ct cu) - | (Sym ("mod", _), SOME @{typ int}, [ct, cu]) => SOME (mk_mod ct cu) - | (Sym ("<", _), SOME @{typ int}, [ct, cu]) => SOME (mk_lt ct cu) - | (Sym ("<=", _), SOME @{typ int}, [ct, cu]) => SOME (mk_le ct cu) - | (Sym (">", _), SOME @{typ int}, [ct, cu]) => SOME (mk_lt cu ct) - | (Sym (">=", _), SOME @{typ int}, [ct, cu]) => SOME (mk_le cu ct) - | _ => 
chained_mk_builtin_fun ctxt (get_mk_builtins ctxt) sym cts)) - - -(* abstraction *) - -fun is_builtin_theory_term ctxt t = - if SMT_Builtin.is_builtin_num ctxt t then true - else - (case Term.strip_comb t of - (Const c, ts) => SMT_Builtin.is_builtin_fun ctxt c ts - | _ => false) - -end; diff --git a/src/test-gen/SMT/z3_isar.ML b/src/test-gen/SMT/z3_isar.ML deleted file mode 100644 index 5b73931..0000000 --- a/src/test-gen/SMT/z3_isar.ML +++ /dev/null @@ -1,120 +0,0 @@ -(* Title: HOL/Tools/SMT/z3_isar.ML - Author: Jasmin Blanchette, TU Muenchen - -Z3 proofs as generic ATP proofs for Isar proof reconstruction. -*) - -signature Z3_ISAR = -sig - val atp_proof_of_z3_proof: Proof.context -> term list -> thm list -> term list -> term -> - (string * term) list -> int list -> int -> (int * string) list -> Z3_Proof.z3_step list -> - (term, string) ATP_Proof.atp_step list -end; - -structure Z3_Isar: Z3_ISAR = -struct - -open ATP_Util -open ATP_Problem -open ATP_Proof -open ATP_Proof_Reconstruct -open SMTLIB_Isar - -val z3_apply_def_rule = Z3_Proof.string_of_rule Z3_Proof.Apply_Def -val z3_hypothesis_rule = Z3_Proof.string_of_rule Z3_Proof.Hypothesis -val z3_intro_def_rule = Z3_Proof.string_of_rule Z3_Proof.Intro_Def -val z3_lemma_rule = Z3_Proof.string_of_rule Z3_Proof.Lemma - -fun inline_z3_defs _ [] = [] - | inline_z3_defs defs ((name, role, t, rule, deps) :: lines) = - if rule = z3_intro_def_rule then - let val def = t |> HOLogic.dest_Trueprop |> HOLogic.dest_eq |> swap in - inline_z3_defs (insert (op =) def defs) - (map (replace_dependencies_in_line (name, [])) lines) - end - else if rule = z3_apply_def_rule then - inline_z3_defs defs (map (replace_dependencies_in_line (name, [])) lines) - else - (name, role, Term.subst_atomic defs t, rule, deps) :: inline_z3_defs defs lines - -fun add_z3_hypotheses [] = I - | add_z3_hypotheses hyps = - HOLogic.dest_Trueprop - #> curry s_imp (Library.foldr1 s_conj (map HOLogic.dest_Trueprop hyps)) - #> HOLogic.mk_Trueprop - -fun inline_z3_hypotheses _ _ [] = [] - | inline_z3_hypotheses hyp_names hyps ((name, role, t, rule, deps) :: lines) = - if rule = z3_hypothesis_rule then - inline_z3_hypotheses (name :: hyp_names) (AList.map_default (op =) (t, []) (cons name) hyps) - lines - else - let val deps' = subtract (op =) hyp_names deps in - if rule = z3_lemma_rule then - (name, role, t, rule, deps') :: inline_z3_hypotheses hyp_names hyps lines - else - let - val add_hyps = filter_out (null o inter (op =) deps o snd) hyps - val t' = add_z3_hypotheses (map fst add_hyps) t - val hyps' = fold (AList.update (op =) o apsnd (insert (op =) name)) add_hyps hyps - in - (name, role, t', rule, deps') :: inline_z3_hypotheses hyp_names hyps' lines - end - end - -fun dest_alls (Const (@{const_name Pure.all}, _) $ Abs (abs as (_, T, _))) = - let val (s', t') = Term.dest_abs abs in - dest_alls t' |>> cons (s', T) - end - | dest_alls t = ([], t) - -val reorder_foralls = - dest_alls - #>> sort_by fst - #-> fold_rev (Logic.all o Free); - -fun atp_proof_of_z3_proof ctxt ll_defs rewrite_rules hyp_ts concl_t fact_helper_ts prem_ids - conjecture_id fact_helper_ids proof = - let - fun steps_of (Z3_Proof.Z3_Step {id, rule, prems, concl, ...}) = - let - val sid = string_of_int id - - val concl' = concl - |> reorder_foralls (* crucial for skolemization steps *) - |> postprocess_step_conclusion ctxt rewrite_rules ll_defs - fun standard_step role = - ((sid, []), role, concl', Z3_Proof.string_of_rule rule, - map (fn id => (string_of_int id, [])) prems) - in - (case rule of - Z3_Proof.Asserted => - 
let val ss = the_list (AList.lookup (op =) fact_helper_ids id) in - (case distinguish_conjecture_and_hypothesis ss id conjecture_id prem_ids fact_helper_ts - hyp_ts concl_t of - NONE => [] - | SOME (role0, concl00) => - let - val name0 = (sid ^ "a", ss) - val concl0 = unskolemize_names ctxt concl00 - in - (if role0 = Axiom then [] - else [(name0, role0, concl0, Z3_Proof.string_of_rule rule, [])]) @ - [((sid, []), Plain, concl', Z3_Proof.string_of_rule Z3_Proof.Rewrite, - name0 :: normalizing_prems ctxt concl0)] - end) - end - | Z3_Proof.Rewrite => [standard_step Lemma] - | Z3_Proof.Rewrite_Star => [standard_step Lemma] - | Z3_Proof.Skolemize => [standard_step Lemma] - | Z3_Proof.Th_Lemma _ => [standard_step Lemma] - | _ => [standard_step Plain]) - end - in - proof - |> maps steps_of - |> inline_z3_defs [] - |> inline_z3_hypotheses [] [] - end - -end; diff --git a/src/test-gen/SMT/z3_proof.ML b/src/test-gen/SMT/z3_proof.ML deleted file mode 100644 index 2c3ab4e..0000000 --- a/src/test-gen/SMT/z3_proof.ML +++ /dev/null @@ -1,303 +0,0 @@ -(* Title: HOL/Tools/SMT/z3_proof.ML - Author: Sascha Boehme, TU Muenchen - -Z3 proofs: parsing and abstract syntax tree. -*) - -signature Z3_PROOF = -sig - (*proof rules*) - datatype z3_rule = - True_Axiom | Asserted | Goal | Modus_Ponens | Reflexivity | Symmetry | Transitivity | - Transitivity_Star | Monotonicity | Quant_Intro | Distributivity | And_Elim | Not_Or_Elim | - Rewrite | Rewrite_Star | Pull_Quant | Pull_Quant_Star | Push_Quant | Elim_Unused_Vars | - Dest_Eq_Res | Quant_Inst | Hypothesis | Lemma | Unit_Resolution | Iff_True | Iff_False | - Commutativity | Def_Axiom | Intro_Def | Apply_Def | Iff_Oeq | Nnf_Pos | Nnf_Neg | Nnf_Star | - Cnf_Star | Skolemize | Modus_Ponens_Oeq | Th_Lemma of string - - val is_assumption: z3_rule -> bool - val string_of_rule: z3_rule -> string - - (*proofs*) - datatype z3_step = Z3_Step of { - id: int, - rule: z3_rule, - prems: int list, - concl: term, - fixes: string list, - is_fix_step: bool} - - (*proof parser*) - val parse: typ Symtab.table -> term Symtab.table -> string list -> - Proof.context -> z3_step list * Proof.context -end; - -structure Z3_Proof: Z3_PROOF = -struct - -open SMTLIB_Proof - - -(* proof rules *) - -datatype z3_rule = - True_Axiom | Asserted | Goal | Modus_Ponens | Reflexivity | Symmetry | Transitivity | - Transitivity_Star | Monotonicity | Quant_Intro | Distributivity | And_Elim | Not_Or_Elim | - Rewrite | Rewrite_Star | Pull_Quant | Pull_Quant_Star | Push_Quant | Elim_Unused_Vars | - Dest_Eq_Res | Quant_Inst | Hypothesis | Lemma | Unit_Resolution | Iff_True | Iff_False | - Commutativity | Def_Axiom | Intro_Def | Apply_Def | Iff_Oeq | Nnf_Pos | Nnf_Neg | Nnf_Star | - Cnf_Star | Skolemize | Modus_Ponens_Oeq | Th_Lemma of string - (* some proof rules include further information that is currently dropped by the parser *) - -val rule_names = Symtab.make [ - ("true-axiom", True_Axiom), - ("asserted", Asserted), - ("goal", Goal), - ("mp", Modus_Ponens), - ("refl", Reflexivity), - ("symm", Symmetry), - ("trans", Transitivity), - ("trans*", Transitivity_Star), - ("monotonicity", Monotonicity), - ("quant-intro", Quant_Intro), - ("distributivity", Distributivity), - ("and-elim", And_Elim), - ("not-or-elim", Not_Or_Elim), - ("rewrite", Rewrite), - ("rewrite*", Rewrite_Star), - ("pull-quant", Pull_Quant), - ("pull-quant*", Pull_Quant_Star), - ("push-quant", Push_Quant), - ("elim-unused", Elim_Unused_Vars), - ("der", Dest_Eq_Res), - ("quant-inst", Quant_Inst), - ("hypothesis", Hypothesis), - ("lemma", Lemma), 
- ("unit-resolution", Unit_Resolution), - ("iff-true", Iff_True), - ("iff-false", Iff_False), - ("commutativity", Commutativity), - ("def-axiom", Def_Axiom), - ("intro-def", Intro_Def), - ("apply-def", Apply_Def), - ("iff~", Iff_Oeq), - ("nnf-pos", Nnf_Pos), - ("nnf-neg", Nnf_Neg), - ("nnf*", Nnf_Star), - ("cnf*", Cnf_Star), - ("sk", Skolemize), - ("mp~", Modus_Ponens_Oeq)] - -fun is_assumption Asserted = true - | is_assumption Goal = true - | is_assumption Hypothesis = true - | is_assumption Intro_Def = true - | is_assumption Skolemize = true - | is_assumption _ = false - -fun rule_of_string name = - (case Symtab.lookup rule_names name of - SOME rule => rule - | NONE => error ("unknown Z3 proof rule " ^ quote name)) - -fun string_of_rule (Th_Lemma kind) = "th-lemma" ^ (if kind = "" then "" else " " ^ kind) - | string_of_rule r = - let fun eq_rule (s, r') = if r = r' then SOME s else NONE - in the (Symtab.get_first eq_rule rule_names) end - - -(* proofs *) - -datatype z3_node = Z3_Node of { - id: int, - rule: z3_rule, - prems: z3_node list, - concl: term, - bounds: string list} - -fun mk_node id rule prems concl bounds = - Z3_Node {id = id, rule = rule, prems = prems, concl = concl, bounds = bounds} - -datatype z3_step = Z3_Step of { - id: int, - rule: z3_rule, - prems: int list, - concl: term, - fixes: string list, - is_fix_step: bool} - -fun mk_step id rule prems concl fixes is_fix_step = - Z3_Step {id = id, rule = rule, prems = prems, concl = concl, fixes = fixes, - is_fix_step = is_fix_step} - - -(* proof parser *) - -fun rule_of (SMTLIB.Sym name) = rule_of_string name - | rule_of (SMTLIB.S (SMTLIB.Sym "_" :: SMTLIB.Sym name :: args)) = - (case (name, args) of - ("th-lemma", SMTLIB.Sym kind :: _) => Th_Lemma kind - | _ => rule_of_string name) - | rule_of r = raise SMTLIB_PARSE ("bad Z3 proof rule format", r) - -fun node_of p cx = - (case p of - SMTLIB.Sym name => - (case lookup_binding cx name of - Proof node => (node, cx) - | Tree p' => - cx - |> node_of p' - |-> (fn node => pair node o update_binding (name, Proof node)) - | _ => raise SMTLIB_PARSE ("bad Z3 proof format", p)) - | SMTLIB.S [SMTLIB.Sym "let", SMTLIB.S bindings, p] => - with_bindings (map dest_binding bindings) (node_of p) cx - | SMTLIB.S (name :: parts) => - let - val (ps, p) = split_last parts - val r = rule_of name - in - cx - |> fold_map node_of ps - ||>> `(with_fresh_names (term_of p)) - ||>> next_id - |>> (fn ((prems, (t, ns)), id) => mk_node id r prems t ns) - end - | _ => raise SMTLIB_PARSE ("bad Z3 proof format", p)) - -fun dest_name (SMTLIB.Sym name) = name - | dest_name t = raise SMTLIB_PARSE ("bad name", t) - -fun dest_seq (SMTLIB.S ts) = ts - | dest_seq t = raise SMTLIB_PARSE ("bad Z3 proof format", t) - -fun parse' (SMTLIB.S (SMTLIB.Sym "set-logic" :: _) :: ts) cx = parse' ts cx - | parse' (SMTLIB.S [SMTLIB.Sym "declare-fun", n, tys, ty] :: ts) cx = - let - val name = dest_name n - val Ts = map (type_of cx) (dest_seq tys) - val T = type_of cx ty - in parse' ts (declare_fun name (Ts ---> T) cx) end - | parse' (SMTLIB.S [SMTLIB.Sym "proof", p] :: _) cx = node_of p cx - | parse' ts _ = raise SMTLIB_PARSE ("bad Z3 proof declarations", SMTLIB.S ts) - -fun parse_proof typs funs lines ctxt = - let - val ts = dest_seq (SMTLIB.parse lines) - val (node, cx) = parse' ts (empty_context ctxt typs funs) - in (node, ctxt_of cx) end - handle SMTLIB.PARSE (l, msg) => error ("parsing error at line " ^ string_of_int l ^ ": " ^ msg) - | SMTLIB_PARSE (msg, t) => error (msg ^ ": " ^ SMTLIB.str_of t) - - -(* handling of bound 
variables *) - -fun subst_of tyenv = - let fun add (ix, (S, T)) = cons (TVar (ix, S), T) - in Vartab.fold add tyenv [] end - -fun substTs_same subst = - let val applyT = Same.function (AList.lookup (op =) subst) - in Term_Subst.map_atypsT_same applyT end - -fun subst_types ctxt env bounds t = - let - val match = Sign.typ_match (Proof_Context.theory_of ctxt) - - val t' = singleton (Variable.polymorphic ctxt) t - val patTs = map snd (Term.strip_qnt_vars @{const_name Pure.all} t') - val objTs = map (the o Symtab.lookup env) bounds - val subst = subst_of (fold match (patTs ~~ objTs) Vartab.empty) - in Same.commit (Term_Subst.map_types_same (substTs_same subst)) t' end - -fun eq_quant (@{const_name HOL.All}, _) (@{const_name HOL.All}, _) = true - | eq_quant (@{const_name HOL.Ex}, _) (@{const_name HOL.Ex}, _) = true - | eq_quant _ _ = false - -fun opp_quant (@{const_name HOL.All}, _) (@{const_name HOL.Ex}, _) = true - | opp_quant (@{const_name HOL.Ex}, _) (@{const_name HOL.All}, _) = true - | opp_quant _ _ = false - -fun with_quant pred i (Const q1 $ Abs (_, T1, t1), Const q2 $ Abs (_, T2, t2)) = - if pred q1 q2 andalso T1 = T2 then - let val t = Var (("", i), T1) - in SOME (apply2 Term.subst_bound ((t, t1), (t, t2))) end - else NONE - | with_quant _ _ _ = NONE - -fun dest_quant_pair i (@{term HOL.Not} $ t1, t2) = - Option.map (apfst HOLogic.mk_not) (with_quant opp_quant i (t1, t2)) - | dest_quant_pair i (t1, t2) = with_quant eq_quant i (t1, t2) - -fun dest_quant i t = - (case dest_quant_pair i (HOLogic.dest_eq (HOLogic.dest_Trueprop t)) of - SOME (t1, t2) => HOLogic.mk_Trueprop (HOLogic.mk_eq (t1, t2)) - | NONE => raise TERM ("lift_quant", [t])) - -fun match_types ctxt pat obj = - (Vartab.empty, Vartab.empty) - |> Pattern.first_order_match (Proof_Context.theory_of ctxt) (pat, obj) - -fun strip_match ctxt pat i obj = - (case try (match_types ctxt pat) obj of - SOME (tyenv, _) => subst_of tyenv - | NONE => strip_match ctxt pat (i + 1) (dest_quant i obj)) - -fun dest_all i (Const (@{const_name Pure.all}, _) $ (a as Abs (_, T, _))) = - dest_all (i + 1) (Term.betapply (a, Var (("", i), T))) - | dest_all i t = (i, t) - -fun dest_alls t = dest_all (Term.maxidx_of_term t + 1) t - -fun match_rule ctxt env (Z3_Node {bounds = bs', concl = t', ...}) bs t = - let - val t'' = singleton (Variable.polymorphic ctxt) t' - val (i, obj) = dest_alls (subst_types ctxt env bs t) - in - (case try (strip_match ctxt (snd (dest_alls t'')) i) obj of - NONE => NONE - | SOME subst => - let - val applyT = Same.commit (substTs_same subst) - val patTs = map snd (Term.strip_qnt_vars @{const_name Pure.all} t'') - in SOME (Symtab.make (bs' ~~ map applyT patTs)) end) - end - - -(* linearizing proofs and resolving types of bound variables *) - -fun has_step (tab, _) = Inttab.defined tab - -fun add_step id rule bounds concl is_fix_step ids (tab, sts) = - let val step = mk_step id rule ids concl bounds is_fix_step - in (id, (Inttab.update (id, ()) tab, step :: sts)) end - -fun is_fix_rule rule prems = - member (op =) [Quant_Intro, Nnf_Pos, Nnf_Neg] rule andalso length prems = 1 - -fun lin_proof ctxt env (Z3_Node {id, rule, prems, concl, bounds}) steps = - if has_step steps id then (id, steps) - else - let - val t = subst_types ctxt env bounds concl - val add = add_step id rule bounds t - fun rec_apply e b = fold_map (lin_proof ctxt e) prems #-> add b - in - if is_fix_rule rule prems then - (case match_rule ctxt env (hd prems) bounds t of - NONE => rec_apply env false steps - | SOME env' => rec_apply env' true steps) - else rec_apply 
env false steps - end - -fun linearize ctxt node = - rev (snd (snd (lin_proof ctxt Symtab.empty node (Inttab.empty, [])))) - - -(* overall proof parser *) - -fun parse typs funs lines ctxt = - let val (node, ctxt') = parse_proof typs funs lines ctxt - in (linearize ctxt' node, ctxt') end - -end; diff --git a/src/test-gen/SMT/z3_real.ML b/src/test-gen/SMT/z3_real.ML deleted file mode 100644 index 15ef469..0000000 --- a/src/test-gen/SMT/z3_real.ML +++ /dev/null @@ -1,32 +0,0 @@ -(* Title: HOL/Tools/SMT/z3_real.ML - Author: Sascha Boehme, TU Muenchen - -Z3 setup for reals. -*) - -structure Z3_Real: sig end = -struct - -fun real_type_parser (SMTLIB.Sym "Real", []) = SOME @{typ Real.real} - | real_type_parser _ = NONE - -fun real_term_parser (SMTLIB.Dec (i, 0), []) = SOME (HOLogic.mk_number @{typ Real.real} i) - | real_term_parser (SMTLIB.Sym "/", [t1, t2]) = - SOME (@{term "Rings.divide :: real => _"} $ t1 $ t2) - | real_term_parser (SMTLIB.Sym "to_real", [t]) = SOME (@{term "Int.of_int :: int => _"} $ t) - | real_term_parser _ = NONE - -fun abstract abs t = - (case t of - (c as @{term "Rings.divide :: real => _"}) $ t1 $ t2 => - abs t1 ##>> abs t2 #>> (fn (u1, u2) => SOME (c $ u1 $ u2)) - | (c as @{term "Int.of_int :: int => _"}) $ t => - abs t #>> (fn u => SOME (c $ u)) - | _ => pair NONE) - -val _ = Theory.setup (Context.theory_map ( - SMTLIB_Proof.add_type_parser real_type_parser #> - SMTLIB_Proof.add_term_parser real_term_parser #> - Z3_Replay_Methods.add_arith_abstracter abstract)) - -end; diff --git a/src/test-gen/SMT/z3_replay.ML b/src/test-gen/SMT/z3_replay.ML deleted file mode 100644 index b9ecce8..0000000 --- a/src/test-gen/SMT/z3_replay.ML +++ /dev/null @@ -1,262 +0,0 @@ -(* Title: HOL/Tools/SMT/z3_replay.ML - Author: Sascha Boehme, TU Muenchen - Author: Jasmin Blanchette, TU Muenchen - -Z3 proof parsing and replay. 
-*) - -signature Z3_REPLAY = -sig - val parse_proof: Proof.context -> SMT_Translate.replay_data -> - ((string * ATP_Problem_Generate.stature) * thm) list -> term list -> term -> string list -> - SMT_Solver.parsed_proof - val replay: Proof.context -> SMT_Translate.replay_data -> string list -> thm -end; - -structure Z3_Replay: Z3_REPLAY = -struct - -fun params_of t = Term.strip_qnt_vars @{const_name Pure.all} t - -fun varify ctxt thm = - let - val maxidx = Thm.maxidx_of thm + 1 - val vs = params_of (Thm.prop_of thm) - val vars = map_index (fn (i, (n, T)) => Var ((n, i + maxidx), T)) vs - in Drule.forall_elim_list (map (Thm.cterm_of ctxt) vars) thm end - -fun add_paramTs names t = - fold2 (fn n => fn (_, T) => AList.update (op =) (n, T)) names (params_of t) - -fun new_fixes ctxt nTs = - let - val (ns, ctxt') = Variable.variant_fixes (replicate (length nTs) "") ctxt - fun mk (n, T) n' = (n, Thm.cterm_of ctxt' (Free (n', T))) - in (ctxt', Symtab.make (map2 mk nTs ns)) end - -fun forall_elim_term ct (Const (@{const_name Pure.all}, _) $ (a as Abs _)) = - Term.betapply (a, Thm.term_of ct) - | forall_elim_term _ qt = raise TERM ("forall_elim'", [qt]) - -fun apply_fixes elim env = fold (elim o the o Symtab.lookup env) - -val apply_fixes_prem = uncurry o apply_fixes Thm.forall_elim -val apply_fixes_concl = apply_fixes forall_elim_term - -fun export_fixes env names = Drule.forall_intr_list (map (the o Symtab.lookup env) names) - -fun under_fixes f ctxt (prems, nthms) names concl = - let - val thms1 = map (varify ctxt) prems - val (ctxt', env) = - add_paramTs names concl [] - |> fold (uncurry add_paramTs o apsnd Thm.prop_of) nthms - |> new_fixes ctxt - val thms2 = map (apply_fixes_prem env) nthms - val t = apply_fixes_concl env names concl - in export_fixes env names (f ctxt' (thms1 @ thms2) t) end - -fun replay_thm ctxt assumed nthms (Z3_Proof.Z3_Step {id, rule, concl, fixes, is_fix_step, ...}) = - if Z3_Proof.is_assumption rule then - (case Inttab.lookup assumed id of - SOME (_, thm) => thm - | NONE => Thm.assume (Thm.cterm_of ctxt concl)) - else - under_fixes (Z3_Replay_Methods.method_for rule) ctxt - (if is_fix_step then (map snd nthms, []) else ([], nthms)) fixes concl - -fun replay_step ctxt assumed (step as Z3_Proof.Z3_Step {id, rule, prems, fixes, ...}) state = - let - val (proofs, stats) = state - val nthms = map (the o Inttab.lookup proofs) prems - val replay = Timing.timing (replay_thm ctxt assumed nthms) - val ({elapsed, ...}, thm) = - SMT_Config.with_time_limit ctxt SMT_Config.reconstruction_step_timeout replay step - handle Timeout.TIMEOUT _ => raise SMT_Failure.SMT SMT_Failure.Time_Out - val stats' = Symtab.cons_list (Z3_Proof.string_of_rule rule, Time.toMilliseconds elapsed) stats - in (Inttab.update (id, (fixes, thm)) proofs, stats') end - -local - val remove_trigger = mk_meta_eq @{thm trigger_def} - val remove_fun_app = mk_meta_eq @{thm fun_app_def} - - fun rewrite_conv _ [] = Conv.all_conv - | rewrite_conv ctxt eqs = Simplifier.full_rewrite (empty_simpset ctxt addsimps eqs) - - val rewrite_true_rule = @{lemma "True == ~ False" by simp} - val prep_rules = [@{thm Let_def}, remove_trigger, remove_fun_app, rewrite_true_rule] - - fun rewrite _ [] = I - | rewrite ctxt eqs = Conv.fconv_rule (rewrite_conv ctxt eqs) - - fun lookup_assm assms_net ct = - Z3_Replay_Util.net_instances assms_net ct - |> map (fn ithm as (_, thm) => (ithm, Thm.cprop_of thm aconvc ct)) -in - -fun add_asserted outer_ctxt rewrite_rules assms steps ctxt = - let - val eqs = map (rewrite ctxt [rewrite_true_rule]) 
rewrite_rules - val eqs' = union Thm.eq_thm eqs prep_rules - - val assms_net = - assms - |> map (apsnd (rewrite ctxt eqs')) - |> map (apsnd (Conv.fconv_rule Thm.eta_conversion)) - |> Z3_Replay_Util.thm_net_of snd - - fun revert_conv ctxt = rewrite_conv ctxt eqs' then_conv Thm.eta_conversion - - fun assume thm ctxt = - let - val ct = Thm.cprem_of thm 1 - val (thm', ctxt') = yield_singleton Assumption.add_assumes ct ctxt - in (thm' RS thm, ctxt') end - - fun add1 id fixes thm1 ((i, th), exact) ((iidths, thms), (ctxt, ptab)) = - let - val (thm, ctxt') = if exact then (Thm.implies_elim thm1 th, ctxt) else assume thm1 ctxt - val thms' = if exact then thms else th :: thms - in (((i, (id, th)) :: iidths, thms'), (ctxt', Inttab.update (id, (fixes, thm)) ptab)) end - - fun add (Z3_Proof.Z3_Step {id, rule, concl, fixes, ...}) - (cx as ((iidths, thms), (ctxt, ptab))) = - if Z3_Proof.is_assumption rule andalso rule <> Z3_Proof.Hypothesis then - let - val ct = Thm.cterm_of ctxt concl - val thm1 = Thm.trivial ct |> Conv.fconv_rule (Conv.arg1_conv (revert_conv outer_ctxt)) - val thm2 = singleton (Variable.export ctxt outer_ctxt) thm1 - in - (case lookup_assm assms_net (Thm.cprem_of thm2 1) of - [] => - let val (thm, ctxt') = assume thm1 ctxt - in ((iidths, thms), (ctxt', Inttab.update (id, (fixes, thm)) ptab)) end - | ithms => fold (add1 id fixes thm1) ithms cx) - end - else - cx - in fold add steps (([], []), (ctxt, Inttab.empty)) end - -end - -(* |- (EX x. P x) = P c |- ~ (ALL x. P x) = ~ P c *) -local - val sk_rules = @{lemma - "c = (SOME x. P x) ==> (EX x. P x) = P c" - "c = (SOME x. ~ P x) ==> (~ (ALL x. P x)) = (~ P c)" - by (metis someI_ex)+} -in - -fun discharge_sk_tac ctxt i st = - (resolve_tac ctxt @{thms trans} i - THEN resolve_tac ctxt sk_rules i - THEN (resolve_tac ctxt @{thms refl} ORELSE' discharge_sk_tac ctxt) (i+1) - THEN resolve_tac ctxt @{thms refl} i) st - -end - -val true_thm = @{lemma "~False" by simp} -fun make_discharge_rules rules = rules @ [@{thm allI}, @{thm refl}, @{thm reflexive}, true_thm] - -val intro_def_rules = @{lemma - "(~ P | P) & (P | ~ P)" - "(P | ~ P) & (~ P | P)" - by fast+} - -fun discharge_assms_tac ctxt rules = - REPEAT - (HEADGOAL (resolve_tac ctxt (intro_def_rules @ rules) ORELSE' - SOLVED' (discharge_sk_tac ctxt))) - -fun discharge_assms ctxt rules thm = - (if Thm.nprems_of thm = 0 then - thm - else - (case Seq.pull (discharge_assms_tac ctxt rules thm) of - SOME (thm', _) => thm' - | NONE => raise THM ("failed to discharge premise", 1, [thm]))) - |> Goal.norm_result ctxt - -fun discharge rules outer_ctxt inner_ctxt = - singleton (Proof_Context.export inner_ctxt outer_ctxt) - #> discharge_assms outer_ctxt (make_discharge_rules rules) - -fun parse_proof outer_ctxt - ({context = ctxt, typs, terms, ll_defs, rewrite_rules, assms} : SMT_Translate.replay_data) - xfacts prems concl output = - let - val (steps, ctxt2) = Z3_Proof.parse typs terms output ctxt - val ((iidths, _), _) = add_asserted outer_ctxt rewrite_rules assms steps ctxt2 - - fun id_of_index i = the_default ~1 (Option.map fst (AList.lookup (op =) iidths i)) - - val conjecture_i = 0 - val prems_i = 1 - val facts_i = prems_i + length prems - - val conjecture_id = id_of_index conjecture_i - val prem_ids = map id_of_index (prems_i upto facts_i - 1) - val fact_ids' = - map_filter (fn (i, (id, _)) => try (apsnd (nth xfacts)) (id, i - facts_i)) iidths - val helper_ids' = map_filter (try (fn (~1, idth) => idth)) iidths - - val fact_helper_ts = - map (fn (_, th) => (ATP_Util.short_thm_name ctxt th, Thm.prop_of 
th)) helper_ids' @ - map (fn (_, ((s, _), th)) => (s, Thm.prop_of th)) fact_ids' - val fact_helper_ids' = - map (apsnd (ATP_Util.short_thm_name ctxt)) helper_ids' @ map (apsnd (fst o fst)) fact_ids' - in - {outcome = NONE, fact_ids = SOME fact_ids', - atp_proof = fn () => Z3_Isar.atp_proof_of_z3_proof ctxt ll_defs rewrite_rules prems concl - fact_helper_ts prem_ids conjecture_id fact_helper_ids' steps} - end - -fun intermediate_statistics ctxt start total = - SMT_Config.statistics_msg ctxt (fn current => - "Reconstructed " ^ string_of_int current ^ " of " ^ string_of_int total ^ " steps in " ^ - string_of_int (Time.toMilliseconds (#elapsed (Timing.result start))) ^ " ms") - -fun pretty_statistics total stats = - let - fun mean_of is = - let - val len = length is - val mid = len div 2 - in if len mod 2 = 0 then (nth is (mid - 1) + nth is mid) div 2 else nth is mid end - fun pretty_item name p = Pretty.item (Pretty.separate ":" [Pretty.str name, p]) - fun pretty (name, milliseconds) = pretty_item name (Pretty.block (Pretty.separate "," [ - Pretty.str (string_of_int (length milliseconds) ^ " occurrences") , - Pretty.str (string_of_int (mean_of milliseconds) ^ " ms mean time"), - Pretty.str (string_of_int (fold Integer.max milliseconds 0) ^ " ms maximum time"), - Pretty.str (string_of_int (fold Integer.add milliseconds 0) ^ " ms total time")])) - in - Pretty.big_list "Z3 proof reconstruction statistics:" ( - pretty_item "total time" (Pretty.str (string_of_int total ^ " ms")) :: - map pretty (Symtab.dest stats)) - end - -fun replay outer_ctxt - ({context = ctxt, typs, terms, rewrite_rules, assms, ...} : SMT_Translate.replay_data) output = - let - val (steps, ctxt2) = Z3_Proof.parse typs terms output ctxt - val ((_, rules), (ctxt3, assumed)) = add_asserted outer_ctxt rewrite_rules assms steps ctxt2 - val ctxt4 = - ctxt3 - |> put_simpset (Z3_Replay_Util.make_simpset ctxt3 []) - |> Config.put SAT.solver (Config.get ctxt3 SMT_Config.sat_solver) - val len = length steps - val start = Timing.start () - val print_runtime_statistics = intermediate_statistics ctxt4 start len - fun blockwise f (i, x) y = - (if i > 0 andalso i mod 100 = 0 then print_runtime_statistics i else (); f x y) - val (proofs, stats) = - fold_index (blockwise (replay_step ctxt4 assumed)) steps (assumed, Symtab.empty) - val _ = print_runtime_statistics len - val total = Time.toMilliseconds (#elapsed (Timing.result start)) - val (_, Z3_Proof.Z3_Step {id, ...}) = split_last steps - val _ = SMT_Config.statistics_msg ctxt4 (Pretty.string_of o pretty_statistics total) stats - in - Inttab.lookup proofs id |> the |> snd |> discharge rules outer_ctxt ctxt4 - end - -end; diff --git a/src/test-gen/SMT/z3_replay_methods.ML b/src/test-gen/SMT/z3_replay_methods.ML deleted file mode 100644 index e4fbb73..0000000 --- a/src/test-gen/SMT/z3_replay_methods.ML +++ /dev/null @@ -1,685 +0,0 @@ -(* Title: HOL/Tools/SMT/z3_replay_methods.ML - Author: Sascha Boehme, TU Muenchen - Author: Jasmin Blanchette, TU Muenchen - -Proof methods for replaying Z3 proofs. 
-*) - -signature Z3_REPLAY_METHODS = -sig - (*abstraction*) - type abs_context = int * term Termtab.table - type 'a abstracter = term -> abs_context -> 'a * abs_context - val add_arith_abstracter: (term abstracter -> term option abstracter) -> - Context.generic -> Context.generic - - (*theory lemma methods*) - type th_lemma_method = Proof.context -> thm list -> term -> thm - val add_th_lemma_method: string * th_lemma_method -> Context.generic -> - Context.generic - - (*methods for Z3 proof rules*) - type z3_method = Proof.context -> thm list -> term -> thm - val true_axiom: z3_method - val mp: z3_method - val refl: z3_method - val symm: z3_method - val trans: z3_method - val cong: z3_method - val quant_intro: z3_method - val distrib: z3_method - val and_elim: z3_method - val not_or_elim: z3_method - val rewrite: z3_method - val rewrite_star: z3_method - val pull_quant: z3_method - val push_quant: z3_method - val elim_unused: z3_method - val dest_eq_res: z3_method - val quant_inst: z3_method - val lemma: z3_method - val unit_res: z3_method - val iff_true: z3_method - val iff_false: z3_method - val comm: z3_method - val def_axiom: z3_method - val apply_def: z3_method - val iff_oeq: z3_method - val nnf_pos: z3_method - val nnf_neg: z3_method - val mp_oeq: z3_method - val th_lemma: string -> z3_method - val method_for: Z3_Proof.z3_rule -> z3_method -end; - -structure Z3_Replay_Methods: Z3_REPLAY_METHODS = -struct - -type z3_method = Proof.context -> thm list -> term -> thm - - -(* utility functions *) - -fun trace ctxt f = SMT_Config.trace_msg ctxt f () - -fun pretty_thm ctxt thm = Syntax.pretty_term ctxt (Thm.concl_of thm) - -fun pretty_goal ctxt msg rule thms t = - let - val full_msg = msg ^ ": " ^ quote (Z3_Proof.string_of_rule rule) - val assms = - if null thms then [] - else [Pretty.big_list "assumptions:" (map (pretty_thm ctxt) thms)] - val concl = Pretty.big_list "proposition:" [Syntax.pretty_term ctxt t] - in Pretty.big_list full_msg (assms @ [concl]) end - -fun replay_error ctxt msg rule thms t = error (Pretty.string_of (pretty_goal ctxt msg rule thms t)) - -fun replay_rule_error ctxt = replay_error ctxt "Failed to replay Z3 proof step" - -fun trace_goal ctxt rule thms t = - trace ctxt (fn () => Pretty.string_of (pretty_goal ctxt "Goal" rule thms t)) - -fun as_prop (t as Const (@{const_name Trueprop}, _) $ _) = t - | as_prop t = HOLogic.mk_Trueprop t - -fun dest_prop (Const (@{const_name Trueprop}, _) $ t) = t - | dest_prop t = t - -fun dest_thm thm = dest_prop (Thm.concl_of thm) - -fun certify_prop ctxt t = Thm.cterm_of ctxt (as_prop t) - -fun try_provers ctxt rule [] thms t = replay_rule_error ctxt rule thms t - | try_provers ctxt rule ((name, prover) :: named_provers) thms t = - (case (trace ctxt (K ("Trying prover " ^ quote name)); try prover t) of - SOME thm => thm - | NONE => try_provers ctxt rule named_provers thms t) - -fun match ctxt pat t = - (Vartab.empty, Vartab.empty) - |> Pattern.first_order_match (Proof_Context.theory_of ctxt) (pat, t) - -fun gen_certify_inst sel cert ctxt thm t = - let - val inst = match ctxt (dest_thm thm) (dest_prop t) - fun cert_inst (ix, (a, b)) = ((ix, a), cert b) - in Vartab.fold (cons o cert_inst) (sel inst) [] end - -fun match_instantiateT ctxt t thm = - if Term.exists_type (Term.exists_subtype Term.is_TVar) (dest_thm thm) then - Thm.instantiate (gen_certify_inst fst (Thm.ctyp_of ctxt) ctxt thm t, []) thm - else thm - -fun match_instantiate ctxt t thm = - let val thm' = match_instantiateT ctxt t thm in - Thm.instantiate ([], gen_certify_inst 
snd (Thm.cterm_of ctxt) ctxt thm' t) thm' - end - -fun apply_rule ctxt t = - (case Z3_Replay_Rules.apply ctxt (certify_prop ctxt t) of - SOME thm => thm - | NONE => raise Fail "apply_rule") - -fun discharge _ [] thm = thm - | discharge i (rule :: rules) thm = discharge (i + Thm.nprems_of rule) rules (rule RSN (i, thm)) - -fun by_tac ctxt thms ns ts t tac = - Goal.prove ctxt [] (map as_prop ts) (as_prop t) - (fn {context, prems} => HEADGOAL (tac context prems)) - |> Drule.generalize ([], ns) - |> discharge 1 thms - -fun prove ctxt t tac = by_tac ctxt [] [] [] t (K o tac) - -fun prop_tac ctxt prems = - Method.insert_tac ctxt prems - THEN' SUBGOAL (fn (prop, i) => - if Term.size_of_term prop > 100 then SAT.satx_tac ctxt i - else (Classical.fast_tac ctxt ORELSE' Clasimp.force_tac ctxt) i) - -fun quant_tac ctxt = Blast.blast_tac ctxt - - -(* plug-ins *) - -type abs_context = int * term Termtab.table - -type 'a abstracter = term -> abs_context -> 'a * abs_context - -type th_lemma_method = Proof.context -> thm list -> term -> thm - -fun id_ord ((id1, _), (id2, _)) = int_ord (id1, id2) - -structure Plugins = Generic_Data -( - type T = - (int * (term abstracter -> term option abstracter)) list * - th_lemma_method Symtab.table - val empty = ([], Symtab.empty) - val extend = I - fun merge ((abss1, ths1), (abss2, ths2)) = ( - Ord_List.merge id_ord (abss1, abss2), - Symtab.merge (K true) (ths1, ths2)) -) - -fun add_arith_abstracter abs = Plugins.map (apfst (Ord_List.insert id_ord (serial (), abs))) -fun get_arith_abstracters ctxt = map snd (fst (Plugins.get (Context.Proof ctxt))) - -fun add_th_lemma_method method = Plugins.map (apsnd (Symtab.update_new method)) -fun get_th_lemma_method ctxt = snd (Plugins.get (Context.Proof ctxt)) - - -(* abstraction *) - -fun prove_abstract ctxt thms t tac f = - let - val ((prems, concl), (_, ts)) = f (1, Termtab.empty) - val ns = Termtab.fold (fn (_, v) => cons (fst (Term.dest_Free v))) ts [] - in - by_tac ctxt [] ns prems concl tac - |> match_instantiate ctxt t - |> discharge 1 thms - end - -fun prove_abstract' ctxt t tac f = - prove_abstract ctxt [] t tac (f #>> pair []) - -fun lookup_term (_, terms) t = Termtab.lookup terms t - -fun abstract_sub t f cx = - (case lookup_term cx t of - SOME v => (v, cx) - | NONE => f cx) - -fun mk_fresh_free t (i, terms) = - let val v = Free ("t" ^ string_of_int i, fastype_of t) - in (v, (i + 1, Termtab.update (t, v) terms)) end - -fun apply_abstracters _ [] _ cx = (NONE, cx) - | apply_abstracters abs (abstracter :: abstracters) t cx = - (case abstracter abs t cx of - (NONE, _) => apply_abstracters abs abstracters t cx - | x as (SOME _, _) => x) - -fun abstract_term (t as _ $ _) = abstract_sub t (mk_fresh_free t) - | abstract_term (t as Abs _) = abstract_sub t (mk_fresh_free t) - | abstract_term t = pair t - -fun abstract_bin abs f t t1 t2 = abstract_sub t (abs t1 ##>> abs t2 #>> f) - -fun abstract_ter abs f t t1 t2 t3 = - abstract_sub t (abs t1 ##>> abs t2 ##>> abs t3 #>> (Scan.triple1 #> f)) - -fun abstract_lit (@{const HOL.Not} $ t) = abstract_term t #>> HOLogic.mk_not - | abstract_lit t = abstract_term t - -fun abstract_not abs (t as @{const HOL.Not} $ t1) = - abstract_sub t (abs t1 #>> HOLogic.mk_not) - | abstract_not _ t = abstract_lit t - -fun abstract_conj (t as @{const HOL.conj} $ t1 $ t2) = - abstract_bin abstract_conj HOLogic.mk_conj t t1 t2 - | abstract_conj t = abstract_lit t - -fun abstract_disj (t as @{const HOL.disj} $ t1 $ t2) = - abstract_bin abstract_disj HOLogic.mk_disj t t1 t2 - | abstract_disj t = abstract_lit 
t - -fun abstract_prop (t as (c as @{const If (bool)}) $ t1 $ t2 $ t3) = - abstract_ter abstract_prop (fn (t1, t2, t3) => c $ t1 $ t2 $ t3) t t1 t2 t3 - | abstract_prop (t as @{const HOL.disj} $ t1 $ t2) = - abstract_bin abstract_prop HOLogic.mk_disj t t1 t2 - | abstract_prop (t as @{const HOL.conj} $ t1 $ t2) = - abstract_bin abstract_prop HOLogic.mk_conj t t1 t2 - | abstract_prop (t as @{const HOL.implies} $ t1 $ t2) = - abstract_bin abstract_prop HOLogic.mk_imp t t1 t2 - | abstract_prop (t as @{term "HOL.eq :: bool => _"} $ t1 $ t2) = - abstract_bin abstract_prop HOLogic.mk_eq t t1 t2 - | abstract_prop t = abstract_not abstract_prop t - -fun abstract_arith ctxt u = - let - fun abs (t as (c as Const _) $ Abs (s, T, t')) = - abstract_sub t (abs t' #>> (fn u' => c $ Abs (s, T, u'))) - | abs (t as (c as Const (@{const_name If}, _)) $ t1 $ t2 $ t3) = - abstract_ter abs (fn (t1, t2, t3) => c $ t1 $ t2 $ t3) t t1 t2 t3 - | abs (t as @{const HOL.Not} $ t1) = abstract_sub t (abs t1 #>> HOLogic.mk_not) - | abs (t as @{const HOL.disj} $ t1 $ t2) = - abstract_sub t (abs t1 ##>> abs t2 #>> HOLogic.mk_disj) - | abs (t as (c as Const (@{const_name uminus_class.uminus}, _)) $ t1) = - abstract_sub t (abs t1 #>> (fn u => c $ u)) - | abs (t as (c as Const (@{const_name plus_class.plus}, _)) $ t1 $ t2) = - abstract_sub t (abs t1 ##>> abs t2 #>> (fn (u1, u2) => c $ u1 $ u2)) - | abs (t as (c as Const (@{const_name minus_class.minus}, _)) $ t1 $ t2) = - abstract_sub t (abs t1 ##>> abs t2 #>> (fn (u1, u2) => c $ u1 $ u2)) - | abs (t as (c as Const (@{const_name times_class.times}, _)) $ t1 $ t2) = - abstract_sub t (abs t1 ##>> abs t2 #>> (fn (u1, u2) => c $ u1 $ u2)) - | abs (t as (c as Const (@{const_name z3div}, _)) $ t1 $ t2) = - abstract_sub t (abs t1 ##>> abs t2 #>> (fn (u1, u2) => c $ u1 $ u2)) - | abs (t as (c as Const (@{const_name z3mod}, _)) $ t1 $ t2) = - abstract_sub t (abs t1 ##>> abs t2 #>> (fn (u1, u2) => c $ u1 $ u2)) - | abs (t as (c as Const (@{const_name HOL.eq}, _)) $ t1 $ t2) = - abstract_sub t (abs t1 ##>> abs t2 #>> (fn (u1, u2) => c $ u1 $ u2)) - | abs (t as (c as Const (@{const_name ord_class.less}, _)) $ t1 $ t2) = - abstract_sub t (abs t1 ##>> abs t2 #>> (fn (u1, u2) => c $ u1 $ u2)) - | abs (t as (c as Const (@{const_name ord_class.less_eq}, _)) $ t1 $ t2) = - abstract_sub t (abs t1 ##>> abs t2 #>> (fn (u1, u2) => c $ u1 $ u2)) - | abs t = abstract_sub t (fn cx => - if can HOLogic.dest_number t then (t, cx) - else - (case apply_abstracters abs (get_arith_abstracters ctxt) t cx of - (SOME u, cx') => (u, cx') - | (NONE, _) => abstract_term t cx)) - in abs u end - - -(* truth axiom *) - -fun true_axiom _ _ _ = @{thm TrueI} - - -(* modus ponens *) - -fun mp _ [p, p_eq_q] _ = discharge 1 [p_eq_q, p] iffD1 - | mp ctxt thms t = replay_rule_error ctxt Z3_Proof.Modus_Ponens thms t - -val mp_oeq = mp - - -(* reflexivity *) - -fun refl ctxt _ t = match_instantiate ctxt t @{thm refl} - - -(* symmetry *) - -fun symm _ [thm] _ = thm RS @{thm sym} - | symm ctxt thms t = replay_rule_error ctxt Z3_Proof.Reflexivity thms t - - -(* transitivity *) - -fun trans _ [thm1, thm2] _ = thm1 RSN (1, thm2 RSN (2, @{thm trans})) - | trans ctxt thms t = replay_rule_error ctxt Z3_Proof.Transitivity thms t - - -(* congruence *) - -fun ctac ctxt prems i st = st |> ( - resolve_tac ctxt (@{thm refl} :: prems) i - ORELSE (cong_tac ctxt i THEN ctac ctxt prems (i + 1) THEN ctac ctxt prems i)) - -fun cong_basic ctxt thms t = - let val st = Thm.trivial (certify_prop ctxt t) - in - (case Seq.pull (ctac ctxt thms 1 st) of 
- SOME (thm, _) => thm - | NONE => raise THM ("cong", 0, thms @ [st])) - end - -val cong_dest_rules = @{lemma - "(~ P | Q) & (P | ~ Q) ==> P = Q" - "(P | ~ Q) & (~ P | Q) ==> P = Q" - by fast+} - -fun cong_full ctxt thms t = prove ctxt t (fn ctxt' => - Method.insert_tac ctxt thms - THEN' (Classical.fast_tac ctxt' - ORELSE' dresolve_tac ctxt cong_dest_rules - THEN' Classical.fast_tac ctxt')) - -fun cong ctxt thms = try_provers ctxt Z3_Proof.Monotonicity [ - ("basic", cong_basic ctxt thms), - ("full", cong_full ctxt thms)] thms - - -(* quantifier introduction *) - -val quant_intro_rules = @{lemma - "(!!x. P x = Q x) ==> (ALL x. P x) = (ALL x. Q x)" - "(!!x. P x = Q x) ==> (EX x. P x) = (EX x. Q x)" - "(!!x. (~ P x) = Q x) ==> (~ (EX x. P x)) = (ALL x. Q x)" - "(!!x. (~ P x) = Q x) ==> (~ (ALL x. P x)) = (EX x. Q x)" - by fast+} - -fun quant_intro ctxt [thm] t = - prove ctxt t (K (REPEAT_ALL_NEW (resolve_tac ctxt (thm :: quant_intro_rules)))) - | quant_intro ctxt thms t = replay_rule_error ctxt Z3_Proof.Quant_Intro thms t - - -(* distributivity of conjunctions and disjunctions *) - -(* TODO: there are no tests with this proof rule *) -fun distrib ctxt _ t = - prove_abstract' ctxt t prop_tac (abstract_prop (dest_prop t)) - - -(* elimination of conjunctions *) - -fun and_elim ctxt [thm] t = - prove_abstract ctxt [thm] t prop_tac ( - abstract_lit (dest_prop t) ##>> - abstract_conj (dest_thm thm) #>> - apfst single o swap) - | and_elim ctxt thms t = replay_rule_error ctxt Z3_Proof.And_Elim thms t - - -(* elimination of negated disjunctions *) - -fun not_or_elim ctxt [thm] t = - prove_abstract ctxt [thm] t prop_tac ( - abstract_lit (dest_prop t) ##>> - abstract_not abstract_disj (dest_thm thm) #>> - apfst single o swap) - | not_or_elim ctxt thms t = - replay_rule_error ctxt Z3_Proof.Not_Or_Elim thms t - - -(* rewriting *) - -local - -fun dest_all (Const (@{const_name HOL.All}, _) $ Abs (_, T, t)) nctxt = - let - val (n, nctxt') = Name.variant "" nctxt - val f = Free (n, T) - val t' = Term.subst_bound (f, t) - in dest_all t' nctxt' |>> cons f end - | dest_all t _ = ([], t) - -fun dest_alls t = - let - val nctxt = Name.make_context (Term.add_free_names t []) - val (lhs, rhs) = HOLogic.dest_eq (dest_prop t) - val (ls, lhs') = dest_all lhs nctxt - val (rs, rhs') = dest_all rhs nctxt - in - if eq_list (op aconv) (ls, rs) then SOME (ls, (HOLogic.mk_eq (lhs', rhs'))) - else NONE - end - -fun forall_intr ctxt t thm = - let val ct = Thm.cterm_of ctxt t - in Thm.forall_intr ct thm COMP_INCR @{thm iff_allI} end - -in - -fun focus_eq f ctxt t = - (case dest_alls t of - NONE => f ctxt t - | SOME (vs, t') => fold (forall_intr ctxt) vs (f ctxt t')) - -end - -fun abstract_eq f (Const (@{const_name HOL.eq}, _) $ t1 $ t2) = - f t1 ##>> f t2 #>> HOLogic.mk_eq - | abstract_eq _ t = abstract_term t - -fun prove_prop_rewrite ctxt t = - prove_abstract' ctxt t prop_tac ( - abstract_eq abstract_prop (dest_prop t)) - -fun arith_rewrite_tac ctxt _ = - TRY o Simplifier.simp_tac ctxt - THEN_ALL_NEW (Arith_Data.arith_tac ctxt ORELSE' Clasimp.force_tac ctxt) - -fun prove_arith_rewrite ctxt t = - prove_abstract' ctxt t arith_rewrite_tac ( - abstract_eq (abstract_arith ctxt) (dest_prop t)) - -val lift_ite_thm = @{thm HOL.if_distrib} RS @{thm eq_reflection} - -fun ternary_conv cv = Conv.combination_conv (Conv.binop_conv cv) cv - -fun if_context_conv ctxt ct = - (case Thm.term_of ct of - Const (@{const_name HOL.If}, _) $ _ $ _ $ _ => - ternary_conv (if_context_conv ctxt) - | _ $ (Const (@{const_name HOL.If}, _) $ _ $ _ $ _) => - 
Conv.rewr_conv lift_ite_thm then_conv ternary_conv (if_context_conv ctxt) - | _ => Conv.sub_conv (Conv.top_sweep_conv if_context_conv) ctxt) ct - -fun lift_ite_rewrite ctxt t = - prove ctxt t (fn ctxt => - CONVERSION (HOLogic.Trueprop_conv (Conv.binop_conv (if_context_conv ctxt))) - THEN' resolve_tac ctxt @{thms refl}) - -fun prove_conj_disj_perm ctxt t = prove ctxt t Conj_Disj_Perm.conj_disj_perm_tac - -fun rewrite ctxt _ = try_provers ctxt Z3_Proof.Rewrite [ - ("rules", apply_rule ctxt), - ("conj_disj_perm", prove_conj_disj_perm ctxt), - ("prop_rewrite", prove_prop_rewrite ctxt), - ("arith_rewrite", focus_eq prove_arith_rewrite ctxt), - ("if_rewrite", lift_ite_rewrite ctxt)] [] - -fun rewrite_star ctxt = rewrite ctxt - - -(* pulling quantifiers *) - -fun pull_quant ctxt _ t = prove ctxt t quant_tac - - -(* pushing quantifiers *) - -fun push_quant _ _ _ = raise Fail "unsupported" (* FIXME *) - - -(* elimination of unused bound variables *) - -val elim_all = @{lemma "P = Q ==> (ALL x. P) = Q" by fast} -val elim_ex = @{lemma "P = Q ==> (EX x. P) = Q" by fast} - -fun elim_unused_tac ctxt i st = ( - match_tac ctxt [@{thm refl}] - ORELSE' (match_tac ctxt [elim_all, elim_ex] THEN' elim_unused_tac ctxt) - ORELSE' ( - match_tac ctxt [@{thm iff_allI}, @{thm iff_exI}] - THEN' elim_unused_tac ctxt)) i st - -fun elim_unused ctxt _ t = prove ctxt t elim_unused_tac - - -(* destructive equality resolution *) - -fun dest_eq_res _ _ _ = raise Fail "dest_eq_res" (* FIXME *) - - -(* quantifier instantiation *) - -val quant_inst_rule = @{lemma "~P x | Q ==> ~(ALL x. P x) | Q" by fast} - -fun quant_inst ctxt _ t = prove ctxt t (fn _ => - REPEAT_ALL_NEW (resolve_tac ctxt [quant_inst_rule]) - THEN' resolve_tac ctxt @{thms excluded_middle}) - - -(* propositional lemma *) - -exception LEMMA of unit - -val intro_hyp_rule1 = @{lemma "(~P ==> Q) ==> P | Q" by fast} -val intro_hyp_rule2 = @{lemma "(P ==> Q) ==> ~P | Q" by fast} - -fun norm_lemma thm = - (thm COMP_INCR intro_hyp_rule1) - handle THM _ => thm COMP_INCR intro_hyp_rule2 - -fun negated_prop (@{const HOL.Not} $ t) = HOLogic.mk_Trueprop t - | negated_prop t = HOLogic.mk_Trueprop (HOLogic.mk_not t) - -fun intro_hyps tab (t as @{const HOL.disj} $ t1 $ t2) cx = - lookup_intro_hyps tab t (fold (intro_hyps tab) [t1, t2]) cx - | intro_hyps tab t cx = - lookup_intro_hyps tab t (fn _ => raise LEMMA ()) cx - -and lookup_intro_hyps tab t f (cx as (thm, terms)) = - (case Termtab.lookup tab (negated_prop t) of - NONE => f cx - | SOME hyp => (norm_lemma (Thm.implies_intr hyp thm), t :: terms)) - -fun lemma ctxt (thms as [thm]) t = - (let - val tab = Termtab.make (map (`Thm.term_of) (Thm.chyps_of thm)) - val (thm', terms) = intro_hyps tab (dest_prop t) (thm, []) - in - prove_abstract ctxt [thm'] t prop_tac ( - fold (snd oo abstract_lit) terms #> - abstract_disj (dest_thm thm') #>> single ##>> - abstract_disj (dest_prop t)) - end - handle LEMMA () => replay_error ctxt "Bad proof state" Z3_Proof.Lemma thms t) - | lemma ctxt thms t = replay_rule_error ctxt Z3_Proof.Lemma thms t - - -(* unit resolution *) - -fun abstract_unit (t as (@{const HOL.Not} $ (@{const HOL.disj} $ t1 $ t2))) = - abstract_sub t (abstract_unit t1 ##>> abstract_unit t2 #>> - HOLogic.mk_not o HOLogic.mk_disj) - | abstract_unit (t as (@{const HOL.disj} $ t1 $ t2)) = - abstract_sub t (abstract_unit t1 ##>> abstract_unit t2 #>> - HOLogic.mk_disj) - | abstract_unit t = abstract_lit t - -fun unit_res ctxt thms t = - prove_abstract ctxt thms t prop_tac ( - fold_map (abstract_unit o dest_thm) thms ##>> - 
abstract_unit (dest_prop t) #>> - (fn (prems, concl) => (prems, concl))) - - -(* iff-true *) - -val iff_true_rule = @{lemma "P ==> P = True" by fast} - -fun iff_true _ [thm] _ = thm RS iff_true_rule - | iff_true ctxt thms t = replay_rule_error ctxt Z3_Proof.Iff_True thms t - - -(* iff-false *) - -val iff_false_rule = @{lemma "~P ==> P = False" by fast} - -fun iff_false _ [thm] _ = thm RS iff_false_rule - | iff_false ctxt thms t = replay_rule_error ctxt Z3_Proof.Iff_False thms t - - -(* commutativity *) - -fun comm ctxt _ t = match_instantiate ctxt t @{thm eq_commute} - - -(* definitional axioms *) - -fun def_axiom_disj ctxt t = - (case dest_prop t of - @{const HOL.disj} $ u1 $ u2 => - prove_abstract' ctxt t prop_tac ( - abstract_prop u2 ##>> abstract_prop u1 #>> HOLogic.mk_disj o swap) - | u => prove_abstract' ctxt t prop_tac (abstract_prop u)) - -fun def_axiom ctxt _ = try_provers ctxt Z3_Proof.Def_Axiom [ - ("rules", apply_rule ctxt), - ("disj", def_axiom_disj ctxt)] [] - - -(* application of definitions *) - -fun apply_def _ [thm] _ = thm (* TODO: cover also the missing cases *) - | apply_def ctxt thms t = replay_rule_error ctxt Z3_Proof.Apply_Def thms t - - -(* iff-oeq *) - -fun iff_oeq _ _ _ = raise Fail "iff_oeq" (* FIXME *) - - -(* negation normal form *) - -fun nnf_prop ctxt thms t = - prove_abstract ctxt thms t prop_tac ( - fold_map (abstract_prop o dest_thm) thms ##>> - abstract_prop (dest_prop t)) - -fun nnf ctxt rule thms = try_provers ctxt rule [ - ("prop", nnf_prop ctxt thms), - ("quant", quant_intro ctxt [hd thms])] thms - -fun nnf_pos ctxt = nnf ctxt Z3_Proof.Nnf_Pos -fun nnf_neg ctxt = nnf ctxt Z3_Proof.Nnf_Neg - - -(* theory lemmas *) - -fun arith_th_lemma_tac ctxt prems = - Method.insert_tac ctxt prems - THEN' SELECT_GOAL (Local_Defs.unfold0_tac ctxt @{thms z3div_def z3mod_def}) - THEN' Arith_Data.arith_tac ctxt - -fun arith_th_lemma ctxt thms t = - prove_abstract ctxt thms t arith_th_lemma_tac ( - fold_map (abstract_arith ctxt o dest_thm) thms ##>> - abstract_arith ctxt (dest_prop t)) - -val _ = Theory.setup (Context.theory_map (add_th_lemma_method ("arith", arith_th_lemma))) - -fun th_lemma name ctxt thms = - (case Symtab.lookup (get_th_lemma_method ctxt) name of - SOME method => method ctxt thms - | NONE => replay_error ctxt "Bad theory" (Z3_Proof.Th_Lemma name) thms) - - -(* mapping of rules to methods *) - -fun unsupported rule ctxt = replay_error ctxt "Unsupported" rule -fun assumed rule ctxt = replay_error ctxt "Assumed" rule - -fun choose Z3_Proof.True_Axiom = true_axiom - | choose (r as Z3_Proof.Asserted) = assumed r - | choose (r as Z3_Proof.Goal) = assumed r - | choose Z3_Proof.Modus_Ponens = mp - | choose Z3_Proof.Reflexivity = refl - | choose Z3_Proof.Symmetry = symm - | choose Z3_Proof.Transitivity = trans - | choose (r as Z3_Proof.Transitivity_Star) = unsupported r - | choose Z3_Proof.Monotonicity = cong - | choose Z3_Proof.Quant_Intro = quant_intro - | choose Z3_Proof.Distributivity = distrib - | choose Z3_Proof.And_Elim = and_elim - | choose Z3_Proof.Not_Or_Elim = not_or_elim - | choose Z3_Proof.Rewrite = rewrite - | choose Z3_Proof.Rewrite_Star = rewrite_star - | choose Z3_Proof.Pull_Quant = pull_quant - | choose (r as Z3_Proof.Pull_Quant_Star) = unsupported r - | choose Z3_Proof.Push_Quant = push_quant - | choose Z3_Proof.Elim_Unused_Vars = elim_unused - | choose Z3_Proof.Dest_Eq_Res = dest_eq_res - | choose Z3_Proof.Quant_Inst = quant_inst - | choose (r as Z3_Proof.Hypothesis) = assumed r - | choose Z3_Proof.Lemma = lemma - | choose 
Z3_Proof.Unit_Resolution = unit_res - | choose Z3_Proof.Iff_True = iff_true - | choose Z3_Proof.Iff_False = iff_false - | choose Z3_Proof.Commutativity = comm - | choose Z3_Proof.Def_Axiom = def_axiom - | choose (r as Z3_Proof.Intro_Def) = assumed r - | choose Z3_Proof.Apply_Def = apply_def - | choose Z3_Proof.Iff_Oeq = iff_oeq - | choose Z3_Proof.Nnf_Pos = nnf_pos - | choose Z3_Proof.Nnf_Neg = nnf_neg - | choose (r as Z3_Proof.Nnf_Star) = unsupported r - | choose (r as Z3_Proof.Cnf_Star) = unsupported r - | choose (r as Z3_Proof.Skolemize) = assumed r - | choose Z3_Proof.Modus_Ponens_Oeq = mp_oeq - | choose (Z3_Proof.Th_Lemma name) = th_lemma name - -fun with_tracing rule method ctxt thms t = - let val _ = trace_goal ctxt rule thms t - in method ctxt thms t end - -fun method_for rule = with_tracing rule (choose rule) - -end; diff --git a/src/test-gen/SMT/z3_replay_rules.ML b/src/test-gen/SMT/z3_replay_rules.ML deleted file mode 100644 index 966100b..0000000 --- a/src/test-gen/SMT/z3_replay_rules.ML +++ /dev/null @@ -1,54 +0,0 @@ -(* Title: HOL/Tools/SMT/z3_replay_rules.ML - Author: Sascha Boehme, TU Muenchen - -Custom rules for Z3 proof replay. -*) - -signature Z3_REPLAY_RULES = -sig - val apply: Proof.context -> cterm -> thm option -end; - -structure Z3_Replay_Rules: Z3_REPLAY_RULES = -struct - -structure Data = Generic_Data -( - type T = thm Net.net - val empty = Net.empty - val extend = I - val merge = Net.merge Thm.eq_thm -) - -fun maybe_instantiate ct thm = - try Thm.first_order_match (Thm.cprop_of thm, ct) - |> Option.map (fn inst => Thm.instantiate inst thm) - -fun apply ctxt ct = - let - val net = Data.get (Context.Proof ctxt) - val xthms = Net.match_term net (Thm.term_of ct) - - fun select ct = map_filter (maybe_instantiate ct) xthms - fun select' ct = - let val thm = Thm.trivial ct - in map_filter (try (fn rule => rule COMP thm)) xthms end - - in try hd (case select ct of [] => select' ct | xthms' => xthms') end - -val prep = `Thm.prop_of - -fun ins thm net = Net.insert_term Thm.eq_thm (prep thm) net handle Net.INSERT => net -fun del thm net = Net.delete_term Thm.eq_thm (prep thm) net handle Net.DELETE => net - -val add = Thm.declaration_attribute (Data.map o ins) -val del = Thm.declaration_attribute (Data.map o del) - -val name = Binding.name "z3_rule" - -val description = "declaration of Z3 proof rules" - -val _ = Theory.setup (Attrib.setup name (Attrib.add_del add del) description #> - Global_Theory.add_thms_dynamic (name, Net.content o Data.get)) - -end; diff --git a/src/test-gen/SMT/z3_replay_util.ML b/src/test-gen/SMT/z3_replay_util.ML deleted file mode 100644 index 34419ec..0000000 --- a/src/test-gen/SMT/z3_replay_util.ML +++ /dev/null @@ -1,155 +0,0 @@ -(* Title: HOL/Tools/SMT/z3_replay_util.ML - Author: Sascha Boehme, TU Muenchen - -Helper functions required for Z3 proof replay. 
-*)
-
-signature Z3_REPLAY_UTIL =
-sig
-  (*theorem nets*)
-  val thm_net_of: ('a -> thm) -> 'a list -> 'a Net.net
-  val net_instances: (int * thm) Net.net -> cterm -> (int * thm) list
-
-  (*proof combinators*)
-  val under_assumption: (thm -> thm) -> cterm -> thm
-  val discharge: thm -> thm -> thm
-
-  (*a faster COMP*)
-  type compose_data = cterm list * (cterm -> cterm list) * thm
-  val precompose: (cterm -> cterm list) -> thm -> compose_data
-  val precompose2: (cterm -> cterm * cterm) -> thm -> compose_data
-  val compose: compose_data -> thm -> thm
-
-  (*simpset*)
-  val add_simproc: Simplifier.simproc -> Context.generic -> Context.generic
-  val make_simpset: Proof.context -> thm list -> simpset
-end;
-
-structure Z3_Replay_Util: Z3_REPLAY_UTIL =
-struct
-
-(* theorem nets *)
-
-fun thm_net_of f xthms =
-  let fun insert xthm = Net.insert_term (K false) (Thm.prop_of (f xthm), xthm)
-  in fold insert xthms Net.empty end
-
-fun maybe_instantiate ct thm =
-  try Thm.first_order_match (Thm.cprop_of thm, ct)
-  |> Option.map (fn inst => Thm.instantiate inst thm)
-
-local
-  fun instances_from_net match f net ct =
-    let
-      val lookup = if match then Net.match_term else Net.unify_term
-      val xthms = lookup net (Thm.term_of ct)
-      fun select ct = map_filter (f (maybe_instantiate ct)) xthms
-      fun select' ct =
-        let val thm = Thm.trivial ct
-        in map_filter (f (try (fn rule => rule COMP thm))) xthms end
-    in (case select ct of [] => select' ct | xthms' => xthms') end
-in
-
-fun net_instances net =
-  instances_from_net false (fn f => fn (i, thm) => Option.map (pair i) (f thm))
-    net
-
-end
-
-
-(* proof combinators *)
-
-fun under_assumption f ct =
-  let val ct' = SMT_Util.mk_cprop ct in Thm.implies_intr ct' (f (Thm.assume ct')) end
-
-fun discharge p pq = Thm.implies_elim pq p
-
-
-(* a faster COMP *)
-
-type compose_data = cterm list * (cterm -> cterm list) * thm
-
-fun list2 (x, y) = [x, y]
-
-fun precompose f rule : compose_data = (f (Thm.cprem_of rule 1), f, rule)
-fun precompose2 f rule : compose_data = precompose (list2 o f) rule
-
-fun compose (cvs, f, rule) thm =
-  discharge thm
-    (Thm.instantiate ([], map (dest_Var o Thm.term_of) cvs ~~ f (Thm.cprop_of thm)) rule)
-
-
-(* simpset *)
-
-local
-  val antisym_le1 = mk_meta_eq @{thm order_class.antisym_conv}
-  val antisym_le2 = mk_meta_eq @{thm linorder_class.antisym_conv2}
-  val antisym_less1 = mk_meta_eq @{thm linorder_class.antisym_conv1}
-  val antisym_less2 = mk_meta_eq @{thm linorder_class.antisym_conv3}
-
-  fun eq_prop t thm = HOLogic.mk_Trueprop t aconv Thm.prop_of thm
-  fun dest_binop ((c as Const _) $ t $ u) = (c, t, u)
-    | dest_binop t = raise TERM ("dest_binop", [t])
-
-  fun prove_antisym_le ctxt ct =
-    let
-      val (le, r, s) = dest_binop (Thm.term_of ct)
-      val less = Const (@{const_name less}, Term.fastype_of le)
-      val prems = Simplifier.prems_of ctxt
-    in
-      (case find_first (eq_prop (le $ s $ r)) prems of
-        NONE =>
-          find_first (eq_prop (HOLogic.mk_not (less $ r $ s))) prems
-          |> Option.map (fn thm => thm RS antisym_less1)
-      | SOME thm => SOME (thm RS antisym_le1))
-    end
-    handle THM _ => NONE
-
-  fun prove_antisym_less ctxt ct =
-    let
-      val (less, r, s) = dest_binop (HOLogic.dest_not (Thm.term_of ct))
-      val le = Const (@{const_name less_eq}, Term.fastype_of less)
-      val prems = Simplifier.prems_of ctxt
-    in
-      (case find_first (eq_prop (le $ r $ s)) prems of
-        NONE =>
-          find_first (eq_prop (HOLogic.mk_not (less $ s $ r))) prems
-          |> Option.map (fn thm => thm RS antisym_less2)
-      | SOME thm => SOME (thm RS antisym_le2))
-    end
-    handle THM _ => NONE
-
-  val basic_simpset =
-    simpset_of (put_simpset HOL_ss @{context}
-      addsimps @{thms field_simps times_divide_eq_right times_divide_eq_left arith_special
-        arith_simps rel_simps array_rules z3div_def z3mod_def NO_MATCH_def}
-      addsimprocs [@{simproc numeral_divmod},
-        Simplifier.make_simproc @{context} "fast_int_arith"
-         {lhss = [@{term "(m::int) < n"}, @{term "(m::int) \<le> n"}, @{term "(m::int) = n"}],
-          proc = K Lin_Arith.simproc},
-        Simplifier.make_simproc @{context} "antisym_le"
-         {lhss = [@{term "(x::'a::order) \<le> y"}],
-          proc = K prove_antisym_le},
-        Simplifier.make_simproc @{context} "antisym_less"
-         {lhss = [@{term "\<not> (x::'a::linorder) < y"}],
-          proc = K prove_antisym_less}])
-
-  structure Simpset = Generic_Data
-  (
-    type T = simpset
-    val empty = basic_simpset
-    val extend = I
-    val merge = Simplifier.merge_ss
-  )
-in
-
-fun add_simproc simproc context =
-  Simpset.map (simpset_map (Context.proof_of context)
-    (fn ctxt => ctxt addsimprocs [simproc])) context
-
-fun make_simpset ctxt rules =
-  simpset_of (put_simpset (Simpset.get (Context.Proof ctxt)) ctxt addsimps rules)
-
-end
-
-end;
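
For reference, the interface removed above (Z3_Replay_Util) is driven in two ways: simprocs are registered once in the generic context via add_simproc, and the replay function builds its working simpset via make_simpset before re-proving the individual Z3 steps. The Isabelle/ML fragment below is only an illustrative sketch of that usage, not code from this repository; the simproc name "my_dummy_simproc", its pattern term, and the algebra_simps rule list are placeholder choices.

(* Illustrative sketch, not part of the patch.  A no-op simproc is built the
   same way as the "antisym_le"/"antisym_less" simprocs above and registered
   in the replay simpset; make_simpset then extends that simpset with extra
   rewrite rules for one replay run. *)
local
  val my_dummy_simproc =
    Simplifier.make_simproc @{context} "my_dummy_simproc"
      {lhss = [@{term "(x::int) + 0"}],
       proc = (fn _ => fn _ => fn _ => NONE)}   (* never fires *)
in
  val _ = Theory.setup
    (Context.theory_map (Z3_Replay_Util.add_simproc my_dummy_simproc))
end;

val my_replay_ss = Z3_Replay_Util.make_simpset @{context} @{thms algebra_simps};

In the replay code shown earlier in this patch, the resulting simpset is installed with put_simpset (Z3_Replay_Util.make_simpset ctxt3 []) before the proof steps are reconstructed.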