diff --git a/.github/workflows/nightly_detect_failure.yml b/.github/workflows/nightly_detect_failure.yml index 50c1dbf53b50e..cfbeb6032e80e 100644 --- a/.github/workflows/nightly_detect_failure.yml +++ b/.github/workflows/nightly_detect_failure.yml @@ -40,8 +40,29 @@ jobs: toolchain=$(> $GITHUB_ENV git push origin refs/heads/nightly-testing:refs/heads/nightly-testing-$version else echo "Error: The file lean-toolchain does not contain the expected pattern." exit 1 fi + + # Next, we'll update the `nightly-with-mathlib` branch at Lean. + - name: Cleanup workspace + run: | + sudo rm -rf * + # Checkout the Lean repository on 'nightly-with-mathlib' + - name: Checkout Lean repository + uses: actions/checkout@v3 + with: + repository: leanprover/lean4 + token: ${{ secrets.LEAN_PR_TESTING }} + ref: nightly-with-mathlib + # Merge the relevant nightly. + - name: Fetch tags from 'lean4-nightly', and merge relevant nightly into 'nightly-with-mathlib' + run: | + git remote add nightly https://github.com/leanprover/lean4-nightly.git + git fetch nightly --tags + # Note: old jobs may run out of order, but it is safe to merge an older `nightly-YYYY-MM-DD`. + git merge nightly-${NIGHTLY} --strategy-option ours --allow-unrelated-histories || true + git push origin diff --git a/.github/workflows/nightly_merge_master.yml b/.github/workflows/nightly_merge_master.yml index 2c602be5fa3e5..c1f8c8a18e7ac 100644 --- a/.github/workflows/nightly_merge_master.yml +++ b/.github/workflows/nightly_merge_master.yml @@ -3,9 +3,8 @@ name: Merge master to nightly on: - push: - branches: - - master + schedule: + - cron: '30 */3 * * *' # 8AM CET/11PM PT jobs: merge-to-nightly: @@ -25,9 +24,11 @@ jobs: - name: Merge master to nightly favoring nightly changes run: | git checkout nightly-testing - # If the merge goes badly, we proceed anyway via '|| true' + # If the merge goes badly, we proceed anyway via '|| true'. # CI will report failures on the 'nightly-testing' branch direct to Zulip. git merge master --strategy-option ours --no-commit --allow-unrelated-histories || true git add . - git commit -m "Merge master into nightly-testing" + # If there's nothing to do (because there are no new commits from master), + # that's okay, hence the '|| true'. + git commit -m "Merge master into nightly-testing" || true git push origin nightly-testing diff --git a/.vscode/settings.json b/.vscode/settings.json index 405a433928c74..df28d07629dd6 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -7,5 +7,8 @@ "files.insertFinalNewline": true, "files.trimFinalNewlines": true, "files.trimTrailingWhitespace": true, - "githubPullRequests.ignoredPullRequestBranches": ["master"] + "githubPullRequests.ignoredPullRequestBranches": [ + "master" + ], + "git.ignoreLimitWarning": true } diff --git a/Archive/Imo/Imo1981Q3.lean b/Archive/Imo/Imo1981Q3.lean index b9859f454b6b7..90b29784e58c4 100644 --- a/Archive/Imo/Imo1981Q3.lean +++ b/Archive/Imo/Imo1981Q3.lean @@ -4,7 +4,7 @@ Released under Apache 2.0 license as described in the file LICENSE. 
Authors: Kevin Lacker -/ import Mathlib.Data.Int.Lemmas -import Mathlib.Data.Nat.Fib +import Mathlib.Data.Nat.Fib.Basic import Mathlib.Tactic.Linarith import Mathlib.Tactic.LinearCombination diff --git a/Archive/Wiedijk100Theorems/SumOfPrimeReciprocalsDiverges.lean b/Archive/Wiedijk100Theorems/SumOfPrimeReciprocalsDiverges.lean index 7aa806eee5b46..ac3bc23dc7090 100644 --- a/Archive/Wiedijk100Theorems/SumOfPrimeReciprocalsDiverges.lean +++ b/Archive/Wiedijk100Theorems/SumOfPrimeReciprocalsDiverges.lean @@ -161,7 +161,7 @@ theorem card_le_two_pow {x k : ℕ} : exact ⟨p.pred, (Nat.pred_lt (Nat.Prime.ne_zero hp.1)).trans_le ((hmp p) hp), Nat.succ_pred_eq_of_pos (Nat.Prime.pos hp.1)⟩ - · simp [Nat.prod_factors m.succ_ne_zero, m.succ_sub_one] + · simp [Nat.prod_factors m.succ_ne_zero, m.add_one_sub_one] -- The number of elements of `M x k` with `e + 1` squarefree is bounded by the number of subsets -- of `[1, k]`. calc @@ -191,7 +191,7 @@ theorem card_le_two_pow_mul_sqrt {x k : ℕ} : card (M x k) ≤ 2 ^ k * Nat.sqrt obtain ⟨a, b, hab₁, hab₂⟩ := Nat.sq_mul_squarefree_of_pos' hm' obtain ⟨ham, hbm⟩ := Dvd.intro_left _ hab₁, Dvd.intro _ hab₁ refine' - ⟨a, b, ⟨⟨⟨_, fun p hp => _⟩, hab₂⟩, ⟨_, fun p hp => _⟩⟩, by simp_rw [hab₁, m.succ_sub_one]⟩ + ⟨a, b, ⟨⟨⟨_, fun p hp => _⟩, hab₂⟩, ⟨_, fun p hp => _⟩⟩, by simp_rw [hab₁, m.add_one_sub_one]⟩ · exact (Nat.succ_le_succ_iff.mp (Nat.le_of_dvd hm' ham)).trans_lt hm.1 · exact hm.2 p ⟨hp.1, hp.2.trans ham⟩ · calc diff --git a/GNUmakefile b/GNUmakefile index e455b89e6e6de..35ceeb715e071 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -14,7 +14,7 @@ test/%.run: build @if [ -s test/$*.log ]; then \ echo "Error: Test output is not empty"; \ cat test/$*.log; \ - rm -f test/$*.log \ + rm -f test/$*.log; \ exit 1; \ fi @rm -f test/$*.log diff --git a/Mathlib.lean b/Mathlib.lean index 52927ccc6a246..7895988ce2981 100644 --- a/Mathlib.lean +++ b/Mathlib.lean @@ -230,6 +230,7 @@ import Mathlib.Algebra.Homology.ExactSequence import Mathlib.Algebra.Homology.Flip import Mathlib.Algebra.Homology.Functor import Mathlib.Algebra.Homology.HomologicalComplex +import Mathlib.Algebra.Homology.HomologicalComplexLimits import Mathlib.Algebra.Homology.Homology import Mathlib.Algebra.Homology.Homotopy import Mathlib.Algebra.Homology.HomotopyCategory @@ -498,6 +499,7 @@ import Mathlib.AlgebraicGeometry.Properties import Mathlib.AlgebraicGeometry.Pullbacks import Mathlib.AlgebraicGeometry.Restrict import Mathlib.AlgebraicGeometry.Scheme +import Mathlib.AlgebraicGeometry.Sites.BigZariski import Mathlib.AlgebraicGeometry.Spec import Mathlib.AlgebraicGeometry.StructureSheaf import Mathlib.AlgebraicTopology.AlternatingFaceMapComplex @@ -678,6 +680,7 @@ import Mathlib.Analysis.Convex.Jensen import Mathlib.Analysis.Convex.Join import Mathlib.Analysis.Convex.KreinMilman import Mathlib.Analysis.Convex.Measure +import Mathlib.Analysis.Convex.Mul import Mathlib.Analysis.Convex.Normed import Mathlib.Analysis.Convex.PartitionOfUnity import Mathlib.Analysis.Convex.Quasiconvex @@ -1081,6 +1084,7 @@ import Mathlib.CategoryTheory.Limits.Presheaf import Mathlib.CategoryTheory.Limits.Shapes.BinaryProducts import Mathlib.CategoryTheory.Limits.Shapes.Biproducts import Mathlib.CategoryTheory.Limits.Shapes.CommSq +import Mathlib.CategoryTheory.Limits.Shapes.ConcreteCategory import Mathlib.CategoryTheory.Limits.Shapes.Diagonal import Mathlib.CategoryTheory.Limits.Shapes.DisjointCoproduct import Mathlib.CategoryTheory.Limits.Shapes.Equalizers @@ -1240,7 +1244,7 @@ import Mathlib.CategoryTheory.Sites.Over import 
Mathlib.CategoryTheory.Sites.Plus import Mathlib.CategoryTheory.Sites.Preserves import Mathlib.CategoryTheory.Sites.Pretopology -import Mathlib.CategoryTheory.Sites.Pushforward +import Mathlib.CategoryTheory.Sites.Pullback import Mathlib.CategoryTheory.Sites.RegularExtensive import Mathlib.CategoryTheory.Sites.Sheaf import Mathlib.CategoryTheory.Sites.SheafOfTypes @@ -1351,6 +1355,8 @@ import Mathlib.Combinatorics.SimpleGraph.Triangle.Basic import Mathlib.Combinatorics.Young.SemistandardTableau import Mathlib.Combinatorics.Young.YoungDiagram import Mathlib.Computability.Ackermann +import Mathlib.Computability.AkraBazzi.AkraBazzi +import Mathlib.Computability.AkraBazzi.GrowsPolynomially import Mathlib.Computability.ContextFreeGrammar import Mathlib.Computability.DFA import Mathlib.Computability.Encoding @@ -1717,7 +1723,7 @@ import Mathlib.Data.Nat.Factorial.SuperFactorial import Mathlib.Data.Nat.Factorization.Basic import Mathlib.Data.Nat.Factorization.PrimePow import Mathlib.Data.Nat.Factors -import Mathlib.Data.Nat.Fib +import Mathlib.Data.Nat.Fib.Basic import Mathlib.Data.Nat.Fib.Zeckendorf import Mathlib.Data.Nat.ForSqrt import Mathlib.Data.Nat.GCD.Basic @@ -2254,6 +2260,7 @@ import Mathlib.Lean.EnvExtension import Mathlib.Lean.Exception import Mathlib.Lean.Expr import Mathlib.Lean.Expr.Basic +import Mathlib.Lean.Expr.ExtraRecognizers import Mathlib.Lean.Expr.ReplaceRec import Mathlib.Lean.Expr.Traverse import Mathlib.Lean.IO.Process @@ -2468,6 +2475,7 @@ import Mathlib.Logic.Pairwise import Mathlib.Logic.Relation import Mathlib.Logic.Relator import Mathlib.Logic.Small.Basic +import Mathlib.Logic.Small.Defs import Mathlib.Logic.Small.Group import Mathlib.Logic.Small.List import Mathlib.Logic.Small.Module @@ -2880,6 +2888,7 @@ import Mathlib.Probability.CondCount import Mathlib.Probability.ConditionalExpectation import Mathlib.Probability.ConditionalProbability import Mathlib.Probability.Density +import Mathlib.Probability.Distributions.Exponential import Mathlib.Probability.Distributions.Gaussian import Mathlib.Probability.IdentDistrib import Mathlib.Probability.Independence.Basic @@ -3124,6 +3133,7 @@ import Mathlib.SetTheory.Cardinal.Divisibility import Mathlib.SetTheory.Cardinal.Finite import Mathlib.SetTheory.Cardinal.Ordinal import Mathlib.SetTheory.Cardinal.SchroederBernstein +import Mathlib.SetTheory.Cardinal.UnivLE import Mathlib.SetTheory.Game.Basic import Mathlib.SetTheory.Game.Birthday import Mathlib.SetTheory.Game.Domineering @@ -3247,13 +3257,13 @@ import Mathlib.Tactic.NormNum import Mathlib.Tactic.NormNum.Basic import Mathlib.Tactic.NormNum.BigOperators import Mathlib.Tactic.NormNum.Core +import Mathlib.Tactic.NormNum.DivMod import Mathlib.Tactic.NormNum.Eq import Mathlib.Tactic.NormNum.GCD import Mathlib.Tactic.NormNum.Ineq import Mathlib.Tactic.NormNum.Inv import Mathlib.Tactic.NormNum.IsCoprime import Mathlib.Tactic.NormNum.LegendreSymbol -import Mathlib.Tactic.NormNum.Mod import Mathlib.Tactic.NormNum.NatFib import Mathlib.Tactic.NormNum.NatSqrt import Mathlib.Tactic.NormNum.OfScientific @@ -3269,6 +3279,7 @@ import Mathlib.Tactic.Polyrith import Mathlib.Tactic.Positivity import Mathlib.Tactic.Positivity.Basic import Mathlib.Tactic.Positivity.Core +import Mathlib.Tactic.ProdAssoc import Mathlib.Tactic.ProjectionNotation import Mathlib.Tactic.Propose import Mathlib.Tactic.ProxyType @@ -3501,6 +3512,7 @@ import Mathlib.Topology.Homotopy.HSpaces import Mathlib.Topology.Homotopy.HomotopyGroup import Mathlib.Topology.Homotopy.Path import Mathlib.Topology.Homotopy.Product 
+import Mathlib.Topology.IndicatorConstPointwise import Mathlib.Topology.Inseparable import Mathlib.Topology.Instances.AddCircle import Mathlib.Topology.Instances.Complex diff --git a/Mathlib/Algebra/AddTorsor.lean b/Mathlib/Algebra/AddTorsor.lean index 55924cd14fe1e..c9deee694d97c 100644 --- a/Mathlib/Algebra/AddTorsor.lean +++ b/Mathlib/Algebra/AddTorsor.lean @@ -49,14 +49,14 @@ by the `+ᵥ` operation and a corresponding subtraction given by the space. -/ class AddTorsor (G : outParam (Type*)) (P : Type*) [outParam <| AddGroup G] extends AddAction G P, VSub G P where - [Nonempty : Nonempty P] + [nonempty : Nonempty P] /-- Torsor subtraction and addition with the same element cancels out. -/ vsub_vadd' : ∀ p1 p2 : P, (p1 -ᵥ p2 : G) +ᵥ p2 = p1 /-- Torsor addition and subtraction with the same element cancels out. -/ vadd_vsub' : ∀ (g : G) (p : P), g +ᵥ p -ᵥ p = g #align add_torsor AddTorsor -attribute [instance 100] AddTorsor.Nonempty -- porting note: removers `nolint instance_priority` +attribute [instance 100] AddTorsor.nonempty -- porting note: removers `nolint instance_priority` --Porting note: removed --attribute [nolint dangerous_instance] AddTorsor.toVSub @@ -287,7 +287,7 @@ instance instAddTorsor : AddTorsor (G × G') (P × P') where zero_vadd _ := Prod.ext (zero_vadd _ _) (zero_vadd _ _) add_vadd _ _ _ := Prod.ext (add_vadd _ _ _) (add_vadd _ _ _) vsub p₁ p₂ := (p₁.1 -ᵥ p₂.1, p₁.2 -ᵥ p₂.2) - Nonempty := Prod.Nonempty + nonempty := Prod.Nonempty vsub_vadd' _ _ := Prod.ext (vsub_vadd _ _) (vsub_vadd _ _) vadd_vsub' _ _ := Prod.ext (vadd_vsub _ _) (vadd_vsub _ _) @@ -342,12 +342,11 @@ variable {I : Type u} {fg : I → Type v} [∀ i, AddGroup (fg i)] {fp : I → T open AddAction AddTorsor /-- A product of `AddTorsor`s is an `AddTorsor`. -/ -instance instAddTorsor [T : ∀ i, AddTorsor (fg i) (fp i)] : AddTorsor (∀ i, fg i) (∀ i, fp i) where +instance instAddTorsor [∀ i, AddTorsor (fg i) (fp i)] : AddTorsor (∀ i, fg i) (∀ i, fp i) where vadd g p i := g i +ᵥ p i zero_vadd p := funext fun i => zero_vadd (fg i) (p i) add_vadd g₁ g₂ p := funext fun i => add_vadd (g₁ i) (g₂ i) (p i) vsub p₁ p₂ i := p₁ i -ᵥ p₂ i - Nonempty := ⟨fun i => Classical.choice (T i).Nonempty⟩ vsub_vadd' p₁ p₂ := funext fun i => vsub_vadd (p₁ i) (p₂ i) vadd_vsub' g p := funext fun i => vadd_vsub (g i) (p i) diff --git a/Mathlib/Algebra/BigOperators/Basic.lean b/Mathlib/Algebra/BigOperators/Basic.lean index bd32cb17f490b..ff5478ad2cda1 100644 --- a/Mathlib/Algebra/BigOperators/Basic.lean +++ b/Mathlib/Algebra/BigOperators/Basic.lean @@ -2005,13 +2005,13 @@ namespace Fintype open Finset -/-- `Fintype.prod_bijective` is a variant of `Finset.prod_bij` that accepts `Function.bijective`. +/-- `Fintype.prod_bijective` is a variant of `Finset.prod_bij` that accepts `Function.Bijective`. -See `Function.bijective.prod_comp` for a version without `h`. -/ -@[to_additive "`Fintype.sum_equiv` is a variant of `Finset.sum_bij` that accepts -`Function.bijective`. +See `Function.Bijective.prod_comp` for a version without `h`. -/ +@[to_additive "`Fintype.sum_bijective` is a variant of `Finset.sum_bij` that accepts +`Function.Bijective`. -See `Function.bijective.sum_comp` for a version without `h`. "] +See `Function.Bijective.sum_comp` for a version without `h`. 
"] theorem prod_bijective {α β M : Type*} [Fintype α] [Fintype β] [CommMonoid M] (e : α → β) (he : Function.Bijective e) (f : α → M) (g : β → M) (h : ∀ x, f x = g (e x)) : ∏ x : α, f x = ∏ x : β, g x := @@ -2021,6 +2021,9 @@ theorem prod_bijective {α β M : Type*} [Fintype α] [Fintype β] [CommMonoid M #align fintype.prod_bijective Fintype.prod_bijective #align fintype.sum_bijective Fintype.sum_bijective +alias _root_.Function.Bijective.finset_prod := prod_bijective +attribute [to_additive] Function.Bijective.finset_prod + /-- `Fintype.prod_equiv` is a specialization of `Finset.prod_bij` that automatically fills in most arguments. @@ -2032,7 +2035,7 @@ automatically fills in most arguments. See `Equiv.sum_comp` for a version without `h`."] theorem prod_equiv {α β M : Type*} [Fintype α] [Fintype β] [CommMonoid M] (e : α ≃ β) (f : α → M) (g : β → M) (h : ∀ x, f x = g (e x)) : ∏ x : α, f x = ∏ x : β, g x := - prod_bijective e e.bijective f g h + e.bijective.finset_prod _ f g h #align fintype.prod_equiv Fintype.prod_equiv #align fintype.sum_equiv Fintype.sum_equiv diff --git a/Mathlib/Algebra/BigOperators/Order.lean b/Mathlib/Algebra/BigOperators/Order.lean index 10be1fa609b8f..613db9a2f56ca 100644 --- a/Mathlib/Algebra/BigOperators/Order.lean +++ b/Mathlib/Algebra/BigOperators/Order.lean @@ -548,6 +548,18 @@ theorem prod_eq_prod_iff_of_le {f g : ι → M} (h : ∀ i ∈ s, f i ≤ g i) : #align finset.prod_eq_prod_iff_of_le Finset.prod_eq_prod_iff_of_le #align finset.sum_eq_sum_iff_of_le Finset.sum_eq_sum_iff_of_le +variable [DecidableEq ι] + +@[to_additive] lemma prod_sdiff_le_prod_sdiff : + ∏ i in s \ t, f i ≤ ∏ i in t \ s, f i ↔ ∏ i in s, f i ≤ ∏ i in t, f i := by + rw [←mul_le_mul_iff_right, ←prod_union (disjoint_sdiff_inter _ _), sdiff_union_inter, ←prod_union, + inter_comm, sdiff_union_inter]; simpa only [inter_comm] using disjoint_sdiff_inter t s + +@[to_additive] lemma prod_sdiff_lt_prod_sdiff : + ∏ i in s \ t, f i < ∏ i in t \ s, f i ↔ ∏ i in s, f i < ∏ i in t, f i := by + rw [←mul_lt_mul_iff_right, ←prod_union (disjoint_sdiff_inter _ _), sdiff_union_inter, ←prod_union, + inter_comm, sdiff_union_inter]; simpa only [inter_comm] using disjoint_sdiff_inter t s + end OrderedCancelCommMonoid section LinearOrderedCancelCommMonoid diff --git a/Mathlib/Algebra/Category/ModuleCat/FilteredColimits.lean b/Mathlib/Algebra/Category/ModuleCat/FilteredColimits.lean index 1cabd926dfb7a..347fbf84c8882 100644 --- a/Mathlib/Algebra/Category/ModuleCat/FilteredColimits.lean +++ b/Mathlib/Algebra/Category/ModuleCat/FilteredColimits.lean @@ -118,7 +118,7 @@ instance colimitMulAction : MulAction R (M F) where instance colimitSMulWithZero : SMulWithZero R (M F) := { colimitMulAction F with smul_zero := fun r => by - erw [colimit_zero_eq _ (IsFiltered.Nonempty.some : J), colimit_smul_mk_eq, smul_zero] + erw [colimit_zero_eq _ (IsFiltered.nonempty.some : J), colimit_smul_mk_eq, smul_zero] rfl zero_smul := fun x => by refine' Quot.inductionOn x _; clear x; intro x; cases' x with j x diff --git a/Mathlib/Algebra/Category/MonCat/FilteredColimits.lean b/Mathlib/Algebra/Category/MonCat/FilteredColimits.lean index 1c864dd81b517..3aaf514b27ae3 100644 --- a/Mathlib/Algebra/Category/MonCat/FilteredColimits.lean +++ b/Mathlib/Algebra/Category/MonCat/FilteredColimits.lean @@ -80,7 +80,7 @@ variable [IsFiltered J] "As `J` is nonempty, we can pick an arbitrary object `j₀ : J`. 
We use this object to define the \"zero\" in the colimit as the equivalence class of `⟨j₀, 0 : F.obj j₀⟩`."] noncomputable instance colimitOne : - One (M.{v, u} F) where one := M.mk F ⟨IsFiltered.Nonempty.some,1⟩ + One (M.{v, u} F) where one := M.mk F ⟨IsFiltered.nonempty.some,1⟩ #align Mon.filtered_colimits.colimit_has_one MonCat.FilteredColimits.colimitOne #align AddMon.filtered_colimits.colimit_has_zero AddMonCat.FilteredColimits.colimitZero @@ -301,7 +301,7 @@ The only thing left to see is that it is a monoid homomorphism. def colimitDesc (t : Cocone F) : colimit.{v, u} F ⟶ t.pt where toFun := (Types.colimitCoconeIsColimit (F ⋙ forget MonCat)).desc ((forget MonCat).mapCocone t) map_one' := by - rw [colimit_one_eq F IsFiltered.Nonempty.some] + rw [colimit_one_eq F IsFiltered.nonempty.some] exact MonoidHom.map_one _ map_mul' x y := by refine Quot.induction_on₂ x y ?_ diff --git a/Mathlib/Algebra/ContinuedFractions/Computation/Approximations.lean b/Mathlib/Algebra/ContinuedFractions/Computation/Approximations.lean index dd2adc3f9bb42..f8a78987127d1 100644 --- a/Mathlib/Algebra/ContinuedFractions/Computation/Approximations.lean +++ b/Mathlib/Algebra/ContinuedFractions/Computation/Approximations.lean @@ -4,7 +4,7 @@ Released under Apache 2.0 license as described in the file LICENSE. Authors: Kevin Kappelmann -/ import Mathlib.Algebra.ContinuedFractions.Computation.CorrectnessTerminating -import Mathlib.Data.Nat.Fib +import Mathlib.Data.Nat.Fib.Basic import Mathlib.Tactic.Monotonicity import Mathlib.Tactic.SolveByElim diff --git a/Mathlib/Algebra/CovariantAndContravariant.lean b/Mathlib/Algebra/CovariantAndContravariant.lean index e6e66200dcee3..dbf7f5923f66d 100644 --- a/Mathlib/Algebra/CovariantAndContravariant.lean +++ b/Mathlib/Algebra/CovariantAndContravariant.lean @@ -31,7 +31,7 @@ relation (typically `(≤)` or `(<)`), these are the only two typeclasses that I The general approach is to formulate the lemma that you are interested in and prove it, with the `Ordered[...]` typeclass of your liking. After that, you convert the single typeclass, say `[OrderedCancelMonoid M]`, into three typeclasses, e.g. -`[LeftCancelSemigroup M] [PartialOrder M] [CovariantClass M M (Function.swap (*)) (≤)]` +`[CancelMonoid M] [PartialOrder M] [CovariantClass M M (Function.swap (*)) (≤)]` and have a go at seeing if the proof still works! Note that it is possible to combine several `Co(ntra)variantClass` assumptions together. @@ -362,27 +362,27 @@ theorem contravariant_le_of_contravariant_eq_and_lt [PartialOrder N] then the following four instances (actually eight) can be removed in favor of the above two. 
-/ @[to_additive] -instance LeftCancelSemigroup.covariant_mul_lt_of_covariant_mul_le [LeftCancelSemigroup N] +instance IsLeftCancelMul.covariant_mul_lt_of_covariant_mul_le [Mul N] [IsLeftCancelMul N] [PartialOrder N] [CovariantClass N N (· * ·) (· ≤ ·)] : CovariantClass N N (· * ·) (· < ·) where elim a _ _ bc := (CovariantClass.elim a bc.le).lt_of_ne ((mul_ne_mul_right a).mpr bc.ne) @[to_additive] -instance RightCancelSemigroup.covariant_swap_mul_lt_of_covariant_swap_mul_le - [RightCancelSemigroup N] [PartialOrder N] [CovariantClass N N (swap (· * ·)) (· ≤ ·)] : +instance IsRightCancelMul.covariant_swap_mul_lt_of_covariant_swap_mul_le + [Mul N] [IsRightCancelMul N] [PartialOrder N] [CovariantClass N N (swap (· * ·)) (· ≤ ·)] : CovariantClass N N (swap (· * ·)) (· < ·) where elim a _ _ bc := (CovariantClass.elim a bc.le).lt_of_ne ((mul_ne_mul_left a).mpr bc.ne) @[to_additive] -instance LeftCancelSemigroup.contravariant_mul_le_of_contravariant_mul_lt [LeftCancelSemigroup N] +instance IsLeftCancelMul.contravariant_mul_le_of_contravariant_mul_lt [Mul N] [IsLeftCancelMul N] [PartialOrder N] [ContravariantClass N N (· * ·) (· < ·)] : ContravariantClass N N (· * ·) (· ≤ ·) where elim := (contravariant_le_iff_contravariant_lt_and_eq N N _).mpr ⟨ContravariantClass.elim, fun _ ↦ mul_left_cancel⟩ @[to_additive] -instance RightCancelSemigroup.contravariant_swap_mul_le_of_contravariant_swap_mul_lt - [RightCancelSemigroup N] [PartialOrder N] [ContravariantClass N N (swap (· * ·)) (· < ·)] : +instance IsRightCancelMul.contravariant_swap_mul_le_of_contravariant_swap_mul_lt + [Mul N] [IsRightCancelMul N] [PartialOrder N] [ContravariantClass N N (swap (· * ·)) (· < ·)] : ContravariantClass N N (swap (· * ·)) (· ≤ ·) where elim := (contravariant_le_iff_contravariant_lt_and_eq N N _).mpr ⟨ContravariantClass.elim, fun _ ↦ mul_right_cancel⟩ diff --git a/Mathlib/Algebra/DirectSum/LinearMap.lean b/Mathlib/Algebra/DirectSum/LinearMap.lean index ec72c3363c828..605d2df10025c 100644 --- a/Mathlib/Algebra/DirectSum/LinearMap.lean +++ b/Mathlib/Algebra/DirectSum/LinearMap.lean @@ -4,6 +4,7 @@ Released under Apache 2.0 license as described in the file LICENSE. Authors: Oliver Nash -/ import Mathlib.Algebra.DirectSum.Module +import Mathlib.LinearAlgebra.Eigenspace.Basic import Mathlib.LinearAlgebra.Trace /-! @@ -16,6 +17,10 @@ domain and codomain. open Set BigOperators DirectSum +attribute [local instance] + isNoetherian_of_isNoetherianRing_of_finite + Module.free_of_finite_type_torsion_free' + variable {ι R M : Type*} [CommRing R] [AddCommGroup M] [Module R M] {N : ι → Submodule R M} [DecidableEq ι] (h : IsInternal N) @@ -55,4 +60,33 @@ lemma trace_eq_sum_trace_restrict' (hN : {i | N i ≠ ⊥}.Finite) rw [← Finset.sum_coe_sort, trace_eq_sum_trace_restrict (isInternal_ne_bot_iff.mpr h) _] exact Fintype.sum_equiv hN.subtypeEquivToFinset _ _ (fun i ↦ rfl) +/-- If `f` and `g` are commuting endomorphisms of a finite, free `R`-module `M`, such that `f` +is triangularizable, then to prove that the trace of `g ∘ f` vanishes, it is sufficient to prove +that the trace of `g` vanishes on each generalized eigenspace of `f`. 
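+(The restriction of `g` appearing in the hypothesis makes sense because `g` commutes with `f` and therefore preserves each generalized eigenspace, see `mapsTo_iSup_generalizedEigenspace_of_comm`.)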
-/ +lemma trace_comp_eq_zero_of_commute_of_trace_restrict_eq_zero + [IsDomain R] [IsPrincipalIdealRing R] [Module.Free R M] [Module.Finite R M] + {f g : Module.End R M} + (h_comm : Commute f g) + (hf : ⨆ μ, ⨆ k, f.generalizedEigenspace μ k = ⊤) + (hg : ∀ μ, trace R _ (g.restrict (f.mapsTo_iSup_generalizedEigenspace_of_comm h_comm μ)) = 0) : + trace R _ (g ∘ₗ f) = 0 := by + have hfg : ∀ μ, + MapsTo (g ∘ₗ f) ↑(⨆ k, f.generalizedEigenspace μ k) ↑(⨆ k, f.generalizedEigenspace μ k) := + fun μ ↦ (f.mapsTo_iSup_generalizedEigenspace_of_comm h_comm μ).comp + (f.mapsTo_iSup_generalizedEigenspace_of_comm rfl μ) + suffices ∀ μ, trace R _ ((g ∘ₗ f).restrict (hfg μ)) = 0 by + classical + have hds := DirectSum.isInternal_submodule_of_independent_of_iSup_eq_top + f.independent_generalizedEigenspace hf + have h_fin : {μ | ⨆ k, f.generalizedEigenspace μ k ≠ ⊥}.Finite := + CompleteLattice.WellFounded.finite_ne_bot_of_independent + (isNoetherian_iff_wellFounded.mp inferInstance) f.independent_generalizedEigenspace + simp [trace_eq_sum_trace_restrict' hds h_fin hfg, this] + intro μ + replace h_comm : Commute (g.restrict (f.mapsTo_iSup_generalizedEigenspace_of_comm h_comm μ)) + (f.restrict (f.mapsTo_iSup_generalizedEigenspace_of_comm rfl μ)) := + restrict_commute h_comm.symm _ _ + rw [restrict_comp, trace_comp_eq_mul_of_commute_of_isNilpotent μ h_comm + (f.isNilpotent_restrict_iSup_sub_algebraMap μ), hg, mul_zero] + end LinearMap diff --git a/Mathlib/Algebra/GeomSum.lean b/Mathlib/Algebra/GeomSum.lean index be03adf644329..99d2b0e0e9b84 100644 --- a/Mathlib/Algebra/GeomSum.lean +++ b/Mathlib/Algebra/GeomSum.lean @@ -584,3 +584,21 @@ theorem geom_sum_neg_iff [LinearOrderedRing α] (hn : n ≠ 0) : #align geom_sum_neg_iff geom_sum_neg_iff end Order + +variable {m n : ℕ} {s : Finset ℕ} + +/-- If all the elements of a finset of naturals are less than `n`, then the sum of their powers of +`m ≥ 2` is less than `m ^ n`. -/ +lemma Nat.geomSum_eq (hm : 2 ≤ m) (n : ℕ) : + ∑ k in range n, m ^ k = (m ^ n - 1) / (m - 1) := by + refine (Nat.div_eq_of_eq_mul_left (tsub_pos_iff_lt.2 hm) $ tsub_eq_of_eq_add ?_).symm + simpa only [tsub_add_cancel_of_le $ one_le_two.trans hm, eq_comm] using geom_sum_mul_add (m - 1) n + +/-- If all the elements of a finset of naturals are less than `n`, then the sum of their powers of +`m ≥ 2` is less than `m ^ n`. -/ +lemma Nat.geomSum_lt (hm : 2 ≤ m) (hs : ∀ k ∈ s, k < n) : ∑ k in s, m ^ k < m ^ n := + calc + ∑ k in s, m ^ k ≤ ∑ k in range n, m ^ k := sum_le_sum_of_subset fun k hk ↦ mem_range.2 $ hs _ hk + _ = (m ^ n - 1) / (m - 1) := Nat.geomSum_eq hm _ + _ ≤ m ^ n - 1 := Nat.div_le_self _ _ + _ < m ^ n := tsub_lt_self (by positivity) zero_lt_one diff --git a/Mathlib/Algebra/Group/Embedding.lean b/Mathlib/Algebra/Group/Embedding.lean index e25181185c952..201064e794f6f 100644 --- a/Mathlib/Algebra/Group/Embedding.lean +++ b/Mathlib/Algebra/Group/Embedding.lean @@ -13,17 +13,16 @@ import Mathlib.Logic.Embedding.Basic -/ -variable {R : Type*} +variable {G : Type*} section LeftOrRightCancelSemigroup -/-- The embedding of a left cancellative semigroup into itself -by left multiplication by a fixed element. - -/ +/-- If left-multiplication by any element is cancellative, left-multiplication by `g` is an +embedding. -/ @[to_additive (attr := simps) - "The embedding of a left cancellative additive semigroup into itself - by left translation by a fixed element." 
] -def mulLeftEmbedding {G : Type*} [LeftCancelSemigroup G] (g : G) : G ↪ G where + "If left-addition by any element is cancellative, left-addition by `g` is an + embedding."] +def mulLeftEmbedding [Mul G] [IsLeftCancelMul G] (g : G) : G ↪ G where toFun h := g * h inj' := mul_right_injective g #align mul_left_embedding mulLeftEmbedding @@ -31,13 +30,12 @@ def mulLeftEmbedding {G : Type*} [LeftCancelSemigroup G] (g : G) : G ↪ G where #align add_left_embedding_apply addLeftEmbedding_apply #align mul_left_embedding_apply mulLeftEmbedding_apply -/-- The embedding of a right cancellative semigroup into itself -by right multiplication by a fixed element. - -/ +/-- If right-multiplication by any element is cancellative, right-multiplication by `g` is an +embedding. -/ @[to_additive (attr := simps) - "The embedding of a right cancellative additive semigroup into itself - by right translation by a fixed element."] -def mulRightEmbedding {G : Type*} [RightCancelSemigroup G] (g : G) : G ↪ G where + "If right-addition by any element is cancellative, right-addition by `g` is an + embedding."] +def mulRightEmbedding [Mul G] [IsRightCancelMul G] (g : G) : G ↪ G where toFun h := h * g inj' := mul_left_injective g #align mul_right_embedding mulRightEmbedding @@ -46,7 +44,7 @@ def mulRightEmbedding {G : Type*} [RightCancelSemigroup G] (g : G) : G ↪ G whe #align add_right_embedding_apply addRightEmbedding_apply @[to_additive] -theorem mulLeftEmbedding_eq_mulRightEmbedding {G : Type*} [CancelCommMonoid G] (g : G) : +theorem mulLeftEmbedding_eq_mulRightEmbedding [CommSemigroup G] [IsCancelMul G] (g : G) : mulLeftEmbedding g = mulRightEmbedding g := by ext exact mul_comm _ _ diff --git a/Mathlib/Algebra/Group/WithOne/Defs.lean b/Mathlib/Algebra/Group/WithOne/Defs.lean index e7f02628529f8..41cc2435e9b3d 100644 --- a/Mathlib/Algebra/Group/WithOne/Defs.lean +++ b/Mathlib/Algebra/Group/WithOne/Defs.lean @@ -309,7 +309,7 @@ instance [One α] [Pow α ℤ] : Pow (WithZero α) ℤ := | some x, n => ↑(x ^ n)⟩ @[simp, norm_cast] -theorem coe_zpow [DivInvMonoid α] {a : α} (n : ℤ) : ↑(a ^ n : α) = ((↑a : WithZero α) ^ n) := +theorem coe_zpow [DivInvMonoid α] {a : α} (n : ℤ) : ↑(a ^ n) = (↑a : WithZero α) ^ n := rfl #align with_zero.coe_zpow WithZero.coe_zpow diff --git a/Mathlib/Algebra/GroupRingAction/Basic.lean b/Mathlib/Algebra/GroupRingAction/Basic.lean index b1c6531e78541..3853de5e9d573 100644 --- a/Mathlib/Algebra/GroupRingAction/Basic.lean +++ b/Mathlib/Algebra/GroupRingAction/Basic.lean @@ -66,6 +66,29 @@ theorem toRingHom_injective [MulSemiringAction M R] [FaithfulSMul M R] : eq_of_smul_eq_smul fun r => RingHom.ext_iff.1 h r #align to_ring_hom_injective toRingHom_injective +/-- The tautological action by `R →+* R` on `R`. + +This generalizes `Function.End.applyMulAction`. -/ +instance RingHom.applyMulSemiringAction [Semiring R] : MulSemiringAction (R →+* R) R where + smul := (· <| ·) + smul_one := map_one + smul_mul := map_mul + smul_zero := RingHom.map_zero + smul_add := RingHom.map_add + one_smul _ := rfl + mul_smul _ _ _ := rfl +#align ring_hom.apply_distrib_mul_action RingHom.applyMulSemiringActionₓ + +@[simp] +protected theorem RingHom.smul_def [Semiring R] (f : R →+* R) (a : R) : f • a = f a := + rfl +#align ring_hom.smul_def RingHom.smul_def + +/-- `RingHom.applyMulSemiringAction` is faithful. 
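+In other words, two ring endomorphisms of `R` that agree on every element of `R` are equal.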
-/ +instance RingHom.applyFaithfulSMul [Semiring R] : FaithfulSMul (R →+* R) R := + ⟨fun {_ _} h => RingHom.ext h⟩ +#align ring_hom.apply_has_faithful_smul RingHom.applyFaithfulSMul + /-- Each element of the group defines a semiring isomorphism. -/ @[simps!] def MulSemiringAction.toRingEquiv [MulSemiringAction G R] (x : G) : R ≃+* R := diff --git a/Mathlib/Algebra/Homology/HomologicalComplexLimits.lean b/Mathlib/Algebra/Homology/HomologicalComplexLimits.lean new file mode 100644 index 0000000000000..0624a358c1294 --- /dev/null +++ b/Mathlib/Algebra/Homology/HomologicalComplexLimits.lean @@ -0,0 +1,196 @@ +/- +Copyright (c) 2023 Joël Riou. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Joël Riou +-/ +import Mathlib.Algebra.Homology.HomologicalComplex +import Mathlib.CategoryTheory.Limits.Shapes.FiniteLimits +import Mathlib.CategoryTheory.Limits.Preserves.Finite + +/-! +# Limits and colimits in the category of homological complexes + +In this file, it is shown that if a category `C` has (co)limits of shape `J`, +then it is also the case of the categories `HomologicalComplex C c`, +and the evaluation functors `eval C c i : HomologicalComplex C c ⥤ C` +commute to these. + +-/ + +open CategoryTheory Category Limits + +namespace HomologicalComplex + +variable {C ι J : Type*} [Category C] [Category J] {c : ComplexShape ι} [HasZeroMorphisms C] + +section + +variable (F : J ⥤ HomologicalComplex C c) + +/-- A cone in `HomologicalComplex C c` is limit if the induced cones obtained +by applying `eval C c i : HomologicalComplex C c ⥤ C` for all `i` are limit. -/ +def isLimitOfEval (s : Cone F) + (hs : ∀ (i : ι), IsLimit ((eval C c i).mapCone s)) : IsLimit s where + lift t := + { f := fun i => (hs i).lift ((eval C c i).mapCone t) + comm' := fun i i' _ => by + apply IsLimit.hom_ext (hs i') + intro j + have eq := fun k => (hs k).fac ((eval C c k).mapCone t) + simp only [Functor.mapCone_π_app, eval_map] at eq + simp only [Functor.mapCone_π_app, eval_map, assoc] + rw [eq i', ← Hom.comm, reassoc_of% (eq i), Hom.comm] } + fac t j := by + ext i + apply (hs i).fac + uniq t m hm := by + ext i + apply (hs i).uniq ((eval C c i).mapCone t) + intro j + dsimp + simp only [← comp_f, hm] + +variable [∀ (n : ι), HasLimit (F ⋙ eval C c n)] + +/-- A cone for a functor `F : J ⥤ HomologicalComplex C c` which is given in degree `n` by +the limit `F ⋙ eval C c n`. -/ +@[simps] +noncomputable def coneOfHasLimitEval : Cone F where + pt := + { X := fun n => limit (F ⋙ eval C c n) + d := fun n m => limMap { app := fun j => (F.obj j).d n m } + shape := fun {n m} h => by + ext j + rw [limMap_π] + dsimp + rw [(F.obj j).shape _ _ h, comp_zero, zero_comp] } + π := + { app := fun j => { f := fun n => limit.π _ j } + naturality := fun i j φ => by + ext n + dsimp + erw [limit.w, id_comp] } + +/-- The cone `coneOfHasLimitEval F` is limit. 
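+It is obtained by applying `isLimitOfEval` to the chosen limit cones of the functors `F ⋙ eval C c i`.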
-/ +noncomputable def isLimitConeOfHasLimitEval : IsLimit (coneOfHasLimitEval F) := + isLimitOfEval _ _ (fun _ => limit.isLimit _) + +instance : HasLimit F := ⟨⟨⟨_, isLimitConeOfHasLimitEval F⟩⟩⟩ + +noncomputable instance (n : ι) : PreservesLimit F (eval C c n) := + preservesLimitOfPreservesLimitCone (isLimitConeOfHasLimitEval F) (limit.isLimit _) + +end + +instance [HasLimitsOfShape J C] : HasLimitsOfShape J (HomologicalComplex C c) := ⟨inferInstance⟩ + +noncomputable instance [HasLimitsOfShape J C] (n : ι) : + PreservesLimitsOfShape J (eval C c n) := ⟨inferInstance⟩ + +instance [HasFiniteLimits C] : HasFiniteLimits (HomologicalComplex C c) := + ⟨fun _ _ => inferInstance⟩ + +noncomputable instance [HasFiniteLimits C] (n : ι) : + PreservesFiniteLimits (eval C c n) := ⟨fun _ _ _ => inferInstance⟩ + +instance [HasFiniteLimits C] {K L : HomologicalComplex C c} (φ : K ⟶ L) [Mono φ] (n : ι) : + Mono (φ.f n) := by + change Mono ((HomologicalComplex.eval C c n).map φ) + infer_instance + +section + +variable (F : J ⥤ HomologicalComplex C c) + +/-- A cocone in `HomologicalComplex C c` is colimit if the induced cocones obtained +by applying `eval C c i : HomologicalComplex C c ⥤ C` for all `i` are colimit. -/ +def isColimitOfEval (s : Cocone F) + (hs : ∀ (i : ι), IsColimit ((eval C c i).mapCocone s)) : IsColimit s where + desc t := + { f := fun i => (hs i).desc ((eval C c i).mapCocone t) + comm' := fun i i' _ => by + apply IsColimit.hom_ext (hs i) + intro j + have eq := fun k => (hs k).fac ((eval C c k).mapCocone t) + simp only [Functor.mapCocone_ι_app, eval_map] at eq + simp only [Functor.mapCocone_ι_app, eval_map, assoc] + rw [reassoc_of% (eq i), Hom.comm_assoc, eq i', Hom.comm] } + fac t j := by + ext i + apply (hs i).fac + uniq t m hm := by + ext i + apply (hs i).uniq ((eval C c i).mapCocone t) + intro j + dsimp + simp only [← comp_f, hm] + + +variable [∀ (n : ι), HasColimit (F ⋙ HomologicalComplex.eval C c n)] + +/-- A cocone for a functor `F : J ⥤ HomologicalComplex C c` which is given in degree `n` by +the colimit of `F ⋙ eval C c n`. -/ +@[simps] +noncomputable def coconeOfHasColimitEval : Cocone F where + pt := + { X := fun n => colimit (F ⋙ eval C c n) + d := fun n m => colimMap { app := fun j => (F.obj j).d n m } + shape := fun {n m} h => by + ext j + rw [ι_colimMap] + dsimp + rw [(F.obj j).shape _ _ h, zero_comp, comp_zero] } + ι := + { app := fun j => { f := fun n => colimit.ι (F ⋙ eval C c n) j } + naturality := fun i j φ => by + ext n + dsimp + erw [colimit.w (F ⋙ eval C c n) φ, comp_id] } + +/-- The cocone `coconeOfHasLimitEval F` is colimit. 
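+It is obtained by applying `isColimitOfEval` to the chosen colimit cocones of the functors `F ⋙ eval C c i`.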
-/ +noncomputable def isColimitCoconeOfHasColimitEval : IsColimit (coconeOfHasColimitEval F) := + isColimitOfEval _ _ (fun _ => colimit.isColimit _) + +instance : HasColimit F := ⟨⟨⟨_, isColimitCoconeOfHasColimitEval F⟩⟩⟩ + +noncomputable instance (n : ι) : PreservesColimit F (eval C c n) := + preservesColimitOfPreservesColimitCocone (isColimitCoconeOfHasColimitEval F) + (colimit.isColimit _) + +end + +instance [HasColimitsOfShape J C] : HasColimitsOfShape J (HomologicalComplex C c) := ⟨inferInstance⟩ + +noncomputable instance [HasColimitsOfShape J C] (n : ι) : + PreservesColimitsOfShape J (eval C c n) := ⟨inferInstance⟩ + +instance [HasFiniteColimits C] : HasFiniteColimits (HomologicalComplex C c) := + ⟨fun _ _ => inferInstance⟩ + +noncomputable instance [HasFiniteColimits C] (n : ι) : + PreservesFiniteColimits (eval C c n) := ⟨fun _ _ _ => inferInstance⟩ + +instance [HasFiniteColimits C] {K L : HomologicalComplex C c} (φ : K ⟶ L) [Epi φ] (n : ι) : + Epi (φ.f n) := by + change Epi ((HomologicalComplex.eval C c n).map φ) + infer_instance + +/-- A functor `D ⥤ HomologicalComplex C c` preserves limits of shape `J` +if for any `i`, `G ⋙ eval C c i` does. -/ +def preservesLimitsOfShapeOfEval {D : Type*} [Category D] + (G : D ⥤ HomologicalComplex C c) + (_ : ∀ (i : ι), PreservesLimitsOfShape J (G ⋙ eval C c i)) : + PreservesLimitsOfShape J G := + ⟨fun {_} => ⟨fun hs => isLimitOfEval _ _ + (fun i => isLimitOfPreserves (G ⋙ eval C c i) hs)⟩⟩ + +/-- A functor `D ⥤ HomologicalComplex C c` preserves colimits of shape `J` +if for any `i`, `G ⋙ eval C c i` does. -/ +def preservesColimitsOfShapeOfEval {D : Type*} [Category D] + (G : D ⥤ HomologicalComplex C c) + (_ : ∀ (i : ι), PreservesColimitsOfShape J (G ⋙ eval C c i)) : + PreservesColimitsOfShape J G := + ⟨fun {_} => ⟨fun hs => isColimitOfEval _ _ + (fun i => isColimitOfPreserves (G ⋙ eval C c i) hs)⟩⟩ + +end HomologicalComplex diff --git a/Mathlib/Algebra/Homology/Homotopy.lean b/Mathlib/Algebra/Homology/Homotopy.lean index a0a7d0faf0a25..7d9e0e0f5f8fc 100644 --- a/Mathlib/Algebra/Homology/Homotopy.lean +++ b/Mathlib/Algebra/Homology/Homotopy.lean @@ -3,7 +3,7 @@ Copyright (c) 2021 Scott Morrison. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Scott Morrison -/ -import Mathlib.Algebra.Homology.Additive +import Mathlib.Algebra.Homology.ShortComplex.HomologicalComplex import Mathlib.Tactic.Abel #align_import algebra.homology.homotopy from "leanprover-community/mathlib"@"618ea3d5c99240cd7000d8376924906a148bf9ff" @@ -21,7 +21,7 @@ open Classical noncomputable section -open CategoryTheory CategoryTheory.Limits HomologicalComplex +open CategoryTheory Category Limits HomologicalComplex variable {ι : Type*} @@ -65,7 +65,7 @@ theorem dNext_comp_left (f : C ⟶ D) (g : ∀ i j, D.X i ⟶ E.X j) (i : ι) : @[simp 1100] theorem dNext_comp_right (f : ∀ i j, C.X i ⟶ D.X j) (g : D ⟶ E) (i : ι) : (dNext i fun i j => f i j ≫ g.f j) = dNext i f ≫ g.f i := - (Category.assoc _ _ _).symm + (assoc _ _ _).symm #align d_next_comp_right dNext_comp_right /-- The composition `f j (c.prev j) ≫ D.d (c.prev j) j`. 
-/ @@ -94,14 +94,14 @@ theorem prevD_eq (f : ∀ i j, C.X i ⟶ D.X j) {j j' : ι} (w : c.Rel j' j) : @[simp 1100] theorem prevD_comp_left (f : C ⟶ D) (g : ∀ i j, D.X i ⟶ E.X j) (j : ι) : (prevD j fun i j => f.f i ≫ g i j) = f.f j ≫ prevD j g := - Category.assoc _ _ _ + assoc _ _ _ #align prev_d_comp_left prevD_comp_left @[simp 1100] theorem prevD_comp_right (f : ∀ i j, C.X i ⟶ D.X j) (g : D ⟶ E) (j : ι) : (prevD j fun i j => f i j ≫ g.f j) = prevD j f ≫ g.f j := by dsimp [prevD] - simp only [Category.assoc, g.comm] + simp only [assoc, g.comm] #align prev_d_comp_right prevD_comp_right theorem dNext_nat (C D : ChainComplex V ℕ) (i : ℕ) (f : ∀ i j, C.X i ⟶ D.X j) : @@ -224,13 +224,13 @@ def comp {C₁ C₂ C₃ : HomologicalComplex V c} {f₁ g₁ : C₁ ⟶ C₂} { /-- a variant of `Homotopy.compRight` useful for dealing with homotopy equivalences. -/ @[simps!] def compRightId {f : C ⟶ C} (h : Homotopy f (𝟙 C)) (g : C ⟶ D) : Homotopy (f ≫ g) g := - (h.compRight g).trans (ofEq <| Category.id_comp _) + (h.compRight g).trans (ofEq <| id_comp _) #align homotopy.comp_right_id Homotopy.compRightId /-- a variant of `Homotopy.compLeft` useful for dealing with homotopy equivalences. -/ @[simps!] def compLeftId {f : D ⟶ D} (h : Homotopy f (𝟙 D)) (g : C ⟶ D) : Homotopy (g ≫ f) g := - (h.compLeft g).trans (ofEq <| Category.comp_id _) + (h.compLeft g).trans (ofEq <| comp_id _) #align homotopy.comp_left_id Homotopy.compLeftId /-! @@ -249,12 +249,12 @@ def nullHomotopicMap (hom : ∀ i j, C.X i ⟶ D.X j) : C ⟶ D where f i := dNext i hom + prevD i hom comm' i j hij := by have eq1 : prevD i hom ≫ D.d i j = 0 := by - simp only [prevD, AddMonoidHom.mk'_apply, Category.assoc, d_comp_d, comp_zero] + simp only [prevD, AddMonoidHom.mk'_apply, assoc, d_comp_d, comp_zero] have eq2 : C.d i j ≫ dNext j hom = 0 := by simp only [dNext, AddMonoidHom.mk'_apply, d_comp_d_assoc, zero_comp] dsimp only rw [dNext_eq hom hij, prevD_eq hom hij, Preadditive.comp_add, Preadditive.add_comp, eq1, eq2, - add_zero, zero_add, Category.assoc] + add_zero, zero_add, assoc] #align homotopy.null_homotopic_map Homotopy.nullHomotopicMap /-- Variant of `nullHomotopicMap` where the input consists only of the @@ -269,7 +269,7 @@ theorem nullHomotopicMap_comp (hom : ∀ i j, C.X i ⟶ D.X j) (g : D ⟶ E) : nullHomotopicMap hom ≫ g = nullHomotopicMap fun i j => hom i j ≫ g.f j := by ext n dsimp [nullHomotopicMap, fromNext, toPrev, AddMonoidHom.mk'_apply] - simp only [Preadditive.add_comp, Category.assoc, g.comm] + simp only [Preadditive.add_comp, assoc, g.comm] #align homotopy.null_homotopic_map_comp Homotopy.nullHomotopicMap_comp /-- Compatibility of `nullHomotopicMap'` with the postcomposition by a morphism @@ -291,7 +291,7 @@ theorem comp_nullHomotopicMap (f : C ⟶ D) (hom : ∀ i j, D.X i ⟶ E.X j) : f ≫ nullHomotopicMap hom = nullHomotopicMap fun i j => f.f i ≫ hom i j := by ext n dsimp [nullHomotopicMap, fromNext, toPrev, AddMonoidHom.mk'_apply] - simp only [Preadditive.comp_add, Category.assoc, f.comm_assoc] + simp only [Preadditive.comp_add, assoc, f.comm_assoc] #align homotopy.comp_null_homotopic_map Homotopy.comp_nullHomotopicMap /-- Compatibility of `nullHomotopicMap'` with the precomposition by a morphism @@ -570,9 +570,9 @@ def mkInductive : Homotopy e 0 where simp only [ChainComplex.next_nat_succ, dite_true] rw [mkInductiveAux₃ e zero comm_zero one comm_one succ] dsimp [xNextIso] - rw [Category.id_comp] + rw [id_comp] · dsimp [toPrev] - erw [dif_pos, Category.comp_id] + erw [dif_pos, comp_id] simp only [ChainComplex.prev] #align homotopy.mk_inductive 
Homotopy.mkInductive @@ -709,9 +709,9 @@ def mkCoinductive : Homotopy e 0 where simp only [CochainComplex.prev_nat_succ, dite_true] rw [mkCoinductiveAux₃ e zero comm_zero one comm_one succ] dsimp [xPrevIso] - rw [Category.comp_id] + rw [comp_id] · dsimp [fromNext] - erw [dif_pos, Category.id_comp] + erw [dif_pos, id_comp] simp only [CochainComplex.next] #align homotopy.mk_coinductive Homotopy.mkCoinductive @@ -734,6 +734,12 @@ structure HomotopyEquiv (C D : HomologicalComplex V c) where homotopyInvHomId : Homotopy (inv ≫ hom) (𝟙 D) #align homotopy_equiv HomotopyEquiv +variable (V c) in +/-- The morphism property on `HomologicalComplex V c` given by homotopy equivalences. -/ +def HomologicalComplex.homotopyEquivalences : + MorphismProperty (HomologicalComplex V c) := + fun X Y f => ∃ (e : HomotopyEquiv X Y), e.hom = f + namespace HomotopyEquiv /-- Any complex is homotopy equivalent to itself. -/ @@ -795,7 +801,7 @@ theorem homology'_map_eq_of_homotopy (h : Homotopy f g) (i : ι) : simp only [CategoryTheory.Subobject.factorThru_add_sub_factorThru_right] erw [Subobject.factorThru_ofLE (D.boundaries_le_cycles' i)] · simp - · rw [prevD_eq_toPrev_dTo, ← Category.assoc] + · rw [prevD_eq_toPrev_dTo, ← assoc] apply imageSubobject_factors_comp_self #align homology_map_eq_of_homotopy homology'_map_eq_of_homotopy @@ -845,3 +851,65 @@ def Functor.mapHomotopyEquiv (F : V ⥤ W) [F.Additive] (h : HomotopyEquiv C D) #align category_theory.functor.map_homotopy_equiv CategoryTheory.Functor.mapHomotopyEquiv end CategoryTheory + +section + +open HomologicalComplex CategoryTheory + +variable {C : Type*} [Category C] [Preadditive C] {ι : Type _} {c : ComplexShape ι} + [DecidableRel c.Rel] {K L : HomologicalComplex C c} {f g : K ⟶ L} + +/-- A homotopy between morphisms of homological complexes `K ⟶ L` induces a homotopy +between morphisms of short complexes `K.sc i ⟶ L.sc i`. -/ +noncomputable def Homotopy.toShortComplex (ho : Homotopy f g) (i : ι) : + ShortComplex.Homotopy ((shortComplexFunctor C c i).map f) + ((shortComplexFunctor C c i).map g) where + h₀ := + if c.Rel (c.prev i) i + then ho.hom _ (c.prev (c.prev i)) ≫ L.d _ _ + else f.f _ - g.f _ - K.d _ i ≫ ho.hom i _ + h₁ := ho.hom _ _ + h₂ := ho.hom _ _ + h₃ := + if c.Rel i (c.next i) + then K.d _ _ ≫ ho.hom (c.next (c.next i)) _ + else f.f _ - g.f _ - ho.hom _ i ≫ L.d _ _ + h₀_f := by + split_ifs with h + · dsimp + simp only [assoc, d_comp_d, comp_zero] + · dsimp + rw [L.shape _ _ h, comp_zero] + g_h₃ := by + split_ifs with h + · dsimp + simp + · dsimp + rw [K.shape _ _ h, zero_comp] + comm₁ := by + dsimp + split_ifs with h + · rw [ho.comm (c.prev i)] + dsimp [dFrom, dTo, fromNext, toPrev] + rw [congr_arg (fun j => d K (c.prev i) j ≫ ho.hom j (c.prev i)) (c.next_eq' h)] + · abel + comm₂ := ho.comm i + comm₃ := by + dsimp + split_ifs with h + · rw [ho.comm (c.next i)] + dsimp [dFrom, dTo, fromNext, toPrev] + rw [congr_arg (fun j => ho.hom (c.next i) j ≫ L.d j (c.next i)) (c.prev_eq' h)] + · abel + +lemma Homotopy.homologyMap_eq (ho : Homotopy f g) (i : ι) [K.HasHomology i] [L.HasHomology i] : + homologyMap f i = homologyMap g i := + ShortComplex.Homotopy.homologyMap_congr (ho.toShortComplex i) + +/-- The isomorphism in homology induced by an homotopy equivalence. 
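+Its forward and inverse maps are `homologyMap h.hom i` and `homologyMap h.inv i`; these are mutually inverse because homotopic maps induce the same map in homology (`Homotopy.homologyMap_eq`).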
-/ +noncomputable def HomotopyEquiv.toHomologyIso (h : HomotopyEquiv K L) (i : ι) + [K.HasHomology i] [L.HasHomology i] : K.homology i ≅ L.homology i where + hom := homologyMap h.hom i + inv := homologyMap h.inv i + hom_inv_id := by rw [← homologyMap_comp, h.homotopyHomInvId.homologyMap_eq, homologyMap_id] + inv_hom_id := by rw [← homologyMap_comp, h.homotopyInvHomId.homologyMap_eq, homologyMap_id] diff --git a/Mathlib/Algebra/Homology/HomotopyCategory.lean b/Mathlib/Algebra/Homology/HomotopyCategory.lean index 03c4a76e79ac8..6d2ff807180c3 100644 --- a/Mathlib/Algebra/Homology/HomotopyCategory.lean +++ b/Mathlib/Algebra/Homology/HomotopyCategory.lean @@ -4,7 +4,8 @@ Released under Apache 2.0 license as described in the file LICENSE. Authors: Scott Morrison -/ import Mathlib.Algebra.Homology.Homotopy -import Mathlib.CategoryTheory.Quotient +import Mathlib.Algebra.Homology.Additive +import Mathlib.CategoryTheory.Quotient.Preadditive #align_import algebra.homology.homotopy_category from "leanprover-community/mathlib"@"13ff898b0eee75d3cc75d1c06a491720eaaf911d" @@ -59,11 +60,21 @@ instance : Category (HomotopyCategory V v) := by -- TODO the homotopy_category is preadditive namespace HomotopyCategory +instance : Preadditive (HomotopyCategory V c) := Quotient.preadditive _ (by + rintro _ _ _ _ _ _ ⟨h⟩ ⟨h'⟩ + exact ⟨Homotopy.add h h'⟩) + /-- The quotient functor from complexes to the homotopy category. -/ def quotient : HomologicalComplex V c ⥤ HomotopyCategory V c := CategoryTheory.Quotient.functor _ #align homotopy_category.quotient HomotopyCategory.quotient +instance : Full (quotient V c) := Quotient.fullFunctor _ + +instance : EssSurj (quotient V c) := Quotient.essSurj_functor _ + +instance : (quotient V c).Additive where + open ZeroObject -- TODO upgrade this to `HasZeroObject`, presumably for any `quotient`. @@ -141,6 +152,9 @@ def homotopyEquivOfIso {C D : HomologicalComplex V c} #align homotopy_category.homotopy_equiv_of_iso HomotopyCategory.homotopyEquivOfIso variable (V c) + +section + variable [HasEqualizers V] [HasImages V] [HasImageMaps V] [HasCokernels V] /-- The `i`-th homology, as a functor from the homotopy category. -/ @@ -173,6 +187,34 @@ theorem homology'Functor_map_factors (i : ι) {C D : HomologicalComplex V c} (f (CategoryTheory.Quotient.lift_map_functor_map _ (_root_.homology'Functor V c i) _ f).symm #align homotopy_category.homology_functor_map_factors HomotopyCategory.homology'Functor_map_factors +end + +section + +variable [CategoryWithHomology V] + +/-- The `i`-th homology, as a functor from the homotopy category. -/ +noncomputable def homologyFunctor (i : ι) : HomotopyCategory V c ⥤ V := + CategoryTheory.Quotient.lift _ (HomologicalComplex.homologyFunctor V c i) (by + rintro K L f g ⟨h⟩ + exact h.homologyMap_eq i) + +/-- The homology functor on the homotopy category is induced by +the homology functor on homological complexes. 
-/ +noncomputable def homologyFunctorFactors (i : ι) : + quotient V c ⋙ homologyFunctor V c i ≅ + HomologicalComplex.homologyFunctor V c i := + Quotient.lift.isLift _ _ _ + +-- this is to prevent any abuse of defeq +attribute [irreducible] homologyFunctor homologyFunctorFactors + +instance (i : ι) : (homologyFunctor V c i).Additive := by + have := Functor.additive_of_iso (homologyFunctorFactors V c i).symm + exact Functor.additive_of_full_essSurj_comp (quotient V c) _ + +end + end HomotopyCategory namespace CategoryTheory diff --git a/Mathlib/Algebra/Homology/ModuleCat.lean b/Mathlib/Algebra/Homology/ModuleCat.lean index 57f3a47153e6a..10384281baeb6 100644 --- a/Mathlib/Algebra/Homology/ModuleCat.lean +++ b/Mathlib/Algebra/Homology/ModuleCat.lean @@ -6,7 +6,7 @@ Authors: Scott Morrison import Mathlib.Algebra.Homology.Homotopy import Mathlib.Algebra.Category.ModuleCat.Abelian import Mathlib.Algebra.Category.ModuleCat.Subobject -import Mathlib.CategoryTheory.Limits.ConcreteCategory +import Mathlib.CategoryTheory.Limits.Shapes.ConcreteCategory #align_import algebra.homology.Module from "leanprover-community/mathlib"@"70fd9563a21e7b963887c9360bd29b2393e6225a" @@ -42,7 +42,7 @@ theorem homology'_ext {L M N K : ModuleCat R} {f : L ⟶ M} {g : M ⟶ N} (w : f h (cokernel.π (imageToKernel _ _ w) (toKernelSubobject x)) = k (cokernel.π (imageToKernel _ _ w) (toKernelSubobject x))) : h = k := by - refine' cokernel_funext fun n => _ + refine' Concrete.cokernel_funext fun n => _ -- porting note: as `equiv_rw` was not ported, it was replaced by `Equiv.surjective` -- Gosh it would be nice if `equiv_rw` could directly use an isomorphism, or an enriched `≃`. obtain ⟨n, rfl⟩ := (kernelSubobjectIso g ≪≫ diff --git a/Mathlib/Algebra/Homology/QuasiIso.lean b/Mathlib/Algebra/Homology/QuasiIso.lean index 41579d5b9819b..6cff863fd12cb 100644 --- a/Mathlib/Algebra/Homology/QuasiIso.lean +++ b/Mathlib/Algebra/Homology/QuasiIso.lean @@ -220,6 +220,8 @@ end open HomologicalComplex +section + variable {ι : Type*} {C : Type u} [Category.{v} C] [HasZeroMorphisms C] {c : ComplexShape ι} {K L M K' L' : HomologicalComplex C c} @@ -417,3 +419,43 @@ lemma quasiIso_of_arrow_mk_iso (φ : K ⟶ L) (φ' : K' ⟶ L') (e : Arrow.mk φ [∀ i, K'.HasHomology i] [∀ i, L'.HasHomology i] [hφ : QuasiIso φ] : QuasiIso φ' := by simpa only [← quasiIso_iff_of_arrow_mk_iso φ φ' e] + +namespace HomologicalComplex + +variable (C c) + +/-- The morphism property on `HomologicalComplex C c` given by quasi-isomorphisms. 
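+By definition, `qis C c f` holds if and only if `QuasiIso f` does (see `qis_iff`); in particular, every homotopy equivalence belongs to this class (see `homotopyEquivalences_subset_qis`).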
-/ +def qis [CategoryWithHomology C] : + MorphismProperty (HomologicalComplex C c) := fun _ _ f => QuasiIso f + +variable {C c} + +@[simp] +lemma qis_iff [CategoryWithHomology C] (f : K ⟶ L) : qis C c f ↔ QuasiIso f := by rfl + +end HomologicalComplex + +end + +section + +variable {ι : Type*} {C : Type u} [Category.{v} C] [Preadditive C] + {c : ComplexShape ι} {K L : HomologicalComplex C c} + +section + +variable (e : HomotopyEquiv K L) [∀ i, K.HasHomology i] [∀ i, L.HasHomology i] + +instance : QuasiIso e.hom where + quasiIsoAt n := by + classical + rw [quasiIsoAt_iff_isIso_homologyMap] + exact IsIso.of_iso (e.toHomologyIso n) + +instance : QuasiIso e.inv := (inferInstance : QuasiIso e.symm.hom) + +lemma homotopyEquivalences_subset_qis [CategoryWithHomology C] : + homotopyEquivalences C c ⊆ qis C c := by + rintro K L _ ⟨e, rfl⟩ + simp only [qis_iff] + infer_instance diff --git a/Mathlib/Algebra/Homology/ShortComplex/HomologicalComplex.lean b/Mathlib/Algebra/Homology/ShortComplex/HomologicalComplex.lean index ad373ec78b2a5..980896f5c29e9 100644 --- a/Mathlib/Algebra/Homology/ShortComplex/HomologicalComplex.lean +++ b/Mathlib/Algebra/Homology/ShortComplex/HomologicalComplex.lean @@ -3,8 +3,9 @@ Copyright (c) 2023 Joël Riou. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Joël Riou -/ -import Mathlib.Algebra.Homology.HomologicalComplex +import Mathlib.Algebra.Homology.Additive import Mathlib.Algebra.Homology.ShortComplex.Exact +import Mathlib.Algebra.Homology.ShortComplex.Preadditive import Mathlib.Tactic.Linarith /-! @@ -601,3 +602,32 @@ lemma isoHomologyπ₀_inv_naturality [L.HasHomology 0] : HomologicalComplex.isoHomologyπ_hom_inv_id_assoc] end CochainComplex + +namespace HomologicalComplex + +variable {C ι : Type*} [Category C] [Preadditive C] {c : ComplexShape ι} + {K L : HomologicalComplex C c} {f g : K ⟶ L} + +variable (φ ψ : K ⟶ L) (i : ι) [K.HasHomology i] [L.HasHomology i] + +@[simp] +lemma homologyMap_neg : homologyMap (-φ) i = -homologyMap φ i := by + dsimp [homologyMap] + rw [← ShortComplex.homologyMap_neg] + rfl + +@[simp] +lemma homologyMap_add : homologyMap (φ + ψ) i = homologyMap φ i + homologyMap ψ i := by + dsimp [homologyMap] + rw [← ShortComplex.homologyMap_add] + rfl + +@[simp] +lemma homologyMap_sub : homologyMap (φ - ψ) i = homologyMap φ i - homologyMap ψ i := by + dsimp [homologyMap] + rw [← ShortComplex.homologyMap_sub] + rfl + +instance [CategoryWithHomology C] : (homologyFunctor C c i).Additive where + +end HomologicalComplex diff --git a/Mathlib/Algebra/Lie/Abelian.lean b/Mathlib/Algebra/Lie/Abelian.lean index 322b06f661cc2..ea25f01255453 100644 --- a/Mathlib/Algebra/Lie/Abelian.lean +++ b/Mathlib/Algebra/Lie/Abelian.lean @@ -284,12 +284,21 @@ theorem isLieAbelian_iff_center_eq_top : IsLieAbelian L ↔ center R L = ⊤ := end LieAlgebra -variable {R L} in -lemma LieModule.commute_toEndomorphism_of_mem_center_left - {x : L} (hx : x ∈ LieAlgebra.center R L) (y : L) : +namespace LieModule + +variable {R L} +variable {x : L} (hx : x ∈ LieAlgebra.center R L) (y : L) + +lemma commute_toEndomorphism_of_mem_center_left : Commute (toEndomorphism R L M x) (toEndomorphism R L M y) := by rw [Commute.symm_iff, commute_iff_lie_eq, ← LieHom.map_lie, hx y, LieHom.map_zero] +lemma commute_toEndomorphism_of_mem_center_right : + Commute (toEndomorphism R L M y) (toEndomorphism R L M x) := + (LieModule.commute_toEndomorphism_of_mem_center_left M hx y).symm + +end LieModule + end Center section IdealOperations diff --git 
a/Mathlib/Algebra/Lie/BaseChange.lean b/Mathlib/Algebra/Lie/BaseChange.lean index 16e154ff8164b..f7d73e38f5afb 100644 --- a/Mathlib/Algebra/Lie/BaseChange.lean +++ b/Mathlib/Algebra/Lie/BaseChange.lean @@ -6,6 +6,7 @@ Authors: Oliver Nash import Mathlib.Algebra.Algebra.RestrictScalars import Mathlib.Algebra.Lie.TensorProduct import Mathlib.LinearAlgebra.TensorProduct.Tower +import Mathlib.RingTheory.TensorProduct #align_import algebra.lie.base_change from "leanprover-community/mathlib"@"9264b15ee696b7ca83f13c8ad67c83d6eb70b730" @@ -144,3 +145,111 @@ instance lieAlgebra [CommRing R] [Algebra R A] : LieAlgebra R (RestrictScalars R end RestrictScalars end LieAlgebra + +section ExtendScalars + +variable [CommRing R] [LieRing L] [LieAlgebra R L] + [AddCommGroup M] [Module R M] [LieRingModule L M] [LieModule R L M] + [CommRing A] [Algebra R A] + +@[simp] +lemma LieModule.toEndomorphism_baseChange (x : L) : + toEndomorphism A (A ⊗[R] L) (A ⊗[R] M) (1 ⊗ₜ x) = (toEndomorphism R L M x).baseChange A := by + ext; simp + +namespace LieSubmodule + +variable (N : LieSubmodule R L M) + +open LieModule + +variable {R L M} in +/-- If `A` is an `R`-algebra, any Lie submodule of a Lie module `M` with coefficients in `R` may be +pushed forward to a Lie submodule of `A ⊗ M` with coefficients in `A`. + +This "base change" operation is also known as "extension of scalars". -/ +def baseChange : LieSubmodule A (A ⊗[R] L) (A ⊗[R] M) := + { (N : Submodule R M).baseChange A with + lie_mem := by + intro x m hm + simp only [AddSubsemigroup.mem_carrier, AddSubmonoid.mem_toSubsemigroup, + Submodule.mem_toAddSubmonoid] at hm ⊢ + obtain ⟨c, rfl⟩ := (Finsupp.mem_span_iff_total _ _ _).mp hm + refine x.induction_on (by simp) (fun a y ↦ ?_) (fun y z hy hz ↦ ?_) + · change toEndomorphism A (A ⊗[R] L) (A ⊗[R] M) _ _ ∈ _ + simp_rw [Finsupp.total_apply, Finsupp.sum, map_sum, map_smul, toEndomorphism_apply_apply] + suffices ∀ n : (N : Submodule R M).map (TensorProduct.mk R A M 1), + ⁅a ⊗ₜ[R] y, (n : A ⊗[R] M)⁆ ∈ (N : Submodule R M).baseChange A by + exact Submodule.sum_mem _ fun n _ ↦ Submodule.smul_mem _ _ (this n) + rintro ⟨-, ⟨n : M, hn : n ∈ N, rfl⟩⟩ + exact Submodule.tmul_mem_baseChange_of_mem _ (N.lie_mem hn) + · rw [add_lie] + exact ((N : Submodule R M).baseChange A).add_mem hy hz } + +@[simp] +lemma coe_baseChange : + (N.baseChange A : Submodule A (A ⊗[R] M)) = (N : Submodule R M).baseChange A := + rfl + +variable {N} + +variable {R A L M} in +lemma tmul_mem_baseChange_of_mem (a : A) {m : M} (hm : m ∈ N) : + a ⊗ₜ[R] m ∈ N.baseChange A := + (N : Submodule R M).tmul_mem_baseChange_of_mem a hm + +lemma mem_baseChange_iff {m : A ⊗[R] M} : + m ∈ N.baseChange A ↔ + m ∈ Submodule.span A ((N : Submodule R M).map (TensorProduct.mk R A M 1)) := + Iff.rfl + +@[simp] +lemma baseChange_bot : (⊥ : LieSubmodule R L M).baseChange A = ⊥ := by + simp only [baseChange, bot_coeSubmodule, Submodule.baseChange_bot, + Submodule.bot_toAddSubmonoid] + rfl + +@[simp] +lemma baseChange_top : (⊤ : LieSubmodule R L M).baseChange A = ⊤ := by + simp only [baseChange, top_coeSubmodule, Submodule.baseChange_top, + Submodule.bot_toAddSubmonoid] + rfl + +lemma lie_baseChange {I : LieIdeal R L} {N : LieSubmodule R L M} : + ⁅I, N⁆.baseChange A = ⁅I.baseChange A, N.baseChange A⁆ := by + set s : Set (A ⊗[R] M) := { m | ∃ x ∈ I, ∃ n ∈ N, 1 ⊗ₜ ⁅x, n⁆ = m} + have : (TensorProduct.mk R A M 1) '' {m | ∃ x ∈ I, ∃ n ∈ N, ⁅x, n⁆ = m} = s := by ext; simp + rw [← coe_toSubmodule_eq_iff, coe_baseChange, lieIdeal_oper_eq_linear_span', + Submodule.baseChange_span, this, 
lieIdeal_oper_eq_linear_span'] + refine le_antisymm (Submodule.span_mono ?_) (Submodule.span_le.mpr ?_) + · rintro - ⟨x, hx, m, hm, rfl⟩ + exact ⟨1 ⊗ₜ x, tmul_mem_baseChange_of_mem 1 hx, + 1 ⊗ₜ m, tmul_mem_baseChange_of_mem 1 hm, by simp⟩ + · rintro - ⟨x, hx, m, hm, rfl⟩ + revert m + apply Submodule.span_induction + (p := fun x' ↦ ∀ m' ∈ N.baseChange A, ⁅x', m'⁆ ∈ Submodule.span A s) hx + · rintro _ ⟨y : L, hy : y ∈ I, rfl⟩ m hm + apply Submodule.span_induction (p := fun m' ↦ ⁅(1 : A) ⊗ₜ[R] y, m'⁆ ∈ Submodule.span A s) hm + · rintro - ⟨m', hm' : m' ∈ N, rfl⟩ + rw [TensorProduct.mk_apply, LieAlgebra.ExtendScalars.bracket_tmul, mul_one] + apply Submodule.subset_span + exact ⟨y, hy, m', hm', rfl⟩ + · simp + · intro u v hu hv + rw [lie_add] + exact Submodule.add_mem _ hu hv + · intro a u hu + rw [lie_smul] + exact Submodule.smul_mem _ a hu + · simp + · intro x y hx hy m' hm' + rw [add_lie] + exact Submodule.add_mem _ (hx _ hm') (hy _ hm') + · intro a x hx m' hm' + rw [smul_lie] + exact Submodule.smul_mem _ a (hx _ hm') + +end LieSubmodule + +end ExtendScalars diff --git a/Mathlib/Algebra/Lie/Killing.lean b/Mathlib/Algebra/Lie/Killing.lean index c65ae490a231b..57e5312f970bb 100644 --- a/Mathlib/Algebra/Lie/Killing.lean +++ b/Mathlib/Algebra/Lie/Killing.lean @@ -7,6 +7,7 @@ import Mathlib.Algebra.DirectSum.LinearMap import Mathlib.Algebra.Lie.Nilpotent import Mathlib.Algebra.Lie.Semisimple import Mathlib.Algebra.Lie.Weights.Cartan +import Mathlib.FieldTheory.IsAlgClosed.AlgebraicClosure import Mathlib.LinearAlgebra.PID import Mathlib.LinearAlgebra.Trace @@ -36,8 +37,8 @@ We define the trace / Killing form in this file and prove some basic properties. a Lie algebra is non-singular, it remains non-singular when restricted to a Cartan subalgebra. * `LieAlgebra.IsKilling.isSemisimple`: if a Lie algebra has non-singular Killing form then it is semisimple. - * `LieAlgebra.IsKilling.isLieAbelian_of_isCartanSubalgebra`: if the Killing form of a Lie algebra - is non-singular, then its Cartan subalgebras are Abelian. + * `LieAlgebra.IsKilling.instIsLieAbelian_of_isCartanSubalgebra`: if the Killing form of a Lie + algebra is non-singular, then its Cartan subalgebras are Abelian. ## TODO @@ -184,10 +185,11 @@ lemma trace_toEndomorphism_eq_zero_of_mem_lcs · simp [hu, hv] · simp [hu] -variable [LieAlgebra.IsNilpotent R L] [IsTriangularizable R L M] - [IsDomain R] [IsPrincipalIdealRing R] +open TensorProduct -lemma traceForm_eq_sum_weightSpaceOf (z : L) : +variable [LieAlgebra.IsNilpotent R L] [IsDomain R] [IsPrincipalIdealRing R] + +lemma traceForm_eq_sum_weightSpaceOf [IsTriangularizable R L M] (z : L) : traceForm R L M = ∑ χ in (finite_weightSpaceOf_ne_bot R L M z).toFinset, traceForm R L (weightSpaceOf M χ z) := by ext x y @@ -208,20 +210,49 @@ lemma traceForm_eq_sum_weightSpaceOf (z : L) : -- In characteristic zero a stronger result holds (no `⊓ LieAlgebra.center K L`) TODO prove this! lemma lowerCentralSeries_one_inf_center_le_ker_traceForm : lowerCentralSeries R L L 1 ⊓ LieAlgebra.center R L ≤ LinearMap.ker (traceForm R L M) := by + /- Sketch of proof (due to Zassenhaus): + + Let `z ∈ lowerCentralSeries R L L 1 ⊓ LieAlgebra.center R L` and `x : L`. We must show that + `trace (φ x ∘ φ z) = 0` where `φ z : End R M` indicates the action of `z` on `M` (and likewise + for `φ x`). 
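+
+  The quantitative fact behind the reduction below is this: if `M_χ` is a generalized eigenspace
+  of `φ x` with eigenvalue `χ`, stable under `φ z`, then
+  `trace ((φ x ∘ φ z)|_{M_χ}) = χ * trace (φ z|_{M_χ})`, because `φ x - χ` is nilpotent on `M_χ`
+  and commutes with `φ z`. Summing over `χ` thus reduces the goal to the vanishing of each
+  `trace (φ z|_{M_χ})`.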
+ + Because `z` belongs to the indicated intersection, it has two key properties: + (a) the trace of the action of `z` vanishes on any Lie module of `L` + (see `LieModule.trace_toEndomorphism_eq_zero_of_mem_lcs`), + (b) `z` commutes with all elements of `L`. + + If `φ x` were triangularizable, we could write `M` as a direct sum of generalized eigenspaces of + `φ x`. Because `L` is nilpotent these are all Lie submodules, thus Lie modules in their own right, + and thus by (a) above we learn that `trace (φ z) = 0` restricted to each generalized eigenspace. + Because `z` commutes with `x`, this forces `trace (φ x ∘ φ z) = 0` on each generalized eigenspace, + and so by summing the traces on each generalized eigenspace we learn the total trace is zero, as + required (see `LinearMap.trace_comp_eq_zero_of_commute_of_trace_restrict_eq_zero`). + + To cater for the fact that `φ x` may not be triangularizable, we first extend the scalars from `R` + to `AlgebraicClosure (FractionRing R)` and argue using the action of `A ⊗ L` on `A ⊗ M`. -/ rintro z ⟨hz : z ∈ lowerCentralSeries R L L 1, hzc : z ∈ LieAlgebra.center R L⟩ ext x - suffices ∀ χ : R, traceForm R L (weightSpaceOf M χ x) z x = 0 by - simp [traceForm_eq_sum_weightSpaceOf R L M x, this] - intro χ - replace hz : LinearMap.trace R _ (toEndomorphism R L (weightSpaceOf M χ x) z) = 0 := - trace_toEndomorphism_eq_zero_of_mem_lcs R L _ (le_refl _) hz - let f := toEndomorphism R L (weightSpaceOf M χ x) z - let g := toEndomorphism R L (weightSpaceOf M χ x) x - have h_comm : Commute f g := commute_toEndomorphism_of_mem_center_left _ hzc x - have hg : _root_.IsNilpotent (g - algebraMap R _ χ) := - (toEndomorphism R L M x).isNilpotent_restrict_iSup_sub_algebraMap χ - rw [traceForm_apply_apply, LinearMap.trace_comp_eq_mul_of_commute_of_isNilpotent χ h_comm hg, hz, - mul_zero] + rw [traceForm_apply_apply, LinearMap.zero_apply] + let A := AlgebraicClosure (FractionRing R) + suffices algebraMap R A (trace R _ ((φ z).comp (φ x))) = 0 by + have _i : NoZeroSMulDivisors R A := NoZeroSMulDivisors.trans R (FractionRing R) A + rw [← map_zero (algebraMap R A)] at this + exact NoZeroSMulDivisors.algebraMap_injective R A this + rw [← LinearMap.trace_baseChange, LinearMap.baseChange_comp, ← toEndomorphism_baseChange, + ← toEndomorphism_baseChange] + replace hz : 1 ⊗ₜ z ∈ lowerCentralSeries A (A ⊗[R] L) (A ⊗[R] L) 1 := by + simp only [lowerCentralSeries_succ, lowerCentralSeries_zero] at hz ⊢ + rw [← LieSubmodule.baseChange_top, ← LieSubmodule.lie_baseChange] + exact Submodule.tmul_mem_baseChange_of_mem 1 hz + replace hzc : 1 ⊗ₜ[R] z ∈ LieAlgebra.center A (A ⊗[R] L) := by + simp only [mem_maxTrivSubmodule] at hzc ⊢ + intro y + exact y.induction_on rfl (fun a u ↦ by simp [hzc u]) (fun u v hu hv ↦ by simp [hu, hv]) + apply LinearMap.trace_comp_eq_zero_of_commute_of_trace_restrict_eq_zero + · exact IsTriangularizable.iSup_eq_top (1 ⊗ₜ[R] x) + · exact fun μ ↦ trace_toEndomorphism_eq_zero_of_mem_lcs A (A ⊗[R] L) + (weightSpaceOf (A ⊗[R] M) μ (1 ⊗ₜ x)) (le_refl 1) hz + · exact commute_toEndomorphism_of_mem_center_right (A ⊗[R] M) hzc (1 ⊗ₜ x) /-- A nilpotent Lie algebra with a representation whose trace form is non-singular is Abelian. 
-/ lemma isLieAbelian_of_ker_traceForm_eq_bot (h : LinearMap.ker (traceForm R L M) = ⊥) : @@ -386,7 +417,7 @@ instance isSemisimple [IsDomain R] [IsPrincipalIdealRing R] : IsSemisimple R L : instance instIsLieAbelian_of_isCartanSubalgebra [IsDomain R] [IsPrincipalIdealRing R] [IsArtinian R L] - (H : LieSubalgebra R L) [H.IsCartanSubalgebra] [LieModule.IsTriangularizable R H L] : + (H : LieSubalgebra R L) [H.IsCartanSubalgebra] : IsLieAbelian H := LieModule.isLieAbelian_of_ker_traceForm_eq_bot R H L <| ker_restrictBilinear_of_isCartanSubalgebra_eq_bot R L H diff --git a/Mathlib/Algebra/Lie/Nilpotent.lean b/Mathlib/Algebra/Lie/Nilpotent.lean index 968f967fb737c..e778405f8bd04 100644 --- a/Mathlib/Algebra/Lie/Nilpotent.lean +++ b/Mathlib/Algebra/Lie/Nilpotent.lean @@ -3,6 +3,7 @@ Copyright (c) 2021 Oliver Nash. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Oliver Nash -/ +import Mathlib.Algebra.Lie.BaseChange import Mathlib.Algebra.Lie.Solvable import Mathlib.Algebra.Lie.Quotient import Mathlib.Algebra.Lie.Normalizer @@ -30,7 +31,6 @@ carries a natural concept of nilpotency. We define these here via the lower cent lie algebra, lower central series, nilpotent -/ - universe u v w w₁ w₂ section NilpotentModules @@ -856,3 +856,25 @@ theorem _root_.LieAlgebra.isNilpotent_ad_of_isNilpotent {L : LieSubalgebra R A} #align lie_algebra.is_nilpotent_ad_of_is_nilpotent LieAlgebra.isNilpotent_ad_of_isNilpotent end OfAssociative + +section ExtendScalars + +open LieModule TensorProduct + +variable (R A L M : Type*) [CommRing R] [LieRing L] [LieAlgebra R L] + [AddCommGroup M] [Module R M] [LieRingModule L M] [LieModule R L M] + [CommRing A] [Algebra R A] + +@[simp] +lemma LieSubmodule.lowerCentralSeries_tensor_eq_baseChange (k : ℕ) : + lowerCentralSeries A (A ⊗[R] L) (A ⊗[R] M) k = + (lowerCentralSeries R L M k).baseChange A := by + induction' k with k ih; simp + simp only [lowerCentralSeries_succ, ih, ← baseChange_top, lie_baseChange] + +instance LieModule.instIsNilpotentTensor [IsNilpotent R L M] : + IsNilpotent A (A ⊗[R] L) (A ⊗[R] M) := by + obtain ⟨k, hk⟩ := inferInstanceAs (IsNilpotent R L M) + exact ⟨k, by simp [hk]⟩ + +end ExtendScalars diff --git a/Mathlib/Algebra/Module/Basic.lean b/Mathlib/Algebra/Module/Basic.lean index 1dd0e67e51ec4..91f19844a5a7d 100644 --- a/Mathlib/Algebra/Module/Basic.lean +++ b/Mathlib/Algebra/Module/Basic.lean @@ -358,27 +358,6 @@ def RingHom.toModule [Semiring R] [Semiring S] (f : R →+* S) : Module R S := Module.compHom S f #align ring_hom.to_module RingHom.toModule -/-- The tautological action by `R →+* R` on `R`. - -This generalizes `Function.End.applyMulAction`. -/ -instance RingHom.applyDistribMulAction [Semiring R] : DistribMulAction (R →+* R) R where - smul := (· <| ·) - smul_zero := RingHom.map_zero - smul_add := RingHom.map_add - one_smul _ := rfl - mul_smul _ _ _ := rfl -#align ring_hom.apply_distrib_mul_action RingHom.applyDistribMulAction - -@[simp] -protected theorem RingHom.smul_def [Semiring R] (f : R →+* R) (a : R) : f • a = f a := - rfl -#align ring_hom.smul_def RingHom.smul_def - -/-- `RingHom.applyDistribMulAction` is faithful. 
-/ -instance RingHom.applyFaithfulSMul [Semiring R] : FaithfulSMul (R →+* R) R := - ⟨fun {_ _} h => RingHom.ext h⟩ -#align ring_hom.apply_has_faithful_smul RingHom.applyFaithfulSMul - section AddCommMonoid variable [Semiring R] [AddCommMonoid M] [Module R M] diff --git a/Mathlib/Algebra/Module/Hom.lean b/Mathlib/Algebra/Module/Hom.lean index 415287a0a29ab..77d48d056b1df 100644 --- a/Mathlib/Algebra/Module/Hom.lean +++ b/Mathlib/Algebra/Module/Hom.lean @@ -65,18 +65,14 @@ instance isCentralScalar [DistribMulAction Rᵐᵒᵖ B] [IsCentralScalar R B] : end /-- Scalar multiplication on the left as an additive monoid homomorphism. -/ -@[simps (config := .asFn)] -protected def smulLeft [Monoid M] [AddMonoid A] [DistribMulAction M A] (c : M) : A →+ A where - toFun := (c • ·) - map_zero' := smul_zero c - map_add' := smul_add c +@[simps! (config := .asFn)] +protected def smulLeft [Monoid M] [AddMonoid A] [DistribMulAction M A] (c : M) : A →+ A := + DistribMulAction.toAddMonoidHom _ c /-- Scalar multiplication as a biadditive monoid homomorphism. We need `M` to be commutative to have addition on `M →+ M`. -/ -protected def smul [Semiring R] [AddCommMonoid M] [Module R M] : R →+ M →+ M where - toFun := .smulLeft - map_zero' := AddMonoidHom.ext <| zero_smul _ - map_add' _ _ := AddMonoidHom.ext <| add_smul _ _ +protected def smul [Semiring R] [AddCommMonoid M] [Module R M] : R →+ M →+ M := + (Module.toAddMonoidEnd R M).toAddMonoidHom @[simp] theorem coe_smul' [Semiring R] [AddCommMonoid M] [Module R M] : ⇑(.smul : R →+ M →+ M) = AddMonoidHom.smulLeft := rfl @@ -87,3 +83,10 @@ instance module [Semiring R] [AddMonoid A] [AddCommMonoid B] [Module R B] : Modu #align add_monoid_hom.module AddMonoidHom.module end AddMonoidHom + +/-- The tautological action by `AddMonoid.End α` on `α`. + +This generalizes `AddMonoid.End.applyDistribMulAction`. -/ +instance AddMonoid.End.applyModule [AddCommMonoid A] : Module (AddMonoid.End A) A where + add_smul _ _ _ := rfl + zero_smul _ := rfl diff --git a/Mathlib/Algebra/Module/Injective.lean b/Mathlib/Algebra/Module/Injective.lean index df1003afae5f2..c0110c680241b 100644 --- a/Mathlib/Algebra/Module/Injective.lean +++ b/Mathlib/Algebra/Module/Injective.lean @@ -175,8 +175,7 @@ set_option linter.uppercaseLean3 false in /-- The maximal element of every nonempty chain of `extension_of i f`. -/ def ExtensionOf.max {c : Set (ExtensionOf i f)} (hchain : IsChain (· ≤ ·) c) (hnonempty : c.Nonempty) : ExtensionOf i f := - { - LinearPMap.sSup _ + { LinearPMap.sSup _ (IsChain.directedOn <| chain_linearPMap_of_chain_extensionOf hchain) with diff --git a/Mathlib/Algebra/Module/Submodule/LinearMap.lean b/Mathlib/Algebra/Module/Submodule/LinearMap.lean index 7a441ac35bf5f..a0084077ab8ad 100644 --- a/Mathlib/Algebra/Module/Submodule/LinearMap.lean +++ b/Mathlib/Algebra/Module/Submodule/LinearMap.lean @@ -29,7 +29,7 @@ In this file we define a number of linear maps involving submodules of a module. 
submodule, subspace, linear map -/ -open BigOperators Function +open BigOperators Function Set universe u'' u' u v w @@ -199,6 +199,20 @@ theorem restrict_apply {f : M →ₗ[R] M₁} {p : Submodule R M} {q : Submodule rfl #align linear_map.restrict_apply LinearMap.restrict_apply +lemma restrict_comp + {M₂ M₃ : Type*} [AddCommMonoid M₂] [AddCommMonoid M₃] [Module R M₂] [Module R M₃] + {p : Submodule R M} {p₂ : Submodule R M₂} {p₃ : Submodule R M₃} + {f : M →ₗ[R] M₂} {g : M₂ →ₗ[R] M₃} + (hf : MapsTo f p p₂) (hg : MapsTo g p₂ p₃) (hfg : MapsTo (g ∘ₗ f) p p₃ := hg.comp hf) : + (g ∘ₗ f).restrict hfg = (g.restrict hg) ∘ₗ (f.restrict hf) := + rfl + +lemma restrict_commute {f g : M →ₗ[R] M} (h : Commute f g) {p : Submodule R M} + (hf : MapsTo f p p) (hg : MapsTo g p p) : + Commute (f.restrict hf) (g.restrict hg) := by + change _ * _ = _ * _ + conv_lhs => rw [mul_eq_comp, ← restrict_comp]; congr; rw [← mul_eq_comp, h.eq] + theorem subtype_comp_restrict {f : M →ₗ[R] M₁} {p : Submodule R M} {q : Submodule R M₁} (hf : ∀ x ∈ p, f x ∈ q) : q.subtype.comp (f.restrict hf) = f.domRestrict p := rfl @@ -226,13 +240,15 @@ instance uniqueOfRight [Subsingleton M₂] : Unique (M →ₛₗ[σ₁₂] M₂) #align linear_map.unique_of_right LinearMap.uniqueOfRight /-- Evaluation of a `σ₁₂`-linear map at a fixed `a`, as an `AddMonoidHom`. -/ +@[simps] def evalAddMonoidHom (a : M) : (M →ₛₗ[σ₁₂] M₂) →+ M₂ where toFun f := f a map_add' f g := LinearMap.add_apply f g a map_zero' := rfl #align linear_map.eval_add_monoid_hom LinearMap.evalAddMonoidHom -/-- `LinearMap.toAddMonoidHom` promoted to a `AddMonoidHom` -/ +/-- `LinearMap.toAddMonoidHom` promoted to an `AddMonoidHom`. -/ +@[simps] def toAddMonoidHom' : (M →ₛₗ[σ₁₂] M₂) →+ M →+ M₂ where toFun := toAddMonoidHom map_zero' := by ext; rfl diff --git a/Mathlib/Algebra/MonoidAlgebra/Support.lean b/Mathlib/Algebra/MonoidAlgebra/Support.lean index adf99e3bc63f8..0befaa5dd839f 100644 --- a/Mathlib/Algebra/MonoidAlgebra/Support.lean +++ b/Mathlib/Algebra/MonoidAlgebra/Support.lean @@ -79,21 +79,21 @@ theorem support_mul [Mul G] [DecidableEq G] (a b : MonoidAlgebra k G) : Subset.trans support_sum <| biUnion_mono fun _a₂ _ => support_single_subset #align monoid_algebra.support_mul MonoidAlgebra.support_mul -theorem support_mul_single [RightCancelSemigroup G] (f : MonoidAlgebra k G) (r : k) +theorem support_mul_single [Mul G] [IsRightCancelMul G] (f : MonoidAlgebra k G) (r : k) (hr : ∀ y, y * r = 0 ↔ y = 0) (x : G) : (f * single x r).support = f.support.map (mulRightEmbedding x) := by classical ext - simp only [support_mul_single_eq_image f hr (isRightRegular_of_rightCancelSemigroup x), + simp only [support_mul_single_eq_image f hr (IsRightRegular.all x), mem_image, mem_map, mulRightEmbedding_apply] #align monoid_algebra.support_mul_single MonoidAlgebra.support_mul_single -theorem support_single_mul [LeftCancelSemigroup G] (f : MonoidAlgebra k G) (r : k) +theorem support_single_mul [Mul G] [IsLeftCancelMul G] (f : MonoidAlgebra k G) (r : k) (hr : ∀ y, r * y = 0 ↔ y = 0) (x : G) : (single x r * f : MonoidAlgebra k G).support = f.support.map (mulLeftEmbedding x) := by classical ext - simp only [support_single_mul_eq_image f hr (isLeftRegular_of_leftCancelSemigroup x), mem_image, + simp only [support_single_mul_eq_image f hr (IsLeftRegular.all x), mem_image, mem_map, mulLeftEmbedding_apply] #align monoid_algebra.support_single_mul MonoidAlgebra.support_single_mul @@ -122,16 +122,16 @@ theorem support_mul [DecidableEq G] [Add G] (a b : k[G]) : @MonoidAlgebra.support_mul k (Multiplicative G) _ _ _ _ _ 
#align add_monoid_algebra.support_mul AddMonoidAlgebra.support_mul -theorem support_mul_single [AddRightCancelSemigroup G] (f : k[G]) (r : k) +theorem support_mul_single [Add G] [IsRightCancelAdd G] (f : k[G]) (r : k) (hr : ∀ y, y * r = 0 ↔ y = 0) (x : G) : (f * single x r : k[G]).support = f.support.map (addRightEmbedding x) := - @MonoidAlgebra.support_mul_single k (Multiplicative G) _ _ _ _ hr _ + MonoidAlgebra.support_mul_single (G := Multiplicative G) _ _ hr _ #align add_monoid_algebra.support_mul_single AddMonoidAlgebra.support_mul_single -theorem support_single_mul [AddLeftCancelSemigroup G] (f : k[G]) (r : k) +theorem support_single_mul [Add G] [IsLeftCancelAdd G] (f : k[G]) (r : k) (hr : ∀ y, r * y = 0 ↔ y = 0) (x : G) : (single x r * f : k[G]).support = f.support.map (addLeftEmbedding x) := - @MonoidAlgebra.support_single_mul k (Multiplicative G) _ _ _ _ hr _ + MonoidAlgebra.support_single_mul (G := Multiplicative G) _ _ hr _ #align add_monoid_algebra.support_single_mul AddMonoidAlgebra.support_single_mul section Span diff --git a/Mathlib/Algebra/Order/Group/Defs.lean b/Mathlib/Algebra/Order/Group/Defs.lean index cc4a99c2fa948..0a29356297ca9 100644 --- a/Mathlib/Algebra/Order/Group/Defs.lean +++ b/Mathlib/Algebra/Order/Group/Defs.lean @@ -60,7 +60,7 @@ instance (priority := 100) OrderedCommGroup.toOrderedCancelCommMonoid [OrderedCo #align ordered_add_comm_group.to_ordered_cancel_add_comm_monoid OrderedAddCommGroup.toOrderedCancelAddCommMonoid example (α : Type u) [OrderedAddCommGroup α] : CovariantClass α α (swap (· + ·)) (· < ·) := - AddRightCancelSemigroup.covariant_swap_add_lt_of_covariant_swap_add_le α + IsRightCancelAdd.covariant_swap_add_lt_of_covariant_swap_add_le α -- Porting note: this instance is not used, -- and causes timeouts after lean4#2210. diff --git a/Mathlib/Algebra/Order/WithZero.lean b/Mathlib/Algebra/Order/WithZero.lean index e299e11667a68..a9be045b370ee 100644 --- a/Mathlib/Algebra/Order/WithZero.lean +++ b/Mathlib/Algebra/Order/WithZero.lean @@ -208,7 +208,7 @@ theorem mul_lt_right₀ (c : α) (h : a < b) (hc : c ≠ 0) : a * c < b * c := b theorem inv_lt_inv₀ (ha : a ≠ 0) (hb : b ≠ 0) : a⁻¹ < b⁻¹ ↔ b < a := show (Units.mk0 a ha)⁻¹ < (Units.mk0 b hb)⁻¹ ↔ Units.mk0 b hb < Units.mk0 a ha from have : CovariantClass αˣ αˣ (· * ·) (· < ·) := - LeftCancelSemigroup.covariant_mul_lt_of_covariant_mul_le αˣ + IsLeftCancelMul.covariant_mul_lt_of_covariant_mul_le αˣ inv_lt_inv_iff #align inv_lt_inv₀ inv_lt_inv₀ diff --git a/Mathlib/Algebra/Regular/Basic.lean b/Mathlib/Algebra/Regular/Basic.lean index ce711ab138051..072ce3179a3c1 100644 --- a/Mathlib/Algebra/Regular/Basic.lean +++ b/Mathlib/Algebra/Regular/Basic.lean @@ -343,35 +343,26 @@ theorem IsUnit.isRegular (ua : IsUnit a) : IsRegular a := by end Monoid -/-- Elements of a left cancel semigroup are left regular. -/ -@[to_additive "Elements of an add left cancel semigroup are add-left-regular."] -theorem isLeftRegular_of_leftCancelSemigroup [LeftCancelSemigroup R] - (g : R) : IsLeftRegular g := +/-- If all multiplications cancel on the left then every element is left-regular. 
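+In particular every element of a `LeftCancelSemigroup` or of a `CancelMonoid` is left-regular.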
-/ +@[to_additive "If all additions cancel on the left then every element is add-left-regular."] +theorem IsLeftRegular.all [Mul R] [IsLeftCancelMul R] (g : R) : IsLeftRegular g := mul_right_injective g -#align is_left_regular_of_left_cancel_semigroup isLeftRegular_of_leftCancelSemigroup -#align is_add_left_regular_of_left_cancel_add_semigroup isAddLeftRegular_of_addLeftCancelSemigroup +#align is_left_regular_of_left_cancel_semigroup IsLeftRegular.all +#align is_add_left_regular_of_left_cancel_add_semigroup IsAddLeftRegular.all -/-- Elements of a right cancel semigroup are right regular. -/ -@[to_additive "Elements of an add right cancel semigroup are add-right-regular"] -theorem isRightRegular_of_rightCancelSemigroup [RightCancelSemigroup R] - (g : R) : IsRightRegular g := +/-- If all multiplications cancel on the right then every element is right-regular. -/ +@[to_additive "If all additions cancel on the right then every element is add-right-regular."] +theorem IsRightRegular.all [Mul R] [IsRightCancelMul R] (g : R) : IsRightRegular g := mul_left_injective g -#align is_right_regular_of_right_cancel_semigroup isRightRegular_of_rightCancelSemigroup -#align is_add_right_regular_of_right_cancel_add_semigroup isAddRightRegular_of_addRightCancelSemigroup +#align is_right_regular_of_right_cancel_semigroup IsRightRegular.all +#align is_add_right_regular_of_right_cancel_add_semigroup IsLeftRegular.all -section CancelMonoid - -variable [CancelMonoid R] - -/-- Elements of a cancel monoid are regular. Cancel semigroups do not appear to exist. -/ -@[to_additive "Elements of an add cancel monoid are regular. -Add cancel semigroups do not appear to exist."] -theorem isRegular_of_cancelMonoid (g : R) : IsRegular g := +/-- If all multiplications cancel then every element is regular. -/ +@[to_additive "If all additions cancel then every element is add-regular."] +theorem IsRegular.all [Mul R] [IsCancelMul R] (g : R) : IsRegular g := ⟨mul_right_injective g, mul_left_injective g⟩ -#align is_regular_of_cancel_monoid isRegular_of_cancelMonoid -#align is_add_regular_of_cancel_add_monoid isAddRegular_of_addCancelMonoid - -end CancelMonoid +#align is_regular_of_cancel_monoid IsRegular.all +#align is_add_regular_of_cancel_add_monoid IsAddRegular.all section CancelMonoidWithZero diff --git a/Mathlib/AlgebraicGeometry/OpenImmersion.lean b/Mathlib/AlgebraicGeometry/OpenImmersion.lean index 21833de097f79..a933b9e7736c7 100644 --- a/Mathlib/AlgebraicGeometry/OpenImmersion.lean +++ b/Mathlib/AlgebraicGeometry/OpenImmersion.lean @@ -750,6 +750,28 @@ def Scheme.OpenCover.pullbackCover {X : Scheme} (𝒰 : X.OpenCover) {W : Scheme · rw [← TopCat.epi_iff_surjective]; infer_instance #align algebraic_geometry.Scheme.open_cover.pullback_cover AlgebraicGeometry.Scheme.OpenCover.pullbackCover +/-- Given an open cover on `X`, we may pull them back along a morphism `f : W ⟶ X` to obtain +an open cover of `W`. This is similar to `Scheme.OpenCover.pullbackCover`, but here we +take `pullback (𝒰.map x) f` instead of `pullback f (𝒰.map x)`. 
-/ +@[simps] +def Scheme.OpenCover.pullbackCover' {X : Scheme} (𝒰 : X.OpenCover) {W : Scheme} (f : W ⟶ X) : + W.OpenCover where + J := 𝒰.J + obj x := pullback (𝒰.map x) f + map x := pullback.snd + f x := 𝒰.f (f.1.base x) + Covers x := by + rw [← + show _ = (pullback.snd : pullback (𝒰.map (𝒰.f (f.1.base x))) f ⟶ _).1.base from + PreservesPullback.iso_hom_snd Scheme.forgetToTop (𝒰.map (𝒰.f (f.1.base x))) f] + -- Porting note : `rw` to `erw` on this single lemma + erw [coe_comp] + rw [Set.range_comp, Set.range_iff_surjective.mpr, Set.image_univ, + TopCat.pullback_snd_range] + obtain ⟨y, h⟩ := 𝒰.Covers (f.1.base x) + exact ⟨y, h⟩ + · rw [← TopCat.epi_iff_surjective]; infer_instance + theorem Scheme.OpenCover.iUnion_range {X : Scheme} (𝒰 : X.OpenCover) : ⋃ i, Set.range (𝒰.map i).1.base = Set.univ := by rw [Set.eq_univ_iff_forall] diff --git a/Mathlib/AlgebraicGeometry/Sites/BigZariski.lean b/Mathlib/AlgebraicGeometry/Sites/BigZariski.lean new file mode 100644 index 0000000000000..408c713dac126 --- /dev/null +++ b/Mathlib/AlgebraicGeometry/Sites/BigZariski.lean @@ -0,0 +1,74 @@ +/- +Copyright (c) 2023 Joël Riou. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Joël Riou +-/ +import Mathlib.AlgebraicGeometry.Pullbacks +import Mathlib.CategoryTheory.Sites.Pretopology +/-! +# The big Zariski site of schemes + +In this file, we define the Zariski topology, as a Grothendieck topology on the +category `Scheme.{u}`: this is `Scheme.zariskiTopology.{u}`. If `X : Scheme.{u}`, +the Zariski topology on `Over X` can be obtained as `Scheme.zariskiTopology.over X` +(see `CategoryTheory.Sites.Over`.). + +TODO: +* If `Y : Scheme.{u}`, define a continuous functor from the category of opens of `Y` +to `Over Y`, and show that a presheaf on `Over Y` is a sheaf for the Zariski topology +iff its "restriction" to the topological space `Z` is a sheaf for all `Z : Over Y`. +* We should have good notions of (pre)sheaves of `Type (u + 1)` (e.g. associated +sheaf functor, pushforward, pullbacks) on `Scheme.{u}` for this topology. However, +some constructions in the `CategoryTheory.Sites` folder currently assume that +the site is a small category: this should be generalized. As a result, +this big Zariski site can considered as a test case of the Grothendieck topology API +for future applications to étale cohomology. + +-/ + +universe v u + +open CategoryTheory + +namespace AlgebraicGeometry + +namespace Scheme + +/-- The Zariski pretopology on the category of schemes. -/ +def zariskiPretopology : Pretopology (Scheme.{u}) where + coverings Y S := ∃ (U : OpenCover.{u} Y), S = Presieve.ofArrows U.obj U.map + has_isos Y X f _ := ⟨openCoverOfIsIso f, (Presieve.ofArrows_pUnit _).symm⟩ + pullbacks := by + rintro Y X f _ ⟨U, rfl⟩ + exact ⟨U.pullbackCover' f, (Presieve.ofArrows_pullback _ _ _).symm⟩ + Transitive := by + rintro X _ T ⟨U, rfl⟩ H + choose V hV using H + use U.bind (fun j => V (U.map j) ⟨j⟩) + simpa only [OpenCover.bind, ← hV] using Presieve.ofArrows_bind U.obj U.map _ + (fun _ f H => (V f H).obj) (fun _ f H => (V f H).map) + +/-- The Zariski topology on the category of schemes. 
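+It is the Grothendieck topology generated by `zariskiPretopology`: a sieve is a covering sieve
+if and only if it contains `Presieve.ofArrows U.obj U.map` for some open cover `U`
+(see `zariskiPretopology_openCover` and `zariskiTopology_openCover` below).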
-/ +abbrev zariskiTopology : GrothendieckTopology (Scheme.{u}) := + zariskiPretopology.toGrothendieck + +lemma zariskiPretopology_openCover {Y : Scheme.{u}} (U : OpenCover.{u} Y) : + zariskiPretopology Y (Presieve.ofArrows U.obj U.map) := + ⟨U, rfl⟩ + +lemma zariskiTopology_openCover {Y : Scheme.{u}} (U : OpenCover.{v} Y) : + zariskiTopology Y (Sieve.generate (Presieve.ofArrows U.obj U.map)) := by + let V : OpenCover.{u} Y := + { J := Y + obj := fun y => U.obj (U.f y) + map := fun y => U.map (U.f y) + f := id + Covers := U.Covers + IsOpen := fun _ => U.IsOpen _ } + refine' ⟨_, zariskiPretopology_openCover V, _⟩ + rintro _ _ ⟨y⟩ + exact ⟨_, 𝟙 _, U.map (U.f y), ⟨_⟩, by simp⟩ + +end Scheme + +end AlgebraicGeometry diff --git a/Mathlib/AlgebraicTopology/DoldKan/EquivalencePseudoabelian.lean b/Mathlib/AlgebraicTopology/DoldKan/EquivalencePseudoabelian.lean index 79b8d4df22d42..0156f6ab91913 100644 --- a/Mathlib/AlgebraicTopology/DoldKan/EquivalencePseudoabelian.lean +++ b/Mathlib/AlgebraicTopology/DoldKan/EquivalencePseudoabelian.lean @@ -66,24 +66,33 @@ def Γ [IsIdempotentComplete C] [HasFiniteCoproducts C] : ChainComplex C ℕ ⥤ variable [IsIdempotentComplete C] [HasFiniteCoproducts C] -theorem hN₁ : - (toKaroubiEquivalence (SimplicialObject C)).functor ⋙ Preadditive.DoldKan.equivalence.functor = - N₁ := - Functor.congr_obj (functorExtension₁_comp_whiskeringLeft_toKaroubi _ _) N₁ -set_option linter.uppercaseLean3 false in -#align category_theory.idempotents.dold_kan.hN₁ CategoryTheory.Idempotents.DoldKan.hN₁ - -theorem hΓ₀ : - (toKaroubiEquivalence (ChainComplex C ℕ)).functor ⋙ Preadditive.DoldKan.equivalence.inverse = +/-- A reformulation of the isomorphism `toKaroubi (SimplicialObject C) ⋙ N₂ ≅ N₁` -/ +def isoN₁ : + (toKaroubiEquivalence (SimplicialObject C)).functor ⋙ + Preadditive.DoldKan.equivalence.functor ≅ N₁ := toKaroubiCompN₂IsoN₁ + +@[simp] +lemma isoN₁_hom_app_f (X : SimplicialObject C) : + (isoN₁.hom.app X).f = PInfty := rfl + +/-- A reformulation of the canonical isomorphism +`toKaroubi (ChainComplex C ℕ) ⋙ Γ₂ ≅ Γ ⋙ toKaroubi (SimplicialObject C)`. -/ +def isoΓ₀ : + (toKaroubiEquivalence (ChainComplex C ℕ)).functor ⋙ Preadditive.DoldKan.equivalence.inverse ≅ Γ ⋙ (toKaroubiEquivalence _).functor := - Functor.congr_obj (functorExtension₂_comp_whiskeringLeft_toKaroubi _ _) Γ₀ -#align category_theory.idempotents.dold_kan.hΓ₀ CategoryTheory.Idempotents.DoldKan.hΓ₀ + (functorExtension₂CompWhiskeringLeftToKaroubiIso _ _).app Γ₀ + +@[simp] +lemma N₂_map_isoΓ₀_hom_app_f (X : ChainComplex C ℕ) : + (N₂.map (isoΓ₀.hom.app X)).f = PInfty := by + ext + apply comp_id /-- The Dold-Kan equivalence for pseudoabelian categories given by the functors `N` and `Γ`. It is obtained by applying the results in `Compatibility.lean` to the equivalence `Preadditive.DoldKan.Equivalence`. 
-/ def equivalence : SimplicialObject C ≌ ChainComplex C ℕ := - Compatibility.equivalence (eqToIso hN₁) (eqToIso hΓ₀) + Compatibility.equivalence isoN₁ isoΓ₀ #align category_theory.idempotents.dold_kan.equivalence CategoryTheory.Idempotents.DoldKan.equivalence theorem equivalence_functor : (equivalence : SimplicialObject C ≌ _).functor = N := @@ -98,12 +107,11 @@ theorem equivalence_inverse : (equivalence : SimplicialObject C ≌ _).inverse = for the construction of our counit isomorphism `η` -/ theorem hη : Compatibility.τ₀ = - Compatibility.τ₁ (eqToIso hN₁) (eqToIso hΓ₀) + Compatibility.τ₁ isoN₁ isoΓ₀ (N₁Γ₀ : Γ ⋙ N₁ ≅ (toKaroubiEquivalence (ChainComplex C ℕ)).functor) := by ext K : 3 - simp only [Compatibility.τ₀_hom_app, Compatibility.τ₁_hom_app, eqToIso.hom] - refine' (N₂Γ₂_compatible_with_N₁Γ₀ K).trans _ - simp only [N₂Γ₂ToKaroubiIso_hom, eqToHom_map, eqToHom_app, eqToHom_trans_assoc] + simp only [Compatibility.τ₀_hom_app, Compatibility.τ₁_hom_app] + refine' (N₂Γ₂_compatible_with_N₁Γ₀ K).trans (by simp ) #align category_theory.idempotents.dold_kan.hη CategoryTheory.Idempotents.DoldKan.hη /-- The counit isomorphism induced by `N₁Γ₀` -/ @@ -119,9 +127,10 @@ theorem equivalence_counitIso : #align category_theory.idempotents.dold_kan.equivalence_counit_iso CategoryTheory.Idempotents.DoldKan.equivalence_counitIso theorem hε : - Compatibility.υ (eqToIso hN₁) = + Compatibility.υ (isoN₁) = (Γ₂N₁ : (toKaroubiEquivalence _).functor ≅ (N₁ : SimplicialObject C ⥤ _) ⋙ Preadditive.DoldKan.equivalence.inverse) := by + dsimp only [isoN₁] ext1 rw [← cancel_epi Γ₂N₁.inv, Iso.inv_hom_id] ext X : 2 @@ -129,13 +138,15 @@ theorem hε : erw [compatibility_Γ₂N₁_Γ₂N₂_natTrans X] rw [Compatibility.υ_hom_app, Preadditive.DoldKan.equivalence_unitIso, Iso.app_inv, assoc] erw [← NatTrans.comp_app_assoc, IsIso.hom_inv_id] - rw [NatTrans.id_app, id_comp, NatTrans.id_app, eqToIso.hom, eqToHom_app, eqToHom_map] - rw [compatibility_Γ₂N₁_Γ₂N₂_inv_app, eqToHom_trans, eqToHom_refl] + rw [NatTrans.id_app, id_comp, NatTrans.id_app, Γ₂N₂ToKaroubiIso_inv_app] + dsimp only [Preadditive.DoldKan.equivalence_inverse, Preadditive.DoldKan.Γ] + rw [← Γ₂.map_comp, Iso.inv_hom_id_app, Γ₂.map_id] + rfl #align category_theory.idempotents.dold_kan.hε CategoryTheory.Idempotents.DoldKan.hε /-- The unit isomorphism induced by `Γ₂N₁`. -/ def ε : 𝟭 (SimplicialObject C) ≅ N ⋙ Γ := - Compatibility.equivalenceUnitIso (eqToIso hΓ₀) Γ₂N₁ + Compatibility.equivalenceUnitIso isoΓ₀ Γ₂N₁ #align category_theory.idempotents.dold_kan.ε CategoryTheory.Idempotents.DoldKan.ε theorem equivalence_unitIso : diff --git a/Mathlib/AlgebraicTopology/DoldKan/FunctorN.lean b/Mathlib/AlgebraicTopology/DoldKan/FunctorN.lean index e728fa265609b..b5778c54563d9 100644 --- a/Mathlib/AlgebraicTopology/DoldKan/FunctorN.lean +++ b/Mathlib/AlgebraicTopology/DoldKan/FunctorN.lean @@ -63,9 +63,17 @@ def N₂ : Karoubi (SimplicialObject C) ⥤ Karoubi (ChainComplex C ℕ) := set_option linter.uppercaseLean3 false in #align algebraic_topology.dold_kan.N₂ AlgebraicTopology.DoldKan.N₂ --- porting note: added to ease the port of `AlgebraicTopology.DoldKan.NCompGamma` -lemma compatibility_N₁_N₂ : toKaroubi (SimplicialObject C) ⋙ N₂ = N₁ := - Functor.congr_obj (functorExtension₁_comp_whiskeringLeft_toKaroubi _ _) N₁ +/-- The canonical isomorphism `toKaroubi (SimplicialObject C) ⋙ N₂ ≅ N₁`. 
-/ +def toKaroubiCompN₂IsoN₁ : toKaroubi (SimplicialObject C) ⋙ N₂ ≅ N₁ := + (functorExtension₁CompWhiskeringLeftToKaroubiIso _ _).app N₁ + +@[simp] +lemma toKaroubiCompN₂IsoN₁_hom_app (X : SimplicialObject C) : + (toKaroubiCompN₂IsoN₁.hom.app X).f = PInfty := rfl + +@[simp] +lemma toKaroubiCompN₂IsoN₁_inv_app (X : SimplicialObject C) : + (toKaroubiCompN₂IsoN₁.inv.app X).f = PInfty := rfl end DoldKan diff --git a/Mathlib/AlgebraicTopology/DoldKan/GammaCompN.lean b/Mathlib/AlgebraicTopology/DoldKan/GammaCompN.lean index 82ef04695fa2d..1634c0e310058 100644 --- a/Mathlib/AlgebraicTopology/DoldKan/GammaCompN.lean +++ b/Mathlib/AlgebraicTopology/DoldKan/GammaCompN.lean @@ -119,24 +119,46 @@ set_option linter.uppercaseLean3 false in -- Porting note: added to speed up elaboration attribute [irreducible] N₁Γ₀ -theorem N₂Γ₂_toKaroubi : toKaroubi (ChainComplex C ℕ) ⋙ Γ₂ ⋙ N₂ = Γ₀ ⋙ N₁ := by - have h := Functor.congr_obj (functorExtension₂_comp_whiskeringLeft_toKaroubi - (ChainComplex C ℕ) (SimplicialObject C)) Γ₀ - have h' := Functor.congr_obj (functorExtension₁_comp_whiskeringLeft_toKaroubi - (SimplicialObject C) (ChainComplex C ℕ)) N₁ - dsimp [N₂, Γ₂, functorExtension₁] at h h' ⊢ - rw [← Functor.assoc, h, Functor.assoc, h'] -set_option linter.uppercaseLean3 false in -#align algebraic_topology.dold_kan.N₂Γ₂_to_karoubi AlgebraicTopology.DoldKan.N₂Γ₂_toKaroubi - /-- Compatibility isomorphism between `toKaroubi _ ⋙ Γ₂ ⋙ N₂` and `Γ₀ ⋙ N₁` which are functors `ChainComplex C ℕ ⥤ Karoubi (ChainComplex C ℕ)`. -/ -@[simps!] def N₂Γ₂ToKaroubiIso : toKaroubi (ChainComplex C ℕ) ⋙ Γ₂ ⋙ N₂ ≅ Γ₀ ⋙ N₁ := - eqToIso N₂Γ₂_toKaroubi + calc + toKaroubi (ChainComplex C ℕ) ⋙ Γ₂ ⋙ N₂ ≅ + toKaroubi (ChainComplex C ℕ) ⋙ (Γ₂ ⋙ N₂) := (Functor.associator _ _ _).symm + _ ≅ (Γ₀ ⋙ toKaroubi (SimplicialObject C)) ⋙ N₂ := + isoWhiskerRight ((functorExtension₂CompWhiskeringLeftToKaroubiIso _ _).app Γ₀) N₂ + _ ≅ Γ₀ ⋙ toKaroubi (SimplicialObject C) ⋙ N₂ := Functor.associator _ _ _ + _ ≅ Γ₀ ⋙ N₁ := + isoWhiskerLeft Γ₀ ((functorExtension₁CompWhiskeringLeftToKaroubiIso _ _).app N₁) set_option linter.uppercaseLean3 false in #align algebraic_topology.dold_kan.N₂Γ₂_to_karoubi_iso AlgebraicTopology.DoldKan.N₂Γ₂ToKaroubiIso +@[simp] +lemma N₂Γ₂ToKaroubiIso_hom_app (X : ChainComplex C ℕ) : + (N₂Γ₂ToKaroubiIso.hom.app X).f = PInfty := by + ext n + dsimp [N₂Γ₂ToKaroubiIso] + simp only [comp_id, assoc, PInfty_f_idem] + conv_rhs => + rw [← PInfty_f_idem] + congr 1 + apply (Γ₀.splitting X).hom_ext' + intro A + rw [Splitting.ι_desc_assoc, assoc] + apply id_comp + +@[simp] +lemma N₂Γ₂ToKaroubiIso_inv_app (X : ChainComplex C ℕ) : + (N₂Γ₂ToKaroubiIso.inv.app X).f = PInfty := by + ext n + dsimp [N₂Γ₂ToKaroubiIso] + simp only [comp_id, PInfty_f_idem_assoc, AlternatingFaceMapComplex.obj_X, Γ₀_obj_obj] + convert comp_id _ + apply (Γ₀.splitting X).hom_ext' + intro A + rw [Splitting.ι_desc] + erw [comp_id, id_comp] + -- Porting note: added to speed up elaboration attribute [irreducible] N₂Γ₂ToKaroubiIso @@ -151,16 +173,15 @@ set_option linter.uppercaseLean3 false in theorem N₂Γ₂_inv_app_f_f (X : Karoubi (ChainComplex C ℕ)) (n : ℕ) : (N₂Γ₂.inv.app X).f.f n = X.p.f n ≫ (Γ₀.splitting X.X).ιSummand (Splitting.IndexSet.id (op [n])) := by - simp only [N₂Γ₂, Functor.preimageIso, Iso.trans, - whiskeringLeft_obj_preimage_app, N₂Γ₂ToKaroubiIso_inv, assoc, - Functor.id_map, NatTrans.comp_app, eqToHom_app, Karoubi.comp_f, - Karoubi.eqToHom_f, Karoubi.decompId_p_f, HomologicalComplex.comp_f, - N₁Γ₀_inv_app_f_f, Splitting.toKaroubiNondegComplexIsoN₁_hom_f_f, - 
Functor.comp_map, Functor.comp_obj, Karoubi.decompId_i_f, - eqToHom_refl, comp_id, N₂_map_f_f, Γ₂_map_f_app, N₁_obj_p, - PInfty_on_Γ₀_splitting_summand_eq_self_assoc, toKaroubi_obj_X, - Splitting.ι_desc, Splitting.IndexSet.id_fst, SimplexCategory.len_mk, unop_op, - Karoubi.HomologicalComplex.p_idem_assoc] + dsimp [N₂Γ₂] + simp only [whiskeringLeft_obj_preimage_app, NatTrans.comp_app, Functor.comp_map, + Karoubi.comp_f, N₂Γ₂ToKaroubiIso_inv_app, HomologicalComplex.comp_f, + N₁Γ₀_inv_app_f_f, toKaroubi_obj_X, Splitting.toKaroubiNondegComplexIsoN₁_hom_f_f, + Γ₀.obj_obj, PInfty_on_Γ₀_splitting_summand_eq_self, N₂_map_f_f, + Γ₂_map_f_app, unop_op, Karoubi.decompId_p_f, PInfty_f_idem_assoc, + PInfty_on_Γ₀_splitting_summand_eq_self_assoc, Splitting.IndexSet.id_fst, SimplexCategory.len_mk, + Splitting.ι_desc] + apply Karoubi.HomologicalComplex.p_idem_assoc set_option linter.uppercaseLean3 false in #align algebraic_topology.dold_kan.N₂Γ₂_inv_app_f_f AlgebraicTopology.DoldKan.N₂Γ₂_inv_app_f_f diff --git a/Mathlib/AlgebraicTopology/DoldKan/Homotopies.lean b/Mathlib/AlgebraicTopology/DoldKan/Homotopies.lean index d19d1145e70b7..028f650c93d58 100644 --- a/Mathlib/AlgebraicTopology/DoldKan/Homotopies.lean +++ b/Mathlib/AlgebraicTopology/DoldKan/Homotopies.lean @@ -94,7 +94,7 @@ theorem cs_down_0_not_rel_left (j : ℕ) : ¬c.Rel 0 j := by /-- The sequence of maps which gives the null homotopic maps `Hσ` that shall be in the inductive construction of the projections `P q : K[X] ⟶ K[X]` -/ def hσ (q : ℕ) (n : ℕ) : X _[n] ⟶ X _[n + 1] := - if n < q then 0 else (-1 : ℤ) ^ (n - q) • X.σ ⟨n - q, Nat.sub_lt_succ n q⟩ + if n < q then 0 else (-1 : ℤ) ^ (n - q) • X.σ ⟨n - q, Nat.lt_succ_of_le (Nat.sub_le _ _)⟩ #align algebraic_topology.dold_kan.hσ AlgebraicTopology.DoldKan.hσ /-- We can turn `hσ` into a datum that can be passed to `nullHomotopicMap'`. -/ diff --git a/Mathlib/AlgebraicTopology/DoldKan/NCompGamma.lean b/Mathlib/AlgebraicTopology/DoldKan/NCompGamma.lean index f3b44627df2ca..3181a978372d3 100644 --- a/Mathlib/AlgebraicTopology/DoldKan/NCompGamma.lean +++ b/Mathlib/AlgebraicTopology/DoldKan/NCompGamma.lean @@ -172,43 +172,39 @@ end Γ₂N₁ -- porting note: removed @[simps] attribute because it was creating timeouts /-- The compatibility isomorphism relating `N₂ ⋙ Γ₂` and `N₁ ⋙ Γ₂`. 
-/ -def compatibility_Γ₂N₁_Γ₂N₂ : toKaroubi (SimplicialObject C) ⋙ N₂ ⋙ Γ₂ ≅ N₁ ⋙ Γ₂ := - eqToIso (by rw [← Functor.assoc, compatibility_N₁_N₂]) +def Γ₂N₂ToKaroubiIso : toKaroubi (SimplicialObject C) ⋙ N₂ ⋙ Γ₂ ≅ N₁ ⋙ Γ₂ := + (Functor.associator _ _ _).symm ≪≫ isoWhiskerRight toKaroubiCompN₂IsoN₁ Γ₂ set_option linter.uppercaseLean3 false in -#align algebraic_topology.dold_kan.compatibility_Γ₂N₁_Γ₂N₂ AlgebraicTopology.DoldKan.compatibility_Γ₂N₁_Γ₂N₂ +#align algebraic_topology.dold_kan.compatibility_Γ₂N₁_Γ₂N₂ AlgebraicTopology.DoldKan.Γ₂N₂ToKaroubiIso --- porting note: no @[simp] attribute because this would trigger a timeout -lemma compatibility_Γ₂N₁_Γ₂N₂_hom_app (X : SimplicialObject C) : - compatibility_Γ₂N₁_Γ₂N₂.hom.app X = - eqToHom (by rw [← Functor.assoc, compatibility_N₁_N₂]) := by - dsimp only [compatibility_Γ₂N₁_Γ₂N₂, CategoryTheory.eqToIso] - apply eqToHom_app +@[simp] +lemma Γ₂N₂ToKaroubiIso_hom_app (X : SimplicialObject C) : + Γ₂N₂ToKaroubiIso.hom.app X = Γ₂.map (toKaroubiCompN₂IsoN₁.hom.app X) := by + simp [Γ₂N₂ToKaroubiIso] --- Porting note: added to speed up elaboration -attribute [irreducible] compatibility_Γ₂N₁_Γ₂N₂ +@[simp] +lemma Γ₂N₂ToKaroubiIso_inv_app (X : SimplicialObject C) : + Γ₂N₂ToKaroubiIso.inv.app X = Γ₂.map (toKaroubiCompN₂IsoN₁.inv.app X) := by + simp [Γ₂N₂ToKaroubiIso] -lemma compatibility_Γ₂N₁_Γ₂N₂_inv_app (X : SimplicialObject C) : - compatibility_Γ₂N₁_Γ₂N₂.inv.app X = - eqToHom (by rw [← Functor.assoc, compatibility_N₁_N₂]) := by - rw [← cancel_mono (compatibility_Γ₂N₁_Γ₂N₂.hom.app X), Iso.inv_hom_id_app, - compatibility_Γ₂N₁_Γ₂N₂_hom_app, eqToHom_trans, eqToHom_refl] +-- Porting note: added to speed up elaboration +attribute [irreducible] Γ₂N₂ToKaroubiIso namespace Γ₂N₂ /-- The natural transformation `N₂ ⋙ Γ₂ ⟶ 𝟭 (SimplicialObject C)`. 
-/ def natTrans : (N₂ : Karoubi (SimplicialObject C) ⥤ _) ⋙ Γ₂ ⟶ 𝟭 _ := ((whiskeringLeft _ _ _).obj (toKaroubi (SimplicialObject C))).preimage - (by exact compatibility_Γ₂N₁_Γ₂N₂.hom ≫ Γ₂N₁.natTrans) + (Γ₂N₂ToKaroubiIso.hom ≫ Γ₂N₁.natTrans) set_option linter.uppercaseLean3 false in #align algebraic_topology.dold_kan.Γ₂N₂.nat_trans AlgebraicTopology.DoldKan.Γ₂N₂.natTrans theorem natTrans_app_f_app (P : Karoubi (SimplicialObject C)) : - Γ₂N₂.natTrans.app P = by - exact (N₂ ⋙ Γ₂).map P.decompId_i ≫ - (compatibility_Γ₂N₁_Γ₂N₂.hom ≫ Γ₂N₁.natTrans).app P.X ≫ P.decompId_p := by + Γ₂N₂.natTrans.app P = + (N₂ ⋙ Γ₂).map P.decompId_i ≫ + (Γ₂N₂ToKaroubiIso.hom ≫ Γ₂N₁.natTrans).app P.X ≫ P.decompId_p := by dsimp only [natTrans] - rw [whiskeringLeft_obj_preimage_app - (compatibility_Γ₂N₁_Γ₂N₂.hom ≫ Γ₂N₁.natTrans : _ ⟶ toKaroubi _ ⋙ 𝟭 _) P, Functor.id_map] + simp only [whiskeringLeft_obj_preimage_app, Functor.id_map, assoc] set_option linter.uppercaseLean3 false in #align algebraic_topology.dold_kan.Γ₂N₂.nat_trans_app_f_app AlgebraicTopology.DoldKan.Γ₂N₂.natTrans_app_f_app @@ -219,7 +215,7 @@ end Γ₂N₂ theorem compatibility_Γ₂N₁_Γ₂N₂_natTrans (X : SimplicialObject C) : Γ₂N₁.natTrans.app X = - (compatibility_Γ₂N₁_Γ₂N₂.app X).inv ≫ + (Γ₂N₂ToKaroubiIso.app X).inv ≫ Γ₂N₂.natTrans.app ((toKaroubi (SimplicialObject C)).obj X) := by rw [Γ₂N₂.natTrans_app_f_app] dsimp only [Karoubi.decompId_i_toKaroubi, Karoubi.decompId_p_toKaroubi, Functor.comp_map, @@ -239,14 +235,16 @@ theorem identity_N₂_objectwise (P : Karoubi (SimplicialObject C)) : have eq₂ : (Γ₀.splitting (N₂.obj P).X).ιSummand (Splitting.IndexSet.id (op [n])) ≫ (N₂.map (Γ₂N₂.natTrans.app P)).f.f n = PInfty.f n ≫ P.p.app (op [n]) := by dsimp - simp only [assoc, Γ₂N₂.natTrans_app_f_app, Functor.comp_map, NatTrans.comp_app, - Karoubi.comp_f, compatibility_Γ₂N₁_Γ₂N₂_hom_app, eqToHom_refl, Karoubi.eqToHom_f, - PInfty_on_Γ₀_splitting_summand_eq_self_assoc, Functor.comp_obj] - dsimp [N₂] - simp only [Splitting.ι_desc_assoc, assoc, id_comp, unop_op, - Splitting.IndexSet.id_fst, len_mk, Splitting.IndexSet.e, - Splitting.IndexSet.id_snd_coe, op_id, P.X.map_id, id_comp, - PInfty_f_naturality_assoc, PInfty_f_idem_assoc, app_idem] + rw [PInfty_on_Γ₀_splitting_summand_eq_self_assoc, Γ₂N₂.natTrans_app_f_app] + dsimp + rw [Γ₂N₂ToKaroubiIso_hom_app, assoc, Splitting.ι_desc_assoc, assoc, assoc] + dsimp [toKaroubi] + rw [Splitting.ι_desc_assoc] + dsimp + simp only [assoc, Splitting.ι_desc_assoc, unop_op, Splitting.IndexSet.id_fst, + len_mk, NatTrans.naturality, PInfty_f_idem_assoc, + PInfty_f_naturality_assoc, app_idem_assoc] + erw [P.X.map_id, comp_id] simp only [Karoubi.comp_f, HomologicalComplex.comp_f, Karoubi.id_eq, N₂_obj_p_f, assoc, eq₁, eq₂, PInfty_f_naturality_assoc, app_idem, PInfty_f_idem_assoc] set_option linter.uppercaseLean3 false in diff --git a/Mathlib/AlgebraicTopology/FundamentalGroupoid/SimplyConnected.lean b/Mathlib/AlgebraicTopology/FundamentalGroupoid/SimplyConnected.lean index 8bd8f26c701da..0c0985e8e4698 100644 --- a/Mathlib/AlgebraicTopology/FundamentalGroupoid/SimplyConnected.lean +++ b/Mathlib/AlgebraicTopology/FundamentalGroupoid/SimplyConnected.lean @@ -60,8 +60,8 @@ attribute [local instance] Path.Homotopic.setoid instance (priority := 100) : PathConnectedSpace X := let unique_homotopic := (simply_connected_iff_unique_homotopic X).mp inferInstance - { Nonempty := unique_homotopic.1 - Joined := fun x y => ⟨(unique_homotopic.2 x y).some.default.out⟩ } + { nonempty := unique_homotopic.1 + joined := fun x y => ⟨(unique_homotopic.2 x 
y).some.default.out⟩ } /-- In a simply connected space, any two paths are homotopic -/ theorem paths_homotopic {x y : X} (p₁ p₂ : Path x y) : Path.Homotopic p₁ p₂ := diff --git a/Mathlib/Analysis/BoundedVariation.lean b/Mathlib/Analysis/BoundedVariation.lean index 514f4c13030c0..ba0263c323518 100644 --- a/Mathlib/Analysis/BoundedVariation.lean +++ b/Mathlib/Analysis/BoundedVariation.lean @@ -530,7 +530,7 @@ theorem comp_le_of_antitoneOn (f : α → E) {s : Set α} {t : Set β} (φ : β rintro ⟨n, u, hu, ut⟩ rw [← Finset.sum_range_reflect] refine' (Finset.sum_congr rfl fun x hx => _).trans_le <| le_iSup_of_le - ⟨n, fun i => φ (u <| n - i), fun x y xy => hφ (ut _) (ut _) (hu <| Nat.sub_le_sub_left n xy), + ⟨n, fun i => φ (u <| n - i), fun x y xy => hφ (ut _) (ut _) (hu <| Nat.sub_le_sub_left xy n), fun i => φst (ut _)⟩ le_rfl dsimp only [Subtype.coe_mk] diff --git a/Mathlib/Analysis/Calculus/ContDiff/Defs.lean b/Mathlib/Analysis/Calculus/ContDiff/Defs.lean index f179edc221f19..9ff8127679f5d 100644 --- a/Mathlib/Analysis/Calculus/ContDiff/Defs.lean +++ b/Mathlib/Analysis/Calculus/ContDiff/Defs.lean @@ -1369,6 +1369,10 @@ theorem ContDiffAt.differentiableAt (h : ContDiffAt 𝕜 n f x) (hn : 1 ≤ n) : simpa [hn, differentiableWithinAt_univ] using h.differentiableWithinAt #align cont_diff_at.differentiable_at ContDiffAt.differentiableAt +nonrec lemma ContDiffAt.contDiffOn {m : ℕ} (h : ContDiffAt 𝕜 n f x) (hm : m ≤ n) : + ∃ u ∈ 𝓝 x, ContDiffOn 𝕜 m f u := by + simpa [nhdsWithin_univ] using h.contDiffOn hm + /-- A function is `C^(n + 1)` at a point iff locally, it has a derivative which is `C^n`. -/ theorem contDiffAt_succ_iff_hasFDerivAt {n : ℕ} : ContDiffAt 𝕜 (n + 1 : ℕ) f x ↔ diff --git a/Mathlib/Analysis/Convex/Mul.lean b/Mathlib/Analysis/Convex/Mul.lean new file mode 100644 index 0000000000000..10ef1adf91e27 --- /dev/null +++ b/Mathlib/Analysis/Convex/Mul.lean @@ -0,0 +1,176 @@ +/- +Copyright (c) 2023 Yaël Dillies. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Yaël Dillies +-/ +import Mathlib.Analysis.Convex.Function +import Mathlib.Algebra.Order.Monovary +import Mathlib.Tactic.FieldSimp + +/-! +# Product of convex functions + +This file proves that the product of convex functions is convex, provided they monovary. + +As corollaries, we also prove that `x ↦ x ^ n` is convex +* `Even.convexOn_pow`: for even `n : ℕ`. +* `convexOn_pow`: over $[0, +∞)$ for `n : ℕ`. +* `convexOn_zpow`: over $(0, +∞)$ For `n : ℤ`. 
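+
+The monovariance assumptions cannot simply be dropped: on `ℝ`, both `x ↦ x` and `x ↦ -x` are
+convex, but they antivary and their product `x ↦ -x ^ 2` is concave rather than convex.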
+-/ + +open Set + +variable {𝕜 E F : Type*} + +section LinearOrderedCommRing +variable [LinearOrderedCommRing 𝕜] [LinearOrderedCommRing E] [LinearOrderedAddCommGroup F] + [Module 𝕜 E] [Module 𝕜 F] [Module E F] [IsScalarTower 𝕜 E F] [SMulCommClass 𝕜 E F] + [OrderedSMul 𝕜 E] [OrderedSMul 𝕜 F] [OrderedSMul E F] {s : Set 𝕜} {f : 𝕜 → E} {g : 𝕜 → F} + +lemma ConvexOn.smul' (hf : ConvexOn 𝕜 s f) (hg : ConvexOn 𝕜 s g) (hf₀ : ∀ ⦃x⦄, x ∈ s → 0 ≤ f x) + (hg₀ : ∀ ⦃x⦄, x ∈ s → 0 ≤ g x) (hfg : MonovaryOn f g s) : ConvexOn 𝕜 s (f • g) := by + refine ⟨hf.1, fun x hx y hy a b ha hb hab ↦ ?_⟩ + dsimp + refine (smul_le_smul (hf.2 hx hy ha hb hab) (hg.2 hx hy ha hb hab) (hg₀ $ hf.1 hx hy ha hb hab) $ + add_nonneg (smul_nonneg ha $ hf₀ hx) $ smul_nonneg hb $ hf₀ hy).trans ?_ + calc + _ = (a * a) • (f x • g x) + (b * b) • (f y • g y) + (a * b) • (f x • g y + f y • g x) := ?_ + _ ≤ (a * a) • (f x • g x) + (b * b) • (f y • g y) + (a * b) • (f x • g x + f y • g y) := by + gcongr _ + (a * b) • ?_; exact hfg.smul_add_smul_le_smul_add_smul hx hy + _ = (a * (a + b)) • (f x • g x) + (b * (a + b)) • (f y • g y) := by + simp only [mul_add, add_smul, smul_add, mul_comm _ a]; abel + _ = _ := by simp_rw [hab, mul_one] + simp only [mul_add, add_smul, smul_add] + rw [←smul_smul_smul_comm a, ←smul_smul_smul_comm b, ←smul_smul_smul_comm a b, + ←smul_smul_smul_comm b b, smul_eq_mul, smul_eq_mul, smul_eq_mul, smul_eq_mul, mul_comm b, + add_comm _ ((b * b) • f y • g y), add_add_add_comm, add_comm ((a * b) • f y • g x)] + +lemma ConcaveOn.smul' (hf : ConcaveOn 𝕜 s f) (hg : ConcaveOn 𝕜 s g) (hf₀ : ∀ ⦃x⦄, x ∈ s → 0 ≤ f x) + (hg₀ : ∀ ⦃x⦄, x ∈ s → 0 ≤ g x) (hfg : AntivaryOn f g s) : ConcaveOn 𝕜 s (f • g) := by + refine ⟨hf.1, fun x hx y hy a b ha hb hab ↦ ?_⟩ + dsimp + refine (smul_le_smul (hf.2 hx hy ha hb hab) (hg.2 hx hy ha hb hab) (add_nonneg + (smul_nonneg ha $ hg₀ hx) $ smul_nonneg hb $ hg₀ hy) $ hf₀ $ hf.1 hx hy ha hb hab).trans' ?_ + calc a • f x • g x + b • f y • g y + = (a * (a + b)) • (f x • g x) + (b * (a + b)) • (f y • g y) := by simp_rw [hab, mul_one] + _ = (a * a) • (f x • g x) + (b * b) • (f y • g y) + (a * b) • (f x • g x + f y • g y) := by + simp only [mul_add, add_smul, smul_add, mul_comm _ a]; abel + _ ≤ (a * a) • (f x • g x) + (b * b) • (f y • g y) + (a * b) • (f x • g y + f y • g x) := by + gcongr _ + (a * b) • ?_; exact hfg.smul_add_smul_le_smul_add_smul hx hy + _ = _ := ?_ + simp only [mul_add, add_smul, smul_add] + rw [←smul_smul_smul_comm a, ←smul_smul_smul_comm b, ←smul_smul_smul_comm a b, + ←smul_smul_smul_comm b b, smul_eq_mul, smul_eq_mul, smul_eq_mul, smul_eq_mul, mul_comm b a, + add_comm ((a * b) • f x • g y), add_comm ((a * b) • f x • g y), add_add_add_comm] + +lemma ConvexOn.smul'' (hf : ConvexOn 𝕜 s f) (hg : ConvexOn 𝕜 s g) (hf₀ : ∀ ⦃x⦄, x ∈ s → f x ≤ 0) + (hg₀ : ∀ ⦃x⦄, x ∈ s → g x ≤ 0) (hfg : AntivaryOn f g s) : ConcaveOn 𝕜 s (f • g) := by + rw [←neg_smul_neg] + exact hf.neg.smul' hg.neg (fun x hx ↦ neg_nonneg.2 <| hf₀ hx) (fun x hx ↦ neg_nonneg.2 <| hg₀ hx) + hfg.neg + +lemma ConcaveOn.smul'' (hf : ConcaveOn 𝕜 s f) (hg : ConcaveOn 𝕜 s g) (hf₀ : ∀ ⦃x⦄, x ∈ s → f x ≤ 0) + (hg₀ : ∀ ⦃x⦄, x ∈ s → g x ≤ 0) (hfg : MonovaryOn f g s) : ConvexOn 𝕜 s (f • g) := by + rw [←neg_smul_neg] + exact hf.neg.smul' hg.neg (fun x hx ↦ neg_nonneg.2 <| hf₀ hx) (fun x hx ↦ neg_nonneg.2 <| hg₀ hx) + hfg.neg + +lemma ConvexOn.smul_concaveOn (hf : ConvexOn 𝕜 s f) (hg : ConcaveOn 𝕜 s g) + (hf₀ : ∀ ⦃x⦄, x ∈ s → 0 ≤ f x) (hg₀ : ∀ ⦃x⦄, x ∈ s → g x ≤ 0) (hfg : AntivaryOn f g s) : + ConcaveOn 𝕜 s (f • g) := by + rw 
[←neg_convexOn_iff, ←smul_neg] + exact hf.smul' hg.neg hf₀ (fun x hx ↦ neg_nonneg.2 <| hg₀ hx) hfg.neg_right + +lemma ConcaveOn.smul_convexOn (hf : ConcaveOn 𝕜 s f) (hg : ConvexOn 𝕜 s g) + (hf₀ : ∀ ⦃x⦄, x ∈ s → 0 ≤ f x) (hg₀ : ∀ ⦃x⦄, x ∈ s → g x ≤ 0) (hfg : MonovaryOn f g s) : + ConvexOn 𝕜 s (f • g) := by + rw [←neg_concaveOn_iff, ←smul_neg] + exact hf.smul' hg.neg hf₀ (fun x hx ↦ neg_nonneg.2 <| hg₀ hx) hfg.neg_right + +lemma ConvexOn.smul_concaveOn' (hf : ConvexOn 𝕜 s f) (hg : ConcaveOn 𝕜 s g) + (hf₀ : ∀ ⦃x⦄, x ∈ s → f x ≤ 0) (hg₀ : ∀ ⦃x⦄, x ∈ s → 0 ≤ g x) (hfg : MonovaryOn f g s) : + ConvexOn 𝕜 s (f • g) := by + rw [←neg_concaveOn_iff, ←smul_neg] + exact hf.smul'' hg.neg hf₀ (fun x hx ↦ neg_nonpos.2 $ hg₀ hx) hfg.neg_right + +lemma ConcaveOn.smul_convexOn' (hf : ConcaveOn 𝕜 s f) (hg : ConvexOn 𝕜 s g) + (hf₀ : ∀ ⦃x⦄, x ∈ s → f x ≤ 0) (hg₀ : ∀ ⦃x⦄, x ∈ s → 0 ≤ g x) (hfg : AntivaryOn f g s) : + ConcaveOn 𝕜 s (f • g) := by + rw [←neg_convexOn_iff, ←smul_neg] + exact hf.smul'' hg.neg hf₀ (fun x hx ↦ neg_nonpos.2 $ hg₀ hx) hfg.neg_right + +variable [IsScalarTower 𝕜 E E] [SMulCommClass 𝕜 E E] {f g : 𝕜 → E} + +lemma ConvexOn.mul (hf : ConvexOn 𝕜 s f) (hg : ConvexOn 𝕜 s g) (hf₀ : ∀ ⦃x⦄, x ∈ s → 0 ≤ f x) + (hg₀ : ∀ ⦃x⦄, x ∈ s → 0 ≤ g x) (hfg : MonovaryOn f g s) : + ConvexOn 𝕜 s (f * g) := hf.smul' hg hf₀ hg₀ hfg + +lemma ConcaveOn.mul (hf : ConcaveOn 𝕜 s f) (hg : ConcaveOn 𝕜 s g) + (hf₀ : ∀ ⦃x⦄, x ∈ s → 0 ≤ f x) (hg₀ : ∀ ⦃x⦄, x ∈ s → 0 ≤ g x) (hfg : AntivaryOn f g s) : + ConcaveOn 𝕜 s (f * g) := hf.smul' hg hf₀ hg₀ hfg + +lemma ConvexOn.mul' (hf : ConvexOn 𝕜 s f) (hg : ConvexOn 𝕜 s g) (hf₀ : ∀ ⦃x⦄, x ∈ s → f x ≤ 0) + (hg₀ : ∀ ⦃x⦄, x ∈ s → g x ≤ 0) (hfg : AntivaryOn f g s) : + ConcaveOn 𝕜 s (f * g) := hf.smul'' hg hf₀ hg₀ hfg + +lemma ConcaveOn.mul' (hf : ConcaveOn 𝕜 s f) (hg : ConcaveOn 𝕜 s g) (hf₀ : ∀ ⦃x⦄, x ∈ s → f x ≤ 0) + (hg₀ : ∀ ⦃x⦄, x ∈ s → g x ≤ 0) (hfg : MonovaryOn f g s) : + ConvexOn 𝕜 s (f * g) := hf.smul'' hg hf₀ hg₀ hfg + +lemma ConvexOn.mul_concaveOn (hf : ConvexOn 𝕜 s f) (hg : ConcaveOn 𝕜 s g) + (hf₀ : ∀ ⦃x⦄, x ∈ s → 0 ≤ f x) (hg₀ : ∀ ⦃x⦄, x ∈ s → g x ≤ 0) (hfg : AntivaryOn f g s) : + ConcaveOn 𝕜 s (f * g) := hf.smul_concaveOn hg hf₀ hg₀ hfg + +lemma ConcaveOn.mul_convexOn (hf : ConcaveOn 𝕜 s f) (hg : ConvexOn 𝕜 s g) + (hf₀ : ∀ ⦃x⦄, x ∈ s → 0 ≤ f x) (hg₀ : ∀ ⦃x⦄, x ∈ s → g x ≤ 0) (hfg : MonovaryOn f g s) : + ConvexOn 𝕜 s (f * g) := hf.smul_convexOn hg hf₀ hg₀ hfg + +lemma ConvexOn.mul_concaveOn' (hf : ConvexOn 𝕜 s f) (hg : ConcaveOn 𝕜 s g) + (hf₀ : ∀ ⦃x⦄, x ∈ s → f x ≤ 0) (hg₀ : ∀ ⦃x⦄, x ∈ s → 0 ≤ g x) (hfg : MonovaryOn f g s) : + ConvexOn 𝕜 s (f * g) := hf.smul_concaveOn' hg hf₀ hg₀ hfg + +lemma ConcaveOn.mul_convexOn' (hf : ConcaveOn 𝕜 s f) (hg : ConvexOn 𝕜 s g) + (hf₀ : ∀ ⦃x⦄, x ∈ s → f x ≤ 0) (hg₀ : ∀ ⦃x⦄, x ∈ s → 0 ≤ g x) (hfg : AntivaryOn f g s) : + ConcaveOn 𝕜 s (f • g) := hf.smul_convexOn' hg hf₀ hg₀ hfg + +lemma ConvexOn.pow (hf : ConvexOn 𝕜 s f) (hf₀ : ∀ ⦃x⦄, x ∈ s → 0 ≤ f x) : + ∀ n, ConvexOn 𝕜 s (f ^ n) + | 0 => by simpa using convexOn_const 1 hf.1 + | n + 1 => by rw [pow_succ]; exact hf.mul (hf.pow hf₀ _) hf₀ (fun x hx ↦ pow_nonneg (hf₀ hx) _) <| + (monovaryOn_self f s).pow_right₀ hf₀ n + +/-- `x^n`, `n : ℕ` is convex on `[0, +∞)` for all `n`. -/ +lemma convexOn_pow : ∀ n, ConvexOn 𝕜 (Ici 0) fun x : 𝕜 ↦ x ^ n := + (convexOn_id <| convex_Ici _).pow fun _ ↦ id + +/-- `x^n`, `n : ℕ` is convex on the whole real line whenever `n` is even. 
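+For odd `n ≥ 3` the map is concave on `(-∞, 0]`, so the parity assumption cannot be dropped;
+the convexity on `[0, +∞)` valid for all `n` is `convexOn_pow` above.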
-/ +protected lemma Even.convexOn_pow {n : ℕ} (hn : Even n) : ConvexOn 𝕜 univ fun x : 𝕜 ↦ x ^ n := by + obtain ⟨n, rfl⟩ := hn + simp_rw [←two_mul, pow_mul] + refine ConvexOn.pow ⟨convex_univ, fun x _ y _ a b ha hb hab ↦ sub_nonneg.1 ?_⟩ + (fun _ _ ↦ by positivity) _ + calc + (0 : 𝕜) ≤ (a * b) * (x - y) ^ 2 := by positivity + _ = _ := by obtain rfl := eq_sub_of_add_eq hab; simp only [smul_eq_mul]; ring + +end LinearOrderedCommRing + +section LinearOrderedField +variable [LinearOrderedField 𝕜] + +open Int in +/-- `x^m`, `m : ℤ` is convex on `(0, +∞)` for all `m`. -/ +lemma convexOn_zpow : ∀ n : ℤ, ConvexOn 𝕜 (Ioi 0) fun x : 𝕜 ↦ x ^ n + | (n : ℕ) => by + simp_rw [zpow_ofNat] + exact (convexOn_pow n).subset Ioi_subset_Ici_self (convex_Ioi _) + | -[n+1] => by + simp_rw [zpow_negSucc, ←inv_pow] + refine (convexOn_iff_forall_pos.2 ⟨convex_Ioi _, ?_⟩).pow (fun x (hx : 0 < x) ↦ by positivity) _ + rintro x (hx : 0 < x) y (hy : 0 < y) a b ha hb hab + field_simp + rw [div_le_div_iff, ←sub_nonneg] + calc + 0 ≤ a * b * (x - y) ^ 2 := by positivity + _ = _ := by obtain rfl := eq_sub_of_add_eq hab; ring + all_goals positivity diff --git a/Mathlib/Analysis/Convex/SpecificFunctions/Basic.lean b/Mathlib/Analysis/Convex/SpecificFunctions/Basic.lean index a7f2fd7df0d40..f78ba29edff39 100644 --- a/Mathlib/Analysis/Convex/SpecificFunctions/Basic.lean +++ b/Mathlib/Analysis/Convex/SpecificFunctions/Basic.lean @@ -3,6 +3,7 @@ Copyright (c) 2020 Yury Kudryashov. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Yury Kudryashov, Sébastien Gouëzel, Heather Macbeth -/ +import Mathlib.Analysis.Convex.Mul import Mathlib.Analysis.Convex.Slope import Mathlib.Analysis.SpecialFunctions.Pow.Real import Mathlib.Tactic.LinearCombination @@ -15,9 +16,6 @@ import Mathlib.Tactic.LinearCombination In this file we prove that the following functions are convex or strictly convex: * `strictConvexOn_exp` : The exponential function is strictly convex. -* `Even.convexOn_pow`: For an even `n : ℕ`, `fun x ↦ x ^ n` is convex. -* `convexOn_pow`: For `n : ℕ`, `fun x ↦ x ^ n` is convex on $[0, +∞)$. -* `convexOn_zpow`: For `m : ℤ`, `fun x ↦ x ^ m` is convex on $[0, +∞)$. * `strictConcaveOn_log_Ioi`, `strictConcaveOn_log_Iio`: `Real.log` is strictly concave on $(0, +∞)$ and $(-∞, 0)$ respectively. * `convexOn_rpow`, `strictConvexOn_rpow` : For `p : ℝ`, `fun x ↦ x ^ p` is convex on $[0, +∞)$ when @@ -32,6 +30,10 @@ theory. For `p : ℝ`, prove that `fun x ↦ x ^ p` is concave when `0 ≤ p ≤ 1` and strictly concave when `0 < p < 1`. + +## See also + +`Analysis.Convex.Mul` for convexity of `x ↦ x ^ n`` -/ open Real Set BigOperators NNReal @@ -67,83 +69,8 @@ theorem convexOn_exp : ConvexOn ℝ univ exp := strictConvexOn_exp.convexOn #align convex_on_exp convexOn_exp -/-- `x^n`, `n : ℕ` is convex on `[0, +∞)` for all `n`. - -We give an elementary proof rather than using the second derivative test, since this lemma is -needed early in the analysis library. 
-/ -theorem convexOn_pow (n : ℕ) : ConvexOn ℝ (Ici 0) fun x : ℝ => x ^ n := by - induction' n with k IH - · exact convexOn_const (1 : ℝ) (convex_Ici _) - refine' ⟨convex_Ici _, _⟩ - rintro a (ha : 0 ≤ a) b (hb : 0 ≤ b) μ ν hμ hν h - have H := IH.2 ha hb hμ hν h - have : 0 ≤ (b ^ k - a ^ k) * (b - a) * μ * ν := by - cases' le_or_lt a b with hab hab - · have : a ^ k ≤ b ^ k := by gcongr - have : 0 ≤ (b ^ k - a ^ k) * (b - a) := by nlinarith - positivity - · have : b ^ k ≤ a ^ k := by gcongr - have : 0 ≤ (b ^ k - a ^ k) * (b - a) := by nlinarith - positivity - calc - (μ * a + ν * b) ^ k.succ = (μ * a + ν * b) * (μ * a + ν * b) ^ k := pow_succ _ _ - _ ≤ (μ * a + ν * b) * (μ * a ^ k + ν * b ^ k) := by gcongr; exact H - _ ≤ (μ * a + ν * b) * (μ * a ^ k + ν * b ^ k) + (b ^ k - a ^ k) * (b - a) * μ * ν := by linarith - _ = (μ + ν) * (μ * a ^ k.succ + ν * b ^ k.succ) := by rw [Nat.succ_eq_add_one]; ring - _ = μ * a ^ k.succ + ν * b ^ k.succ := by rw [h]; ring #align convex_on_pow convexOn_pow - -/-- `x^n`, `n : ℕ` is convex on the whole real line whenever `n` is even. - -We give an elementary proof rather than using the second derivative test, since this lemma is -needed early in the analysis library. -/ -nonrec theorem Even.convexOn_pow {n : ℕ} (hn : Even n) : - ConvexOn ℝ Set.univ fun x : ℝ => x ^ n := by - refine' ⟨convex_univ, _⟩ - rintro a - b - μ ν hμ hν h - obtain ⟨k, rfl⟩ := hn.exists_two_nsmul _ - -- Porting note: added type ascription to LHS - have : (0 : ℝ) ≤ (a - b) ^ 2 * μ * ν := by positivity - calc - (μ * a + ν * b) ^ (2 * k) = ((μ * a + ν * b) ^ 2) ^ k := by rw [pow_mul] - _ ≤ ((μ + ν) * (μ * a ^ 2 + ν * b ^ 2)) ^ k := by gcongr; linarith - _ = (μ * a ^ 2 + ν * b ^ 2) ^ k := by rw [h]; ring - _ ≤ μ * (a ^ 2) ^ k + ν * (b ^ 2) ^ k := ?_ - _ ≤ μ * a ^ (2 * k) + ν * b ^ (2 * k) := by ring_nf; rfl - -- Porting note: `rw [mem_Ici]` was `dsimp` - refine' (convexOn_pow k).2 _ _ hμ hν h <;> rw [mem_Ici] <;> positivity #align even.convex_on_pow Even.convexOn_pow - -open Int in -/-- `x^m`, `m : ℤ` is convex on `(0, +∞)` for all `m`. - -We give an elementary proof rather than using the second derivative test, since this lemma is -needed early in the analysis library. -/ -theorem convexOn_zpow : ∀ m : ℤ, ConvexOn ℝ (Ioi 0) fun x : ℝ => x ^ m - | (n : ℕ) => by - simp_rw [zpow_ofNat] - exact (convexOn_pow n).subset Ioi_subset_Ici_self (convex_Ioi _) - | -[n+1] => by - simp_rw [zpow_negSucc] - refine' ⟨convex_Ioi _, _⟩ - rintro a (ha : 0 < a) b (hb : 0 < b) μ ν hμ hν h - field_simp - rw [div_le_div_iff] - · -- Porting note: added type ascription to LHS - calc - (1 : ℝ) * (a ^ (n + 1) * b ^ (n + 1)) = ((μ + ν) ^ 2 * (a * b)) ^ (n + 1) := by rw [h]; ring - _ ≤ ((μ * b + ν * a) * (μ * a + ν * b)) ^ (n + 1) := ?_ - _ = (μ * b + ν * a) ^ (n + 1) * (μ * a + ν * b) ^ (n + 1) := by rw [mul_pow] - _ ≤ (μ * b ^ (n + 1) + ν * a ^ (n + 1)) * (μ * a + ν * b) ^ (n + 1) := ?_ - · -- Porting note: added type ascription to LHS - gcongr (?_ : ℝ) ^ _ - have : (0 : ℝ) ≤ μ * ν * (a - b) ^ 2 := by positivity - linarith - · gcongr - apply (convexOn_pow (n + 1)).2 hb.le ha.le hμ hν h - · have : 0 < μ * a + ν * b := by cases le_or_lt a b <;> nlinarith - positivity - · positivity #align convex_on_zpow convexOn_zpow /- `Real.log` is strictly concave on $(0, +∞)$. 
diff --git a/Mathlib/Analysis/MeanInequalitiesPow.lean b/Mathlib/Analysis/MeanInequalitiesPow.lean index 0223ad658c688..e5516d0873a84 100644 --- a/Mathlib/Analysis/MeanInequalitiesPow.lean +++ b/Mathlib/Analysis/MeanInequalitiesPow.lean @@ -64,7 +64,7 @@ theorem pow_arith_mean_le_arith_mean_pow (w z : ι → ℝ) (hw : ∀ i ∈ s, 0 theorem pow_arith_mean_le_arith_mean_pow_of_even (w z : ι → ℝ) (hw : ∀ i ∈ s, 0 ≤ w i) (hw' : ∑ i in s, w i = 1) {n : ℕ} (hn : Even n) : (∑ i in s, w i * z i) ^ n ≤ ∑ i in s, w i * z i ^ n := - hn.convexOn_pow.map_sum_le hw hw' fun _ _ => trivial + hn.convexOn_pow.map_sum_le hw hw' fun _ _ => Set.mem_univ _ #align real.pow_arith_mean_le_arith_mean_pow_of_even Real.pow_arith_mean_le_arith_mean_pow_of_even /-- Specific case of Jensen's inequality for sums of powers -/ diff --git a/Mathlib/Analysis/SpecificLimits/Basic.lean b/Mathlib/Analysis/SpecificLimits/Basic.lean index aeaba8e6cf390..1f01fe9675837 100644 --- a/Mathlib/Analysis/SpecificLimits/Basic.lean +++ b/Mathlib/Analysis/SpecificLimits/Basic.lean @@ -595,6 +595,11 @@ theorem tendsto_nat_floor_atTop {α : Type*} [LinearOrderedSemiring α] [FloorSe Nat.floor_mono.tendsto_atTop_atTop fun x => ⟨max 0 (x + 1), by simp [Nat.le_floor_iff]⟩ #align tendsto_nat_floor_at_top tendsto_nat_floor_atTop +lemma tendsto_nat_floor_mul_atTop {α : Type _} [LinearOrderedSemifield α] [FloorSemiring α] + [Archimedean α] (a : α) (ha : 0 < a) : Tendsto (fun (x:ℕ) => ⌊a * x⌋₊) atTop atTop := + Tendsto.comp tendsto_nat_floor_atTop + <| Tendsto.const_mul_atTop ha tendsto_nat_cast_atTop_atTop + variable {R : Type*} [TopologicalSpace R] [LinearOrderedField R] [OrderTopology R] [FloorRing R] theorem tendsto_nat_floor_mul_div_atTop {a : R} (ha : 0 ≤ a) : diff --git a/Mathlib/CategoryTheory/ComposableArrows.lean b/Mathlib/CategoryTheory/ComposableArrows.lean index 60c527a58630c..2db2f804a55f0 100644 --- a/Mathlib/CategoryTheory/ComposableArrows.lean +++ b/Mathlib/CategoryTheory/ComposableArrows.lean @@ -29,7 +29,6 @@ In the namespace `CategoryTheory.ComposableArrows`, we provide constructors like `mk₁ f`, `mk₂ f g`, `mk₃ f g h` for `ComposableArrows C n` for small `n`. TODO (@joelriou): -* define various constructors for objects, morphisms, isomorphisms in `ComposableArrows C n` * redefine `Arrow C` as `ComposableArrow C 1`? * construct some elements in `ComposableArrows m (Fin (n + 1))` for small `n` the precomposition with which shall induce funtors @@ -90,6 +89,12 @@ in `ComposableArrows C n` when `i` is a natural number such that `i ≤ n`. -/ abbrev app' (φ : F ⟶ G) (i : ℕ) (hi : i ≤ n := by linarith) : F.obj' i ⟶ G.obj' i := φ.app _ +@[reassoc] +lemma naturality' (φ : F ⟶ G) (i j : ℕ) (hij : i ≤ j := by linarith) + (hj : j ≤ n := by linarith) : + F.map' i j ≫ app' φ j = app' φ i ≫ G.map' i j := + φ.naturality _ + /-- Constructor for `ComposableArrows C 0`. -/ @[simps!] def mk₀ (X : C) : ComposableArrows C 0 := (Functor.const (Fin 1)).obj X @@ -440,6 +445,205 @@ abbrev δ₀ (F : ComposableArrows C (n + 1)) := δ₀Functor.obj F @[simp] lemma precomp_δ₀ {X : C} (f : X ⟶ F.left) : (F.precomp f).δ₀ = F := rfl +section + +variable {F G : ComposableArrows C (n + 1)} + (α : F.obj' 0 ⟶ G.obj' 0) + (β : F.δ₀ ⟶ G.δ₀) + (w : F.map' 0 1 ≫ app' β 0 = α ≫ G.map' 0 1) + +/-- Inductive construction of morphisms in `ComposableArrows C (n + 1)`: in order to construct +a morphism `F ⟶ G`, it suffices to provide `α : F.obj' 0 ⟶ G.obj' 0` and `β : F.δ₀ ⟶ G.δ₀` +such that `F.map' 0 1 ≫ app' β 0 = α ≫ G.map' 0 1`. 
-/ +def homMkSucc : F ⟶ G := + homMk + (fun i => match i with + | ⟨0, _⟩ => α + | ⟨i + 1, hi⟩ => app' β i) + (fun i hi => by + obtain _ | i := i + · exact w + · exact naturality' β i (i + 1)) + +@[simp] +lemma homMkSucc_app_zero : (homMkSucc α β w).app 0 = α := rfl + +@[simp] +lemma homMkSucc_app_succ (i : ℕ) (hi : i + 1 < n + 1 + 1) : + (homMkSucc α β w).app ⟨i + 1, hi⟩ = app' β i := rfl + +end + +lemma hom_ext_succ {F G : ComposableArrows C (n + 1)} {f g : F ⟶ G} + (h₀ : app' f 0 = app' g 0) (h₁ : δ₀Functor.map f = δ₀Functor.map g) : f = g := by + ext ⟨i, hi⟩ + obtain _ | i := i + · exact h₀ + · rw [Nat.succ_eq_add_one] at hi + exact congr_app h₁ ⟨i, by linarith⟩ + +/-- Inductive construction of isomorphisms in `ComposableArrows C (n + 1)`: in order to +construct an isomorphism `F ≅ G`, it suffices to provide `α : F.obj' 0 ≅ G.obj' 0` and +`β : F.δ₀ ≅ G.δ₀` such that `F.map' 0 1 ≫ app' β.hom 0 = α.hom ≫ G.map' 0 1`. -/ +@[simps] +def isoMkSucc {F G : ComposableArrows C (n + 1)} (α : F.obj' 0 ≅ G.obj' 0) + (β : F.δ₀ ≅ G.δ₀) (w : F.map' 0 1 ≫ app' β.hom 0 = α.hom ≫ G.map' 0 1) : F ≅ G where + hom := homMkSucc α.hom β.hom w + inv := homMkSucc α.inv β.inv (by + rw [← cancel_epi α.hom, ← reassoc_of% w, α.hom_inv_id_assoc, β.hom_inv_id_app] + dsimp + rw [comp_id]) + hom_inv_id := by + apply hom_ext_succ + · simp + · ext ⟨i, hi⟩ + simp + inv_hom_id := by + apply hom_ext_succ + · simp + · ext ⟨i, hi⟩ + simp + +lemma ext_succ {F G : ComposableArrows C (n + 1)} (h₀ : F.obj' 0 = G.obj' 0) + (h : F.δ₀ = G.δ₀) (w : F.map' 0 1 = eqToHom h₀ ≫ G.map' 0 1 ≫ + eqToHom (Functor.congr_obj h.symm 0)): F = G := by + have : ∀ i, F.obj i = G.obj i := by + intro ⟨i, hi⟩ + cases' i with i + · exact h₀ + · rw [Nat.succ_eq_add_one] at hi + exact Functor.congr_obj h ⟨i, by linarith⟩ + exact Functor.ext_of_iso (isoMkSucc (eqToIso h₀) (eqToIso h) (by + rw [w] + dsimp [app'] + erw [eqToHom_app, assoc, assoc, eqToHom_trans, eqToHom_refl, comp_id])) this (by + rintro ⟨i, hi⟩ + dsimp + cases' i with i + · erw [homMkSucc_app_zero] + · erw [homMkSucc_app_succ] + dsimp [app'] + erw [eqToHom_app]) + +lemma precomp_surjective (F : ComposableArrows C (n + 1)) : + ∃ (F₀ : ComposableArrows C n) (X₀ : C) (f₀ : X₀ ⟶ F₀.left), F = F₀.precomp f₀ := + ⟨F.δ₀, _, F.map' 0 1, ext_succ rfl (by simp) (by simp)⟩ + +section + +variable + {f g : ComposableArrows C 2} + (app₀ : f.obj' 0 ⟶ g.obj' 0) (app₁ : f.obj' 1 ⟶ g.obj' 1) (app₂ : f.obj' 2 ⟶ g.obj' 2) + (w₀ : f.map' 0 1 ≫ app₁ = app₀ ≫ g.map' 0 1) + (w₁ : f.map' 1 2 ≫ app₂ = app₁ ≫ g.map' 1 2) + +/-- Constructor for morphisms in `ComposableArrows C 2`. -/ +def homMk₂ : f ⟶ g := homMkSucc app₀ (homMk₁ app₁ app₂ w₁) w₀ + +@[simp] +lemma homMk₂_app_zero : (homMk₂ app₀ app₁ app₂ w₀ w₁).app 0 = app₀ := rfl + +@[simp] +lemma homMk₂_app_one : (homMk₂ app₀ app₁ app₂ w₀ w₁).app 1 = app₁ := rfl + +@[simp] +lemma homMk₂_app_two : (homMk₂ app₀ app₁ app₂ w₀ w₁).app ⟨2, by linarith⟩ = app₂ := rfl + +end + +@[ext] +lemma hom_ext₂ {f g : ComposableArrows C 2} {φ φ' : f ⟶ g} + (h₀ : app' φ 0 = app' φ' 0) (h₁ : app' φ 1 = app' φ' 1) (h₂ : app' φ 2 = app' φ' 2) : + φ = φ' := + hom_ext_succ h₀ (hom_ext₁ h₁ h₂) + +/-- Constructor for isomorphisms in `ComposableArrows C 2`. 
-/ +@[simps] +def isoMk₂ {f g : ComposableArrows C 2} + (app₀ : f.obj' 0 ≅ g.obj' 0) (app₁ : f.obj' 1 ≅ g.obj' 1) (app₂ : f.obj' 2 ≅ g.obj' 2) + (w₀ : f.map' 0 1 ≫ app₁.hom = app₀.hom ≫ g.map' 0 1) + (w₁ : f.map' 1 2 ≫ app₂.hom = app₁.hom ≫ g.map' 1 2) : f ≅ g where + hom := homMk₂ app₀.hom app₁.hom app₂.hom w₀ w₁ + inv := homMk₂ app₀.inv app₁.inv app₂.inv + (by rw [← cancel_epi app₀.hom, ← reassoc_of% w₀, app₁.hom_inv_id, + comp_id, app₀.hom_inv_id_assoc]) + (by rw [← cancel_epi app₁.hom, ← reassoc_of% w₁, app₂.hom_inv_id, + comp_id, app₁.hom_inv_id_assoc]) + +lemma ext₂ {f g : ComposableArrows C 2} + (h₀ : f.obj' 0 = g.obj' 0) (h₁ : f.obj' 1 = g.obj' 1) (h₂ : f.obj' 2 = g.obj' 2) + (w₀ : f.map' 0 1 = eqToHom h₀ ≫ g.map' 0 1 ≫ eqToHom h₁.symm) + (w₁ : f.map' 1 2 = eqToHom h₁ ≫ g.map' 1 2 ≫ eqToHom h₂.symm) : f = g := + ext_succ h₀ (ext₁ h₁ h₂ w₁) w₀ + +lemma mk₂_surjective (X : ComposableArrows C 2) : + ∃ (X₀ X₁ X₂ : C) (f₀ : X₀ ⟶ X₁) (f₁ : X₁ ⟶ X₂), X = mk₂ f₀ f₁:= + ⟨_, _, _, X.map' 0 1, X.map' 1 2, ext₂ rfl rfl rfl (by simp) (by simp)⟩ + +section + +variable + {f g : ComposableArrows C 3} + (app₀ : f.obj' 0 ⟶ g.obj' 0) (app₁ : f.obj' 1 ⟶ g.obj' 1) (app₂ : f.obj' 2 ⟶ g.obj' 2) + (app₃ : f.obj' 3 ⟶ g.obj' 3) + (w₀ : f.map' 0 1 ≫ app₁ = app₀ ≫ g.map' 0 1) + (w₁ : f.map' 1 2 ≫ app₂ = app₁ ≫ g.map' 1 2) + (w₂ : f.map' 2 3 ≫ app₃ = app₂ ≫ g.map' 2 3) + +/-- Constructor for morphisms in `ComposableArrows C 3`. -/ +def homMk₃ : f ⟶ g := homMkSucc app₀ (homMk₂ app₁ app₂ app₃ w₁ w₂) w₀ + +@[simp] +lemma homMk₃_app_zero : (homMk₃ app₀ app₁ app₂ app₃ w₀ w₁ w₂).app 0 = app₀ := rfl + +@[simp] +lemma homMk₃_app_one : (homMk₃ app₀ app₁ app₂ app₃ w₀ w₁ w₂).app 1 = app₁ := rfl + +@[simp] +lemma homMk₃_app_two : (homMk₃ app₀ app₁ app₂ app₃ w₀ w₁ w₂).app ⟨2, by linarith⟩ = app₂ := rfl + +@[simp] +lemma homMk₃_app_three : (homMk₃ app₀ app₁ app₂ app₃ w₀ w₁ w₂).app ⟨3, by linarith⟩ = app₃ := rfl + +end + +@[ext] +lemma hom_ext₃ {f g : ComposableArrows C 3} {φ φ' : f ⟶ g} + (h₀ : app' φ 0 = app' φ' 0) (h₁ : app' φ 1 = app' φ' 1) (h₂ : app' φ 2 = app' φ' 2) + (h₃ : app' φ 3 = app' φ' 3) : + φ = φ' := + hom_ext_succ h₀ (hom_ext₂ h₁ h₂ h₃) + +/-- Constructor for isomorphisms in `ComposableArrows C 3`. 
-/ +@[simps] +def isoMk₃ {f g : ComposableArrows C 3} + (app₀ : f.obj' 0 ≅ g.obj' 0) (app₁ : f.obj' 1 ≅ g.obj' 1) (app₂ : f.obj' 2 ≅ g.obj' 2) + (app₃ : f.obj' 3 ≅ g.obj' 3) + (w₀ : f.map' 0 1 ≫ app₁.hom = app₀.hom ≫ g.map' 0 1) + (w₁ : f.map' 1 2 ≫ app₂.hom = app₁.hom ≫ g.map' 1 2) + (w₂ : f.map' 2 3 ≫ app₃.hom = app₂.hom ≫ g.map' 2 3) : f ≅ g where + hom := homMk₃ app₀.hom app₁.hom app₂.hom app₃.hom w₀ w₁ w₂ + inv := homMk₃ app₀.inv app₁.inv app₂.inv app₃.inv + (by rw [← cancel_epi app₀.hom, ← reassoc_of% w₀, app₁.hom_inv_id, + comp_id, app₀.hom_inv_id_assoc]) + (by rw [← cancel_epi app₁.hom, ← reassoc_of% w₁, app₂.hom_inv_id, + comp_id, app₁.hom_inv_id_assoc]) + (by rw [← cancel_epi app₂.hom, ← reassoc_of% w₂, app₃.hom_inv_id, + comp_id, app₂.hom_inv_id_assoc]) + +lemma ext₃ {f g : ComposableArrows C 3} + (h₀ : f.obj' 0 = g.obj' 0) (h₁ : f.obj' 1 = g.obj' 1) (h₂ : f.obj' 2 = g.obj' 2) + (h₃ : f.obj' 3 = g.obj' 3) + (w₀ : f.map' 0 1 = eqToHom h₀ ≫ g.map' 0 1 ≫ eqToHom h₁.symm) + (w₁ : f.map' 1 2 = eqToHom h₁ ≫ g.map' 1 2 ≫ eqToHom h₂.symm) + (w₂ : f.map' 2 3 = eqToHom h₂ ≫ g.map' 2 3 ≫ eqToHom h₃.symm) : f = g := + ext_succ h₀ (ext₂ h₁ h₂ h₃ w₁ w₂) w₀ + +lemma mk₃_surjective (X : ComposableArrows C 3) : + ∃ (X₀ X₁ X₂ X₃ : C) (f₀ : X₀ ⟶ X₁) (f₁ : X₁ ⟶ X₂) (f₂ : X₂ ⟶ X₃), X = mk₃ f₀ f₁ f₂ := + ⟨_, _, _, _, X.map' 0 1, X.map' 1 2, X.map' 2 3, + ext₃ rfl rfl rfl rfl (by simp) (by simp) (by simp)⟩ + end ComposableArrows variable {C} diff --git a/Mathlib/CategoryTheory/EssentiallySmall.lean b/Mathlib/CategoryTheory/EssentiallySmall.lean index 37add555cce7e..4a82484b465b0 100644 --- a/Mathlib/CategoryTheory/EssentiallySmall.lean +++ b/Mathlib/CategoryTheory/EssentiallySmall.lean @@ -3,9 +3,10 @@ Copyright (c) 2021 Scott Morrison. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Scott Morrison -/ -import Mathlib.Logic.Small.Basic import Mathlib.CategoryTheory.Category.ULift import Mathlib.CategoryTheory.Skeletal +import Mathlib.Logic.UnivLE +import Mathlib.Logic.Small.Basic #align_import category_theory.essentially_small from "leanprover-community/mathlib"@"f7707875544ef1f81b32cb68c79e0e24e45a0e76" @@ -33,6 +34,7 @@ namespace CategoryTheory /-- A category is `EssentiallySmall.{w}` if there exists an equivalence to some `S : Type w` with `[SmallCategory S]`. -/ +@[pp_with_univ] class EssentiallySmall (C : Type u) [Category.{v} C] : Prop where /-- An essentially small category is equivalent to some small category. -/ equiv_smallCategory : ∃ (S : Type w) (_ : SmallCategory S), Nonempty (C ≌ S) @@ -47,6 +49,7 @@ theorem EssentiallySmall.mk' {C : Type u} [Category.{v} C] {S : Type w} [SmallCa /-- An arbitrarily chosen small model for an essentially small category. -/ --@[nolint has_nonempty_instance] +@[pp_with_univ] def SmallModel (C : Type u) [Category.{v} C] [EssentiallySmall.{w} C] : Type w := Classical.choose (@EssentiallySmall.equiv_smallCategory C _ _) #align category_theory.small_model CategoryTheory.SmallModel @@ -89,6 +92,7 @@ theorem essentiallySmallSelf : EssentiallySmall.{max w v u} C := See `ShrinkHoms C` for a category instance where every hom set has been replaced by a small model. -/ +@[pp_with_univ] class LocallySmall (C : Type u) [Category.{v} C] : Prop where /-- A locally small category has small hom-types. 
-/ hom_small : ∀ X Y : C, Small.{w} (X ⟶ Y) := by infer_instance @@ -118,6 +122,9 @@ instance (priority := 100) locallySmall_self (C : Type u) [Category.{v} C] : Loc where #align category_theory.locally_small_self CategoryTheory.locallySmall_self +instance (priority := 100) locallySmall_of_univLE (C : Type u) [Category.{v} C] [UnivLE.{v, w}] : + LocallySmall.{w} C where + theorem locallySmall_max {C : Type u} [Category.{v} C] : LocallySmall.{max v w} C where hom_small _ _ := small_max.{w} _ @@ -131,6 +138,7 @@ instance (priority := 100) locallySmall_of_essentiallySmall (C : Type u) [Catego we'll put a `Category.{w}` instance on `ShrinkHoms C`. -/ --@[nolint has_nonempty_instance] +@[pp_with_univ] def ShrinkHoms (C : Type u) := C #align category_theory.shrink_homs CategoryTheory.ShrinkHoms @@ -200,6 +208,17 @@ noncomputable def equivalence : C ≌ ShrinkHoms C := end ShrinkHoms +namespace Shrink + +noncomputable instance [Small.{w} C] : Category.{v} (Shrink.{w} C) := + InducedCategory.category (equivShrink C).symm + +/-- The categorical equivalence between `C` and `Shrink C`, when `C` is small. -/ +noncomputable def equivalence [Small.{w} C] : C ≌ Shrink.{w} C := + (inducedFunctor (equivShrink C).symm).asEquivalence.symm + +end Shrink + /-- A category is essentially small if and only if the underlying type of its skeleton (i.e. the "set" of isomorphism classes) is small, and it is locally small. @@ -244,4 +263,6 @@ theorem essentiallySmall_iff_of_thin {C : Type u} [Category.{v} C] [Quiver.IsThi simp [essentiallySmall_iff, CategoryTheory.locallySmall_of_thin] #align category_theory.essentially_small_iff_of_thin CategoryTheory.essentiallySmall_iff_of_thin +instance [Small.{w} C] : Small.{w} (Discrete C) := small_map discreteEquiv + end CategoryTheory diff --git a/Mathlib/CategoryTheory/Extensive.lean b/Mathlib/CategoryTheory/Extensive.lean index 156535410d5a8..0ae838fd8cef7 100644 --- a/Mathlib/CategoryTheory/Extensive.lean +++ b/Mathlib/CategoryTheory/Extensive.lean @@ -80,24 +80,24 @@ attribute [instance] PreservesPullbacksOfInclusions.preservesPullbackInl and binary coproducts are universal. -/ class FinitaryPreExtensive (C : Type u) [Category.{v} C] : Prop where [hasFiniteCoproducts : HasFiniteCoproducts C] - [HasPullbacksOfInclusions : HasPullbacksOfInclusions C] + [hasPullbacksOfInclusions : HasPullbacksOfInclusions C] /-- In a finitary extensive category, all coproducts are van Kampen-/ universal' : ∀ {X Y : C} (c : BinaryCofan X Y), IsColimit c → IsUniversalColimit c attribute [instance] FinitaryPreExtensive.hasFiniteCoproducts -attribute [instance] FinitaryPreExtensive.HasPullbacksOfInclusions +attribute [instance] FinitaryPreExtensive.hasPullbacksOfInclusions /-- A category is (finitary) extensive if it has finite coproducts, and binary coproducts are van Kampen. 
-/ class FinitaryExtensive (C : Type u) [Category.{v} C] : Prop where [hasFiniteCoproducts : HasFiniteCoproducts C] - [HasPullbacksOfInclusions : HasPullbacksOfInclusions C] + [hasPullbacksOfInclusions : HasPullbacksOfInclusions C] /-- In a finitary extensive category, all coproducts are van Kampen-/ van_kampen' : ∀ {X Y : C} (c : BinaryCofan X Y), IsColimit c → IsVanKampenColimit c #align category_theory.finitary_extensive CategoryTheory.FinitaryExtensive attribute [instance] FinitaryExtensive.hasFiniteCoproducts -attribute [instance] FinitaryExtensive.HasPullbacksOfInclusions +attribute [instance] FinitaryExtensive.hasPullbacksOfInclusions theorem FinitaryExtensive.vanKampen [FinitaryExtensive C] {F : Discrete WalkingPair ⥤ C} (c : Cocone F) (hc : IsColimit c) : IsVanKampenColimit c := by diff --git a/Mathlib/CategoryTheory/Filtered/Basic.lean b/Mathlib/CategoryTheory/Filtered/Basic.lean index 26fa8d03e5f04..bbe37867f6cc9 100644 --- a/Mathlib/CategoryTheory/Filtered/Basic.lean +++ b/Mathlib/CategoryTheory/Filtered/Basic.lean @@ -83,7 +83,7 @@ See . (They also define a diagram bei -/ class IsFiltered extends IsFilteredOrEmpty C : Prop where /-- a filtered category must be non empty -/ - [Nonempty : Nonempty C] + [nonempty : Nonempty C] #align category_theory.is_filtered CategoryTheory.IsFiltered instance (priority := 100) isFilteredOrEmpty_of_semilatticeSup (α : Type u) [SemilatticeSup α] : @@ -123,7 +123,6 @@ instance : IsFiltered (Discrete PUnit) where cocone_maps X Y f g := ⟨⟨PUnit.unit⟩, ⟨⟨by trivial⟩⟩, by apply ULift.ext apply Subsingleton.elim⟩ - Nonempty := ⟨⟨PUnit.unit⟩⟩ namespace IsFiltered @@ -234,10 +233,10 @@ variable [IsFiltered C] /-- Any finite collection of objects in a filtered category has an object "to the right". -/ -theorem sup_objs_exists (O : Finset C) : ∃ S : C, ∀ {X}, X ∈ O → _root_.Nonempty (X ⟶ S) := by +theorem sup_objs_exists (O : Finset C) : ∃ S : C, ∀ {X}, X ∈ O → Nonempty (X ⟶ S) := by classical induction' O using Finset.induction with X O' nm h - · exact ⟨Classical.choice IsFiltered.Nonempty, by intro; simp⟩ + · exact ⟨Classical.choice IsFiltered.nonempty, by intro; simp⟩ · obtain ⟨S', w'⟩ := h use max X S' rintro Y mY @@ -312,7 +311,7 @@ variable {J : Type v} [SmallCategory J] [FinCategory J] /-- If we have `IsFiltered C`, then for any functor `F : J ⥤ C` with `FinCategory J`, there exists a cocone over `F`. -/ -theorem cocone_nonempty (F : J ⥤ C) : _root_.Nonempty (Cocone F) := by +theorem cocone_nonempty (F : J ⥤ C) : Nonempty (Cocone F) := by classical let O := Finset.univ.image F.obj let H : Finset (Σ' (X Y : C) (_ : X ∈ O) (_ : Y ∈ O), X ⟶ Y) := @@ -342,7 +341,7 @@ variable {D : Type u₁} [Category.{v₁} D] -/ theorem of_right_adjoint {L : D ⥤ C} {R : C ⥤ D} (h : L ⊣ R) : IsFiltered D := { IsFilteredOrEmpty.of_right_adjoint h with - Nonempty := IsFiltered.Nonempty.map R.obj } + nonempty := IsFiltered.nonempty.map R.obj } #align category_theory.is_filtered.of_right_adjoint CategoryTheory.IsFiltered.of_right_adjoint /-- If `C` is filtered, and we have a right adjoint functor `R : C ⥤ D`, then `D` is filtered. -/ @@ -512,7 +511,7 @@ See . 
-/ class IsCofiltered extends IsCofilteredOrEmpty C : Prop where /-- a cofiltered category must be non empty -/ - [Nonempty : Nonempty C] + [nonempty : Nonempty C] #align category_theory.is_cofiltered CategoryTheory.IsCofiltered instance (priority := 100) isCofilteredOrEmpty_of_semilatticeInf (α : Type u) [SemilatticeInf α] : @@ -551,7 +550,6 @@ instance : IsCofiltered (Discrete PUnit) where cone_maps X Y f g := ⟨⟨PUnit.unit⟩, ⟨⟨by trivial⟩⟩, by apply ULift.ext apply Subsingleton.elim⟩ - Nonempty := ⟨⟨PUnit.unit⟩⟩ namespace IsCofiltered @@ -678,10 +676,10 @@ variable [IsCofiltered C] /-- Any finite collection of objects in a cofiltered category has an object "to the left". -/ -theorem inf_objs_exists (O : Finset C) : ∃ S : C, ∀ {X}, X ∈ O → _root_.Nonempty (S ⟶ X) := by +theorem inf_objs_exists (O : Finset C) : ∃ S : C, ∀ {X}, X ∈ O → Nonempty (S ⟶ X) := by classical induction' O using Finset.induction with X O' nm h - · exact ⟨Classical.choice IsCofiltered.Nonempty, by intro; simp⟩ + · exact ⟨Classical.choice IsCofiltered.nonempty, by intro; simp⟩ · obtain ⟨S', w'⟩ := h use min X S' rintro Y mY @@ -756,7 +754,7 @@ variable {J : Type w} [SmallCategory J] [FinCategory J] /-- If we have `IsCofiltered C`, then for any functor `F : J ⥤ C` with `FinCategory J`, there exists a cone over `F`. -/ -theorem cone_nonempty (F : J ⥤ C) : _root_.Nonempty (Cone F) := by +theorem cone_nonempty (F : J ⥤ C) : Nonempty (Cone F) := by classical let O := Finset.univ.image F.obj let H : Finset (Σ' (X Y : C) (_ : X ∈ O) (_ : Y ∈ O), X ⟶ Y) := @@ -788,7 +786,7 @@ then `D` is cofiltered. -/ theorem of_left_adjoint {L : C ⥤ D} {R : D ⥤ C} (h : L ⊣ R) : IsCofiltered D := { IsCofilteredOrEmpty.of_left_adjoint h with - Nonempty := IsCofiltered.Nonempty.map L.obj } + nonempty := IsCofiltered.nonempty.map L.obj } #align category_theory.is_cofiltered.of_left_adjoint CategoryTheory.IsCofiltered.of_left_adjoint /-- If `C` is cofiltered, and we have a left adjoint functor `L : C ⥤ D`, then `D` is cofiltered. -/ @@ -821,7 +819,7 @@ instance isCofilteredOrEmpty_op_of_isFilteredOrEmpty [IsFilteredOrEmpty C] : exact IsFiltered.coeq_condition f.unop g.unop⟩ instance isCofiltered_op_of_isFiltered [IsFiltered C] : IsCofiltered Cᵒᵖ where - Nonempty := ⟨op IsFiltered.Nonempty.some⟩ + nonempty := letI : Nonempty C := IsFiltered.nonempty; inferInstance #align category_theory.is_cofiltered_op_of_is_filtered CategoryTheory.isCofiltered_op_of_isFiltered instance isFilteredOrEmpty_op_of_isCofilteredOrEmpty [IsCofilteredOrEmpty C] : @@ -836,7 +834,7 @@ instance isFilteredOrEmpty_op_of_isCofilteredOrEmpty [IsCofilteredOrEmpty C] : exact IsCofiltered.eq_condition f.unop g.unop⟩ instance isFiltered_op_of_isCofiltered [IsCofiltered C] : IsFiltered Cᵒᵖ where - Nonempty := ⟨op IsCofiltered.Nonempty.some⟩ + nonempty := letI : Nonempty C := IsCofiltered.nonempty; inferInstance #align category_theory.is_filtered_op_of_is_cofiltered CategoryTheory.isFiltered_op_of_isCofiltered /-- If Cᵒᵖ is filtered or empty, then C is cofiltered or empty. 
-/ diff --git a/Mathlib/CategoryTheory/Filtered/Small.lean b/Mathlib/CategoryTheory/Filtered/Small.lean index 6f1df9d2d8e3a..337837eaea2c8 100644 --- a/Mathlib/CategoryTheory/Filtered/Small.lean +++ b/Mathlib/CategoryTheory/Filtered/Small.lean @@ -156,9 +156,9 @@ noncomputable def factoringCompInclusion : factoring F ⋙ inclusion F ≅ F := instance : IsFilteredOrEmpty (SmallFilteredIntermediate F) := IsFilteredOrEmpty.of_equivalence (equivSmallModel _) -instance [_root_.Nonempty D] : IsFiltered (SmallFilteredIntermediate F) := +instance [Nonempty D] : IsFiltered (SmallFilteredIntermediate F) := { (inferInstance : IsFilteredOrEmpty _) with - Nonempty := Nonempty.map (factoring F).obj inferInstance } + nonempty := Nonempty.map (factoring F).obj inferInstance } end SmallFilteredIntermediate @@ -289,9 +289,9 @@ noncomputable def factoringCompInclusion : factoring F ⋙ inclusion F ≅ F := instance : IsCofilteredOrEmpty (SmallCofilteredIntermediate F) := IsCofilteredOrEmpty.of_equivalence (equivSmallModel _) -instance [_root_.Nonempty D] : IsCofiltered (SmallCofilteredIntermediate F) := +instance [Nonempty D] : IsCofiltered (SmallCofilteredIntermediate F) := { (inferInstance : IsCofilteredOrEmpty _) with - Nonempty := Nonempty.map (factoring F).obj inferInstance } + nonempty := Nonempty.map (factoring F).obj inferInstance } end SmallCofilteredIntermediate diff --git a/Mathlib/CategoryTheory/Functor/Flat.lean b/Mathlib/CategoryTheory/Functor/Flat.lean index 57d41d0658911..a25b8d3184f42 100644 --- a/Mathlib/CategoryTheory/Functor/Flat.lean +++ b/Mathlib/CategoryTheory/Functor/Flat.lean @@ -67,7 +67,7 @@ class RepresentablyFlat (F : C ⥤ D) : Prop where attribute [instance] RepresentablyFlat.cofiltered -attribute [local instance] IsCofiltered.Nonempty +attribute [local instance] IsCofiltered.nonempty instance RepresentablyFlat.id : RepresentablyFlat (𝟭 C) := by constructor @@ -142,7 +142,7 @@ attribute [local instance] hasFiniteLimits_of_hasFiniteLimits_of_size theorem cofiltered_of_hasFiniteLimits [HasFiniteLimits C] : IsCofiltered C := { cone_objs := fun A B => ⟨Limits.prod A B, Limits.prod.fst, Limits.prod.snd, trivial⟩ cone_maps := fun _ _ f g => ⟨equalizer f g, equalizer.ι f g, equalizer.condition f g⟩ - Nonempty := ⟨⊤_ C⟩ } + nonempty := ⟨⊤_ C⟩ } #align category_theory.cofiltered_of_has_finite_limits CategoryTheory.cofiltered_of_hasFiniteLimits theorem flat_of_preservesFiniteLimits [HasFiniteLimits C] (F : C ⥤ D) [PreservesFiniteLimits F] : diff --git a/Mathlib/CategoryTheory/Idempotents/FunctorExtension.lean b/Mathlib/CategoryTheory/Idempotents/FunctorExtension.lean index c7fbd1d5adfff..a573ca4eadc7d 100644 --- a/Mathlib/CategoryTheory/Idempotents/FunctorExtension.lean +++ b/Mathlib/CategoryTheory/Idempotents/FunctorExtension.lean @@ -20,23 +20,14 @@ when `D` is idempotent complete, we get equivalences `karoubiUniversal₂ C D : C ⥤ D ≌ Karoubi C ⥤ Karoubi D` and `karoubiUniversal C D : C ⥤ D ≌ Karoubi C ⥤ D`. -We occasionally state and use equalities of functors because it is -sometimes convenient to use rewrites when proving properties of -functors obtained using the constructions in this file. Users are -encouraged to use the corresponding natural isomorphism -whenever possible. 
- -/ - -open CategoryTheory.Category - -open CategoryTheory.Idempotents.Karoubi - namespace CategoryTheory namespace Idempotents +open Category Karoubi + variable {C D E : Type*} [Category C] [Category D] [Category E] /-- A natural transformation between functors `Karoubi C ⥤ D` is determined @@ -44,9 +35,7 @@ by its value on objects coming from `C`. -/ theorem natTrans_eq {F G : Karoubi C ⥤ D} (φ : F ⟶ G) (P : Karoubi C) : φ.app P = F.map (decompId_i P) ≫ φ.app P.X ≫ G.map (decompId_p P) := by rw [← φ.naturality, ← assoc, ← F.map_comp] - conv => - lhs - rw [← id_comp (φ.app P), ← F.map_id] + conv_lhs => rw [← id_comp (φ.app P), ← F.map_id] congr apply decompId #align category_theory.idempotents.nat_trans_eq CategoryTheory.Idempotents.natTrans_eq @@ -110,28 +99,19 @@ def functorExtension₁ : (C ⥤ Karoubi D) ⥤ Karoubi C ⥤ Karoubi D where simp only [assoc] #align category_theory.idempotents.functor_extension₁ CategoryTheory.Idempotents.functorExtension₁ -theorem functorExtension₁_comp_whiskeringLeft_toKaroubi : - functorExtension₁ C D ⋙ (whiskeringLeft C (Karoubi C) (Karoubi D)).obj (toKaroubi C) = - 𝟭 _ := by - refine' Functor.ext _ _ - · intro F - refine' Functor.ext _ _ - · intro X - ext - · simp - · simp - · aesop_cat - · intro F G φ - aesop_cat -#align category_theory.idempotents.functor_extension₁_comp_whiskering_left_to_karoubi CategoryTheory.Idempotents.functorExtension₁_comp_whiskeringLeft_toKaroubi - /-- The natural isomorphism expressing that functors `Karoubi C ⥤ Karoubi D` obtained using `functorExtension₁` actually extends the original functors `C ⥤ Karoubi D`. -/ @[simps!] -def functorExtension₁_comp_whiskeringLeft_toKaroubi_iso : +def functorExtension₁CompWhiskeringLeftToKaroubiIso : functorExtension₁ C D ⋙ (whiskeringLeft C (Karoubi C) (Karoubi D)).obj (toKaroubi C) ≅ 𝟭 _ := - eqToIso (functorExtension₁_comp_whiskeringLeft_toKaroubi C D) -#align category_theory.idempotents.functor_extension₁_comp_whiskering_left_to_karoubi_iso CategoryTheory.Idempotents.functorExtension₁_comp_whiskeringLeft_toKaroubi_iso + NatIso.ofComponents + (fun F => NatIso.ofComponents + (fun X => + { hom := { f := (F.obj X).p } + inv := { f := (F.obj X).p } }) + (fun {X Y} f => by aesop_cat)) + (by aesop_cat) +#align category_theory.idempotents.functor_extension₁_comp_whiskering_left_to_karoubi_iso CategoryTheory.Idempotents.functorExtension₁CompWhiskeringLeftToKaroubiIso /-- The counit isomorphism of the equivalence `(C ⥤ Karoubi D) ≌ (Karoubi C ⥤ Karoubi D)`. -/ @[simps!] 
@@ -177,26 +157,20 @@ def KaroubiUniversal₁.counitIso : def karoubiUniversal₁ : C ⥤ Karoubi D ≌ Karoubi C ⥤ Karoubi D where functor := functorExtension₁ C D inverse := (whiskeringLeft C (Karoubi C) (Karoubi D)).obj (toKaroubi C) - unitIso := (functorExtension₁_comp_whiskeringLeft_toKaroubi_iso C D).symm + unitIso := (functorExtension₁CompWhiskeringLeftToKaroubiIso C D).symm counitIso := KaroubiUniversal₁.counitIso C D functor_unitIso_comp F := by ext P - dsimp [FunctorExtension₁.map, KaroubiUniversal₁.counitIso] - simp only [eqToHom_app, Functor.id_obj, Functor.comp_obj, functorExtension₁_obj, - whiskeringLeft_obj_obj, eqToHom_f, FunctorExtension₁.obj_obj_X, toKaroubi_obj_X, - eqToHom_refl, comp_id, comp_p, ←comp_f, ← F.map_comp, P.idem] + dsimp + rw [comp_p, ← comp_f, ← F.map_comp, P.idem] #align category_theory.idempotents.karoubi_universal₁ CategoryTheory.Idempotents.karoubiUniversal₁ -theorem functorExtension₁_comp (F : C ⥤ Karoubi D) (G : D ⥤ Karoubi E) : - (functorExtension₁ C E).obj (F ⋙ (functorExtension₁ D E).obj G) = - (functorExtension₁ C D).obj F ⋙ (functorExtension₁ D E).obj G := by - refine' Functor.ext _ _ - · aesop_cat - · intro X Y f - ext - simp only [eqToHom_refl, id_comp, comp_id] - rfl -#align category_theory.idempotents.functor_extension₁_comp CategoryTheory.Idempotents.functorExtension₁_comp +/-- Compatibility isomorphisms of `functorExtension₁` with respect to the +composition of functors. -/ +def functorExtension₁Comp (F : C ⥤ Karoubi D) (G : D ⥤ Karoubi E) : + (functorExtension₁ C E).obj (F ⋙ (functorExtension₁ D E).obj G) ≅ + (functorExtension₁ C D).obj F ⋙ (functorExtension₁ D E).obj G := + Iso.refl _ /-- The canonical functor `(C ⥤ D) ⥤ (Karoubi C ⥤ Karoubi D)` -/ @[simps!] @@ -204,20 +178,19 @@ def functorExtension₂ : (C ⥤ D) ⥤ Karoubi C ⥤ Karoubi D := (whiskeringRight C D (Karoubi D)).obj (toKaroubi D) ⋙ functorExtension₁ C D #align category_theory.idempotents.functor_extension₂ CategoryTheory.Idempotents.functorExtension₂ -theorem functorExtension₂_comp_whiskeringLeft_toKaroubi : - functorExtension₂ C D ⋙ (whiskeringLeft C (Karoubi C) (Karoubi D)).obj (toKaroubi C) = - (whiskeringRight C D (Karoubi D)).obj (toKaroubi D) := by - simp only [functorExtension₂, Functor.assoc, functorExtension₁_comp_whiskeringLeft_toKaroubi, - Functor.comp_id] -#align category_theory.idempotents.functor_extension₂_comp_whiskering_left_to_karoubi CategoryTheory.Idempotents.functorExtension₂_comp_whiskeringLeft_toKaroubi - /-- The natural isomorphism expressing that functors `Karoubi C ⥤ Karoubi D` obtained using `functorExtension₂` actually extends the original functors `C ⥤ D`. -/ @[simps!] 
def functorExtension₂CompWhiskeringLeftToKaroubiIso : functorExtension₂ C D ⋙ (whiskeringLeft C (Karoubi C) (Karoubi D)).obj (toKaroubi C) ≅ (whiskeringRight C D (Karoubi D)).obj (toKaroubi D) := - eqToIso (functorExtension₂_comp_whiskeringLeft_toKaroubi C D) + NatIso.ofComponents + (fun F => NatIso.ofComponents + (fun X => + { hom := { f := 𝟙 _ } + inv := { f := 𝟙 _ } }) + (by aesop_cat)) + (by aesop_cat) #align category_theory.idempotents.functor_extension₂_comp_whiskering_left_to_karoubi_iso CategoryTheory.Idempotents.functorExtension₂CompWhiskeringLeftToKaroubiIso section IsIdempotentComplete diff --git a/Mathlib/CategoryTheory/Limits/ConcreteCategory.lean b/Mathlib/CategoryTheory/Limits/ConcreteCategory.lean index 63f63b2e969de..5d666c3a85a6f 100644 --- a/Mathlib/CategoryTheory/Limits/ConcreteCategory.lean +++ b/Mathlib/CategoryTheory/Limits/ConcreteCategory.lean @@ -3,12 +3,9 @@ Copyright (c) 2017 Scott Morrison. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Scott Morrison, Adam Topaz -/ +import Mathlib.CategoryTheory.ConcreteCategory.Basic import Mathlib.CategoryTheory.Limits.Preserves.Basic import Mathlib.CategoryTheory.Limits.Types -import Mathlib.CategoryTheory.Limits.Shapes.WidePullbacks -import Mathlib.CategoryTheory.Limits.Shapes.Multiequalizer -import Mathlib.CategoryTheory.ConcreteCategory.Basic -import Mathlib.CategoryTheory.Limits.Shapes.Kernels import Mathlib.Tactic.ApplyFun #align_import category_theory.limits.concrete_category from "leanprover-community/mathlib"@"c3019c79074b0619edb4b27553a91b2e82242395" @@ -57,112 +54,10 @@ theorem Concrete.limit_ext [HasLimit F] (x y : ↑(limit F)) : Concrete.isLimit_ext F (limit.isLimit _) _ _ #align category_theory.limits.concrete.limit_ext CategoryTheory.Limits.Concrete.limit_ext -section WidePullback - -open WidePullback - -open WidePullbackShape - -theorem Concrete.widePullback_ext {B : C} {ι : Type w} {X : ι → C} (f : ∀ j : ι, X j ⟶ B) - [HasWidePullback B X f] [PreservesLimit (wideCospan B X f) (forget C)] - (x y : ↑(widePullback B X f)) (h₀ : base f x = base f y) (h : ∀ j, π f j x = π f j y) : - x = y := by - apply Concrete.limit_ext - rintro (_ | j) - · exact h₀ - · apply h -#align category_theory.limits.concrete.wide_pullback_ext CategoryTheory.Limits.Concrete.widePullback_ext - -theorem Concrete.widePullback_ext' {B : C} {ι : Type w} [Nonempty ι] {X : ι → C} - (f : ∀ j : ι, X j ⟶ B) [HasWidePullback.{w} B X f] - [PreservesLimit (wideCospan B X f) (forget C)] (x y : ↑(widePullback B X f)) - (h : ∀ j, π f j x = π f j y) : x = y := by - apply Concrete.widePullback_ext _ _ _ _ h - inhabit ι - simp only [← π_arrow f default, comp_apply, h] -#align category_theory.limits.concrete.wide_pullback_ext' CategoryTheory.Limits.Concrete.widePullback_ext' - -end WidePullback - -section Multiequalizer - -theorem Concrete.multiequalizer_ext {I : MulticospanIndex.{w} C} [HasMultiequalizer I] - [PreservesLimit I.multicospan (forget C)] (x y : ↑(multiequalizer I)) - (h : ∀ t : I.L, Multiequalizer.ι I t x = Multiequalizer.ι I t y) : x = y := by - apply Concrete.limit_ext - rintro (a | b) - · apply h - · rw [← limit.w I.multicospan (WalkingMulticospan.Hom.fst b), comp_apply, comp_apply] - simp [h] -#align category_theory.limits.concrete.multiequalizer_ext CategoryTheory.Limits.Concrete.multiequalizer_ext - -/-- An auxiliary equivalence to be used in `multiequalizerEquiv` below.-/ -def Concrete.multiequalizerEquivAux (I : MulticospanIndex C) : - (I.multicospan ⋙ forget C).sections ≃ - { x : ∀ i : 
I.L, I.left i // ∀ i : I.R, I.fst i (x _) = I.snd i (x _) } where - toFun x := - ⟨fun i => x.1 (WalkingMulticospan.left _), fun i => by - have a := x.2 (WalkingMulticospan.Hom.fst i) - have b := x.2 (WalkingMulticospan.Hom.snd i) - rw [← b] at a - exact a⟩ - invFun x := - { val := fun j => - match j with - | WalkingMulticospan.left a => x.1 _ - | WalkingMulticospan.right b => I.fst b (x.1 _) - property := by - rintro (a | b) (a' | b') (f | f | f) - · simp - · rfl - · dsimp - exact (x.2 b').symm - · simp } - left_inv := by - intro x; ext (a | b) - · rfl - · rw [← x.2 (WalkingMulticospan.Hom.fst b)] - rfl - right_inv := by - intro x - ext i - rfl -#align category_theory.limits.concrete.multiequalizer_equiv_aux CategoryTheory.Limits.Concrete.multiequalizerEquivAux - -/-- The equivalence between the noncomputable multiequalizer and -the concrete multiequalizer. -/ -noncomputable def Concrete.multiequalizerEquiv (I : MulticospanIndex.{w} C) [HasMultiequalizer I] - [PreservesLimit I.multicospan (forget C)] : - (multiequalizer I : C) ≃ - { x : ∀ i : I.L, I.left i // ∀ i : I.R, I.fst i (x _) = I.snd i (x _) } := by - let h1 := limit.isLimit I.multicospan - let h2 := isLimitOfPreserves (forget C) h1 - let E := h2.conePointUniqueUpToIso (Types.limitConeIsLimit.{w, v} _) - exact Equiv.trans E.toEquiv (Concrete.multiequalizerEquivAux.{w, v} I) -#align category_theory.limits.concrete.multiequalizer_equiv CategoryTheory.Limits.Concrete.multiequalizerEquiv - -@[simp] -theorem Concrete.multiequalizerEquiv_apply (I : MulticospanIndex.{w} C) [HasMultiequalizer I] - [PreservesLimit I.multicospan (forget C)] (x : ↑(multiequalizer I)) (i : I.L) : - ((Concrete.multiequalizerEquiv I) x : ∀ i : I.L, I.left i) i = Multiequalizer.ι I i x := - rfl -#align category_theory.limits.concrete.multiequalizer_equiv_apply CategoryTheory.Limits.Concrete.multiequalizerEquiv_apply - -end Multiequalizer - --- TODO: Add analogous lemmas about products and equalizers. end Limits section Colimits --- We don't mark this as an `@[ext]` lemma as we don't always want to work elementwise. 
-theorem cokernel_funext {C : Type*} [Category C] [HasZeroMorphisms C] [ConcreteCategory C] - {M N K : C} {f : M ⟶ N} [HasCokernel f] {g h : cokernel f ⟶ K} - (w : ∀ n : N, g (cokernel.π f n) = h (cokernel.π f n)) : g = h := by - ext x - simpa using w x -#align category_theory.limits.cokernel_funext CategoryTheory.Limits.cokernel_funext - variable {C : Type u} [Category.{v} C] [ConcreteCategory.{v} C] {J : Type v} [SmallCategory J] (F : J ⥤ C) [PreservesColimit F (forget C)] @@ -293,37 +188,6 @@ theorem Concrete.colimit_rep_eq_iff_exists [HasColimit F] {i j : J} (x : F.obj i end FilteredColimits -section WidePushout - -open WidePushout - -open WidePushoutShape - -theorem Concrete.widePushout_exists_rep {B : C} {α : Type _} {X : α → C} (f : ∀ j : α, B ⟶ X j) - [HasWidePushout.{v} B X f] [PreservesColimit (wideSpan B X f) (forget C)] - (x : ↑(widePushout B X f)) : (∃ y : B, head f y = x) ∨ ∃ (i : α) (y : X i), ι f i y = x := by - obtain ⟨_ | j, y, rfl⟩ := Concrete.colimit_exists_rep _ x - · left - use y - rfl - · right - use j, y - rfl -#align category_theory.limits.concrete.wide_pushout_exists_rep CategoryTheory.Limits.Concrete.widePushout_exists_rep - -theorem Concrete.widePushout_exists_rep' {B : C} {α : Type _} [Nonempty α] {X : α → C} - (f : ∀ j : α, B ⟶ X j) [HasWidePushout.{v} B X f] [PreservesColimit (wideSpan B X f) (forget C)] - (x : ↑(widePushout B X f)) : ∃ (i : α) (y : X i), ι f i y = x := by - rcases Concrete.widePushout_exists_rep f x with (⟨y, rfl⟩ | ⟨i, y, rfl⟩) - · inhabit α - use default, f _ y - simp only [← arrow_ι _ default, comp_apply] - · use i, y -#align category_theory.limits.concrete.wide_pushout_exists_rep' CategoryTheory.Limits.Concrete.widePushout_exists_rep' - -end WidePushout - --- TODO: Add analogous lemmas about coproducts and coequalizers. end Colimits end CategoryTheory.Limits diff --git a/Mathlib/CategoryTheory/Limits/Final.lean b/Mathlib/CategoryTheory/Limits/Final.lean index eed8e1e40809f..0a473c1a2426d 100644 --- a/Mathlib/CategoryTheory/Limits/Final.lean +++ b/Mathlib/CategoryTheory/Limits/Final.lean @@ -852,7 +852,7 @@ adjoints preserve filteredness), as right adjoints are always final, see `final_ -/ theorem IsFiltered.of_final (F : C ⥤ D) [Final F] [IsFiltered C] : IsFiltered D := { IsFilteredOrEmpty.of_final F with - Nonempty := Nonempty.map F.obj IsFiltered.Nonempty } + nonempty := Nonempty.map F.obj IsFiltered.nonempty } /-- Initial functors preserve cofilteredness. diff --git a/Mathlib/CategoryTheory/Limits/HasLimits.lean b/Mathlib/CategoryTheory/Limits/HasLimits.lean index ad52486fbe107..cfa27fa164d4f 100644 --- a/Mathlib/CategoryTheory/Limits/HasLimits.lean +++ b/Mathlib/CategoryTheory/Limits/HasLimits.lean @@ -5,6 +5,7 @@ Authors: Reid Barton, Mario Carneiro, Scott Morrison, Floris van Doorn -/ import Mathlib.CategoryTheory.Limits.IsLimit import Mathlib.CategoryTheory.Category.ULift +import Mathlib.CategoryTheory.EssentiallySmall #align_import category_theory.limits.has_limits from "leanprover-community/mathlib"@"2738d2ca56cbc63be80c3bd48e9ed90ad94e947d" @@ -617,12 +618,17 @@ theorem hasLimitsOfShape_of_equivalence {J' : Type u₂} [Category.{v₂} J'] (e variable (C) +/-- A category that has larger limits also has smaller limits. 
-/ +theorem hasLimitsOfSizeOfUnivLE [UnivLE.{v₂, v₁}] [UnivLE.{u₂, u₁}] + [HasLimitsOfSize.{v₁, u₁} C] : HasLimitsOfSize.{v₂, u₂} C where + has_limits_of_shape J {_} := hasLimitsOfShape_of_equivalence + ((ShrinkHoms.equivalence J).trans <| Shrink.equivalence _).symm + /-- `hasLimitsOfSizeShrink.{v u} C` tries to obtain `HasLimitsOfSize.{v u} C` from some other `HasLimitsOfSize C`. -/ theorem hasLimitsOfSizeShrink [HasLimitsOfSize.{max v₁ v₂, max u₁ u₂} C] : - HasLimitsOfSize.{v₁, u₁} C := - ⟨fun J _ => hasLimitsOfShape_of_equivalence (ULiftHomULiftCategory.equiv.{v₂, u₂} J).symm⟩ + HasLimitsOfSize.{v₁, u₁} C := hasLimitsOfSizeOfUnivLE.{max v₁ v₂, max u₁ u₂} C #align category_theory.limits.has_limits_of_size_shrink CategoryTheory.Limits.hasLimitsOfSizeShrink instance (priority := 100) hasSmallestLimitsOfHasLimits [HasLimits C] : HasLimitsOfSize.{0, 0} C := @@ -1197,17 +1203,22 @@ theorem hasColimitsOfShape_of_equivalence {J' : Type u₂} [Category.{v₂} J'] variable (C) +/-- A category that has larger colimits also has smaller colimits. -/ +theorem hasColimitsOfSizeOfUnivLE [UnivLE.{v₂, v₁}] [UnivLE.{u₂, u₁}] + [HasColimitsOfSize.{v₁, u₁} C] : HasColimitsOfSize.{v₂, u₂} C where + has_colimits_of_shape J {_} := hasColimitsOfShape_of_equivalence + ((ShrinkHoms.equivalence J).trans <| Shrink.equivalence _).symm + /-- `hasColimitsOfSizeShrink.{v u} C` tries to obtain `HasColimitsOfSize.{v u} C` from some other `HasColimitsOfSize C`. -/ -theorem hasColimitsOfSize_shrink [HasColimitsOfSize.{max v₁ v₂, max u₁ u₂} C] : - HasColimitsOfSize.{v₁, u₁} C := - ⟨fun J _ => hasColimitsOfShape_of_equivalence (ULiftHomULiftCategory.equiv.{v₂, u₂} J).symm⟩ -#align category_theory.limits.has_colimits_of_size_shrink CategoryTheory.Limits.hasColimitsOfSize_shrink +theorem hasColimitsOfSizeShrink [HasColimitsOfSize.{max v₁ v₂, max u₁ u₂} C] : + HasColimitsOfSize.{v₁, u₁} C := hasColimitsOfSizeOfUnivLE.{max v₁ v₂, max u₁ u₂} C +#align category_theory.limits.has_colimits_of_size_shrink CategoryTheory.Limits.hasColimitsOfSizeShrink instance (priority := 100) hasSmallestColimitsOfHasColimits [HasColimits C] : HasColimitsOfSize.{0, 0} C := - hasColimitsOfSize_shrink.{0, 0} C + hasColimitsOfSizeShrink.{0, 0} C #align category_theory.limits.has_smallest_colimits_of_has_colimits CategoryTheory.Limits.hasSmallestColimitsOfHasColimits end Colimit diff --git a/Mathlib/CategoryTheory/Limits/Preserves/Basic.lean b/Mathlib/CategoryTheory/Limits/Preserves/Basic.lean index f683fb5e0d43f..7ef5183757571 100644 --- a/Mathlib/CategoryTheory/Limits/Preserves/Basic.lean +++ b/Mathlib/CategoryTheory/Limits/Preserves/Basic.lean @@ -271,13 +271,18 @@ def preservesLimitsOfShapeOfEquiv {J' : Type w₂} [Category.{w₂'} J'] (e : J simp [← Functor.map_comp] } #align category_theory.limits.preserves_limits_of_shape_of_equiv CategoryTheory.Limits.preservesLimitsOfShapeOfEquiv +/-- A functor preserving larger limits also preserves smaller limits. -/ +def preservesLimitsOfSizeOfUnivLE (F : C ⥤ D) [UnivLE.{w, w'}] [UnivLE.{w₂, w₂'}] + [PreservesLimitsOfSize.{w', w₂'} F] : PreservesLimitsOfSize.{w, w₂} F where + preservesLimitsOfShape {J} := preservesLimitsOfShapeOfEquiv + ((ShrinkHoms.equivalence J).trans <| Shrink.equivalence _).symm F + -- See library note [dsimp, simp]. /-- `PreservesLimitsOfSizeShrink.{w w'} F` tries to obtain `PreservesLimitsOfSize.{w w'} F` from some other `PreservesLimitsOfSize F`. 
-/ def preservesLimitsOfSizeShrink (F : C ⥤ D) [PreservesLimitsOfSize.{max w w₂, max w' w₂'} F] : - PreservesLimitsOfSize.{w, w'} F := - ⟨fun {J} _ => preservesLimitsOfShapeOfEquiv (ULiftHomULiftCategory.equiv.{w₂, w₂'} J).symm F⟩ + PreservesLimitsOfSize.{w, w'} F := preservesLimitsOfSizeOfUnivLE.{max w w₂, max w' w₂'} F #align category_theory.limits.preserves_limits_of_size_shrink CategoryTheory.Limits.preservesLimitsOfSizeShrink /-- Preserving limits at any universe level implies preserving limits in universe `0`. -/ @@ -335,15 +340,19 @@ def preservesColimitsOfShapeOfEquiv {J' : Type w₂} [Category.{w₂'} J'] (e : simp [← Functor.map_comp] } #align category_theory.limits.preserves_colimits_of_shape_of_equiv CategoryTheory.Limits.preservesColimitsOfShapeOfEquiv +/-- A functor preserving larger colimits also preserves smaller colimits. -/ +def preservesColimitsOfSizeOfUnivLE (F : C ⥤ D) [UnivLE.{w, w'}] [UnivLE.{w₂, w₂'}] + [PreservesColimitsOfSize.{w', w₂'} F] : PreservesColimitsOfSize.{w, w₂} F where + preservesColimitsOfShape {J} := preservesColimitsOfShapeOfEquiv + ((ShrinkHoms.equivalence J).trans <| Shrink.equivalence _).symm F + -- See library note [dsimp, simp]. /-- `PreservesColimitsOfSizeShrink.{w w'} F` tries to obtain `PreservesColimitsOfSize.{w w'} F` from some other `PreservesColimitsOfSize F`. -/ def preservesColimitsOfSizeShrink (F : C ⥤ D) [PreservesColimitsOfSize.{max w w₂, max w' w₂'} F] : - PreservesColimitsOfSize.{w, w'} F := - ⟨fun {J} => - preservesColimitsOfShapeOfEquiv (ULiftHomULiftCategory.equiv.{w₂, w₂'} J).symm F⟩ + PreservesColimitsOfSize.{w, w'} F := preservesColimitsOfSizeOfUnivLE.{max w w₂, max w' w₂'} F #align category_theory.limits.preserves_colimits_of_size_shrink CategoryTheory.Limits.preservesColimitsOfSizeShrink /-- Preserving colimits at any universe implies preserving colimits at universe `0`. -/ @@ -616,12 +625,17 @@ def reflectsLimitsOfShapeOfEquiv {J' : Type w₂} [Category.{w₂'} J'] (e : J exact IsLimit.whiskerEquivalence t _ } #align category_theory.limits.reflects_limits_of_shape_of_equiv CategoryTheory.Limits.reflectsLimitsOfShapeOfEquiv +/-- A functor reflecting larger limits also reflects smaller limits. -/ +def reflectsLimitsOfSizeOfUnivLE (F : C ⥤ D) [UnivLE.{w, w'}] [UnivLE.{w₂, w₂'}] + [ReflectsLimitsOfSize.{w', w₂'} F] : ReflectsLimitsOfSize.{w, w₂} F where + reflectsLimitsOfShape {J} := reflectsLimitsOfShapeOfEquiv + ((ShrinkHoms.equivalence J).trans <| Shrink.equivalence _).symm F + /-- `reflectsLimitsOfSizeShrink.{w w'} F` tries to obtain `reflectsLimitsOfSize.{w w'} F` from some other `reflectsLimitsOfSize F`. -/ def reflectsLimitsOfSizeShrink (F : C ⥤ D) [ReflectsLimitsOfSize.{max w w₂, max w' w₂'} F] : - ReflectsLimitsOfSize.{w, w'} F := - ⟨fun {J} => reflectsLimitsOfShapeOfEquiv (ULiftHomULiftCategory.equiv.{w₂, w₂'} J).symm F⟩ + ReflectsLimitsOfSize.{w, w'} F := reflectsLimitsOfSizeOfUnivLE.{max w w₂, max w' w₂'} F #align category_theory.limits.reflects_limits_of_size_shrink CategoryTheory.Limits.reflectsLimitsOfSizeShrink /-- Reflecting limits at any universe implies reflecting limits at universe `0`. -/ @@ -726,12 +740,17 @@ def reflectsColimitsOfShapeOfEquiv {J' : Type w₂} [Category.{w₂'} J'] (e : J exact IsColimit.whiskerEquivalence t _ } #align category_theory.limits.reflects_colimits_of_shape_of_equiv CategoryTheory.Limits.reflectsColimitsOfShapeOfEquiv +/-- A functor reflecting larger colimits also reflects smaller colimits. 
-/ +def reflectsColimitsOfSizeOfUnivLE (F : C ⥤ D) [UnivLE.{w, w'}] [UnivLE.{w₂, w₂'}] + [ReflectsColimitsOfSize.{w', w₂'} F] : ReflectsColimitsOfSize.{w, w₂} F where + reflectsColimitsOfShape {J} := reflectsColimitsOfShapeOfEquiv + ((ShrinkHoms.equivalence J).trans <| Shrink.equivalence _).symm F + /-- `reflectsColimitsOfSizeShrink.{w w'} F` tries to obtain `reflectsColimitsOfSize.{w w'} F` from some other `reflectsColimitsOfSize F`. -/ def reflectsColimitsOfSizeShrink (F : C ⥤ D) [ReflectsColimitsOfSize.{max w w₂, max w' w₂'} F] : - ReflectsColimitsOfSize.{w, w'} F := - ⟨fun {J} => reflectsColimitsOfShapeOfEquiv (ULiftHomULiftCategory.equiv.{w₂, w₂'} J).symm F⟩ + ReflectsColimitsOfSize.{w, w'} F := reflectsColimitsOfSizeOfUnivLE.{max w w₂, max w' w₂'} F #align category_theory.limits.reflects_colimits_of_size_shrink CategoryTheory.Limits.reflectsColimitsOfSizeShrink /-- Reflecting colimits at any universe implies reflecting colimits at universe `0`. -/ diff --git a/Mathlib/CategoryTheory/Limits/Shapes/BinaryProducts.lean b/Mathlib/CategoryTheory/Limits/Shapes/BinaryProducts.lean index e1cb50e44e115..ea1979e8cc0dd 100644 --- a/Mathlib/CategoryTheory/Limits/Shapes/BinaryProducts.lean +++ b/Mathlib/CategoryTheory/Limits/Shapes/BinaryProducts.lean @@ -1260,6 +1260,8 @@ def prodComparison (F : C ⥤ D) (A B : C) [HasBinaryProduct A B] prod.lift (F.map prod.fst) (F.map prod.snd) #align category_theory.limits.prod_comparison CategoryTheory.Limits.prodComparison +variable (A B) + @[reassoc (attr := simp)] theorem prodComparison_fst : prodComparison F A B ≫ prod.fst = F.map prod.fst := prod.lift_fst _ _ @@ -1272,6 +1274,8 @@ theorem prodComparison_snd : prodComparison F A B ≫ prod.snd = F.map prod.snd #align category_theory.limits.prod_comparison_snd CategoryTheory.Limits.prodComparison_snd #align category_theory.limits.prod_comparison_snd_assoc CategoryTheory.Limits.prodComparison_snd_assoc +variable {A B} + /-- Naturality of the `prodComparison` morphism in both arguments. -/ @[reassoc] theorem prodComparison_natural (f : A ⟶ A') (g : B ⟶ B') : diff --git a/Mathlib/CategoryTheory/Limits/Shapes/ConcreteCategory.lean b/Mathlib/CategoryTheory/Limits/Shapes/ConcreteCategory.lean new file mode 100644 index 0000000000000..9581753b7bb12 --- /dev/null +++ b/Mathlib/CategoryTheory/Limits/Shapes/ConcreteCategory.lean @@ -0,0 +1,288 @@ +/- +Copyright (c) 2023 Joël Riou. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Joël Riou, Scott Morrison, Adam Topaz +-/ +import Mathlib.CategoryTheory.Limits.Preserves.Shapes.BinaryProducts +import Mathlib.CategoryTheory.Limits.Preserves.Shapes.Products +import Mathlib.CategoryTheory.Limits.ConcreteCategory +import Mathlib.CategoryTheory.Limits.Shapes.Types +import Mathlib.CategoryTheory.Limits.Shapes.Multiequalizer +import Mathlib.CategoryTheory.Limits.Shapes.Kernels + +/-! +# Limits in concrete categories + +In this file, we combine the description of limits in `Types` and the API about +the preservation of products and pullbacks in order to describe these limits in a +concrete category `C`. + +If `F : J → C` is a family of objects in `C`, we define a bijection +`Limits.Concrete.productEquiv F : (forget C).obj (∏ F) ≃ ∀ j, F j`. + +Similarly, if `f₁ : X₁ ⟶ S` and `f₂ : X₂ ⟶ S` are two morphisms, the elements +in `pullback f₁ f₂` are identified by `Limits.Concrete.pullbackEquiv` +to compatible tuples of elements in `X₁ × X₂`. 
+ +Some results are also obtained for the terminal object, binary products, +wide-pullbacks, wide-pushouts, multiequalizers and cokernels. + +-/ + +universe w v u + +namespace CategoryTheory.Limits.Concrete + +attribute [local instance] ConcreteCategory.funLike ConcreteCategory.hasCoeToSort + +variable {C : Type u} [Category.{v} C] + +section Products + +variable [ConcreteCategory.{max w v} C] {J : Type w} (F : J → C) + [HasProduct F] [PreservesLimit (Discrete.functor F) (forget C)] + +/-- The equivalence `(forget C).obj (∏ F) ≃ ∀ j, F j` if `F : J → C` is a family of objects +in a concrete category `C`. -/ +noncomputable def productEquiv : (forget C).obj (∏ F) ≃ ∀ j, F j := + ((PreservesProduct.iso (forget C) F) ≪≫ (Types.productIso.{w, v} (fun j => F j))).toEquiv + +@[simp] +lemma productEquiv_apply_apply (x : (forget C).obj (∏ F)) (j : J) : + productEquiv F x j = Pi.π F j x := + congr_fun (piComparison_comp_π (forget C) F j) x + +@[simp] +lemma productEquiv_symm_apply_π (x : ∀ j, F j) (j : J) : + Pi.π F j ((productEquiv F).symm x) = x j := by + rw [← productEquiv_apply_apply, Equiv.apply_symm_apply] + +end Products + +section Terminal + +variable (C) +variable [ConcreteCategory.{w} C] [HasTerminal C] + [PreservesLimit (Functor.empty.{0} C) (forget C)] + +/-- The equivalence `(forget C).obj (⊤_ C) ≃ PUnit` when `C` is a concrete category. -/ +noncomputable def terminalEquiv : (forget C).obj (⊤_ C) ≃ PUnit := + (PreservesTerminal.iso (forget C) ≪≫ Types.terminalIso).toEquiv + +noncomputable instance : Unique ((forget C).obj (⊤_ C)) where + default := (terminalEquiv C).symm PUnit.unit + uniq _ := (terminalEquiv C).injective (Subsingleton.elim _ _) + +end Terminal + +section BinaryProducts + +variable [ConcreteCategory.{w} C] (X₁ X₂ : C) [HasBinaryProduct X₁ X₂] + [PreservesLimit (pair X₁ X₂) (forget C)] + +/-- The equivalence `(forget C).obj (X₁ ⨯ X₂) ≃ ((forget C).obj X₁) × ((forget C).obj X₂)` +if `X₁` and `X₂` are objects in a concrete category `C`. -/ +noncomputable def prodEquiv : (forget C).obj (X₁ ⨯ X₂) ≃ X₁ × X₂ := + (PreservesLimitPair.iso (forget C) X₁ X₂ ≪≫ Types.binaryProductIso _ _).toEquiv + +@[simp] +lemma prodEquiv_apply_fst (x : (forget C).obj (X₁ ⨯ X₂)) : + (prodEquiv X₁ X₂ x).fst = (Limits.prod.fst : X₁ ⨯ X₂ ⟶ X₁) x := + congr_fun (prodComparison_fst (forget C) X₁ X₂) x + +@[simp] +lemma prodEquiv_apply_snd (x : (forget C).obj (X₁ ⨯ X₂)) : + (prodEquiv X₁ X₂ x).snd = (Limits.prod.snd : X₁ ⨯ X₂ ⟶ X₂) x := + congr_fun (prodComparison_snd (forget C) X₁ X₂) x + +@[simp] +lemma prodEquiv_symm_apply_fst (x : X₁ × X₂) : + (Limits.prod.fst : X₁ ⨯ X₂ ⟶ X₁) ((prodEquiv X₁ X₂).symm x) = x.1 := by + obtain ⟨y, rfl⟩ := (prodEquiv X₁ X₂).surjective x + simp + +@[simp] +lemma prodEquiv_symm_apply_snd (x : X₁ × X₂) : + (Limits.prod.snd : X₁ ⨯ X₂ ⟶ X₂) ((prodEquiv X₁ X₂).symm x) = x.2 := by + obtain ⟨y, rfl⟩ := (prodEquiv X₁ X₂).surjective x + simp + +end BinaryProducts + +section Pullbacks + +variable [ConcreteCategory.{v} C] {X₁ X₂ S : C} (f₁ : X₁ ⟶ S) (f₂ : X₂ ⟶ S) + [HasPullback f₁ f₂] [PreservesLimit (cospan f₁ f₂) (forget C)] + +/-- In a concrete category `C`, given two morphisms `f₁ : X₁ ⟶ S` and `f₂ : X₂ ⟶ S`, +the elements in `pullback f₁ f₁` can be identified to compatible tuples of +elements in `X₁` and `X₂`. 
-/ +noncomputable def pullbackEquiv : + (forget C).obj (pullback f₁ f₂) ≃ { p : X₁ × X₂ // f₁ p.1 = f₂ p.2 } := + (PreservesPullback.iso (forget C) f₁ f₂ ≪≫ + Types.pullbackIsoPullback ((forget C).map f₁) ((forget C).map f₂)).toEquiv + +/-- Constructor for elements in a pullback in a concrete category. -/ +noncomputable def pullbackMk (x₁ : X₁) (x₂ : X₂) (h : f₁ x₁ = f₂ x₂) : + (forget C).obj (pullback f₁ f₂) := + (pullbackEquiv f₁ f₂).symm ⟨⟨x₁, x₂⟩, h⟩ + +lemma pullbackMk_surjective (x : (forget C).obj (pullback f₁ f₂)) : + ∃ (x₁ : X₁) (x₂ : X₂) (h : f₁ x₁ = f₂ x₂), x = pullbackMk f₁ f₂ x₁ x₂ h := by + obtain ⟨⟨⟨x₁, x₂⟩, h⟩, rfl⟩ := (pullbackEquiv f₁ f₂).symm.surjective x + exact ⟨x₁, x₂, h, rfl⟩ + +@[simp] +lemma pullbackMk_fst (x₁ : X₁) (x₂ : X₂) (h : f₁ x₁ = f₂ x₂) : + @pullback.fst _ _ _ _ _ f₁ f₂ _ (pullbackMk f₁ f₂ x₁ x₂ h) = x₁ := + (congr_fun (PreservesPullback.iso_inv_fst (forget C) f₁ f₂) _).trans + (congr_fun (Types.pullbackIsoPullback_inv_fst ((forget C).map f₁) ((forget C).map f₂)) _) + +@[simp] +lemma pullbackMk_snd (x₁ : X₁) (x₂ : X₂) (h : f₁ x₁ = f₂ x₂) : + @pullback.snd _ _ _ _ _ f₁ f₂ _ (pullbackMk f₁ f₂ x₁ x₂ h) = x₂ := + (congr_fun (PreservesPullback.iso_inv_snd (forget C) f₁ f₂) _).trans + (congr_fun (Types.pullbackIsoPullback_inv_snd ((forget C).map f₁) ((forget C).map f₂)) _) + +end Pullbacks + +section WidePullback + +variable [ConcreteCategory.{max w v} C] + +open WidePullback + +open WidePullbackShape + +theorem widePullback_ext {B : C} {ι : Type w} {X : ι → C} (f : ∀ j : ι, X j ⟶ B) + [HasWidePullback B X f] [PreservesLimit (wideCospan B X f) (forget C)] + (x y : ↑(widePullback B X f)) (h₀ : base f x = base f y) (h : ∀ j, π f j x = π f j y) : + x = y := by + apply Concrete.limit_ext + rintro (_ | j) + · exact h₀ + · apply h +#align category_theory.limits.concrete.wide_pullback_ext CategoryTheory.Limits.Concrete.widePullback_ext + +theorem widePullback_ext' {B : C} {ι : Type w} [Nonempty ι] {X : ι → C} + (f : ∀ j : ι, X j ⟶ B) [HasWidePullback.{w} B X f] + [PreservesLimit (wideCospan B X f) (forget C)] (x y : ↑(widePullback B X f)) + (h : ∀ j, π f j x = π f j y) : x = y := by + apply Concrete.widePullback_ext _ _ _ _ h + inhabit ι + simp only [← π_arrow f default, comp_apply, h] +#align category_theory.limits.concrete.wide_pullback_ext' CategoryTheory.Limits.Concrete.widePullback_ext' + +end WidePullback + +section Multiequalizer + +variable [ConcreteCategory.{max w v} C] + +theorem multiequalizer_ext {I : MulticospanIndex.{w} C} [HasMultiequalizer I] + [PreservesLimit I.multicospan (forget C)] (x y : ↑(multiequalizer I)) + (h : ∀ t : I.L, Multiequalizer.ι I t x = Multiequalizer.ι I t y) : x = y := by + apply Concrete.limit_ext + rintro (a | b) + · apply h + · rw [← limit.w I.multicospan (WalkingMulticospan.Hom.fst b), comp_apply, comp_apply] + simp [h] +#align category_theory.limits.concrete.multiequalizer_ext CategoryTheory.Limits.Concrete.multiequalizer_ext + +/-- An auxiliary equivalence to be used in `multiequalizerEquiv` below.-/ +def multiequalizerEquivAux (I : MulticospanIndex C) : + (I.multicospan ⋙ forget C).sections ≃ + { x : ∀ i : I.L, I.left i // ∀ i : I.R, I.fst i (x _) = I.snd i (x _) } where + toFun x := + ⟨fun i => x.1 (WalkingMulticospan.left _), fun i => by + have a := x.2 (WalkingMulticospan.Hom.fst i) + have b := x.2 (WalkingMulticospan.Hom.snd i) + rw [← b] at a + exact a⟩ + invFun x := + { val := fun j => + match j with + | WalkingMulticospan.left a => x.1 _ + | WalkingMulticospan.right b => I.fst b (x.1 _) + property := by + rintro (a | 
b) (a' | b') (f | f | f) + · simp + · rfl + · dsimp + exact (x.2 b').symm + · simp } + left_inv := by + intro x; ext (a | b) + · rfl + · rw [← x.2 (WalkingMulticospan.Hom.fst b)] + rfl + right_inv := by + intro x + ext i + rfl +#align category_theory.limits.concrete.multiequalizer_equiv_aux CategoryTheory.Limits.Concrete.multiequalizerEquivAux + +/-- The equivalence between the noncomputable multiequalizer and +the concrete multiequalizer. -/ +noncomputable def multiequalizerEquiv (I : MulticospanIndex.{w} C) [HasMultiequalizer I] + [PreservesLimit I.multicospan (forget C)] : + (multiequalizer I : C) ≃ + { x : ∀ i : I.L, I.left i // ∀ i : I.R, I.fst i (x _) = I.snd i (x _) } := + letI h1 := limit.isLimit I.multicospan + letI h2 := isLimitOfPreserves (forget C) h1 + letI E := h2.conePointUniqueUpToIso (Types.limitConeIsLimit.{w, v} _) + Equiv.trans E.toEquiv (Concrete.multiequalizerEquivAux.{w, v} I) +#align category_theory.limits.concrete.multiequalizer_equiv CategoryTheory.Limits.Concrete.multiequalizerEquiv + +@[simp] +theorem multiequalizerEquiv_apply (I : MulticospanIndex.{w} C) [HasMultiequalizer I] + [PreservesLimit I.multicospan (forget C)] (x : ↑(multiequalizer I)) (i : I.L) : + ((Concrete.multiequalizerEquiv I) x : ∀ i : I.L, I.left i) i = Multiequalizer.ι I i x := + rfl +#align category_theory.limits.concrete.multiequalizer_equiv_apply CategoryTheory.Limits.Concrete.multiequalizerEquiv_apply + +end Multiequalizer + +section WidePushout + +open WidePushout + +open WidePushoutShape + +variable [ConcreteCategory.{v} C] + +theorem widePushout_exists_rep {B : C} {α : Type _} {X : α → C} (f : ∀ j : α, B ⟶ X j) + [HasWidePushout.{v} B X f] [PreservesColimit (wideSpan B X f) (forget C)] + (x : ↑(widePushout B X f)) : (∃ y : B, head f y = x) ∨ ∃ (i : α) (y : X i), ι f i y = x := by + obtain ⟨_ | j, y, rfl⟩ := Concrete.colimit_exists_rep _ x + · left + use y + rfl + · right + use j, y + rfl +#align category_theory.limits.concrete.wide_pushout_exists_rep CategoryTheory.Limits.Concrete.widePushout_exists_rep + +theorem widePushout_exists_rep' {B : C} {α : Type _} [Nonempty α] {X : α → C} + (f : ∀ j : α, B ⟶ X j) [HasWidePushout.{v} B X f] [PreservesColimit (wideSpan B X f) (forget C)] + (x : ↑(widePushout B X f)) : ∃ (i : α) (y : X i), ι f i y = x := by + rcases Concrete.widePushout_exists_rep f x with (⟨y, rfl⟩ | ⟨i, y, rfl⟩) + · inhabit α + use default, f _ y + simp only [← arrow_ι _ default, comp_apply] + · use i, y +#align category_theory.limits.concrete.wide_pushout_exists_rep' CategoryTheory.Limits.Concrete.widePushout_exists_rep' + +end WidePushout + +-- We don't mark this as an `@[ext]` lemma as we don't always want to work elementwise. +theorem cokernel_funext {C : Type*} [Category C] [HasZeroMorphisms C] [ConcreteCategory C] + {M N K : C} {f : M ⟶ N} [HasCokernel f] {g h : cokernel f ⟶ K} + (w : ∀ n : N, g (cokernel.π f n) = h (cokernel.π f n)) : g = h := by + ext x + simpa using w x +#align category_theory.limits.cokernel_funext CategoryTheory.Limits.Concrete.cokernel_funext + +-- TODO: Add analogous lemmas about coproducts and coequalizers. 
+ +end CategoryTheory.Limits.Concrete diff --git a/Mathlib/CategoryTheory/Limits/Shapes/FiniteLimits.lean b/Mathlib/CategoryTheory/Limits/Shapes/FiniteLimits.lean index dbe86e885c12c..d4acfa61986c6 100644 --- a/Mathlib/CategoryTheory/Limits/Shapes/FiniteLimits.lean +++ b/Mathlib/CategoryTheory/Limits/Shapes/FiniteLimits.lean @@ -101,7 +101,7 @@ instance (priority := 100) hasColimitsOfShape_of_hasFiniteColimits (J : Type w) instance (priority := 100) hasFiniteColimits_of_hasColimitsOfSize [HasColimitsOfSize.{v', u'} C] : HasFiniteColimits C where out := fun J hJ hJ' => - haveI := hasColimitsOfSize_shrink.{0, 0} C + haveI := hasColimitsOfSizeShrink.{0, 0} C let F := @FinCategory.equivAsType J (@FinCategory.fintypeObj J hJ hJ') hJ hJ' @hasColimitsOfShape_of_equivalence (@FinCategory.AsType J (@FinCategory.fintypeObj J hJ hJ')) (@FinCategory.categoryAsType J (@FinCategory.fintypeObj J hJ hJ') hJ hJ') _ _ J hJ F _ diff --git a/Mathlib/CategoryTheory/Limits/Shapes/Types.lean b/Mathlib/CategoryTheory/Limits/Shapes/Types.lean index 4dfee874d17aa..8017f26ca3f0a 100644 --- a/Mathlib/CategoryTheory/Limits/Shapes/Types.lean +++ b/Mathlib/CategoryTheory/Limits/Shapes/Types.lean @@ -54,7 +54,7 @@ instance : HasProducts.{v} (Type v) := inferInstance /-- A restatement of `Types.Limit.lift_π_apply` that uses `Pi.π` and `Pi.lift`. -/ @[simp 1001] -theorem pi_lift_π_apply [UnivLE.{v, u}] {β : Type v} (f : β → Type u) {P : Type u} +theorem pi_lift_π_apply {β : Type v} [Small.{u} β] (f : β → Type u) {P : Type u} (s : ∀ b, P ⟶ f b) (b : β) (x : P) : (Pi.π f b : (piObj f) → f b) (@Pi.lift β _ _ f _ P s x) = s b x := congr_fun (limit.lift_π (Fan.mk P s) ⟨b⟩) x @@ -70,7 +70,7 @@ theorem pi_lift_π_apply' {β : Type v} (f : β → Type v) {P : Type v} /-- A restatement of `Types.Limit.map_π_apply` that uses `Pi.π` and `Pi.map`. -/ @[simp 1001] -theorem pi_map_π_apply [UnivLE.{v, u}] {β : Type v} {f g : β → Type u} +theorem pi_map_π_apply {β : Type v} [Small.{u} β] {f g : β → Type u} (α : ∀ j, f j ⟶ g j) (b : β) (x) : (Pi.π g b : ∏ g → g b) (Pi.map α x) = α b ((Pi.π f b : ∏ f → f b) x) := Limit.map_π_apply.{v, u} _ _ _ @@ -396,45 +396,48 @@ theorem productIso_inv_comp_π {J : Type v} (F : J → TypeMax.{v, u}) (j : J) : limit.isoLimitCone_inv_π (productLimitCone.{v, u} F) ⟨j⟩ #align category_theory.limits.types.product_iso_inv_comp_π CategoryTheory.Limits.Types.productIso_inv_comp_π -namespace UnivLE +namespace Small + +variable {J : Type v} (F : J → Type u) [Small.{u} J] /-- -A variant of `productLimitCone` using a `UnivLE` hypothesis rather than a function to `TypeMax`. +A variant of `productLimitCone` using a `Small` hypothesis rather than a function to `TypeMax`. -/ -noncomputable def productLimitCone {J : Type v} (F : J → Type u) [UnivLE.{v, u}] : +noncomputable def productLimitCone : Limits.LimitCone (Discrete.functor F) where cone := { pt := Shrink (∀ j, F j) - π := Discrete.natTrans (fun ⟨j⟩ f => (equivShrink _).symm f j) } + π := Discrete.natTrans (fun ⟨j⟩ f => (equivShrink (∀ j, F j)).symm f j) } isLimit := + have : Small.{u} (∀ j, F j) := inferInstance { lift := fun s x => (equivShrink _) (fun j => s.π.app ⟨j⟩ x) uniq := fun s m w => funext fun x => Shrink.ext <| funext fun j => by simpa using (congr_fun (w ⟨j⟩) x : _) } /-- The categorical product in `Type u` indexed in `Type v` is the type theoretic product `Π j, F j`, after shrinking back to `Type u`. 
-/ -noncomputable def productIso {J : Type v} (F : J → Type u) [UnivLE.{v, u}] : +noncomputable def productIso : (∏ F : Type u) ≅ Shrink.{u} (∀ j, F j) := limit.isoLimitCone (productLimitCone.{v, u} F) @[simp] -theorem productIso_hom_comp_eval {J : Type v} (F : J → Type u) [UnivLE.{v, u}] (j : J) : - ((productIso.{v, u} F).hom ≫ fun f => (equivShrink _).symm f j) = Pi.π F j := +theorem productIso_hom_comp_eval (j : J) : + ((productIso.{v, u} F).hom ≫ fun f => (equivShrink (∀ j, F j)).symm f j) = Pi.π F j := limit.isoLimitCone_hom_π (productLimitCone.{v, u} F) ⟨j⟩ -- Porting note: -- `elementwise` seems to be broken. Applied to the previous lemma, it should produce: @[simp] -theorem productIso_hom_comp_eval_apply {J : Type v} (F : J → Type u) [UnivLE.{v, u}] (j : J) (x) : - (equivShrink _).symm ((productIso F).hom x) j = Pi.π F j x := +theorem productIso_hom_comp_eval_apply (j : J) (x) : + (equivShrink (∀ j, F j)).symm ((productIso F).hom x) j = Pi.π F j x := congr_fun (productIso_hom_comp_eval F j) x @[elementwise (attr := simp)] -theorem productIso_inv_comp_π {J : Type v} (F : J → Type u) [UnivLE.{v, u}] (j : J) : - (productIso.{v, u} F).inv ≫ Pi.π F j = fun f => ((equivShrink _).symm f) j := +theorem productIso_inv_comp_π (j : J) : + (productIso.{v, u} F).inv ≫ Pi.π F j = fun f => ((equivShrink (∀ j, F j)).symm f) j := limit.isoLimitCone_inv_π (productLimitCone.{v, u} F) ⟨j⟩ -end UnivLE +end Small /-- The category of types has `Σ j, f j` as the coproduct of a type family `f : J → Type`. -/ diff --git a/Mathlib/CategoryTheory/Limits/Shapes/ZeroMorphisms.lean b/Mathlib/CategoryTheory/Limits/Shapes/ZeroMorphisms.lean index 5c2f1b0dda6fe..bf3472b2f108f 100644 --- a/Mathlib/CategoryTheory/Limits/Shapes/ZeroMorphisms.lean +++ b/Mathlib/CategoryTheory/Limits/Shapes/ZeroMorphisms.lean @@ -50,7 +50,7 @@ variable (D : Type u') [Category.{v'} D] and compositions of zero morphisms with anything give the zero morphism. -/ class HasZeroMorphisms where /-- Every morphism space has zero -/ - [Zero : ∀ X Y : C, Zero (X ⟶ Y)] + [zero : ∀ X Y : C, Zero (X ⟶ Y)] /-- `f` composed with `0` is `0` -/ comp_zero : ∀ {X Y : C} (f : X ⟶ Y) (Z : C), f ≫ (0 : Y ⟶ Z) = (0 : X ⟶ Z) := by aesop_cat /-- `0` composed with `f` is `0` -/ @@ -59,7 +59,7 @@ class HasZeroMorphisms where #align category_theory.limits.has_zero_morphisms.comp_zero' CategoryTheory.Limits.HasZeroMorphisms.comp_zero #align category_theory.limits.has_zero_morphisms.zero_comp' CategoryTheory.Limits.HasZeroMorphisms.zero_comp -attribute [instance] HasZeroMorphisms.Zero +attribute [instance] HasZeroMorphisms.zero variable {C} @@ -76,19 +76,19 @@ theorem zero_comp [HasZeroMorphisms C] {X : C} {Y Z : C} {f : Y ⟶ Z} : #align category_theory.limits.zero_comp CategoryTheory.Limits.zero_comp instance hasZeroMorphismsPEmpty : HasZeroMorphisms (Discrete PEmpty) where - Zero := by aesop_cat + zero := by aesop_cat #align category_theory.limits.has_zero_morphisms_pempty CategoryTheory.Limits.hasZeroMorphismsPEmpty instance hasZeroMorphismsPUnit : HasZeroMorphisms (Discrete PUnit) where - Zero := fun X Y => by repeat (constructor) + zero X Y := by repeat (constructor) #align category_theory.limits.has_zero_morphisms_punit CategoryTheory.Limits.hasZeroMorphismsPUnit namespace HasZeroMorphisms /-- This lemma will be immediately superseded by `ext`, below. 
-/ private theorem ext_aux (I J : HasZeroMorphisms C) - (w : ∀ X Y : C, (I.Zero X Y).zero = (J.Zero X Y).zero) : I = J := by - have : I.Zero = J.Zero := by + (w : ∀ X Y : C, (I.zero X Y).zero = (J.zero X Y).zero) : I = J := by + have : I.zero = J.zero := by funext X Y specialize w X Y apply congrArg Zero.mk w @@ -107,10 +107,10 @@ See, particularly, the note on `zeroMorphismsOfZeroObject` below. theorem ext (I J : HasZeroMorphisms C) : I = J := by apply ext_aux intro X Y - have : (I.Zero X Y).zero ≫ (J.Zero Y Y).zero = (I.Zero X Y).zero := by - apply I.zero_comp X (J.Zero Y Y).zero - have that : (I.Zero X Y).zero ≫ (J.Zero Y Y).zero = (J.Zero X Y).zero := by - apply J.comp_zero (I.Zero X Y).zero Y + have : (I.zero X Y).zero ≫ (J.zero Y Y).zero = (I.zero X Y).zero := by + apply I.zero_comp X (J.zero Y Y).zero + have that : (I.zero X Y).zero ≫ (J.zero Y Y).zero = (J.zero X Y).zero := by + apply J.comp_zero (I.zero X Y).zero Y rw[←this,←that] #align category_theory.limits.has_zero_morphisms.ext CategoryTheory.Limits.HasZeroMorphisms.ext @@ -122,7 +122,7 @@ end HasZeroMorphisms open Opposite HasZeroMorphisms instance hasZeroMorphismsOpposite [HasZeroMorphisms C] : HasZeroMorphisms Cᵒᵖ where - Zero X Y := ⟨(0 : unop Y ⟶ unop X).op⟩ + zero X Y := ⟨(0 : unop Y ⟶ unop X).op⟩ comp_zero f Z := congr_arg Quiver.Hom.op (HasZeroMorphisms.zero_comp (unop Z) f.unop) zero_comp X {Y Z} (f : Y ⟶ Z) := congrArg Quiver.Hom.op (HasZeroMorphisms.comp_zero f.unop (unop X)) @@ -163,7 +163,7 @@ section variable [HasZeroMorphisms D] instance : HasZeroMorphisms (C ⥤ D) where - Zero F G := ⟨{ app := fun X => 0 }⟩ + zero F G := ⟨{ app := fun X => 0 }⟩ comp_zero := fun η H => by ext X; dsimp; apply comp_zero zero_comp := fun F {G H} η => by @@ -256,7 +256,7 @@ end IsZero code should generally ask for an instance of `HasZeroMorphisms` separately, even if it already asks for an instance of `HasZeroObjects`. -/ def IsZero.hasZeroMorphisms {O : C} (hO : IsZero O) : HasZeroMorphisms C where - Zero X Y := { zero := hO.from_ X ≫ hO.to_ Y } + zero X Y := { zero := hO.from_ X ≫ hO.to_ Y } zero_comp X {Y Z} f := by change (hO.from_ X ≫ hO.to_ Y) ≫ f = hO.from_ X ≫ hO.to_ Z rw [Category.assoc] @@ -284,7 +284,7 @@ open ZeroObject code should generally ask for an instance of `HasZeroMorphisms` separately, even if it already asks for an instance of `HasZeroObjects`. -/ def zeroMorphismsOfZeroObject : HasZeroMorphisms C where - Zero X Y := { zero := (default : X ⟶ 0) ≫ default } + zero X Y := { zero := (default : X ⟶ 0) ≫ default } zero_comp X {Y Z} f := by change ((default : X ⟶ 0) ≫ default) ≫ f = (default : X ⟶ 0) ≫ default rw [Category.assoc] diff --git a/Mathlib/CategoryTheory/Limits/Types.lean b/Mathlib/CategoryTheory/Limits/Types.lean index 1f3849651075c..5eb0ec094524e 100644 --- a/Mathlib/CategoryTheory/Limits/Types.lean +++ b/Mathlib/CategoryTheory/Limits/Types.lean @@ -87,8 +87,8 @@ variable {J : Type v} [SmallCategory J] /-! We now provide two distinct implementations in the category of types. -The first, in the `CategoryTheory.Limits.Types.UnivLE` namespace, -assumes `UnivLE.{v, u}` and constructs `v`-small limits in `Type u`. +The first, in the `CategoryTheory.Limits.Types.Small` namespace, +assumes `Small.{u} J` and constructs `J`-indexed limits in `Type u`. The second, in the `CategoryTheory.Limits.Types.TypeMax` namespace constructs limits for functors `F : J ⥤ TypeMax.{v, u}`, for `J : Type v`. 
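As an illustrative sketch (the binders here are hypothetical), once `Small.{u} J` holds,
the `HasLimit` instance provided below is found by instance search:

```lean
example {J : Type v} [SmallCategory J] [Small.{u} J] (F : J ⥤ Type u) : HasLimit F :=
  inferInstance
```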
@@ -99,9 +99,9 @@ Hopefully we might be able to entirely remove the `TypeMax` constructions, but for now they are useful glue for the later parts of the library. -/ -namespace UnivLE +namespace Small -variable [UnivLE.{v, u}] +variable [Small.{u} J] /-- (internal implementation) the limit cone of a functor, implemented as flat sections of a pi type @@ -110,27 +110,27 @@ implemented as flat sections of a pi type noncomputable def limitCone (F : J ⥤ Type u) : Cone F where pt := Shrink F.sections π := - { app := fun j u => ((equivShrink _).symm u).val j + { app := fun j u => ((equivShrink F.sections).symm u).val j naturality := fun j j' f => by funext x simp } @[ext] lemma limitCone_pt_ext (F : J ⥤ Type u) {x y : (limitCone F).pt} - (w : (equivShrink _).symm x = (equivShrink _).symm y) : x = y := by + (w : (equivShrink F.sections).symm x = (equivShrink F.sections).symm y) : x = y := by aesop /-- (internal implementation) the fact that the proposed limit cone is the limit -/ @[simps] noncomputable def limitConeIsLimit (F : J ⥤ Type u) : IsLimit (limitCone.{v, u} F) where - lift s v := (equivShrink _) + lift s v := equivShrink F.sections { val := fun j => s.π.app j v property := fun f => congr_fun (Cone.w s f) _ } uniq := fun _ _ w => by ext x j simpa using congr_fun (w j) x -end UnivLE +end Small -- TODO: If `UnivLE` works out well, we will eventually want to deprecate these -- definitions, and probably as a first step put them in namespace or otherwise rename them. @@ -174,7 +174,11 @@ we leave them in the main `CategoryTheory.Limits.Types` namespace. section UnivLE open UnivLE -variable [UnivLE.{v, u}] + +instance hasLimit [Small.{u} J] (F : J ⥤ Type u) : HasLimit F := + HasLimit.mk + { cone := Small.limitCone.{v, u} F + isLimit := Small.limitConeIsLimit F } /-- The category of types has all limits. @@ -183,17 +187,11 @@ More specifically, when `UnivLE.{v, u}`, the category `Type u` has all `v`-small See . -/ -instance (priority := 1300) hasLimitsOfSize : HasLimitsOfSize.{v} (Type u) where - has_limits_of_shape _ := - { has_limit := fun F => - HasLimit.mk - { cone := UnivLE.limitCone.{v, u} F - isLimit := UnivLE.limitConeIsLimit F } } +instance (priority := 1300) hasLimitsOfSize [UnivLE.{v, u}] : HasLimitsOfSize.{v} (Type u) where + has_limits_of_shape _ := { } #align category_theory.limits.types.has_limits_of_size CategoryTheory.Limits.Types.hasLimitsOfSize -instance hasLimit (F : J ⥤ Type u) : HasLimit F := - (Types.hasLimitsOfSize.{v, u}.has_limits_of_shape J).has_limit F - +variable [Small.{u} J] /-- The equivalence between the abstract limit of `F` in `TypeMax.{v, u}` and the "concrete" definition as the sections of `F`. -/ diff --git a/Mathlib/CategoryTheory/Limits/VanKampen.lean b/Mathlib/CategoryTheory/Limits/VanKampen.lean index b95b00a45ef28..0905eefa290bc 100644 --- a/Mathlib/CategoryTheory/Limits/VanKampen.lean +++ b/Mathlib/CategoryTheory/Limits/VanKampen.lean @@ -3,13 +3,12 @@ Copyright (c) 2022 Andrew Yang. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. 
Authors: Andrew Yang -/ +import Mathlib.CategoryTheory.Adjunction.FullyFaithful import Mathlib.CategoryTheory.Limits.Shapes.CommSq +import Mathlib.CategoryTheory.Limits.Shapes.Multiequalizer import Mathlib.CategoryTheory.Limits.Shapes.StrictInitial -import Mathlib.CategoryTheory.Limits.Shapes.Types -import Mathlib.Topology.Category.TopCat.Limits.Pullbacks import Mathlib.CategoryTheory.Limits.FunctorCategory import Mathlib.CategoryTheory.Limits.Constructions.FiniteProductsOfBinaryProducts -import Mathlib.CategoryTheory.Adjunction.FullyFaithful #align_import category_theory.extensive from "leanprover-community/mathlib"@"178a32653e369dce2da68dc6b2694e385d484ef1" diff --git a/Mathlib/CategoryTheory/Limits/Yoneda.lean b/Mathlib/CategoryTheory/Limits/Yoneda.lean index 2abc98651895e..909676dff7ed7 100644 --- a/Mathlib/CategoryTheory/Limits/Yoneda.lean +++ b/Mathlib/CategoryTheory/Limits/Yoneda.lean @@ -162,7 +162,4 @@ instance coyonedaFunctorReflectsLimits : ReflectsLimits (@coyoneda D _) := infer end CategoryTheory -assert_not_exists Set.range - --- Porting note: after the port see if this import can be removed --- assert_not_exists AddCommMonoid +assert_not_exists AddCommMonoid diff --git a/Mathlib/CategoryTheory/Preadditive/AdditiveFunctor.lean b/Mathlib/CategoryTheory/Preadditive/AdditiveFunctor.lean index 1fefebc07ebe0..684fdcade21fd 100644 --- a/Mathlib/CategoryTheory/Preadditive/AdditiveFunctor.lean +++ b/Mathlib/CategoryTheory/Preadditive/AdditiveFunctor.lean @@ -48,8 +48,8 @@ namespace Functor section -variable {C D : Type*} [Category C] [Category D] [Preadditive C] [Preadditive D] (F : C ⥤ D) - [Functor.Additive F] +variable {C D E : Type*} [Category C] [Category D] [Category E] + [Preadditive C] [Preadditive D] [Preadditive E] (F : C ⥤ D) [Functor.Additive F] @[simp] theorem map_add {X Y : C} {f g : X ⟶ Y} : F.map (f + g) = F.map f + F.map g := @@ -103,6 +103,36 @@ theorem map_sum {X Y : C} {α : Type*} (f : α → (X ⟶ Y)) (s : Finset α) : (F.mapAddHom : (X ⟶ Y) →+ _).map_sum f s #align category_theory.functor.map_sum CategoryTheory.Functor.map_sum +variable {F} + +lemma additive_of_iso {G : C ⥤ D} (e : F ≅ G) : G.Additive := by + constructor + intro X Y f g + simp only [← NatIso.naturality_1 e (f + g), map_add, Preadditive.add_comp, + NatTrans.naturality, Preadditive.comp_add, Iso.inv_hom_id_app_assoc] + +variable (F) + +lemma additive_of_full_essSurj_comp [Full F] [EssSurj F] (G : D ⥤ E) + [(F ⋙ G).Additive] : G.Additive where + map_add {X Y f g} := by + obtain ⟨f', hf'⟩ := F.map_surjective ((F.objObjPreimageIso X).hom ≫ f ≫ + (F.objObjPreimageIso Y).inv) + obtain ⟨g', hg'⟩ := F.map_surjective ((F.objObjPreimageIso X).hom ≫ g ≫ + (F.objObjPreimageIso Y).inv) + simp only [← cancel_mono (G.map (F.objObjPreimageIso Y).inv), + ← cancel_epi (G.map (F.objObjPreimageIso X).hom), + Preadditive.add_comp, Preadditive.comp_add, ← Functor.map_comp] + erw [← hf', ← hg', ← (F ⋙ G).map_add] + dsimp + rw [F.map_add] + +lemma additive_of_comp_faithful + (F : C ⥤ D) (G : D ⥤ E) [G.Additive] [(F ⋙ G).Additive] [Faithful G] : + F.Additive where + map_add {_ _ f₁ f₂} := G.map_injective (by + rw [← Functor.comp_map, G.map_add, (F ⋙ G).map_add, Functor.comp_map, Functor.comp_map]) + end section InducedCategory diff --git a/Mathlib/CategoryTheory/Preadditive/Basic.lean b/Mathlib/CategoryTheory/Preadditive/Basic.lean index bf7b43315c2e7..4bb3ec6272a39 100644 --- a/Mathlib/CategoryTheory/Preadditive/Basic.lean +++ b/Mathlib/CategoryTheory/Preadditive/Basic.lean @@ -198,7 +198,7 @@ instance {P Q : C} {f : P ⟶ Q} 
[Mono f] : Mono (-f) := ⟨fun g g' H => by rwa [comp_neg, comp_neg, ← neg_comp, ← neg_comp, cancel_mono, neg_inj] at H⟩ instance (priority := 100) preadditiveHasZeroMorphisms : HasZeroMorphisms C where - Zero := inferInstance + zero := inferInstance comp_zero f R := show leftComp R f 0 = 0 from map_zero _ zero_comp P _ _ f := show rightComp P f 0 = 0 from map_zero _ #align category_theory.preadditive.preadditive_has_zero_morphisms CategoryTheory.Preadditive.preadditiveHasZeroMorphisms diff --git a/Mathlib/CategoryTheory/Preadditive/Mat.lean b/Mathlib/CategoryTheory/Preadditive/Mat.lean index a70848c208816..fec2909e9bfae 100644 --- a/Mathlib/CategoryTheory/Preadditive/Mat.lean +++ b/Mathlib/CategoryTheory/Preadditive/Mat.lean @@ -66,12 +66,12 @@ variable (C : Type u₁) [Category.{v₁} C] [Preadditive C] -/ structure Mat_ where ι : Type - [F : Fintype ι] + [fintype : Fintype ι] X : ι → C set_option linter.uppercaseLean3 false in #align category_theory.Mat_ CategoryTheory.Mat_ -attribute [instance] Mat_.F +attribute [instance] Mat_.fintype namespace Mat_ diff --git a/Mathlib/CategoryTheory/Quotient.lean b/Mathlib/CategoryTheory/Quotient.lean index 88792adc0d496..6d6f65b09d557 100644 --- a/Mathlib/CategoryTheory/Quotient.lean +++ b/Mathlib/CategoryTheory/Quotient.lean @@ -121,7 +121,7 @@ noncomputable instance fullFunctor : Full (functor r) where dsimp [functor] simp -instance : EssSurj (functor r) where +instance essSurj_functor : EssSurj (functor r) where mem_essImage Y := ⟨Y.as, ⟨eqToIso (by ext diff --git a/Mathlib/CategoryTheory/Sites/CoverLifting.lean b/Mathlib/CategoryTheory/Sites/CoverLifting.lean index c2a1bcc0622e2..dcb6361621336 100644 --- a/Mathlib/CategoryTheory/Sites/CoverLifting.lean +++ b/Mathlib/CategoryTheory/Sites/CoverLifting.lean @@ -10,30 +10,28 @@ import Mathlib.CategoryTheory.Sites.CoverPreserving #align_import category_theory.sites.cover_lifting from "leanprover-community/mathlib"@"14b69e9f3c16630440a2cbd46f1ddad0d561dee7" /-! -# Cover-lifting functors between sites. +# Cocontinuous functors between sites. -We define cover-lifting functors between sites as functors that pull covering sieves back to -covering sieves. This concept is also known as *cocontinuous functors* or -*cover-reflecting functors*, but we have chosen this name following [MM92] in order to avoid -potential naming collision or confusion with the general definition of cocontinuous functors -between categories as functors preserving small colimits. - -The definition given here seems stronger than the definition found elsewhere, -but they are actually equivalent via `CategoryTheory.GrothendieckTopology.superset_covering`. -(The precise statement is not formalized, but follows from it quite trivially). +We define cocontinuous functors between sites as functors that pull covering sieves back to +covering sieves. This concept is also known as *cover-lifting* or +*cover-reflecting functors*. We use the original terminology and definition of SGA 4 III 2.1. +However, the notion of cocontinuous functor should not be confused with +the general definition of cocontinuous functors between categories as functors preserving +small colimits. 
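As a small illustration (with hypothetical binders), a cocontinuous functor lets one pull
a covering sieve of `G.obj U` back to a covering sieve of `U`, via `Functor.cover_lift` below:

```lean
example {C D : Type*} [Category C] [Category D] (G : C ⥤ D)
    (J : GrothendieckTopology C) (K : GrothendieckTopology D) [G.IsCocontinuous J K]
    {U : C} {S : Sieve (G.obj U)} (hS : S ∈ K (G.obj U)) :
    S.functorPullback G ∈ J U :=
  G.cover_lift J K hS
```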
## Main definitions -* `CategoryTheory.CoverLifting`: a functor between sites is cover-lifting if it +* `CategoryTheory.Functor.IsCocontinuous`: a functor between sites is cocontinuous if it pulls back covering sieves to covering sieves -* `CategoryTheory.Sites.copullback`: A cover-lifting functor `G : (C, J) ⥤ (D, K)` induces a - morphism of sites in the same direction as the functor. +* `CategoryTheory.Functor.sheafPushforwardCocontinuous`: A cocontinuous functor + `G : (C, J) ⥤ (D, K)` induces a functor `Sheaf J A ⥤ Sheaf K A`. ## Main results -* `CategoryTheory.ran_isSheaf_of_coverLifting`: If `G : C ⥤ D` is cover_lifting, then +* `CategoryTheory.ran_isSheaf_of_isCocontinuous`: If `G : C ⥤ D` is cocontinuous, then `Ran G.op` (`ₚu`) as a functor `(Cᵒᵖ ⥤ A) ⥤ (Dᵒᵖ ⥤ A)` of presheaves maps sheaves to sheaves. -* `CategoryTheory.Sites.pullbackCopullbackAdjunction`: If `G : (C, J) ⥤ (D, K)` is cover-lifting, - cover-preserving, and compatible-preserving, then `pullback G` and `copullback G` are adjoint. +* `CategoryTheory.Sites.pullbackCopullbackAdjunction`: If `G : (C, J) ⥤ (D, K)` is cocontinuous + and continuous, then `G.sheafPushforwardContinuous A J K` and + `G.sheafPushforwardCocontinuous A J K` are adjoint. ## References @@ -60,40 +58,44 @@ open CategoryTheory.Limits namespace CategoryTheory -section CoverLifting +section IsCocontinuous -variable {C : Type*} [Category C] {D : Type*} [Category D] {E : Type*} [Category E] +variable {C : Type*} [Category C] {D : Type*} [Category D] {E : Type*} [Category E] (G : C ⥤ D) + (G' : D ⥤ E) variable (J : GrothendieckTopology C) (K : GrothendieckTopology D) variable {L : GrothendieckTopology E} -/-- A functor `G : (C, J) ⥤ (D, K)` between sites is called to have the cover-lifting property +/-- A functor `G : (C, J) ⥤ (D, K)` between sites is called cocontinuous (SGA 4 III 2.1) if for all covering sieves `R` in `D`, `R.pullback G` is a covering sieve in `C`. -/ -- porting note: removed `@[nolint has_nonempty_instance]` -structure CoverLifting (G : C ⥤ D) : Prop where +class Functor.IsCocontinuous : Prop where cover_lift : ∀ {U : C} {S : Sieve (G.obj U)} (_ : S ∈ K (G.obj U)), S.functorPullback G ∈ J U -#align category_theory.cover_lifting CategoryTheory.CoverLifting +#align category_theory.cover_lifting CategoryTheory.Functor.IsCocontinuous -/-- The identity functor on a site is cover-lifting. -/ -theorem idCoverLifting : CoverLifting J J (𝟭 _) := - ⟨fun h => by simpa using h⟩ -#align category_theory.id_cover_lifting CategoryTheory.idCoverLifting +lemma Functor.cover_lift [G.IsCocontinuous J K] {U : C} {S : Sieve (G.obj U)} + (hS : S ∈ K (G.obj U)) : S.functorPullback G ∈ J U := + IsCocontinuous.cover_lift hS -variable {J K} +/-- The identity functor on a site is cocontinuous. -/ +instance isCocontinuous_id : Functor.IsCocontinuous (𝟭 C) J J := + ⟨fun h => by simpa using h⟩ +#align category_theory.id_cover_lifting CategoryTheory.isCocontinuous_id -/-- The composition of two cover-lifting functors are cover-lifting -/ -theorem compCoverLifting {F : C ⥤ D} (hu : CoverLifting J K F) {G : D ⥤ E} - (hv : CoverLifting K L G) : CoverLifting J L (F ⋙ G) := - ⟨fun h => hu.cover_lift (hv.cover_lift h)⟩ -#align category_theory.comp_cover_lifting CategoryTheory.compCoverLifting +/-- The composition of two cocontinuous functors is cocontinuous. 
-/ +theorem isCocontinuous_comp [G.IsCocontinuous J K] [G'.IsCocontinuous K L] : + (G ⋙ G').IsCocontinuous J L where + cover_lift h := G.cover_lift J K (G'.cover_lift K L h) +#align category_theory.comp_cover_lifting CategoryTheory.isCocontinuous_comp -end CoverLifting +end IsCocontinuous /-! -We will now prove that `Ran G.op` (`ₚu`) maps sheaves to sheaves if `G` is cover-lifting. This can -be found in . However, the proof given here uses the +We will now prove that `Ran G.op` (`ₚu`) maps sheaves to sheaves if `G` +is cocontinuous (SGA 4 III 2.2). This can also be be found in +. However, the proof given there uses the amalgamation definition of sheaves, and thus does not require that `C` or `D` has categorical pullbacks. @@ -105,7 +107,7 @@ we can glue them into a morphism `X ⟶ 𝒢(U)`. Since the presheaf `𝒢 := (Ran G.op).obj ℱ.val` is defined via `𝒢(U) = lim_{G(V) ⊆ U} ℱ(V)`, for gluing the family `x` into a `X ⟶ 𝒢(U)`, it suffices to provide a `X ⟶ ℱ(Y)` for each `G(Y) ⊆ U`. This can be done since `{ Y' ⊆ Y : G(Y') ⊆ U ∈ S}` is a covering sieve for `Y` on -`C` (by the cover-lifting property of `G`). Thus the morphisms `X ⟶ 𝒢(G(Y')) ⟶ ℱ(Y')` can be +`C` (by the cocontinuity `G`). Thus the morphisms `X ⟶ 𝒢(G(Y')) ⟶ ℱ(Y')` can be glued into a morphism `X ⟶ ℱ(Y)`. This is done in `get_sections`. In `glued_limit_cone`, we verify these obtained sections are indeed compatible, and thus we obtain @@ -113,15 +115,17 @@ A `X ⟶ 𝒢(U)`. The remaining work is to verify that this is indeed the amalg -/ -variable {C D : Type u} [Category.{v} C] [Category.{v} D] +variable {C D : Type u} [Category.{v} C] [Category.{v} D] (G : C ⥤ D) variable {A : Type w} [Category.{max u v} A] [HasLimits A] variable {J : GrothendieckTopology C} {K : GrothendieckTopology D} + [G.IsCocontinuous J K] -namespace RanIsSheafOfCoverLifting +namespace RanIsSheafOfIsCocontinuous -variable {G : C ⥤ D} (hu : CoverLifting J K G) (ℱ : Sheaf J A) +variable {G} +variable (ℱ : Sheaf J A) variable {X : A} {U : D} (S : Sieve U) (hS : S ∈ K U) @@ -138,7 +142,7 @@ def pulledbackFamily (Y : StructuredArrow (op U) G.op) := ((x.pullback Y.hom.unop).functorPullback G).compPresheafMap (show _ ⟶ _ from whiskerRight ((Ran.adjunction A G.op).counit.app ℱ.val) (coyoneda.obj (op X))) set_option linter.uppercaseLean3 false in -#align category_theory.Ran_is_sheaf_of_cover_lifting.pulledback_family CategoryTheory.RanIsSheafOfCoverLifting.pulledbackFamily +#align category_theory.Ran_is_sheaf_of_cover_lifting.pulledback_family CategoryTheory.RanIsSheafOfIsCocontinuous.pulledbackFamily @[simp] theorem pulledbackFamily_apply (Y : StructuredArrow (op U) G.op) {W} {f : W ⟶ _} (Hf) : @@ -146,7 +150,7 @@ theorem pulledbackFamily_apply (Y : StructuredArrow (op U) G.op) {W} {f : W ⟶ x (G.map f ≫ Y.hom.unop) Hf ≫ ((Ran.adjunction A G.op).counit.app ℱ.val).app (op W) := rfl set_option linter.uppercaseLean3 false in -#align category_theory.Ran_is_sheaf_of_cover_lifting.pulledback_family_apply CategoryTheory.RanIsSheafOfCoverLifting.pulledbackFamily_apply +#align category_theory.Ran_is_sheaf_of_cover_lifting.pulledback_family_apply CategoryTheory.RanIsSheafOfIsCocontinuous.pulledbackFamily_apply variable {x} {S} @@ -158,35 +162,35 @@ def getSection (Y : StructuredArrow (op U) G.op) : X ⟶ ℱ.val.obj Y.right := letI hom_sh := whiskerRight ((Ran.adjunction A G.op).counit.app ℱ.val) (coyoneda.obj (op X)) haveI S' := K.pullback_stable Y.hom.unop hS haveI hs' := ((hx.pullback Y.3.unop).functorPullback G).compPresheafMap hom_sh - exact (ℱ.2 X _ (hu.cover_lift S')).amalgamate _ 
hs' + exact (ℱ.2 X _ (G.cover_lift _ _ S')).amalgamate _ hs' set_option linter.uppercaseLean3 false in -#align category_theory.Ran_is_sheaf_of_cover_lifting.get_section CategoryTheory.RanIsSheafOfCoverLifting.getSection +#align category_theory.Ran_is_sheaf_of_cover_lifting.get_section CategoryTheory.RanIsSheafOfIsCocontinuous.getSection theorem getSection_isAmalgamation (Y : StructuredArrow (op U) G.op) : - (pulledbackFamily ℱ S x Y).IsAmalgamation (getSection hu ℱ hS hx Y) := + (pulledbackFamily ℱ S x Y).IsAmalgamation (getSection ℱ hS hx Y) := IsSheafFor.isAmalgamation _ _ set_option linter.uppercaseLean3 false in -#align category_theory.Ran_is_sheaf_of_cover_lifting.get_section_is_amalgamation CategoryTheory.RanIsSheafOfCoverLifting.getSection_isAmalgamation +#align category_theory.Ran_is_sheaf_of_cover_lifting.get_section_is_amalgamation CategoryTheory.RanIsSheafOfIsCocontinuous.getSection_isAmalgamation theorem getSection_is_unique (Y : StructuredArrow (op U) G.op) {y} - (H : (pulledbackFamily ℱ S x Y).IsAmalgamation y) : y = getSection hu ℱ hS hx Y := by + (H : (pulledbackFamily ℱ S x Y).IsAmalgamation y) : y = getSection ℱ hS hx Y := by apply IsSheafFor.isSeparatedFor _ (pulledbackFamily ℱ S x Y) · exact H · apply getSection_isAmalgamation - · exact ℱ.2 X _ (hu.cover_lift (K.pullback_stable Y.hom.unop hS)) + · exact ℱ.2 X _ (G.cover_lift _ _ (K.pullback_stable Y.hom.unop hS)) set_option linter.uppercaseLean3 false in -#align category_theory.Ran_is_sheaf_of_cover_lifting.get_section_is_unique CategoryTheory.RanIsSheafOfCoverLifting.getSection_is_unique +#align category_theory.Ran_is_sheaf_of_cover_lifting.get_section_is_unique CategoryTheory.RanIsSheafOfIsCocontinuous.getSection_is_unique @[simp] theorem getSection_commute {Y Z : StructuredArrow (op U) G.op} (f : Y ⟶ Z) : - getSection hu ℱ hS hx Y ≫ ℱ.val.map f.right = getSection hu ℱ hS hx Z := by + getSection ℱ hS hx Y ≫ ℱ.val.map f.right = getSection ℱ hS hx Z := by apply getSection_is_unique intro V' fV' hV' have eq : Z.hom = Y.hom ≫ (G.map f.right.unop).op := by convert f.w erw [Category.id_comp] rw [eq] at hV' - convert getSection_isAmalgamation hu ℱ hS hx Y (fV' ≫ f.right.unop) _ using 1 + convert getSection_isAmalgamation ℱ hS hx Y (fV' ≫ f.right.unop) _ using 1 · aesop_cat -- porting note: the below proof was mildly rewritten because `simp` changed behaviour -- slightly (a rewrite which seemed to work in Lean 3, didn't work in Lean 4 because of @@ -197,26 +201,26 @@ theorem getSection_commute {Y Z : StructuredArrow (op U) G.op} (f : Y ⟶ Z) : · change S (G.map _ ≫ Y.hom.unop) simpa only [Functor.map_comp, Category.assoc] using hV' set_option linter.uppercaseLean3 false in -#align category_theory.Ran_is_sheaf_of_cover_lifting.get_section_commute CategoryTheory.RanIsSheafOfCoverLifting.getSection_commute +#align category_theory.Ran_is_sheaf_of_cover_lifting.get_section_commute CategoryTheory.RanIsSheafOfIsCocontinuous.getSection_commute /-- The limit cone in order to glue the sections obtained via `get_section`. 
-/ def gluedLimitCone : Limits.Cone (Ran.diagram G.op ℱ.val (op U)) := { pt := X -- porting note: autoporter got this wrong - π := { app := fun Y => getSection hu ℱ hS hx Y } } + π := { app := fun Y => getSection ℱ hS hx Y } } set_option linter.uppercaseLean3 false in -#align category_theory.Ran_is_sheaf_of_cover_lifting.glued_limit_cone CategoryTheory.RanIsSheafOfCoverLifting.gluedLimitCone +#align category_theory.Ran_is_sheaf_of_cover_lifting.glued_limit_cone CategoryTheory.RanIsSheafOfIsCocontinuous.gluedLimitCone @[simp] -theorem gluedLimitCone_π_app (W) : (gluedLimitCone hu ℱ hS hx).π.app W = getSection hu ℱ hS hx W := +theorem gluedLimitCone_π_app (W) : (gluedLimitCone ℱ hS hx).π.app W = getSection ℱ hS hx W := rfl set_option linter.uppercaseLean3 false in -#align category_theory.Ran_is_sheaf_of_cover_lifting.glued_limit_cone_π_app CategoryTheory.RanIsSheafOfCoverLifting.gluedLimitCone_π_app +#align category_theory.Ran_is_sheaf_of_cover_lifting.glued_limit_cone_π_app CategoryTheory.RanIsSheafOfIsCocontinuous.gluedLimitCone_π_app /-- The section obtained by passing `glued_limit_cone` into `CategoryTheory.Limits.limit.lift`. -/ def gluedSection : X ⟶ ((ran G.op).obj ℱ.val).obj (op U) := - limit.lift _ (gluedLimitCone hu ℱ hS hx) + limit.lift _ (gluedLimitCone ℱ hS hx) set_option linter.uppercaseLean3 false in -#align category_theory.Ran_is_sheaf_of_cover_lifting.glued_section CategoryTheory.RanIsSheafOfCoverLifting.gluedSection +#align category_theory.Ran_is_sheaf_of_cover_lifting.glued_section CategoryTheory.RanIsSheafOfIsCocontinuous.gluedSection /-- A helper lemma for the following two lemmas. Basically stating that if the section `y : X ⟶ 𝒢(V)` @@ -227,9 +231,9 @@ in order to be applied in the following lemmas easier. theorem helper {V} (f : V ⟶ U) (y : X ⟶ ((ran G.op).obj ℱ.val).obj (op V)) (W) (H : ∀ {V'} {fV : G.obj V' ⟶ V} (hV), y ≫ ((ran G.op).obj ℱ.val).map fV.op = x (fV ≫ f) hV) : y ≫ limit.π (Ran.diagram G.op ℱ.val (op V)) W = - (gluedLimitCone hu ℱ hS hx).π.app ((StructuredArrow.map f.op).obj W) := by + (gluedLimitCone ℱ hS hx).π.app ((StructuredArrow.map f.op).obj W) := by dsimp only [gluedLimitCone_π_app] - apply getSection_is_unique hu ℱ hS hx ((StructuredArrow.map f.op).obj W) + apply getSection_is_unique ℱ hS hx ((StructuredArrow.map f.op).obj W) intro V' fV' hV' dsimp only [Ran.adjunction, Ran.equiv, pulledbackFamily_apply] erw [Adjunction.adjunctionOfEquivRight_counit_app] @@ -249,10 +253,10 @@ theorem helper {V} (f : V ⟶ U) (y : X ⟶ ((ran G.op).obj ℱ.val).obj (op V)) erw [Category.comp_id] simp only [Quiver.Hom.unop_op, Functor.op_map, Quiver.Hom.op_unop] set_option linter.uppercaseLean3 false in -#align category_theory.Ran_is_sheaf_of_cover_lifting.helper CategoryTheory.RanIsSheafOfCoverLifting.helper +#align category_theory.Ran_is_sheaf_of_cover_lifting.helper CategoryTheory.RanIsSheafOfIsCocontinuous.helper /-- Verify that the `glued_section` is an amalgamation of `x`. -/ -theorem gluedSection_isAmalgamation : x.IsAmalgamation (gluedSection hu ℱ hS hx) := by +theorem gluedSection_isAmalgamation : x.IsAmalgamation (gluedSection ℱ hS hx) := by intro V fV hV -- porting note: next line was `ext W` -- Now `ext` can't see that `ran` is defined as a limit. 
@@ -261,65 +265,80 @@ theorem gluedSection_isAmalgamation : x.IsAmalgamation (gluedSection hu ℱ hS h simp only [Functor.comp_map, limit.lift_pre, coyoneda_obj_map, ran_obj_map, gluedSection] erw [limit.lift_π] symm - convert helper hu ℱ hS hx _ (x fV hV) _ _ using 1 + convert helper ℱ hS hx _ (x fV hV) _ _ using 1 intro V' fV' hV' convert hx fV' (𝟙 _) hV hV' (by rw [Category.id_comp]) simp only [op_id, FunctorToTypes.map_id_apply] set_option linter.uppercaseLean3 false in -#align category_theory.Ran_is_sheaf_of_cover_lifting.glued_section_is_amalgamation CategoryTheory.RanIsSheafOfCoverLifting.gluedSection_isAmalgamation +#align category_theory.Ran_is_sheaf_of_cover_lifting.glued_section_is_amalgamation CategoryTheory.RanIsSheafOfIsCocontinuous.gluedSection_isAmalgamation /-- Verify that the amalgamation is indeed unique. -/ -theorem gluedSection_is_unique (y) (hy : x.IsAmalgamation y) : y = gluedSection hu ℱ hS hx := by +theorem gluedSection_is_unique (y) (hy : x.IsAmalgamation y) : y = gluedSection ℱ hS hx := by unfold gluedSection limit.lift -- porting note: next line was `ext W` -- Now `ext` can't see that `ran` is defined as a limit. -- See https://github.com/leanprover-community/mathlib4/issues/5229 refine limit.hom_ext (λ (W : StructuredArrow (op U) G.op) => ?_) erw [limit.lift_π] - convert helper hu ℱ hS hx (𝟙 _) y W _ + convert helper ℱ hS hx (𝟙 _) y W _ · simp only [op_id, StructuredArrow.map_id] · intro V' fV' hV' convert hy fV' (by simpa only [Category.comp_id] using hV') erw [Category.comp_id] set_option linter.uppercaseLean3 false in -#align category_theory.Ran_is_sheaf_of_cover_lifting.glued_section_is_unique CategoryTheory.RanIsSheafOfCoverLifting.gluedSection_is_unique +#align category_theory.Ran_is_sheaf_of_cover_lifting.glued_section_is_unique CategoryTheory.RanIsSheafOfIsCocontinuous.gluedSection_is_unique -end RanIsSheafOfCoverLifting +end RanIsSheafOfIsCocontinuous -/-- If `G` is cover_lifting, then `Ran G.op` pushes sheaves to sheaves. +variable (K) + +/-- If `G` is cocontinuous, then `Ran G.op` pushes sheaves to sheaves. This result is basically https://stacks.math.columbia.edu/tag/00XK, but without the condition that `C` or `D` has pullbacks. -/ -theorem ran_isSheaf_of_coverLifting {G : C ⥤ D} (hG : CoverLifting J K G) (ℱ : Sheaf J A) : +theorem ran_isSheaf_of_isCocontinuous (ℱ : Sheaf J A) : Presheaf.IsSheaf K ((ran G.op).obj ℱ.val) := by intro X U S hS x hx constructor; swap - · apply RanIsSheafOfCoverLifting.gluedSection hG ℱ hS hx + · apply RanIsSheafOfIsCocontinuous.gluedSection ℱ hS hx constructor - · apply RanIsSheafOfCoverLifting.gluedSection_isAmalgamation - · apply RanIsSheafOfCoverLifting.gluedSection_is_unique + · apply RanIsSheafOfIsCocontinuous.gluedSection_isAmalgamation + · apply RanIsSheafOfIsCocontinuous.gluedSection_is_unique set_option linter.uppercaseLean3 false in -#align category_theory.Ran_is_sheaf_of_cover_lifting CategoryTheory.ran_isSheaf_of_coverLifting +#align category_theory.Ran_is_sheaf_of_cover_lifting CategoryTheory.ran_isSheaf_of_isCocontinuous -variable (A) +variable (A J) -/-- A cover-lifting functor induces a morphism of sites in the same direction as the functor. -/ -def Sites.copullback {G : C ⥤ D} (hG : CoverLifting J K G) : Sheaf J A ⥤ Sheaf K A where - obj ℱ := ⟨(ran G.op).obj ℱ.val, ran_isSheaf_of_coverLifting hG ℱ⟩ +/-- A cover-lifting functor induces a pushforward functor on categories of sheaves. 
-/ +def Functor.sheafPushforwardCocontinuous : Sheaf J A ⥤ Sheaf K A where + obj ℱ := ⟨(ran G.op).obj ℱ.val, ran_isSheaf_of_isCocontinuous _ K ℱ⟩ map f := ⟨(ran G.op).map f.val⟩ map_id ℱ := Sheaf.Hom.ext _ _ <| (ran G.op).map_id ℱ.val map_comp f g := Sheaf.Hom.ext _ _ <| (ran G.op).map_comp f.val g.val -#align category_theory.sites.copullback CategoryTheory.Sites.copullback +#align category_theory.sites.copullback CategoryTheory.Functor.sheafPushforwardCocontinuous + +/- + +Given a cocontinuous functor `G`, the precomposition with `G.op` induces a functor +on presheaves with leads to a "pullback" functor `Sheaf K A ⥤ Sheaf J A` (TODO: formalize +this as `G.sheafPullbackCocontinuous A J K`) using the associated sheaf functor. +It is shown in SGA 4 III 2.3 that this pullback functor is +left adjoint to `G.sheafPushforwardCocontinuous A J K`. This adjunction may replace +`Functor.sheafAdjunctionCocontinuous` below, and then, it could be shown that if +`G` is also continuous, then we have an isomorphism +`G.sheafPullbackCocontinuous A J K ≅ G.sheafPushforwardContinuous A J K` (TODO). -/-- -Given a functor between sites that is cover-preserving, cover-lifting, and compatible-preserving, -the pullback and copullback along `G` are adjoint to each other -/ + +/-- +Given a functor between sites that is continuous and cocontinuous, +the pushforward for the continuous functor `G` is left adjoint to +the pushforward for the cocontinuous functor `G`. -/ @[simps unit_app_val counit_app_val] -noncomputable def Sites.pullbackCopullbackAdjunction {G : C ⥤ D} (Hp : CoverPreserving J K G) - (Hl : CoverLifting J K G) (Hc : CompatiblePreserving K G) : - Sites.pullback A Hc Hp ⊣ Sites.copullback A Hl where +noncomputable def Functor.sheafAdjunctionCocontinuous [G.IsCocontinuous J K] + [G.IsContinuous J K] : + G.sheafPushforwardContinuous A J K ⊣ G.sheafPushforwardCocontinuous A J K where homEquiv X Y := { toFun := fun f => ⟨(Ran.adjunction A G.op).homEquiv X.val Y.val f.val⟩ invFun := fun f => ⟨((Ran.adjunction A G.op).homEquiv X.val Y.val).symm f.val⟩ @@ -347,9 +366,7 @@ noncomputable def Sites.pullbackCopullbackAdjunction {G : C ⥤ D} (Hp : CoverPr -- porting note: next line was `ext1` refine Sheaf.Hom.ext _ _ ?_ apply (Ran.adjunction A G.op).homEquiv_counit -#align category_theory.sites.pullback_copullback_adjunction CategoryTheory.Sites.pullbackCopullbackAdjunction - -namespace Sites +#align category_theory.sites.pullback_copullback_adjunction CategoryTheory.Functor.sheafAdjunctionCocontinuous variable [ConcreteCategory.{max v u} A] @@ -359,30 +376,27 @@ variable [∀ (X : C), HasColimitsOfShape (J.Cover X)ᵒᵖ A] [∀ (X : D), PreservesColimitsOfShape (K.Cover X)ᵒᵖ (forget A)] [∀ (X : D), HasColimitsOfShape (K.Cover X)ᵒᵖ A] + [G.IsCocontinuous J K] [G.IsContinuous J K] -/-- The natural isomorphism exhibiting compatibility between pullback and sheafification. -/ -def pullbackSheafificationCompatibility - {G : C ⥤ D} (Hp : CoverPreserving J K G) - (Hl : CoverLifting J K G) (Hc : CompatiblePreserving K G) : +/-- The natural isomorphism exhibiting compatibility between pushforward and sheafification. 
-/ +def Functor.pushforwardContinuousSheafificationCompatibility : (whiskeringLeft _ _ A).obj G.op ⋙ presheafToSheaf J A ≅ - presheafToSheaf K A ⋙ pullback A Hc Hp := + presheafToSheaf K A ⋙ G.sheafPushforwardContinuous A J K := letI A1 : (whiskeringLeft _ _ A).obj G.op ⊣ _ := Ran.adjunction _ _ letI A2 : presheafToSheaf J A ⊣ _ := sheafificationAdjunction _ _ letI B1 : presheafToSheaf K A ⊣ _ := sheafificationAdjunction _ _ - letI B2 : pullback A Hc Hp ⊣ _ := pullbackCopullbackAdjunction _ _ Hl _ + letI B2 := G.sheafAdjunctionCocontinuous A J K letI A12 := A1.comp A2 letI B12 := B1.comp B2 A12.leftAdjointUniq B12 /- Implementation: This is primarily used to prove the lemma `pullbackSheafificationCompatibility_hom_app_val`. -/ -lemma toSheafify_pullbackSheafificationCompatibility - {G : C ⥤ D} (Hp : CoverPreserving J K G) - (Hl : CoverLifting J K G) (Hc : CompatiblePreserving K G) (F) : +lemma Functor.toSheafify_pullbackSheafificationCompatibility (F : Dᵒᵖ ⥤ A) : J.toSheafify (G.op ⋙ F) ≫ - ((pullbackSheafificationCompatibility.{w, v, u} A Hp Hl Hc).hom.app F).val = + ((G.pushforwardContinuousSheafificationCompatibility A J K).hom.app F).val = whiskerLeft _ (K.toSheafify _) := by - dsimp [pullbackSheafificationCompatibility, Adjunction.leftAdjointUniq] + dsimp [pushforwardContinuousSheafificationCompatibility, Adjunction.leftAdjointUniq] apply Quiver.Hom.op_inj apply coyoneda.map_injective ext E : 2 @@ -394,7 +408,7 @@ lemma toSheafify_pullbackSheafificationCompatibility GrothendieckTopology.sheafifyMap_sheafifyLift, Category.id_comp, Category.assoc, GrothendieckTopology.toSheafify_sheafifyLift] ext t s : 3 - dsimp [pullbackSheaf] + dsimp [sheafPushforwardContinuous] congr 1 simp only [← Category.assoc] convert Category.id_comp (obj := A) _ @@ -403,15 +417,11 @@ lemma toSheafify_pullbackSheafificationCompatibility exact this @[simp] -lemma pullbackSheafificationCompatibility_hom_app_val - {G : C ⥤ D} (Hp : CoverPreserving J K G) - (Hl : CoverLifting J K G) (Hc : CompatiblePreserving K G) (F : Dᵒᵖ ⥤ A) : - ((pullbackSheafificationCompatibility.{w, v, u} A Hp Hl Hc).hom.app F).val = +lemma Functor.pushforwardContinuousSheafificationCompatibility_hom_app_val (F : Dᵒᵖ ⥤ A) : + ((G.pushforwardContinuousSheafificationCompatibility A J K).hom.app F).val = J.sheafifyLift (whiskerLeft G.op <| K.toSheafify F) - ((presheafToSheaf K A ⋙ pullback A Hc Hp).obj F).cond := by + ((presheafToSheaf K A ⋙ G.sheafPushforwardContinuous A J K).obj F).cond := by apply J.sheafifyLift_unique apply toSheafify_pullbackSheafificationCompatibility -end Sites - end CategoryTheory diff --git a/Mathlib/CategoryTheory/Sites/CoverPreserving.lean b/Mathlib/CategoryTheory/Sites/CoverPreserving.lean index 880f34b924894..6f29ac9836a05 100644 --- a/Mathlib/CategoryTheory/Sites/CoverPreserving.lean +++ b/Mathlib/CategoryTheory/Sites/CoverPreserving.lean @@ -9,33 +9,39 @@ import Mathlib.Tactic.ApplyFun #align_import category_theory.sites.cover_preserving from "leanprover-community/mathlib"@"f0c8bf9245297a541f468be517f1bde6195105e9" /-! -# Cover-preserving functors between sites. +# Cover-preserving and continuous functors between sites. -We define cover-preserving functors between sites as functors that push covering sieves to -covering sieves. A cover-preserving and compatible-preserving functor `G : C ⥤ D` then pulls -sheaves on `D` back to sheaves on `C` via `G.op ⋙ -`. 
+We define the notion of continuous functor between sites: these are functors `G` such that +the precomposition with `G.op` preserves sheaves of types (and actually sheaves in any +category). + +In order to show that a functor is continuous, we define cover-preserving functors +between sites as functors that push covering sieves to covering sieves. +Then, a cover-preserving and compatible-preserving functor is continuous. ## Main definitions +* `CategoryTheory.Functor.IsContinuous`: a functor between sites is continuous if the +precomposition with this functor preserves sheaves. * `CategoryTheory.CoverPreserving`: a functor between sites is cover-preserving if it pushes covering sieves to covering sieves * `CategoryTheory.CompatiblePreserving`: a functor between sites is compatible-preserving if it pushes compatible families of elements to compatible families. -* `CategoryTheory.pullbackSheaf`: the pullback of a sheaf along a cover-preserving and -compatible-preserving functor. -* `CategoryTheory.Sites.pullback`: the induced functor `Sheaf K A ⥤ Sheaf J A` for a -cover-preserving and compatible-preserving functor `G : (C, J) ⥤ (D, K)`. +* `CategoryTheory.Functor.sheafPushforwardContinuous`: the induced functor +`Sheaf K A ⥤ Sheaf J A` for a continuous functor `G : (C, J) ⥤ (D, K)`. In case this is +part of a morphism of sites, this would be understood as the pushforward functor +even though it goes in the opposite direction as the functor `G`. ## Main results -- `CategoryTheory.pullback_isSheaf_of_coverPreserving`: If `G : C ⥤ D` is -cover-preserving and compatible-preserving, then `G ⋙ -` (`uᵖ`) as a functor -`(Dᵒᵖ ⥤ A) ⥤ (Cᵒᵖ ⥤ A)` of presheaves maps sheaves to sheaves. +- `CategoryTheory.isContinuous_of_coverPreserving`: If `G : C ⥤ D` is +cover-preserving and compatible-preserving, then `G` is a continuous functor, +i.e. `G.op ⋙ -` as a functor `(Dᵒᵖ ⥤ A) ⥤ (Cᵒᵖ ⥤ A)` of presheaves maps sheaves to sheaves. ## References * [Elephant]: *Sketches of an Elephant*, P. T. Johnstone: C2.3. 
-* https://stacks.math.columbia.edu/tag/00WW +* https://stacks.math.columbia.edu/tag/00WU -/ @@ -49,7 +55,7 @@ open CategoryTheory Opposite CategoryTheory.Presieve.FamilyOfElements CategoryTh namespace CategoryTheory -variable {C : Type u₁} [Category.{v₁} C] {D : Type u₂} [Category.{v₂} D] +variable {C : Type u₁} [Category.{v₁} C] {D : Type u₂} [Category.{v₂} D] (F : C ⥤ D) variable {A : Type u₃} [Category.{v₃} A] @@ -86,7 +92,7 @@ This is actually stronger than merely preserving compatible families because of -/ -- porting note: this doesn't work yet @[nolint has_nonempty_instance] structure CompatiblePreserving (K : GrothendieckTopology D) (G : C ⥤ D) : Prop where - Compatible : + compatible : ∀ (ℱ : SheafOfTypes.{w} K) {Z} {T : Presieve Z} {x : FamilyOfElements (G.op ⋙ ℱ.val) T} (_ : x.Compatible) {Y₁ Y₂} {X} (f₁ : X ⟶ G.obj Y₁) (f₂ : X ⟶ G.obj Y₂) {g₁ : Y₁ ⟶ Z} {g₂ : Y₂ ⟶ Z} (hg₁ : T g₁) (hg₂ : T g₂) (_ : f₁ ≫ G.map g₁ = f₂ ≫ G.map g₂), @@ -106,7 +112,7 @@ theorem Presieve.FamilyOfElements.Compatible.functorPushforward : rcases getFunctorPushforwardStructure H₂ with ⟨X₂, f₂, h₂, hf₂, rfl⟩ suffices ℱ.val.map (g₁ ≫ h₁).op (x f₁ hf₁) = ℱ.val.map (g₂ ≫ h₂).op (x f₂ hf₂) by simpa using this - apply hG.Compatible ℱ h _ _ hf₁ hf₂ + apply hG.compatible ℱ h _ _ hf₁ hf₂ simpa using eq #align category_theory.presieve.family_of_elements.compatible.functor_pushforward CategoryTheory.Presieve.FamilyOfElements.Compatible.functorPushforward @@ -116,7 +122,7 @@ theorem CompatiblePreserving.apply_map {Y : C} {f : Y ⟶ Z} (hf : T f) : unfold FamilyOfElements.functorPushforward rcases e₁ : getFunctorPushforwardStructure (image_mem_functorPushforward G T hf) with ⟨X, g, f', hg, eq⟩ - simpa using hG.Compatible ℱ h f' (𝟙 _) hg hf (by simp [eq]) + simpa using hG.compatible ℱ h f' (𝟙 _) hg hf (by simp [eq]) #align category_theory.compatible_preserving.apply_map CategoryTheory.CompatiblePreserving.apply_map open Limits.WalkingCospan @@ -168,57 +174,85 @@ theorem compatiblePreservingOfDownwardsClosed (F : C ⥤ D) [Full F] [Faithful F (F.map_injective <| by simpa using he) #align category_theory.compatible_preserving_of_downwards_closed CategoryTheory.compatiblePreservingOfDownwardsClosed -/-- If `G` is cover-preserving and compatible-preserving, -then `G.op ⋙ _` pulls sheaves back to sheaves. +variable (J K) -This result is basically . 
--/ -theorem pullback_isSheaf_of_coverPreserving {G : C ⥤ D} (hG₁ : CompatiblePreserving.{v₃} K G) - (hG₂ : CoverPreserving J K G) (ℱ : Sheaf K A) : Presheaf.IsSheaf J (G.op ⋙ ℱ.val) := by - intro X U S hS x hx - change FamilyOfElements (G.op ⋙ ℱ.val ⋙ coyoneda.obj (op X)) _ at x - let H := ℱ.2 X _ (hG₂.cover_preserve hS) - let hx' := hx.functorPushforward hG₁ (sheafOver ℱ X) - constructor; swap - · apply H.amalgamate (x.functorPushforward G) - exact hx' - constructor - · intro V f hf - convert H.isAmalgamation hx' (G.map f) (image_mem_functorPushforward G S hf) - rw [hG₁.apply_map (sheafOver ℱ X) hx] - · intro y hy - refine' - H.isSeparatedFor _ y _ _ (H.isAmalgamation (hx.functorPushforward hG₁ (sheafOver ℱ X))) - rintro V f ⟨Z, f', g', h, rfl⟩ - -- porting note: didn't need coercion (S : Presieve U) in Lean 3 - erw [FamilyOfElements.comp_of_compatible (S.functorPushforward G) hx' - (image_mem_functorPushforward G (S : Presieve U) h) g'] - dsimp - simp [hG₁.apply_map (sheafOver ℱ X) hx h, ← hy f' h] -#align category_theory.pullback_is_sheaf_of_cover_preserving CategoryTheory.pullback_isSheaf_of_coverPreserving +/-- A functor `F` is continuous if the precomposition with `F.op` sends sheaves of `Type w` +to sheaves. -/ +class Functor.IsContinuous : Prop where + op_comp_isSheafOfTypes (G : SheafOfTypes.{w} K) : Presieve.IsSheaf J (F.op ⋙ G.val) + +lemma Functor.op_comp_isSheafOfTypes [Functor.IsContinuous.{w} F J K] + (G : SheafOfTypes.{w} K) : + Presieve.IsSheaf J (F.op ⋙ G.val) := + Functor.IsContinuous.op_comp_isSheafOfTypes _ + +lemma Functor.isContinuous_of_iso {F₁ F₂ : C ⥤ D} (e : F₁ ≅ F₂) + (J : GrothendieckTopology C) (K : GrothendieckTopology D) + [Functor.IsContinuous.{w} F₁ J K] : Functor.IsContinuous.{w} F₂ J K where + op_comp_isSheafOfTypes G := + Presieve.isSheaf_iso J (isoWhiskerRight (NatIso.op e.symm) _) + (F₁.op_comp_isSheafOfTypes J K G) -/-- The pullback of a sheaf along a cover-preserving and compatible-preserving functor. -/ -def pullbackSheaf {G : C ⥤ D} (hG₁ : CompatiblePreserving K G) (hG₂ : CoverPreserving J K G) - (ℱ : Sheaf K A) : Sheaf J A := - ⟨G.op ⋙ ℱ.val, pullback_isSheaf_of_coverPreserving hG₁ hG₂ ℱ⟩ -#align category_theory.pullback_sheaf CategoryTheory.pullbackSheaf +instance Functor.isContinuous_id : Functor.IsContinuous.{w} (𝟭 C) J J where + op_comp_isSheafOfTypes G := G.2 -variable (A) +lemma Functor.isContinuous_comp (F₁ : C ⥤ D) (F₂ : D ⥤ A) (J : GrothendieckTopology C) + (K : GrothendieckTopology D) (L : GrothendieckTopology A) + [Functor.IsContinuous.{w} F₁ J K] [Functor.IsContinuous.{w} F₂ K L] : + Functor.IsContinuous.{w} (F₁ ⋙ F₂) J L where + op_comp_isSheafOfTypes G := F₁.op_comp_isSheafOfTypes J K ⟨_, F₂.op_comp_isSheafOfTypes K L G⟩ -/-- The induced functor from `Sheaf K A ⥤ Sheaf J A` given by `G.op ⋙ _` -if `G` is cover-preserving and compatible-preserving. +lemma Functor.isContinuous_comp' {F₁ : C ⥤ D} {F₂ : D ⥤ A} {F₁₂ : C ⥤ A} + (e : F₁ ⋙ F₂ ≅ F₁₂) (J : GrothendieckTopology C) + (K : GrothendieckTopology D) (L : GrothendieckTopology A) + [Functor.IsContinuous.{w} F₁ J K] [Functor.IsContinuous.{w} F₂ K L] : + Functor.IsContinuous.{w} F₁₂ J L := by + have := Functor.isContinuous_comp F₁ F₂ J K L + apply Functor.isContinuous_of_iso e + +lemma Functor.op_comp_isSheaf [Functor.IsContinuous.{v₃} F J K] (G : Sheaf K A) : + Presheaf.IsSheaf J (F.op ⋙ G.val) := + fun T => F.op_comp_isSheafOfTypes J K ⟨_, G.cond T⟩ + +variable {F J K} + +/-- If `F` is cover-preserving and compatible-preserving, +then `F` is a continuous functor. 
+ +This result is basically . +-/ +lemma Functor.isContinuous_of_coverPreserving (hF₁ : CompatiblePreserving.{w} K F) + (hF₂ : CoverPreserving J K F) : Functor.IsContinuous.{w} F J K where + op_comp_isSheafOfTypes G X S hS x hx := by + apply exists_unique_of_exists_of_unique + · have H := G.2 _ (hF₂.cover_preserve hS) + exact ⟨H.amalgamate (x.functorPushforward F) (hx.functorPushforward hF₁), + fun V f hf => (H.isAmalgamation (hx.functorPushforward hF₁) (F.map f) _).trans + (hF₁.apply_map _ hx hf)⟩ + · intro y₁ y₂ hy₁ hy₂ + apply (Presieve.isSeparated_of_isSheaf _ _ G.cond _ (hF₂.cover_preserve hS)).ext + rintro Y _ ⟨Z, g, h, hg, rfl⟩ + dsimp + simp only [Functor.map_comp, types_comp_apply] + have H := (hy₁ g hg).trans (hy₂ g hg).symm + dsimp at H + rw [H] + +variable (F J K A) + +/-- The induced functor `Sheaf K A ⥤ Sheaf J A` given by `G.op ⋙ _` +if `G` is a continuous functor. -/ -@[simps] -def Sites.pullback {G : C ⥤ D} (hG₁ : CompatiblePreserving K G) (hG₂ : CoverPreserving J K G) : +def Functor.sheafPushforwardContinuous [Functor.IsContinuous.{v₃} F J K] : Sheaf K A ⥤ Sheaf J A where - obj ℱ := pullbackSheaf hG₁ hG₂ ℱ - map f := ⟨((whiskeringLeft _ _ _).obj G.op).map f.val⟩ + obj ℱ := ⟨F.op ⋙ ℱ.val, F.op_comp_isSheaf J K ℱ⟩ + map f := ⟨((whiskeringLeft _ _ _).obj F.op).map f.val⟩ map_id ℱ := by ext1 - apply ((whiskeringLeft _ _ _).obj G.op).map_id + apply ((whiskeringLeft _ _ _).obj F.op).map_id map_comp f g := by ext1 - apply ((whiskeringLeft _ _ _).obj G.op).map_comp -#align category_theory.sites.pullback CategoryTheory.Sites.pullback + apply ((whiskeringLeft _ _ _).obj F.op).map_comp +#align category_theory.sites.pullback CategoryTheory.Functor.sheafPushforwardContinuous end CategoryTheory diff --git a/Mathlib/CategoryTheory/Sites/DenseSubsite.lean b/Mathlib/CategoryTheory/Sites/DenseSubsite.lean index 2c57abdb7f0af..290ec99123a7e 100644 --- a/Mathlib/CategoryTheory/Sites/DenseSubsite.lean +++ b/Mathlib/CategoryTheory/Sites/DenseSubsite.lean @@ -11,7 +11,7 @@ import Mathlib.CategoryTheory.Adjunction.FullyFaithful /-! # Dense subsites -We define `CoverDense` functors into sites as functors such that there exists a covering sieve +We define `IsCoverDense` functors into sites as functors such that there exists a covering sieve that factors through images of the functor for each object in `D`. We will primarily consider cover-dense functors that are also full, since this notion is in general @@ -21,15 +21,16 @@ we would need, and some sheafification would be needed for here and there. ## Main results -- `CategoryTheory.CoverDense.Types.presheafHom`: If `G : C ⥤ (D, K)` is full and cover-dense, - then given any presheaf `ℱ` and sheaf `ℱ'` on `D`, and a morphism `α : G ⋙ ℱ ⟶ G ⋙ ℱ'`, - we may glue them together to obtain a morphism of presheaves `ℱ ⟶ ℱ'`. -- `CategoryTheory.CoverDense.sheafIso`: If `ℱ` above is a sheaf and `α` is an iso, +- `CategoryTheory.Functor.IsCoverDense.Types.presheafHom`: If `G : C ⥤ (D, K)` is full + and cover-dense, then given any presheaf `ℱ` and sheaf `ℱ'` on `D`, + and a morphism `α : G ⋙ ℱ ⟶ G ⋙ ℱ'`, we may glue them together to obtain + a morphism of presheaves `ℱ ⟶ ℱ'`. +- `CategoryTheory.Functor.IsCoverDense.sheafIso`: If `ℱ` above is a sheaf and `α` is an iso, then the result is also an iso. -- `CategoryTheory.CoverDense.iso_of_restrict_iso`: If `G : C ⥤ (D, K)` is full and cover-dense, - then given any sheaves `ℱ, ℱ'` on `D`, and a morphism `α : ℱ ⟶ ℱ'`, then `α` is an iso if - `G ⋙ ℱ ⟶ G ⋙ ℱ'` is iso. 
-- `CategoryTheory.CoverDense.sheafEquivOfCoverPreservingCoverLifting`: +- `CategoryTheory.Functor.IsCoverDense.iso_of_restrict_iso`: If `G : C ⥤ (D, K)` is full + and cover-dense, then given any sheaves `ℱ, ℱ'` on `D`, and a morphism `α : ℱ ⟶ ℱ'`, + then `α` is an iso if `G ⋙ ℱ ⟶ G ⋙ ℱ'` is iso. +- `CategoryTheory.Functor.IsCoverDense.sheafEquivOfCoverPreservingCoverLifting`: If `G : (C, J) ⥤ (D, K)` is fully-faithful, cover-lifting, cover-preserving, and cover-dense, then it will induce an equivalence of categories of sheaves valued in a complete category. @@ -86,42 +87,49 @@ theorem Presieve.in_coverByImage (G : C ⥤ D) {X : D} {Y : C} (f : G.obj Y ⟶ ⟨⟨Y, 𝟙 _, f, by simp⟩⟩ #align category_theory.presieve.in_cover_by_image CategoryTheory.Presieve.in_coverByImage -/-- A functor `G : (C, J) ⥤ (D, K)` is called `CoverDense` if for each object in `D`, +/-- A functor `G : (C, J) ⥤ (D, K)` is cover dense if for each object in `D`, there exists a covering sieve in `D` that factors through images of `G`. This definition can be found in https://ncatlab.org/nlab/show/dense+sub-site Definition 2.2. -/ -structure CoverDense (K : GrothendieckTopology D) (G : C ⥤ D) : Prop where +class Functor.IsCoverDense (G : C ⥤ D) (K : GrothendieckTopology D) : Prop where is_cover : ∀ U : D, Sieve.coverByImage G U ∈ K U -#align category_theory.cover_dense CategoryTheory.CoverDense +#align category_theory.cover_dense CategoryTheory.Functor.IsCoverDense -attribute [nolint docBlame] CategoryTheory.CoverDense.is_cover +lemma Functor.is_cover_of_isCoverDense (G : C ⥤ D) (K : GrothendieckTopology D) + [G.IsCoverDense K] (U : D) : Sieve.coverByImage G U ∈ K U := by + apply Functor.IsCoverDense.is_cover + +attribute [nolint docBlame] CategoryTheory.Functor.IsCoverDense.is_cover open Presieve Opposite -namespace CoverDense +namespace Functor.IsCoverDense variable {K} -variable {A : Type*} [Category A] {G : C ⥤ D} (H : CoverDense K G) +variable {A : Type*} [Category A] (G : C ⥤ D) [G.IsCoverDense K] -- this is not marked with `@[ext]` because `H` can not be inferred from the type -theorem ext (H : CoverDense K G) (ℱ : SheafOfTypes K) (X : D) {s t : ℱ.val.obj (op X)} +theorem ext (ℱ : SheafOfTypes K) (X : D) {s t : ℱ.val.obj (op X)} (h : ∀ ⦃Y : C⦄ (f : G.obj Y ⟶ X), ℱ.val.map f.op s = ℱ.val.map f.op t) : s = t := by - apply (ℱ.cond (Sieve.coverByImage G X) (H.is_cover X)).isSeparatedFor.ext + apply (ℱ.cond (Sieve.coverByImage G X) (G.is_cover_of_isCoverDense K X)).isSeparatedFor.ext rintro Y _ ⟨Z, f₁, f₂, ⟨rfl⟩⟩ simp [h f₂] -#align category_theory.cover_dense.ext CategoryTheory.CoverDense.ext +#align category_theory.cover_dense.ext CategoryTheory.Functor.IsCoverDense.ext + +variable {G} -theorem functorPullback_pushforward_covering [Full G] (H : CoverDense K G) {X : C} +theorem functorPullback_pushforward_covering [Full G] {X : C} (T : K (G.obj X)) : (T.val.functorPullback G).functorPushforward G ∈ K (G.obj X) := by - refine' K.superset_covering _ (K.bind_covering T.property fun Y f _ => H.is_cover Y) + refine' K.superset_covering _ (K.bind_covering T.property + fun Y f _ => G.is_cover_of_isCoverDense K Y) rintro Y _ ⟨Z, _, f, hf, ⟨W, g, f', ⟨rfl⟩⟩, rfl⟩ use W; use G.preimage (f' ≫ f); use g constructor · simpa using T.val.downward_closed hf f' · simp -#align category_theory.cover_dense.functor_pullback_pushforward_covering CategoryTheory.CoverDense.functorPullback_pushforward_covering +#align category_theory.cover_dense.functor_pullback_pushforward_covering 
CategoryTheory.Functor.IsCoverDense.functorPullback_pushforward_covering /-- (Implementation). Given a hom between the pullbacks of two sheaves, we can whisker it with `coyoneda` to obtain a hom between the pullbacks of the sheaves of maps from `X`. @@ -130,7 +138,7 @@ theorem functorPullback_pushforward_covering [Full G] (H : CoverDense K G) {X : def homOver {ℱ : Dᵒᵖ ⥤ A} {ℱ' : Sheaf K A} (α : G.op ⋙ ℱ ⟶ G.op ⋙ ℱ'.val) (X : A) : G.op ⋙ ℱ ⋙ coyoneda.obj (op X) ⟶ G.op ⋙ (sheafOver ℱ' X).val := whiskerRight α (coyoneda.obj (op X)) -#align category_theory.cover_dense.hom_over CategoryTheory.CoverDense.homOver +#align category_theory.cover_dense.hom_over CategoryTheory.Functor.IsCoverDense.homOver /-- (Implementation). Given an iso between the pullbacks of two sheaves, we can whisker it with `coyoneda` to obtain an iso between the pullbacks of the sheaves of maps from `X`. @@ -139,13 +147,13 @@ def homOver {ℱ : Dᵒᵖ ⥤ A} {ℱ' : Sheaf K A} (α : G.op ⋙ ℱ ⟶ G.op def isoOver {ℱ ℱ' : Sheaf K A} (α : G.op ⋙ ℱ.val ≅ G.op ⋙ ℱ'.val) (X : A) : G.op ⋙ (sheafOver ℱ X).val ≅ G.op ⋙ (sheafOver ℱ' X).val := isoWhiskerRight α (coyoneda.obj (op X)) -#align category_theory.cover_dense.iso_over CategoryTheory.CoverDense.isoOver +#align category_theory.cover_dense.iso_over CategoryTheory.Functor.IsCoverDense.isoOver theorem sheaf_eq_amalgamation (ℱ : Sheaf K A) {X : A} {U : D} {T : Sieve U} (hT) (x : FamilyOfElements _ T) (hx) (t) (h : x.IsAmalgamation t) : t = (ℱ.cond X T hT).amalgamate x hx := (ℱ.cond X T hT).isSeparatedFor x t _ h ((ℱ.cond X T hT).isAmalgamation hx) -#align category_theory.cover_dense.sheaf_eq_amalgamation CategoryTheory.CoverDense.sheaf_eq_amalgamation +#align category_theory.cover_dense.sheaf_eq_amalgamation CategoryTheory.Functor.IsCoverDense.sheaf_eq_amalgamation variable [Full G] @@ -160,7 +168,7 @@ that is defined on a cover generated by the images of `G`. -/ noncomputable def pushforwardFamily {X} (x : ℱ.obj (op X)) : FamilyOfElements ℱ'.val (coverByImage G X) := fun _ _ hf => ℱ'.val.map hf.some.lift.op <| α.app (op _) (ℱ.map hf.some.map.op x : _) -#align category_theory.cover_dense.types.pushforward_family CategoryTheory.CoverDense.Types.pushforwardFamily +#align category_theory.cover_dense.types.pushforward_family CategoryTheory.Functor.IsCoverDense.Types.pushforwardFamily -- porting note: there are various `include` and `omit`s in this file (e.g. one is removed here), -- none of which are needed in Lean 4. @@ -175,7 +183,7 @@ noncomputable def pushforwardFamily {X} (x : ℱ.obj (op X)) : theorem pushforwardFamily_compatible {X} (x : ℱ.obj (op X)) : (pushforwardFamily α x).Compatible := by intro Y₁ Y₂ Z g₁ g₂ f₁ f₂ h₁ h₂ e - apply H.ext + apply IsCoverDense.ext G intro Y f simp only [pushforwardFamily, ← FunctorToTypes.map_comp_apply, ← op_comp] change (ℱ.map _ ≫ α.app (op _) ≫ ℱ'.val.map _) _ = (ℱ.map _ ≫ α.app (op _) ≫ ℱ'.val.map _) _ @@ -192,13 +200,13 @@ theorem pushforwardFamily_compatible {X} (x : ℱ.obj (op X)) : simp only [← op_comp, G.image_preimage] congr 3 simp [e] -#align category_theory.cover_dense.types.pushforward_family_compatible CategoryTheory.CoverDense.Types.pushforwardFamily_compatible +#align category_theory.cover_dense.types.pushforward_family_compatible CategoryTheory.Functor.IsCoverDense.Types.pushforwardFamily_compatible /-- (Implementation). The morphism `ℱ(X) ⟶ ℱ'(X)` given by gluing the `pushforwardFamily`. 
-/ noncomputable def appHom (X : D) : ℱ.obj (op X) ⟶ ℱ'.val.obj (op X) := fun x => - (ℱ'.cond _ (H.is_cover X)).amalgamate (pushforwardFamily α x) - (pushforwardFamily_compatible H α x) -#align category_theory.cover_dense.types.app_hom CategoryTheory.CoverDense.Types.appHom + (ℱ'.cond _ (G.is_cover_of_isCoverDense _ X)).amalgamate (pushforwardFamily α x) + (pushforwardFamily_compatible α x) +#align category_theory.cover_dense.types.app_hom CategoryTheory.Functor.IsCoverDense.Types.appHom @[simp] theorem pushforwardFamily_apply {X} (x : ℱ.obj (op X)) {Y : C} (f : G.obj Y ⟶ X) : @@ -217,24 +225,22 @@ theorem pushforwardFamily_apply {X} (x : ℱ.obj (op X)) {Y : C} (f : G.obj Y erw [← α.naturality (G.preimage _).op] simp only [← Functor.map_comp, ← Category.assoc, Functor.comp_map, G.image_preimage, G.op_map, Quiver.Hom.unop_op, ← op_comp, Presieve.CoverByImageStructure.fac] -#align category_theory.cover_dense.types.pushforward_family_apply CategoryTheory.CoverDense.Types.pushforwardFamily_apply +#align category_theory.cover_dense.types.pushforward_family_apply CategoryTheory.Functor.IsCoverDense.Types.pushforwardFamily_apply @[simp] theorem appHom_restrict {X : D} {Y : C} (f : op X ⟶ op (G.obj Y)) (x) : - ℱ'.val.map f (appHom H α X x) = α.app (op Y) (ℱ.map f x) := by - refine' - ((ℱ'.cond _ (H.is_cover X)).valid_glue (pushforwardFamily_compatible H α x) f.unop - (Presieve.in_coverByImage G f.unop)).trans - _ - apply pushforwardFamily_apply -#align category_theory.cover_dense.types.app_hom_restrict CategoryTheory.CoverDense.Types.appHom_restrict + ℱ'.val.map f (appHom α X x) = α.app (op Y) (ℱ.map f x) := + ((ℱ'.cond _ (G.is_cover_of_isCoverDense _ X)).valid_glue + (pushforwardFamily_compatible α x) f.unop + (Presieve.in_coverByImage G f.unop)).trans (pushforwardFamily_apply _ _ _) +#align category_theory.cover_dense.types.app_hom_restrict CategoryTheory.Functor.IsCoverDense.Types.appHom_restrict @[simp] theorem appHom_valid_glue {X : D} {Y : C} (f : op X ⟶ op (G.obj Y)) : - appHom H α X ≫ ℱ'.val.map f = ℱ.map f ≫ α.app (op Y) := by + appHom α X ≫ ℱ'.val.map f = ℱ.map f ≫ α.app (op Y) := by ext apply appHom_restrict -#align category_theory.cover_dense.types.app_hom_valid_glue CategoryTheory.CoverDense.Types.appHom_valid_glue +#align category_theory.cover_dense.types.app_hom_valid_glue CategoryTheory.Functor.IsCoverDense.Types.appHom_valid_glue /-- (Implementation). The maps given in `appIso` is inverse to each other and gives a `ℱ(X) ≅ ℱ'(X)`. @@ -242,33 +248,33 @@ theorem appHom_valid_glue {X : D} {Y : C} (f : op X ⟶ op (G.obj Y)) : @[simps] noncomputable def appIso {ℱ ℱ' : SheafOfTypes.{v} K} (i : G.op ⋙ ℱ.val ≅ G.op ⋙ ℱ'.val) (X : D) : ℱ.val.obj (op X) ≅ ℱ'.val.obj (op X) where - hom := appHom H i.hom X - inv := appHom H i.inv X + hom := appHom i.hom X + inv := appHom i.inv X hom_inv_id := by ext x - apply H.ext + apply Functor.IsCoverDense.ext G intro Y f simp inv_hom_id := by ext x - apply H.ext + apply Functor.IsCoverDense.ext G intro Y f simp -#align category_theory.cover_dense.types.app_iso CategoryTheory.CoverDense.Types.appIso +#align category_theory.cover_dense.types.app_iso CategoryTheory.Functor.IsCoverDense.Types.appIso /-- Given a natural transformation `G ⋙ ℱ ⟶ G ⋙ ℱ'` between presheaves of types, where `G` is full and cover-dense, and `ℱ'` is a sheaf, we may obtain a natural transformation between sheaves. 
-/ @[simps] noncomputable def presheafHom (α : G.op ⋙ ℱ ⟶ G.op ⋙ ℱ'.val) : ℱ ⟶ ℱ'.val where - app X := appHom H α (unop X) + app X := appHom α (unop X) naturality X Y f := by ext x - apply H.ext ℱ' (unop Y) + apply Functor.IsCoverDense.ext G intro Y' f' simp only [appHom_restrict, types_comp_apply, ← FunctorToTypes.map_comp_apply] -- porting note: Lean 3 proof continued with a rewrite but we're done here -#align category_theory.cover_dense.types.presheaf_hom CategoryTheory.CoverDense.Types.presheafHom +#align category_theory.cover_dense.types.presheaf_hom CategoryTheory.Functor.IsCoverDense.Types.presheafHom /-- Given a natural isomorphism `G ⋙ ℱ ≅ G ⋙ ℱ'` between presheaves of types, where `G` is full and cover-dense, and `ℱ, ℱ'` are sheaves, we may obtain a natural isomorphism between presheaves. @@ -276,8 +282,8 @@ and cover-dense, and `ℱ, ℱ'` are sheaves, we may obtain a natural isomorphis @[simps!] noncomputable def presheafIso {ℱ ℱ' : SheafOfTypes.{v} K} (i : G.op ⋙ ℱ.val ≅ G.op ⋙ ℱ'.val) : ℱ.val ≅ ℱ'.val := - NatIso.ofComponents (fun X => appIso H i (unop X)) @(presheafHom H i.hom).naturality -#align category_theory.cover_dense.types.presheaf_iso CategoryTheory.CoverDense.Types.presheafIso + NatIso.ofComponents (fun X => appIso i (unop X)) @(presheafHom i.hom).naturality +#align category_theory.cover_dense.types.presheaf_iso CategoryTheory.Functor.IsCoverDense.Types.presheafIso /-- Given a natural isomorphism `G ⋙ ℱ ≅ G ⋙ ℱ'` between presheaves of types, where `G` is full and cover-dense, and `ℱ, ℱ'` are sheaves, we may obtain a natural isomorphism between sheaves. @@ -285,15 +291,15 @@ and cover-dense, and `ℱ, ℱ'` are sheaves, we may obtain a natural isomorphis @[simps] noncomputable def sheafIso {ℱ ℱ' : SheafOfTypes.{v} K} (i : G.op ⋙ ℱ.val ≅ G.op ⋙ ℱ'.val) : ℱ ≅ ℱ' where - hom := ⟨(presheafIso H i).hom⟩ - inv := ⟨(presheafIso H i).inv⟩ + hom := ⟨(presheafIso i).hom⟩ + inv := ⟨(presheafIso i).inv⟩ hom_inv_id := by ext1 - apply (presheafIso H i).hom_inv_id + apply (presheafIso i).hom_inv_id inv_hom_id := by ext1 - apply (presheafIso H i).inv_hom_id -#align category_theory.cover_dense.types.sheaf_iso CategoryTheory.CoverDense.Types.sheafIso + apply (presheafIso i).inv_hom_id +#align category_theory.cover_dense.types.sheaf_iso CategoryTheory.Functor.IsCoverDense.Types.sheafIso end Types @@ -306,17 +312,17 @@ variable {ℱ : Dᵒᵖ ⥤ A} {ℱ' : Sheaf K A} noncomputable def sheafCoyonedaHom (α : G.op ⋙ ℱ ⟶ G.op ⋙ ℱ'.val) : coyoneda ⋙ (whiskeringLeft Dᵒᵖ A (Type _)).obj ℱ ⟶ coyoneda ⋙ (whiskeringLeft Dᵒᵖ A (Type _)).obj ℱ'.val where - app X := presheafHom H (homOver α (unop X)) + app X := presheafHom (homOver α (unop X)) naturality X Y f := by ext U x change - appHom H (homOver α (unop Y)) (unop U) (f.unop ≫ x) = - f.unop ≫ appHom H (homOver α (unop X)) (unop U) x + appHom (homOver α (unop Y)) (unop U) (f.unop ≫ x) = + f.unop ≫ appHom (homOver α (unop X)) (unop U) x symm apply sheaf_eq_amalgamation - apply H.is_cover + apply G.is_cover_of_isCoverDense -- porting note: the following line closes a goal which didn't exist before reenableeta - · exact pushforwardFamily_compatible H (homOver α Y.unop) (f.unop ≫ x) + · exact pushforwardFamily_compatible (homOver α Y.unop) (f.unop ≫ x) intro Y' f' hf' change unop X ⟶ ℱ.obj (op (unop _)) at x dsimp @@ -325,16 +331,16 @@ noncomputable def sheafCoyonedaHom (α : G.op ⋙ ℱ ⟶ G.op ⋙ ℱ'.val) : conv_lhs => rw [← hf'.some.fac] simp only [← Category.assoc, op_comp, Functor.map_comp] congr 1 - refine' (appHom_restrict H (homOver α (unop X)) hf'.some.map.op 
x).trans _ + refine' (appHom_restrict (homOver α (unop X)) hf'.some.map.op x).trans _ simp -#align category_theory.cover_dense.sheaf_coyoneda_hom CategoryTheory.CoverDense.sheafCoyonedaHom +#align category_theory.cover_dense.sheaf_coyoneda_hom CategoryTheory.Functor.IsCoverDense.sheafCoyonedaHom /-- (Implementation). `sheafCoyonedaHom` but the order of the arguments of the functor are swapped. -/ noncomputable def sheafYonedaHom (α : G.op ⋙ ℱ ⟶ G.op ⋙ ℱ'.val) : ℱ ⋙ yoneda ⟶ ℱ'.val ⋙ yoneda := by - let α := sheafCoyonedaHom H α + let α := sheafCoyonedaHom α refine' { app := _ naturality := _ } @@ -345,17 +351,17 @@ noncomputable def sheafYonedaHom (α : G.op ⋙ ℱ ⟶ G.op ⋙ ℱ'.val) : · intro U V i ext X x exact congr_fun ((α.app X).naturality i) x -#align category_theory.cover_dense.sheaf_yoneda_hom CategoryTheory.CoverDense.sheafYonedaHom +#align category_theory.cover_dense.sheaf_yoneda_hom CategoryTheory.Functor.IsCoverDense.sheafYonedaHom /-- Given a natural transformation `G ⋙ ℱ ⟶ G ⋙ ℱ'` between presheaves of arbitrary category, where `G` is full and cover-dense, and `ℱ'` is a sheaf, we may obtain a natural transformation between presheaves. -/ noncomputable def sheafHom (α : G.op ⋙ ℱ ⟶ G.op ⋙ ℱ'.val) : ℱ ⟶ ℱ'.val := - let α' := sheafYonedaHom H α + let α' := sheafYonedaHom α { app := fun X => yoneda.preimage (α'.app X) naturality := fun X Y f => yoneda.map_injective (by simpa using α'.naturality f) } -#align category_theory.cover_dense.sheaf_hom CategoryTheory.CoverDense.sheafHom +#align category_theory.cover_dense.sheaf_hom CategoryTheory.Functor.IsCoverDense.sheafHom /-- Given a natural isomorphism `G ⋙ ℱ ≅ G ⋙ ℱ'` between presheaves of arbitrary category, where `G` is full and cover-dense, and `ℱ', ℱ` are sheaves, @@ -364,22 +370,22 @@ we may obtain a natural isomorphism between presheaves. @[simps!] noncomputable def presheafIso {ℱ ℱ' : Sheaf K A} (i : G.op ⋙ ℱ.val ≅ G.op ⋙ ℱ'.val) : ℱ.val ≅ ℱ'.val := by - have : ∀ X : Dᵒᵖ, IsIso ((sheafHom H i.hom).app X) := by + have : ∀ X : Dᵒᵖ, IsIso ((sheafHom i.hom).app X) := by intro X -- porting note: somehow `apply` in Lean 3 is leaving a typeclass goal, -- perhaps due to elaboration order. The corresponding `apply` in Lean 4 fails -- because the instance can't yet be synthesized. I hence reorder the proof. 
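    -- The reordered argument below first shows that `yoneda.map ((sheafHom i.hom).app X)`
    -- is an iso, and then deduces that the component itself is an iso from the fact that
    -- `yoneda` reflects isomorphisms.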
- suffices IsIso (yoneda.map ((sheafHom H i.hom).app X)) by + suffices IsIso (yoneda.map ((sheafHom i.hom).app X)) by apply isIso_of_reflects_iso _ yoneda - use (sheafYonedaHom H i.inv).app X + use (sheafYonedaHom i.inv).app X constructor <;> ext x : 2 <;> simp only [sheafHom, NatTrans.comp_app, NatTrans.id_app, Functor.image_preimage] - · exact ((Types.presheafIso H (isoOver i (unop x))).app X).hom_inv_id - · exact ((Types.presheafIso H (isoOver i (unop x))).app X).inv_hom_id + · exact ((Types.presheafIso (isoOver i (unop x))).app X).hom_inv_id + · exact ((Types.presheafIso (isoOver i (unop x))).app X).inv_hom_id -- porting note: Lean 4 proof is finished, Lean 3 needed `inferInstance` - haveI : IsIso (sheafHom H i.hom) := by apply NatIso.isIso_of_isIso_app - apply asIso (sheafHom H i.hom) -#align category_theory.cover_dense.presheaf_iso CategoryTheory.CoverDense.presheafIso + haveI : IsIso (sheafHom i.hom) := by apply NatIso.isIso_of_isIso_app + apply asIso (sheafHom i.hom) +#align category_theory.cover_dense.presheaf_iso CategoryTheory.Functor.IsCoverDense.presheafIso /-- Given a natural isomorphism `G ⋙ ℱ ≅ G ⋙ ℱ'` between presheaves of arbitrary category, where `G` is full and cover-dense, and `ℱ', ℱ` are sheaves, @@ -387,30 +393,30 @@ we may obtain a natural isomorphism between presheaves. -/ @[simps] noncomputable def sheafIso {ℱ ℱ' : Sheaf K A} (i : G.op ⋙ ℱ.val ≅ G.op ⋙ ℱ'.val) : ℱ ≅ ℱ' where - hom := ⟨(presheafIso H i).hom⟩ - inv := ⟨(presheafIso H i).inv⟩ + hom := ⟨(presheafIso i).hom⟩ + inv := ⟨(presheafIso i).inv⟩ hom_inv_id := by ext1 - apply (presheafIso H i).hom_inv_id + apply (presheafIso i).hom_inv_id inv_hom_id := by ext1 - apply (presheafIso H i).inv_hom_id -#align category_theory.cover_dense.sheaf_iso CategoryTheory.CoverDense.sheafIso + apply (presheafIso i).inv_hom_id +#align category_theory.cover_dense.sheaf_iso CategoryTheory.Functor.IsCoverDense.sheafIso /-- The constructed `sheafHom α` is equal to `α` when restricted onto `C`. -/ theorem sheafHom_restrict_eq (α : G.op ⋙ ℱ ⟶ G.op ⋙ ℱ'.val) : - whiskerLeft G.op (sheafHom H α) = α := by + whiskerLeft G.op (sheafHom α) = α := by ext X apply yoneda.map_injective ext U -- porting note: didn't need to provide the input to `image_preimage` in Lean 3 - erw [yoneda.image_preimage ((H.sheafYonedaHom α).app (G.op.obj X))] + erw [yoneda.image_preimage ((sheafYonedaHom α).app (G.op.obj X))] symm change (show (ℱ'.val ⋙ coyoneda.obj (op (unop U))).obj (op (G.obj (unop X))) from _) = _ - apply sheaf_eq_amalgamation ℱ' (H.is_cover _) + apply sheaf_eq_amalgamation ℱ' (G.is_cover_of_isCoverDense _ _) -- porting note: next line was not needed in mathlib3 - · exact (pushforwardFamily_compatible H _ _) + · exact (pushforwardFamily_compatible _ _) intro Y f hf conv_lhs => rw [← hf.some.fac] simp only [pushforwardFamily, Functor.comp_map, yoneda_map_app, coyoneda_obj_map, op_comp, @@ -422,54 +428,60 @@ theorem sheafHom_restrict_eq (α : G.op ⋙ ℱ ⟶ G.op ⋙ ℱ'.val) : symm apply α.naturality (G.preimage hf.some.map).op -- porting note; Lean 3 needed a random `inferInstance` for cleanup here; not necessary in lean 4 -#align category_theory.cover_dense.sheaf_hom_restrict_eq CategoryTheory.CoverDense.sheafHom_restrict_eq +#align category_theory.cover_dense.sheaf_hom_restrict_eq CategoryTheory.Functor.IsCoverDense.sheafHom_restrict_eq + +variable (G) /-- If the pullback map is obtained via whiskering, then the result `sheaf_hom (whisker_left G.op α)` is equal to `α`. 
-/ -theorem sheafHom_eq (α : ℱ ⟶ ℱ'.val) : sheafHom H (whiskerLeft G.op α) = α := by +theorem sheafHom_eq (α : ℱ ⟶ ℱ'.val) : sheafHom (whiskerLeft G.op α) = α := by ext X apply yoneda.map_injective -- porting note: deleted next line as it's not needed in Lean 4 ext U -- porting note: Lean 3 didn't need to be told the explicit input to image_preimage - erw [yoneda.image_preimage ((H.sheafYonedaHom (whiskerLeft G.op α)).app X)] + erw [yoneda.image_preimage ((sheafYonedaHom (whiskerLeft G.op α)).app X)] symm change (show (ℱ'.val ⋙ coyoneda.obj (op (unop U))).obj (op (unop X)) from _) = _ - apply sheaf_eq_amalgamation ℱ' (H.is_cover _) + apply sheaf_eq_amalgamation ℱ' (G.is_cover_of_isCoverDense _ _) -- porting note: next line was not needed in mathlib3 - · exact (pushforwardFamily_compatible H _ _) + · exact (pushforwardFamily_compatible _ _) intro Y f hf conv_lhs => rw [← hf.some.fac] dsimp simp -#align category_theory.cover_dense.sheaf_hom_eq CategoryTheory.CoverDense.sheafHom_eq +#align category_theory.cover_dense.sheaf_hom_eq CategoryTheory.Functor.IsCoverDense.sheafHom_eq + +variable {G} /-- A full and cover-dense functor `G` induces an equivalence between morphisms into a sheaf and morphisms over the restrictions via `G`. -/ noncomputable def restrictHomEquivHom : (G.op ⋙ ℱ ⟶ G.op ⋙ ℱ'.val) ≃ (ℱ ⟶ ℱ'.val) where - toFun := sheafHom H + toFun := sheafHom invFun := whiskerLeft G.op - left_inv := sheafHom_restrict_eq H - right_inv := sheafHom_eq H -#align category_theory.cover_dense.restrict_hom_equiv_hom CategoryTheory.CoverDense.restrictHomEquivHom + left_inv := sheafHom_restrict_eq + right_inv := sheafHom_eq _ +#align category_theory.cover_dense.restrict_hom_equiv_hom CategoryTheory.Functor.IsCoverDense.restrictHomEquivHom /-- Given a full and cover-dense functor `G` and a natural transformation of sheaves `α : ℱ ⟶ ℱ'`, if the pullback of `α` along `G` is iso, then `α` is also iso. -/ theorem iso_of_restrict_iso {ℱ ℱ' : Sheaf K A} (α : ℱ ⟶ ℱ') (i : IsIso (whiskerLeft G.op α.val)) : IsIso α := by - convert IsIso.of_iso (sheafIso H (asIso (whiskerLeft G.op α.val))) using 1 + convert IsIso.of_iso (sheafIso (asIso (whiskerLeft G.op α.val))) using 1 ext1 - apply (sheafHom_eq H _).symm -#align category_theory.cover_dense.iso_of_restrict_iso CategoryTheory.CoverDense.iso_of_restrict_iso + apply (sheafHom_eq _ _).symm +#align category_theory.cover_dense.iso_of_restrict_iso CategoryTheory.Functor.IsCoverDense.iso_of_restrict_iso + +variable (G K) /-- A fully faithful cover-dense functor preserves compatible families. 
-/ -theorem compatiblePreserving [Faithful G] : CompatiblePreserving K G := by +lemma compatiblePreserving [Faithful G] : CompatiblePreserving K G := by constructor intro ℱ Z T x hx Y₁ Y₂ X f₁ f₂ g₁ g₂ hg₁ hg₂ eq - apply H.ext + apply Functor.IsCoverDense.ext G intro W i simp only [← FunctorToTypes.map_comp_apply, ← op_comp] rw [← G.image_preimage (i ≫ f₁)] @@ -477,71 +489,61 @@ theorem compatiblePreserving [Faithful G] : CompatiblePreserving K G := by apply hx apply G.map_injective simp [eq] -#align category_theory.cover_dense.compatible_preserving CategoryTheory.CoverDense.compatiblePreserving +#align category_theory.cover_dense.compatible_preserving CategoryTheory.Functor.IsCoverDense.compatiblePreserving + +lemma isContinuous [Faithful G] (Hp : CoverPreserving J K G) : G.IsContinuous J K := + isContinuous_of_coverPreserving (compatiblePreserving K G) Hp -noncomputable instance Sites.Pullback.full [Faithful G] (Hp : CoverPreserving J K G) : - Full (Sites.pullback A H.compatiblePreserving Hp) where - preimage α := ⟨H.sheafHom α.val⟩ - witness α := Sheaf.Hom.ext _ _ <| H.sheafHom_restrict_eq α.val -#align category_theory.cover_dense.sites.pullback.full CategoryTheory.CoverDense.Sites.Pullback.full +noncomputable instance fullSheafPushforwardContinuous [G.IsContinuous J K] : + Full (G.sheafPushforwardContinuous A J K) where + preimage α := ⟨sheafHom α.val⟩ + witness α := Sheaf.Hom.ext _ _ <| sheafHom_restrict_eq α.val +#align category_theory.cover_dense.sites.pullback.full CategoryTheory.Functor.IsCoverDense.fullSheafPushforwardContinuous -instance Sites.Pullback.faithful [Faithful G] (Hp : CoverPreserving J K G) : - Faithful (Sites.pullback A H.compatiblePreserving Hp) where +instance faithful_sheafPushforwardContinuous [G.IsContinuous J K] : + Faithful (G.sheafPushforwardContinuous A J K) where map_injective := by intro ℱ ℱ' α β e ext1 apply_fun fun e => e.val at e - dsimp at e - rw [← H.sheafHom_eq α.val, ← H.sheafHom_eq β.val, e] -#align category_theory.cover_dense.sites.pullback.faithful CategoryTheory.CoverDense.Sites.Pullback.faithful + dsimp [sheafPushforwardContinuous] at e + rw [← sheafHom_eq G α.val, ← sheafHom_eq G β.val, e] +#align category_theory.cover_dense.sites.pullback.faithful CategoryTheory.Functor.IsCoverDense.faithful_sheafPushforwardContinuous -end CoverDense +end Functor.IsCoverDense end CategoryTheory -namespace CategoryTheory.CoverDense +namespace CategoryTheory.Functor.IsCoverDense open CategoryTheory variable {C D : Type u} [Category.{v} C] [Category.{v} D] -variable {G : C ⥤ D} [Full G] [Faithful G] +variable (G : C ⥤ D) [Full G] [Faithful G] -variable {J : GrothendieckTopology C} {K : GrothendieckTopology D} +variable (J : GrothendieckTopology C) (K : GrothendieckTopology D) variable {A : Type w} [Category.{max u v} A] [Limits.HasLimits A] -variable (Hd : CoverDense K G) (Hp : CoverPreserving J K G) (Hl : CoverLifting J K G) +variable [G.IsCoverDense K] [G.IsContinuous J K] [G.IsCocontinuous J K] + +instance (Y : Sheaf J A) : IsIso ((G.sheafAdjunctionCocontinuous A J K).counit.app Y) := by + let α := G.sheafAdjunctionCocontinuous A J K + haveI : IsIso ((sheafToPresheaf J A).map (α.counit.app Y)) := + IsIso.of_iso ((@asIso _ _ _ _ _ (Ran.reflective A G.op)).app Y.val) + apply ReflectsIsomorphisms.reflects (sheafToPresheaf J A) + +variable (A) /-- Given a functor between small sites that is cover-dense, cover-preserving, and cover-lifting, it induces an equivalence of category of sheaves valued in a complete category. -/ @[simps! 
functor inverse] -noncomputable def sheafEquivOfCoverPreservingCoverLifting : Sheaf J A ≌ Sheaf K A := by - symm - let α := Sites.pullbackCopullbackAdjunction.{w, v, u} A Hp Hl Hd.compatiblePreserving - have : ∀ X : Sheaf J A, IsIso (α.counit.app X) := by - intro ℱ - -- porting note: I don't know how to do `apply_with foo { instances := ff }` - -- so I just create the instance first - haveI : IsIso ((sheafToPresheaf J A).map (α.counit.app ℱ)) := - IsIso.of_iso ((@asIso _ _ _ _ _ (Ran.reflective A G.op)).app ℱ.val) - apply ReflectsIsomorphisms.reflects (sheafToPresheaf J A) - -- porting note: a bunch of instances are not synthesized in lean 4 for some reason - haveI : IsIso α.counit := NatIso.isIso_of_isIso_app _ - haveI : Full (Sites.pullback A Hd.compatiblePreserving Hp) := - CoverDense.Sites.Pullback.full J Hd Hp - haveI : Faithful (Sites.pullback A Hd.compatiblePreserving Hp) := - CoverDense.Sites.Pullback.faithful J Hd Hp - haveI : IsIso α.unit := CategoryTheory.unit_isIso_of_L_fully_faithful α - exact - { functor := Sites.pullback A Hd.compatiblePreserving Hp - inverse := Sites.copullback A Hl - unitIso := asIso α.unit - counitIso := asIso α.counit - functor_unitIso_comp := fun ℱ => by convert α.left_triangle_components } +noncomputable def sheafEquivOfCoverPreservingCoverLifting : Sheaf J A ≌ Sheaf K A := + (G.sheafAdjunctionCocontinuous A J K).toEquivalence.symm set_option linter.uppercaseLean3 false in -#align category_theory.cover_dense.Sheaf_equiv_of_cover_preserving_cover_lifting CategoryTheory.CoverDense.sheafEquivOfCoverPreservingCoverLifting +#align category_theory.cover_dense.Sheaf_equiv_of_cover_preserving_cover_lifting CategoryTheory.Functor.IsCoverDense.sheafEquivOfCoverPreservingCoverLifting variable [ConcreteCategory.{max v u} A] @@ -556,8 +558,8 @@ variable `sheafEquivOfCoverPreservingCoverLifting` with sheafification. -/ noncomputable abbrev sheafEquivOfCoverPreservingCoverLiftingSheafificationCompatibility : - (whiskeringLeft _ _ A).obj G.op ⋙ presheafToSheaf _ _ ≅ - presheafToSheaf _ _ ⋙ (sheafEquivOfCoverPreservingCoverLifting Hd Hp Hl).inverse := -Sites.pullbackSheafificationCompatibility _ _ Hl _ + (whiskeringLeft _ _ A).obj G.op ⋙ presheafToSheaf _ _ ≅ + presheafToSheaf _ _ ⋙ (sheafEquivOfCoverPreservingCoverLifting G J K A).inverse := by + apply Functor.pushforwardContinuousSheafificationCompatibility -end CategoryTheory.CoverDense +end CategoryTheory.Functor.IsCoverDense diff --git a/Mathlib/CategoryTheory/Sites/Grothendieck.lean b/Mathlib/CategoryTheory/Sites/Grothendieck.lean index 99d42586fb5d5..ad483f36f4d3d 100644 --- a/Mathlib/CategoryTheory/Sites/Grothendieck.lean +++ b/Mathlib/CategoryTheory/Sites/Grothendieck.lean @@ -306,7 +306,7 @@ instance : CompleteLattice (GrothendieckTopology C) := · intro X S hS rw [trivial_covering] at hS apply covering_of_eq_top _ hS - · refine' @CompleteLattice.bot_le _ (completeLatticeOfInf _ isGLB_sInf) (trivial C)) + · exact @CompleteLattice.bot_le _ (completeLatticeOfInf _ isGLB_sInf) (trivial C)) _ rfl _ rfl _ rfl sInf rfl instance : Inhabited (GrothendieckTopology C) := diff --git a/Mathlib/CategoryTheory/Sites/InducedTopology.lean b/Mathlib/CategoryTheory/Sites/InducedTopology.lean index 9cd1ed24d5c2d..4f6a8d31aa2c0 100644 --- a/Mathlib/CategoryTheory/Sites/InducedTopology.lean +++ b/Mathlib/CategoryTheory/Sites/InducedTopology.lean @@ -51,7 +51,7 @@ variable (A : Type v) [Category.{u} A] for each covering sieve `T` in `D`, `T ∩ mor(C)` generates a covering sieve in `D`. 
-/ def LocallyCoverDense (K : GrothendieckTopology D) (G : C ⥤ D) : Prop := - ∀ ⦃X⦄ (T : K (G.obj X)), (T.val.functorPullback G).functorPushforward G ∈ K (G.obj X) + ∀ ⦃X : C⦄ (T : K (G.obj X)), (T.val.functorPullback G).functorPushforward G ∈ K (G.obj X) #align category_theory.locally_cover_dense CategoryTheory.LocallyCoverDense namespace LocallyCoverDense @@ -98,9 +98,9 @@ def inducedTopology : GrothendieckTopology C where #align category_theory.locally_cover_dense.induced_topology CategoryTheory.LocallyCoverDense.inducedTopology /-- `G` is cover-lifting wrt the induced topology. -/ -theorem inducedTopology_coverLifting : CoverLifting Hld.inducedTopology K G := +theorem inducedTopology_isCocontinuous : G.IsCocontinuous Hld.inducedTopology K := ⟨@fun _ S hS => Hld ⟨S, hS⟩⟩ -#align category_theory.locally_cover_dense.induced_topology_cover_lifting CategoryTheory.LocallyCoverDense.inducedTopology_coverLifting +#align category_theory.locally_cover_dense.induced_topology_cover_lifting CategoryTheory.LocallyCoverDense.inducedTopology_isCocontinuous /-- `G` is cover-preserving wrt the induced topology. -/ theorem inducedTopology_coverPreserving : CoverPreserving Hld.inducedTopology K G := @@ -109,22 +109,26 @@ theorem inducedTopology_coverPreserving : CoverPreserving Hld.inducedTopology K end LocallyCoverDense -theorem CoverDense.locallyCoverDense [Full G] (H : CoverDense K G) : LocallyCoverDense K G := by +variable (G K) + +theorem Functor.locallyCoverDense_of_isCoverDense [Full G] [G.IsCoverDense K] : + LocallyCoverDense K G := by intro X T - refine' K.superset_covering _ (K.bind_covering T.property fun Y f _ => H.is_cover Y) + refine' K.superset_covering _ (K.bind_covering T.property + fun Y f _ => G.is_cover_of_isCoverDense _ Y) rintro Y _ ⟨Z, _, f, hf, ⟨W, g, f', rfl : _ = _⟩, rfl⟩ use W; use G.preimage (f' ≫ f); use g constructor simpa using T.val.downward_closed hf f' simp -#align category_theory.cover_dense.locally_cover_dense CategoryTheory.CoverDense.locallyCoverDense +#align category_theory.cover_dense.locally_cover_dense CategoryTheory.Functor.locallyCoverDense_of_isCoverDense /-- Given a fully faithful cover-dense functor `G : C ⥤ (D, K)`, we may induce a topology on `C`. -/ -abbrev CoverDense.inducedTopology [Full G] [Faithful G] (H : CoverDense K G) : +abbrev Functor.inducedTopologyOfIsCoverDense [Full G] [Faithful G] [G.IsCoverDense K] : GrothendieckTopology C := - H.locallyCoverDense.inducedTopology -#align category_theory.cover_dense.induced_topology CategoryTheory.CoverDense.inducedTopology + (G.locallyCoverDense_of_isCoverDense K).inducedTopology +#align category_theory.cover_dense.induced_topology CategoryTheory.Functor.inducedTopologyOfIsCoverDense variable (J) @@ -149,17 +153,27 @@ variable {J : GrothendieckTopology C} {K : GrothendieckTopology D} variable (A : Type u) [Category.{v} A] +instance [Full G] [Faithful G] [G.IsCoverDense K] : + Functor.IsContinuous G (G.inducedTopologyOfIsCoverDense K) K := by + apply Functor.IsCoverDense.isContinuous + exact (G.locallyCoverDense_of_isCoverDense K).inducedTopology_coverPreserving + +instance [Full G] [Faithful G] [G.IsCoverDense K] : + Functor.IsCocontinuous G (G.inducedTopologyOfIsCoverDense K) K := + (G.locallyCoverDense_of_isCoverDense K).inducedTopology_isCocontinuous + /-- Cover-dense functors induces an equivalence of categories of sheaves. This is known as the comparison lemma. It requires that the sites are small and the value category is complete. 
-/ -noncomputable def CoverDense.sheafEquiv [Full G] [Faithful G] (H : CoverDense K G) [HasLimits A] : - Sheaf H.inducedTopology A ≌ Sheaf K A := - H.sheafEquivOfCoverPreservingCoverLifting H.locallyCoverDense.inducedTopology_coverPreserving - H.locallyCoverDense.inducedTopology_coverLifting +noncomputable def Functor.sheafInducedTopologyEquivOfIsCoverDense [Full G] [Faithful G] + [G.IsCoverDense K] [HasLimits A] : + Sheaf (G.inducedTopologyOfIsCoverDense K) A ≌ Sheaf K A := + Functor.IsCoverDense.sheafEquivOfCoverPreservingCoverLifting G + (G.inducedTopologyOfIsCoverDense K) K A set_option linter.uppercaseLean3 false in -#align category_theory.cover_dense.Sheaf_equiv CategoryTheory.CoverDense.sheafEquiv +#align category_theory.cover_dense.Sheaf_equiv CategoryTheory.Functor.sheafInducedTopologyEquivOfIsCoverDense end SmallSite diff --git a/Mathlib/CategoryTheory/Sites/Over.lean b/Mathlib/CategoryTheory/Sites/Over.lean index d2411e66e6d64..cdeec8c09ebe1 100644 --- a/Mathlib/CategoryTheory/Sites/Over.lean +++ b/Mathlib/CategoryTheory/Sites/Over.lean @@ -124,22 +124,26 @@ lemma over_forget_coverPreserving (X : C) : CoverPreserving (J.over X) J (Over.forget X) where cover_preserve hS := hS -lemma over_forget_coverLifting (X : C) : - CoverLifting (J.over X) J (Over.forget X) where - cover_lift hS := J.overEquiv_symm_mem_over _ _ hS - lemma over_forget_compatiblePreserving (X : C) : CompatiblePreserving J (Over.forget X) where - Compatible {F Z T x hx Y₁ Y₂ W f₁ f₂ g₁ g₂ hg₁ hg₂ h} := by + compatible {F Z T x hx Y₁ Y₂ W f₁ f₂ g₁ g₂ hg₁ hg₂ h} := by let W' : Over X := Over.mk (f₁ ≫ Y₁.hom) let g₁' : W' ⟶ Y₁ := Over.homMk f₁ let g₂' : W' ⟶ Y₂ := Over.homMk f₂ (by simpa using h.symm =≫ Z.hom) exact hx g₁' g₂' hg₁ hg₂ (by ext; exact h) +instance (X : C) : (Over.forget X).IsCocontinuous (J.over X) J where + cover_lift hS := J.overEquiv_symm_mem_over _ _ hS + +instance (X : C) : (Over.forget X).IsContinuous (J.over X) J := + Functor.isContinuous_of_coverPreserving + (over_forget_compatiblePreserving J X) + (over_forget_coverPreserving J X) + /-- The pullback functor `Sheaf J A ⥤ Sheaf (J.over X) A` -/ abbrev overPullback (A : Type u') [Category.{v'} A] (X : C) : Sheaf J A ⥤ Sheaf (J.over X) A := - Sites.pullback A (J.over_forget_compatiblePreserving X) (J.over_forget_coverPreserving X) + (Over.forget X).sheafPushforwardContinuous _ _ _ end GrothendieckTopology diff --git a/Mathlib/CategoryTheory/Sites/Pushforward.lean b/Mathlib/CategoryTheory/Sites/Pullback.lean similarity index 60% rename from Mathlib/CategoryTheory/Sites/Pushforward.lean rename to Mathlib/CategoryTheory/Sites/Pullback.lean index 19ca10c355d08..0e6e0c423f34c 100644 --- a/Mathlib/CategoryTheory/Sites/Pushforward.lean +++ b/Mathlib/CategoryTheory/Sites/Pullback.lean @@ -9,12 +9,18 @@ import Mathlib.CategoryTheory.Sites.LeftExact #align_import category_theory.sites.pushforward from "leanprover-community/mathlib"@"e2e38c005fc6f715502490da6cb0ec84df9ed228" /-! -# Pushforward of sheaves +# Pullback of sheaves ## Main definitions -* `CategoryTheory.Sites.Pushforward`: the induced functor `Sheaf J A ⥤ Sheaf K A` for a -cover-preserving and compatible-preserving functor `G : (C, J) ⥤ (D, K)`. +* `CategoryTheory.Functor.sheafPullback`: the functor `Sheaf J A ⥤ Sheaf K A` obtained +as an extension of a functor `G : C ⥤ D` between the underlying categories. + +* `CategoryTheory.Functor.sheafAdjunctionContinuous`: the adjunction +`G.sheafPullback A J K ⊣ G.sheafPushforwardContinuous A J K` when the functor +`G` is continuous. 
In case `G` is representably flat, the pullback functor +on sheaves commutes with finite limits: this is a morphism of sites in the +sense of SGA 4 IV 4.9. -/ @@ -27,7 +33,7 @@ open CategoryTheory.Limits namespace CategoryTheory -variable {C : Type v₁} [SmallCategory C] {D : Type v₁} [SmallCategory D] +variable {C : Type v₁} [SmallCategory C] {D : Type v₁} [SmallCategory D] (G : C ⥤ D) variable (A : Type u₁) [Category.{v₁} A] @@ -48,25 +54,23 @@ attribute [local instance] reflectsLimitsOfReflectsIsomorphisms instance {X : C} : IsCofiltered (J.Cover X) := inferInstance -/-- The pushforward functor `Sheaf J A ⥤ Sheaf K A` associated to a functor `G : C ⥤ D` in the +/-- The pullback functor `Sheaf J A ⥤ Sheaf K A` associated to a functor `G : C ⥤ D` in the same direction as `G`. -/ @[simps!] -def Sites.pushforward (G : C ⥤ D) : Sheaf J A ⥤ Sheaf K A := +def Functor.sheafPullback : Sheaf J A ⥤ Sheaf K A := sheafToPresheaf J A ⋙ lan G.op ⋙ presheafToSheaf K A -#align category_theory.sites.pushforward CategoryTheory.Sites.pushforward +#align category_theory.sites.pushforward CategoryTheory.Functor.sheafPullback -instance (G : C ⥤ D) [RepresentablyFlat G] : PreservesFiniteLimits (Sites.pushforward A J K G) := by +instance [RepresentablyFlat G] : PreservesFiniteLimits (G.sheafPullback A J K) := by have : PreservesFiniteLimits (lan (Functor.op G) ⋙ presheafToSheaf K A) := compPreservesFiniteLimits _ _ apply compPreservesFiniteLimits -/-- The pushforward functor is left adjoint to the pullback functor. -/ -def Sites.pullbackPushforwardAdjunction {G : C ⥤ D} (hG₁ : CompatiblePreserving K G) - (hG₂ : CoverPreserving J K G) : Sites.pushforward A J K G ⊣ Sites.pullback A hG₁ hG₂ := +/-- The pullback functor is left adjoint to the pushforward functor. -/ +def Functor.sheafAdjunctionContinuous [Functor.IsContinuous.{v₁} G J K] : + G.sheafPullback A J K ⊣ G.sheafPushforwardContinuous A J K := ((Lan.adjunction A G.op).comp (sheafificationAdjunction K A)).restrictFullyFaithful - (sheafToPresheaf J A) (𝟭 _) - (NatIso.ofComponents fun _ => Iso.refl _) - (NatIso.ofComponents fun _ => Iso.refl _) -#align category_theory.sites.pullback_pushforward_adjunction CategoryTheory.Sites.pullbackPushforwardAdjunction + (sheafToPresheaf J A) (𝟭 _) (Iso.refl _) (Iso.refl _) +#align category_theory.sites.pullback_pushforward_adjunction CategoryTheory.Functor.sheafAdjunctionContinuous end CategoryTheory diff --git a/Mathlib/CategoryTheory/Sites/Sheafification.lean b/Mathlib/CategoryTheory/Sites/Sheafification.lean index 3017eb4fac1af..342a24eb8b7d9 100644 --- a/Mathlib/CategoryTheory/Sites/Sheafification.lean +++ b/Mathlib/CategoryTheory/Sites/Sheafification.lean @@ -5,7 +5,7 @@ Authors: Adam Topaz -/ import Mathlib.CategoryTheory.Adjunction.FullyFaithful import Mathlib.CategoryTheory.Sites.Plus -import Mathlib.CategoryTheory.Limits.ConcreteCategory +import Mathlib.CategoryTheory.Limits.Shapes.ConcreteCategory import Mathlib.CategoryTheory.ConcreteCategory.Elementwise #align_import category_theory.sites.sheafification from "leanprover-community/mathlib"@"70fd9563a21e7b963887c9360bd29b2393e6225a" diff --git a/Mathlib/CategoryTheory/UnivLE.lean b/Mathlib/CategoryTheory/UnivLE.lean index a7528afce20b1..d00b1736c21ae 100644 --- a/Mathlib/CategoryTheory/UnivLE.lean +++ b/Mathlib/CategoryTheory/UnivLE.lean @@ -10,7 +10,7 @@ import Mathlib.CategoryTheory.Types /-! # Universe inequalities and essential surjectivity of `uliftFunctor`. -We show `UnivLE.{u, v} ↔ EssSurj (uliftFunctor.{u, v} : Type v ⥤ Type max u v)`. 
+We show `UnivLE.{max u v, v} ↔ EssSurj (uliftFunctor.{u, v} : Type v ⥤ Type max u v)`. -/ set_option autoImplicit true @@ -20,27 +20,28 @@ open CategoryTheory noncomputable section theorem UnivLE.ofEssSurj.{u, v} (w : EssSurj (uliftFunctor.{u, v} : Type v ⥤ Type max u v)) : - UnivLE.{u, v} := - fun a => by - obtain ⟨a', ⟨m⟩⟩ := w.mem_essImage a + UnivLE.{max u v, v} := + fun α ↦ by + obtain ⟨a', ⟨m⟩⟩ := w.mem_essImage α exact ⟨a', ⟨(Iso.toEquiv m).symm.trans Equiv.ulift⟩⟩ -instance [UnivLE.{u, v}] : EssSurj (uliftFunctor.{u, v} : Type v ⥤ Type max u v) where +instance EssSurj.ofUnivLE [UnivLE.{max u v, v}] : + EssSurj (uliftFunctor.{u, v} : Type v ⥤ Type max u v) where mem_essImage α := ⟨Shrink α, ⟨Equiv.toIso (Equiv.ulift.trans (equivShrink α).symm)⟩⟩ theorem UnivLE_iff_essSurj.{u, v} : - UnivLE.{u, v} ↔ EssSurj (uliftFunctor.{u, v} : Type v ⥤ Type max u v) := + UnivLE.{max u v, v} ↔ EssSurj (uliftFunctor.{u, v} : Type v ⥤ Type max u v) := ⟨fun _ => inferInstance, fun w => UnivLE.ofEssSurj w⟩ -instance [UnivLE.{u, v}] : IsEquivalence uliftFunctor.{u, v} := +instance [UnivLE.{max u v, v}] : IsEquivalence uliftFunctor.{u, v} := Equivalence.ofFullyFaithfullyEssSurj uliftFunctor -def UnivLE.witness.{u, v} [UnivLE.{u, v}] : Type u ⥤ Type v := +def UnivLE.witness.{u, v} [UnivLE.{max u v, v}] : Type u ⥤ Type v := uliftFunctor.{v, u} ⋙ (uliftFunctor.{u, v}).inv -instance [UnivLE.{u, v}] : Faithful UnivLE.witness.{u, v} := +instance [UnivLE.{max u v, v}] : Faithful UnivLE.witness.{u, v} := inferInstanceAs <| Faithful (_ ⋙ _) -instance [UnivLE.{u, v}] : Full UnivLE.witness.{u, v} := +instance [UnivLE.{max u v, v}] : Full UnivLE.witness.{u, v} := inferInstanceAs <| Full (_ ⋙ _) diff --git a/Mathlib/Combinatorics/Colex.lean b/Mathlib/Combinatorics/Colex.lean index c7ad3d9d073c7..0edc2182ed773 100644 --- a/Mathlib/Combinatorics/Colex.lean +++ b/Mathlib/Combinatorics/Colex.lean @@ -1,423 +1,423 @@ /- Copyright (c) 2020 Bhavik Mehta. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. -Authors: Bhavik Mehta, Alena Gusakov +Authors: Bhavik Mehta, Alena Gusakov, Yaël Dillies -/ -import Mathlib.Data.Fintype.Basic import Mathlib.Algebra.GeomSum +import Mathlib.Data.Finset.Slice +import Mathlib.Order.SupClosed #align_import combinatorics.colex from "leanprover-community/mathlib"@"f7fc89d5d5ff1db2d1242c7bb0e9062ce47ef47c" /-! -# Colex +# Colexigraphic order -We define the colex ordering for finite sets, and give a couple of important -lemmas and properties relating to it. +We define the colex order for finite sets, and give a couple of important lemmas and properties +relating to it. -The colex ordering likes to avoid large values - it can be thought of on -`Finset ℕ` as the "binary" ordering. That is, order A based on -`∑_{i ∈ A} 2^i`. -It's defined here in a slightly more general way, requiring only `LT α` in -the definition of colex on `Finset α`. In the context of the Kruskal-Katona -theorem, we are interested in particular on how colex behaves for sets of a -fixed size. If the size is 3, colex on ℕ starts -123, 124, 134, 234, 125, 135, 235, 145, 245, 345, ... +The colex ordering likes to avoid large values: If the biggest element of `t` is bigger than all +elements of `s`, then `s < t`. + +In the special case of `ℕ`, it can be thought of as the "binary" ordering. That is, order `s` based +on $∑_{i ∈ s} 2^i$. It's defined here on `Finset α` for any linear order `α`. 
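
A worked illustration of the "binary" description (a sketch that assumes only the declarations
introduced below in this file): for `s = {0, 1, 2}` and `t = {0, 1, 3}` we have
$∑_{i ∈ s} 2^i = 2^0 + 2^1 + 2^2 = 7$ and $∑_{i ∈ t} 2^i = 2^0 + 2^1 + 2^3 = 11$, so `s < t` in
colex, in line with `Finset.geomSum_le_geomSum_iff_toColex_le_toColex` listed under the main
statements below. On singletons the order simply agrees with the base order, as in this minimal
sketch using `Finset.Colex.singleton_lt_singleton` from later in this file:

```lean
import Mathlib.Combinatorics.Colex

open Finset Finset.Colex

-- Colex restricted to singletons agrees with the order on the base type.
example : toColex ({0} : Finset ℕ) < toColex {1} :=
  singleton_lt_singleton.2 Nat.zero_lt_one
```
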
+ +In the context of the Kruskal-Katona theorem, we are interested in how colex behaves for sets of a +fixed size. For example, for size 3, the colex order on ℕ starts +`012, 013, 023, 123, 014, 024, 124, 034, 134, 234, ...` ## Main statements -* `Colex.hom_lt_iff`: strictly monotone functions preserve colex + * Colex order properties - linearity, decidability and so on. -* `forall_lt_of_colex_lt_of_forall_lt`: if A < B in colex, and everything - in B is < t, then everything in A is < t. This confirms the idea that - an enumeration under colex will exhaust all sets using elements < t before - allowing t to be included. -* `sum_two_pow_le_iff_lt`: colex for α = ℕ is the same as binary - (this also proves binary expansions are unique) +* `Finset.Colex.forall_lt_mono`: if `s < t` in colex, and everything in `t` is `< a`, then + everything in `s` is `< a`. This confirms the idea that an enumeration under colex will exhaust + all sets using elements `< a` before allowing `a` to be included. +* `Finset.toColex_image_le_toColex_image`: Strictly monotone functions preserve colex. +* `Finset.geomSum_le_geomSum_iff_toColex_le_toColex`: Colex for α = ℕ is the same as binary. + This also proves binary expansions are unique. ## See also Related files are: * `Data.List.Lex`: Lexicographic order on lists. -* `Data.Pi.Lex`: Lexicographic order on `(i : α) → α i`. +* `Data.Pi.Lex`: Lexicographic order on `Πₗ i, α i`. * `Data.PSigma.Order`: Lexicographic order on `Σ' i, α i`. * `Data.Sigma.Order`: Lexicographic order on `Σ i, α i`. * `Data.Prod.Lex`: Lexicographic order on `α × β`. -## Tags -colex, colexicographic, binary +## TODO + +* Generalise `Colex.initSeg` so that it applies to `ℕ`. ## References + * https://github.com/b-mehta/maths-notes/blob/master/iii/mich/combinatorics.pdf +## Tags + +colex, colexicographic, binary -/ +open Finset Function +open scoped BigOperators -variable {α : Type*} +#align nat.sum_two_pow_lt Nat.geomSum_lt -open Finset -open BigOperators +variable {α β : Type*} -/-- We define this type synonym to refer to the colexicographic ordering on finsets -rather than the natural subset ordering. --/ -def Finset.Colex (α) := - Finset α --- Porting note: `deriving Inhabited` doesn't work -#align finset.colex Finset.Colex +namespace Finset -instance : Inhabited (Finset.Colex α) := inferInstanceAs (Inhabited (Finset α)) +/-- Type synonym of `Finset α` equipped with the colexicographic order rather than the inclusion +order. -/ +@[ext] +structure Colex (α) := + /-- `toColex` is the "identity" function between `Finset α` and `Finset.Colex α`. -/ + toColex :: + /-- `ofColex` is the "identity" function between `Finset.Colex α` and `Finset α`. -/ + (ofColex : Finset α) -/-- A convenience constructor to turn a `Finset α` into a `Finset.Colex α`, useful in order to -use the colex ordering rather than the subset ordering. --/ -def Finset.toColex {α} (s : Finset α) : Finset.Colex α := - s -#align finset.to_colex Finset.toColex +-- TODO: Why can't we export? +--export Colex (toColex) -@[simp] -theorem Colex.eq_iff (A B : Finset α) : A.toColex = B.toColex ↔ A = B := - Iff.rfl -#align colex.eq_iff Colex.eq_iff +open Colex -/-- `A` is less than `B` in the colex ordering if the largest thing that's not in both sets is in B. -In other words, `max (A ∆ B) ∈ B` (if the maximum exists). 
--/ -instance Colex.instLT [LT α] : LT (Finset.Colex α) := - ⟨fun A B : Finset α => ∃ k : α, (∀ {x}, k < x → (x ∈ A ↔ x ∈ B)) ∧ k ∉ A ∧ k ∈ B⟩ +instance : Inhabited (Colex α) := ⟨⟨∅⟩⟩ -/-- We can define (≤) in the obvious way. -/ -instance Colex.instLE [LT α] : LE (Finset.Colex α) := - ⟨fun A B => A < B ∨ A = B⟩ +@[simp] lemma toColex_ofColex (s : Colex α) : toColex (ofColex s) = s := rfl +lemma ofColex_toColex (s : Finset α) : ofColex (toColex s) = s := rfl +lemma toColex_inj {s t : Finset α} : toColex s = toColex t ↔ s = t := by simp +@[simp] +lemma ofColex_inj {s t : Colex α} : ofColex s = ofColex t ↔ s = t := by cases s; cases t; simp +lemma toColex_ne_toColex {s t : Finset α} : toColex s ≠ toColex t ↔ s ≠ t := by simp +lemma ofColex_ne_ofColex {s t : Colex α} : ofColex s ≠ ofColex t ↔ s ≠ t := by simp -theorem Colex.lt_def [LT α] (A B : Finset α) : - A.toColex < B.toColex ↔ ∃ k, (∀ {x}, k < x → (x ∈ A ↔ x ∈ B)) ∧ k ∉ A ∧ k ∈ B := - Iff.rfl -#align colex.lt_def Colex.lt_def +lemma toColex_injective : Injective (toColex : Finset α → Colex α) := fun _ _ ↦ toColex_inj.1 +lemma ofColex_injective : Injective (ofColex : Colex α → Finset α) := fun _ _ ↦ ofColex_inj.1 -theorem Colex.le_def [LT α] (A B : Finset α) : - A.toColex ≤ B.toColex ↔ A.toColex < B.toColex ∨ A = B := +namespace Colex +section PartialOrder +variable [PartialOrder α] [PartialOrder β] {f : α → β} {𝒜 𝒜₁ 𝒜₂ : Finset (Finset α)} + {s t u : Finset α} {a b : α} + +instance instLE : LE (Colex α) where + le s t := ∀ ⦃a⦄, a ∈ ofColex s → a ∉ ofColex t → ∃ b, b ∈ ofColex t ∧ b ∉ ofColex s ∧ a ≤ b + +-- TODO: This lemma is weirdly useful given how strange its statement is. +-- Is there a nicer statement? Should this lemma be made public? +private lemma trans_aux (hst : toColex s ≤ toColex t) (htu : toColex t ≤ toColex u) + (has : a ∈ s) (hat : a ∉ t) : ∃ b, b ∈ u ∧ b ∉ s ∧ a ≤ b := by + classical + let s' : Finset α := s.filter fun b ↦ b ∉ t ∧ a ≤ b + have ⟨b, hb, hbmax⟩ := exists_maximal s' ⟨a, by simp [has, hat]⟩ + simp only [mem_filter, and_imp] at hb hbmax + have ⟨c, hct, hcs, hbc⟩ := hst hb.1 hb.2.1 + by_cases hcu : c ∈ u + · exact ⟨c, hcu, hcs, hb.2.2.trans hbc⟩ + have ⟨d, hdu, hdt, hcd⟩ := htu hct hcu + have had : a ≤ d := hb.2.2.trans <| hbc.trans hcd + refine ⟨d, hdu, fun hds ↦ ?_, had⟩ + exact hbmax d hds hdt had <| hbc.trans_lt <| hcd.lt_of_ne <| ne_of_mem_of_not_mem hct hdt + +private lemma antisymm_aux (hst : toColex s ≤ toColex t) (hts : toColex t ≤ toColex s) : s ⊆ t := by + intro a has + by_contra' hat + have ⟨_b, hb₁, hb₂, _⟩ := trans_aux hst hts has hat + exact hb₂ hb₁ + +instance instPartialOrder : PartialOrder (Colex α) where + le_refl s a ha ha' := (ha' ha).elim + le_antisymm s t hst hts := Colex.ext _ _ <| (antisymm_aux hst hts).antisymm (antisymm_aux hts hst) + le_trans s t u hst htu a has hau := by + by_cases hat : a ∈ ofColex t + · have ⟨b, hbu, hbt, hab⟩ := htu hat hau + by_cases hbs : b ∈ ofColex s + · have ⟨c, hcu, hcs, hbc⟩ := trans_aux hst htu hbs hbt + exact ⟨c, hcu, hcs, hab.trans hbc⟩ + · exact ⟨b, hbu, hbs, hab⟩ + · exact trans_aux hst htu has hat + +lemma le_def {s t : Colex α} : + s ≤ t ↔ ∀ ⦃a⦄, a ∈ ofColex s → a ∉ ofColex t → ∃ b, b ∈ ofColex t ∧ b ∉ ofColex s ∧ a ≤ b := Iff.rfl -#align colex.le_def Colex.le_def - -/-- If everything in `A` is less than `k`, we can bound the sum of powers. 
-/ -theorem Nat.sum_two_pow_lt {k : ℕ} {A : Finset ℕ} (h₁ : ∀ {x}, x ∈ A → x < k) : - A.sum (Nat.pow 2) < 2 ^ k := by - apply lt_of_le_of_lt (sum_le_sum_of_subset fun t => mem_range.2 ∘ h₁) - have z := geom_sum_mul_add 1 k - rw [mul_one, one_add_one_eq_two] at z - rw [← z] - apply Nat.lt_succ_self -#align nat.sum_two_pow_lt Nat.sum_two_pow_lt -namespace Colex +lemma toColex_le_toColex : + toColex s ≤ toColex t ↔ ∀ ⦃a⦄, a ∈ s → a ∉ t → ∃ b, b ∈ t ∧ b ∉ s ∧ a ≤ b := Iff.rfl + +lemma toColex_lt_toColex : + toColex s < toColex t ↔ s ≠ t ∧ ∀ ⦃a⦄, a ∈ s → a ∉ t → ∃ b, b ∈ t ∧ b ∉ s ∧ a ≤ b := by + simp [lt_iff_le_and_ne, toColex_le_toColex, and_comm] + +/-- If `s ⊆ t`, then `s ≤ t` in the colex order. Note the converse does not hold, as inclusion does +not form a linear order. -/ +lemma toColex_mono : Monotone (toColex : Finset α → Colex α) := + fun _s _t hst _a has hat ↦ (hat <| hst has).elim + +/-- If `s ⊂ t`, then `s < t` in the colex order. Note the converse does not hold, as inclusion does +not form a linear order. -/ +lemma toColex_strictMono : StrictMono (toColex : Finset α → Colex α) := + toColex_mono.strictMono_of_injective toColex_injective + +/-- If `s ⊆ t`, then `s ≤ t` in the colex order. Note the converse does not hold, as inclusion does +not form a linear order. -/ +lemma toColex_le_toColex_of_subset (h : s ⊆ t) : toColex s ≤ toColex t := toColex_mono h + +/-- If `s ⊂ t`, then `s < t` in the colex order. Note the converse does not hold, as inclusion does +not form a linear order. -/ +lemma toColex_lt_toColex_of_ssubset (h : s ⊂ t) : toColex s < toColex t := toColex_strictMono h + +instance instOrderBot : OrderBot (Colex α) where + bot := toColex ∅ + bot_le s a ha := by cases ha + +@[simp] lemma toColex_empty : toColex (∅ : Finset α) = ⊥ := rfl +@[simp] lemma ofColex_bot : ofColex (⊥ : Colex α) = ∅ := rfl + +/-- If `s ≤ t` in colex, and all elements in `t` are small, then all elements in `s` are small. -/ +lemma forall_le_mono (hst : toColex s ≤ toColex t) (ht : ∀ b ∈ t, b ≤ a) : ∀ b ∈ s, b ≤ a := by + rintro b hb + by_cases b ∈ t + · exact ht _ ‹_› + · obtain ⟨c, hct, -, hbc⟩ := hst hb ‹_› + exact hbc.trans <| ht _ hct + +/-- If `s ≤ t` in colex, and all elements in `t` are small, then all elements in `s` are small. -/ +lemma forall_lt_mono (hst : toColex s ≤ toColex t) (ht : ∀ b ∈ t, b < a) : ∀ b ∈ s, b < a := by + rintro b hb + by_cases b ∈ t + · exact ht _ ‹_› + · obtain ⟨c, hct, -, hbc⟩ := hst hb ‹_› + exact hbc.trans_lt <| ht _ hct + +/-- `s ≤ {a}` in colex iff all elements of `s` are strictly less than `a`, except possibly `a` in +which case `s = {a}`. -/ +lemma toColex_le_singleton : toColex s ≤ toColex {a} ↔ ∀ b ∈ s, b ≤ a ∧ (a ∈ s → b = a) := by + simp only [toColex_le_toColex, mem_singleton, and_assoc, exists_eq_left] + refine forall₂_congr fun b _ ↦ ?_; obtain rfl | hba := eq_or_ne b a <;> aesop + +/-- `s < {a}` in colex iff all elements of `s` are strictly less than `a`. -/ +lemma toColex_lt_singleton : toColex s < toColex {a} ↔ ∀ b ∈ s, b < a := by + rw [lt_iff_le_and_ne, toColex_le_singleton, toColex_ne_toColex] + refine ⟨fun h b hb ↦ (h.1 _ hb).1.lt_of_ne ?_, + fun h ↦ ⟨fun b hb ↦ ⟨(h _ hb).le, fun ha ↦ (lt_irrefl _ <| h _ ha).elim⟩, ?_⟩⟩ <;> rintro rfl + · refine h.2 <| eq_singleton_iff_unique_mem.2 ⟨hb, fun c hc ↦ (h.1 _ hc).2 hb⟩ + · simp at h + +/-- `{a} ≤ s` in colex iff `s` contains an element greated than or equal to `a`. 
-/ +lemma singleton_le_toColex : (toColex {a} : Colex α) ≤ toColex s ↔ ∃ x ∈ s, a ≤ x := by + simp [toColex_le_toColex]; by_cases a ∈ s <;> aesop + +/-- Colex is an extension of the base order. -/ +lemma singleton_le_singleton : (toColex {a} : Colex α) ≤ toColex {b} ↔ a ≤ b := by + simp [toColex_le_singleton, eq_comm] + +/-- Colex is an extension of the base order. -/ +lemma singleton_lt_singleton : (toColex {a} : Colex α) < toColex {b} ↔ a < b := by + simp [toColex_lt_singleton] + +variable [DecidableEq α] + +instance instDecidableEq : DecidableEq (Colex α) := fun s t ↦ + decidable_of_iff' (s.ofColex = t.ofColex) <| Colex.ext_iff _ _ + +instance instDecidableLE [@DecidableRel α (· ≤ ·)] : @DecidableRel (Colex α) (· ≤ ·) := fun s t ↦ + decidable_of_iff' + (∀ ⦃a⦄, a ∈ ofColex s → a ∉ ofColex t → ∃ b, b ∈ ofColex t ∧ b ∉ ofColex s ∧ a ≤ b) Iff.rfl + +instance instDecidableLT [@DecidableRel α (· ≤ ·)] : @DecidableRel (Colex α) (· < ·) := + decidableLTOfDecidableLE + +lemma le_iff_sdiff_subset_lowerClosure {s t : Colex α} : + s ≤ t ↔ (ofColex s : Set α) \ ofColex t ⊆ lowerClosure (ofColex t \ ofColex s : Set α) := by + simp [le_def, Set.subset_def, and_assoc] + +/-- The colexigraphic order is insensitive to removing the same elements from both sets. -/ +lemma toColex_sdiff_le_toColex_sdiff (hus : u ⊆ s) (hut : u ⊆ t) : + toColex (s \ u) ≤ toColex (t \ u) ↔ toColex s ≤ toColex t := by + simp_rw [toColex_le_toColex, ←and_imp, ←and_assoc, ←mem_sdiff, sdiff_sdiff_sdiff_cancel_right hus, + sdiff_sdiff_sdiff_cancel_right hut] + +/-- The colexigraphic order is insensitive to removing the same elements from both sets. -/ +lemma toColex_sdiff_lt_toColex_sdiff (hus : u ⊆ s) (hut : u ⊆ t) : + toColex (s \ u) < toColex (t \ u) ↔ toColex s < toColex t := + lt_iff_lt_of_le_iff_le' (toColex_sdiff_le_toColex_sdiff hut hus) <| + toColex_sdiff_le_toColex_sdiff hus hut + +@[simp] lemma toColex_sdiff_le_toColex_sdiff' : + toColex (s \ t) ≤ toColex (t \ s) ↔ toColex s ≤ toColex t := by + simpa using toColex_sdiff_le_toColex_sdiff (inter_subset_left s t) (inter_subset_right s t) + +@[simp] lemma toColex_sdiff_lt_toColex_sdiff' : + toColex (s \ t) < toColex (t \ s) ↔ toColex s < toColex t := by + simpa using toColex_sdiff_lt_toColex_sdiff (inter_subset_left s t) (inter_subset_right s t) + +end PartialOrder + +variable [LinearOrder α] [LinearOrder β] {f : α → β} {𝒜 𝒜₁ 𝒜₂ : Finset (Finset α)} + {s t u : Finset α} {a b : α} {r : ℕ} + +instance instLinearOrder : LinearOrder (Colex α) where + le_total s t := by + classical + obtain rfl | hts := eq_or_ne t s + · simp + have ⟨a, ha, hamax⟩ := exists_max_image _ id (symmDiff_nonempty.2 <| ofColex_ne_ofColex.2 hts) + simp_rw [mem_symmDiff] at ha hamax + exact ha.imp (fun ha b hbs hbt ↦ ⟨a, ha.1, ha.2, hamax _ <| Or.inr ⟨hbs, hbt⟩⟩) + (fun ha b hbt hbs ↦ ⟨a, ha.1, ha.2, hamax _ <| Or.inl ⟨hbt, hbs⟩⟩) + decidableLE := instDecidableLE + decidableLT := instDecidableLT + +private lemma max_mem_aux {s t : Colex α} (hst : s ≠ t) : (ofColex s ∆ ofColex t).Nonempty := by + simpa + +lemma toColex_lt_toColex_iff_exists_forall_lt : + toColex s < toColex t ↔ ∃ a ∈ t, a ∉ s ∧ ∀ b ∈ s, b ∉ t → b < a := by + rw [←not_le, toColex_le_toColex, not_forall] + simp only [not_forall, not_exists, not_and, not_le, exists_prop, exists_and_left] + +lemma lt_iff_exists_forall_lt {s t : Colex α} : + s < t ↔ ∃ a ∈ ofColex t, a ∉ ofColex s ∧ ∀ b ∈ ofColex s, b ∉ ofColex t → b < a := + toColex_lt_toColex_iff_exists_forall_lt + +lemma toColex_le_toColex_iff_max'_mem : + toColex s ≤ toColex t ↔ ∀ hst : s ≠ 
t, (s ∆ t).max' (symmDiff_nonempty.2 hst) ∈ t := by + refine ⟨fun h hst ↦ ?_, fun h a has hat ↦ ?_⟩ + · set m := (s ∆ t).max' (symmDiff_nonempty.2 hst) + by_contra hmt + have hms : m ∈ s := by simpa [mem_symmDiff, hmt] using max'_mem _ <| symmDiff_nonempty.2 hst + have ⟨b, hbt, hbs, hmb⟩ := h hms hmt + exact lt_irrefl _ <| (max'_lt_iff _ _).1 (hmb.lt_of_ne <| ne_of_mem_of_not_mem hms hbs) _ <| + mem_symmDiff.2 <| Or.inr ⟨hbt, hbs⟩ + · have hst : s ≠ t := ne_of_mem_of_not_mem' has hat + refine ⟨_, h hst, ?_, le_max' _ _ <| mem_symmDiff.2 <| Or.inl ⟨has, hat⟩⟩ + simpa [mem_symmDiff, h hst] using max'_mem _ <| symmDiff_nonempty.2 hst + +lemma le_iff_max'_mem {s t : Colex α} : + s ≤ t ↔ ∀ h : s ≠ t, (ofColex s ∆ ofColex t).max' (max_mem_aux h) ∈ ofColex t := + toColex_le_toColex_iff_max'_mem.trans + ⟨fun h hst ↦ h <| ofColex_ne_ofColex.2 hst, fun h hst ↦ h <| ofColex_ne_ofColex.1 hst⟩ + +lemma toColex_lt_toColex_iff_max'_mem : + toColex s < toColex t ↔ ∃ hst : s ≠ t, (s ∆ t).max' (symmDiff_nonempty.2 hst) ∈ t := by + rw [lt_iff_le_and_ne, toColex_le_toColex_iff_max'_mem]; aesop + +lemma lt_iff_max'_mem {s t : Colex α} : + s < t ↔ ∃ h : s ≠ t, (ofColex s ∆ ofColex t).max' (max_mem_aux h) ∈ ofColex t := by + rw [lt_iff_le_and_ne, le_iff_max'_mem]; aesop /-- Strictly monotone functions preserve the colex ordering. -/ -theorem hom_lt_iff {β : Type*} [LinearOrder α] [DecidableEq β] [Preorder β] {f : α → β} - (h₁ : StrictMono f) (A B : Finset α) : - (A.image f).toColex < (B.image f).toColex ↔ A.toColex < B.toColex := by - simp only [Colex.lt_def, not_exists, mem_image, exists_prop, not_and] - constructor - · rintro ⟨k, z, q, k', _, rfl⟩ - exact - ⟨k', @fun x hx => by - simpa [h₁.injective.eq_iff] using z (h₁ hx), fun t => q _ t rfl, ‹k' ∈ B›⟩ - rintro ⟨k, z, ka, _⟩ - refine' ⟨f k, @fun x hx => _, _, k, ‹k ∈ B›, rfl⟩ - · constructor - any_goals - rintro ⟨x', hx', rfl⟩ - refine' ⟨x', _, rfl⟩ - first |rwa [← z _]|rwa [z _] - rwa [StrictMono.lt_iff_lt h₁] at hx - · simp only [h₁.injective, Function.Injective.eq_iff] - exact fun x hx => ne_of_mem_of_not_mem hx ka -#align colex.hom_lt_iff Colex.hom_lt_iff - -/-- A special case of `Colex.hom_lt_iff` which is sometimes useful. 
-/ -@[simp] -theorem hom_fin_lt_iff {n : ℕ} (A B : Finset (Fin n)) : - (A.image fun i : Fin n => (i : ℕ)).toColex < (B.image fun i : Fin n => (i : ℕ)).toColex ↔ - A.toColex < B.toColex := by - refine' Colex.hom_lt_iff _ _ _ - exact (fun x y k => k) -#align colex.hom_fin_lt_iff Colex.hom_fin_lt_iff - -instance [LT α] : IsIrrefl (Finset.Colex α) (· < ·) := - ⟨fun _ h => Exists.elim h fun _ ⟨_, a, b⟩ => a b⟩ - -@[trans] -theorem lt_trans [LinearOrder α] {a b c : Finset.Colex α} : a < b → b < c → a < c := by - rintro ⟨k₁, k₁z, notinA, inB⟩ ⟨k₂, k₂z, notinB, inC⟩ - cases' lt_or_gt_of_ne (ne_of_mem_of_not_mem inB notinB) with h h - · refine' ⟨k₂, @fun x hx => _, _, inC⟩ - rw [← k₂z hx] - apply k₁z (Trans.trans h hx) - rwa [k₁z h] - · refine' ⟨k₁, @fun x hx => _, notinA, by rwa [← k₂z h]⟩ - rw [k₁z hx] - apply k₂z (Trans.trans h hx) -#align colex.lt_trans Colex.lt_trans - -@[trans] -theorem le_trans [LinearOrder α] (a b c : Finset.Colex α) : a ≤ b → b ≤ c → a ≤ c := fun AB BC => - AB.elim (fun k => BC.elim (fun t => Or.inl (lt_trans k t)) fun t => t ▸ AB) fun k => k.symm ▸ BC -#align colex.le_trans Colex.le_trans - -instance [LinearOrder α] : IsTrans (Finset.Colex α) (· < ·) := - ⟨fun _ _ _ => Colex.lt_trans⟩ - -theorem lt_trichotomy [LinearOrder α] (A B : Finset.Colex α) : A < B ∨ A = B ∨ B < A := by - by_cases h₁ : A = B - · tauto - have h : Finset.Nonempty (A \ B ∪ B \ A) := by - rw [nonempty_iff_ne_empty] - intro a - simp only [union_eq_empty, sdiff_eq_empty_iff_subset] at a - apply h₁ (Subset.antisymm a.1 a.2) - rcases exists_max_image (A \ B ∪ B \ A) id h with ⟨k, ⟨hk, z⟩⟩ - · simp only [mem_union, mem_sdiff] at hk - cases' hk with hk hk - · right - right - refine' ⟨k, @fun t th => _, hk.2, hk.1⟩ - specialize z t - by_contra h₂ - simp only [mem_union, mem_sdiff, id.def] at z - rw [not_iff, iff_iff_and_or_not_and_not, not_not, and_comm] at h₂ - apply not_le_of_lt th (z h₂) - · left - refine' ⟨k, @fun t th => _, hk.2, hk.1⟩ - specialize z t - by_contra h₃ - simp only [mem_union, mem_sdiff, id.def] at z - rw [not_iff, iff_iff_and_or_not_and_not, not_not, and_comm, or_comm] at h₃ - apply not_le_of_lt th (z h₃) -#align colex.lt_trichotomy Colex.lt_trichotomy - -instance [LinearOrder α] : IsTrichotomous (Finset.Colex α) (· < ·) := - ⟨lt_trichotomy⟩ - -instance decidableLt [LinearOrder α] : ∀ {A B : Finset.Colex α}, Decidable (A < B) := - show ∀ {A B : Finset α}, Decidable (A.toColex < B.toColex) from @fun A B => - decidable_of_iff' (∃ k ∈ B, (∀ x ∈ A ∪ B, k < x → (x ∈ A ↔ x ∈ B)) ∧ k ∉ A) - (by - rw [Colex.lt_def] - apply exists_congr - simp only [mem_union, exists_prop, or_imp, and_comm (a := _ ∈ B), and_assoc] - intro k - refine' and_congr_left' (forall_congr' _) - tauto) -#align colex.decidable_lt Colex.decidableLt - -instance [LinearOrder α] : LinearOrder (Finset.Colex α) := - { instLT, - instLE with - le_refl := fun A => Or.inr rfl - le_trans := le_trans - le_antisymm := fun A B AB BA => - AB.elim (fun k => BA.elim (fun t => (asymm k t).elim) fun t => t.symm) id - le_total := fun A B => - (lt_trichotomy A B).elim3 (Or.inl ∘ Or.inl) (Or.inl ∘ Or.inr) (Or.inr ∘ Or.inl) - -- Porting note: we must give some hints for instances - decidableLE := by - letI : DecidableEq (Finset.Colex α) := inferInstanceAs (DecidableEq (Finset α)) - exact fun A B => inferInstanceAs (Decidable (A < B ∨ A = B)) - decidableLT := inferInstance - decidableEq := inferInstanceAs (DecidableEq (Finset α)) - lt_iff_le_not_le := fun A B => by - constructor - · intro t - refine' ⟨Or.inl t, _⟩ - rintro (i | rfl) - · apply 
asymm_of _ t i - · apply irrefl _ t - rintro ⟨h₁ | rfl, h₂⟩ - · apply h₁ - apply h₂.elim (Or.inr rfl) } - -/-- The instances set up let us infer that `(· < ·)` is a strict total order. -/ -example [LinearOrder α] : IsStrictTotalOrder (Finset.Colex α) (· < ·) := - inferInstance +lemma toColex_image_le_toColex_image (hf : StrictMono f) : + toColex (s.image f) ≤ toColex (t.image f) ↔ toColex s ≤ toColex t := by + simp [toColex_le_toColex, hf.le_iff_le, hf.injective.eq_iff] /-- Strictly monotone functions preserve the colex ordering. -/ -theorem hom_le_iff {β : Type*} [LinearOrder α] [LinearOrder β] {f : α → β} (h₁ : StrictMono f) - (A B : Finset α) : (A.image f).toColex ≤ (B.image f).toColex ↔ A.toColex ≤ B.toColex := by - rw [le_iff_le_iff_lt_iff_lt, hom_lt_iff h₁] -#align colex.hom_le_iff Colex.hom_le_iff +lemma toColex_image_lt_toColex_image (hf : StrictMono f) : + toColex (s.image f) < toColex (t.image f) ↔ toColex s < toColex t := + lt_iff_lt_of_le_iff_le <| toColex_image_le_toColex_image hf --- Porting note: fixed the doc -/-- A special case of `hom_le_iff` which is sometimes useful. -/ -@[simp] -theorem hom_fin_le_iff {n : ℕ} (A B : Finset (Fin n)) : - (A.image fun i : Fin n => (i : ℕ)).toColex ≤ (B.image fun i : Fin n => (i : ℕ)).toColex ↔ - A.toColex ≤ B.toColex := - Colex.hom_le_iff Fin.val_strictMono _ _ -#align colex.hom_fin_le_iff Colex.hom_fin_le_iff +lemma toColex_image_ofColex_strictMono (hf : StrictMono f) : + StrictMono fun s ↦ toColex $ image f $ ofColex s := + fun _s _t ↦ (toColex_image_lt_toColex_image hf).2 + +/-! ### Initial segments -/ + +/-- `𝒜` is an initial segment of the colexicographic order on sets of size `r`, meaning that if `t` is below +`s` in colex where `t` has size `r` and `s` is in `𝒜`, then `t` is also in `𝒜`. In effect, `𝒜` is +downwards closed with respect to colex among sets of size `r`. -/ +def IsInitSeg (𝒜 : Finset (Finset α)) (r : ℕ) : Prop := + (𝒜 : Set (Finset α)).Sized r ∧ + ∀ ⦃s t : Finset α⦄, s ∈ 𝒜 → toColex t < toColex s ∧ t.card = r → t ∈ 𝒜 + +@[simp] lemma isInitSeg_empty : IsInitSeg (∅ : Finset (Finset α)) r := by simp [IsInitSeg] -/-- If `A` is before `B` in colex, and everything in `B` is small, then everything in `A` is small. +/-- Any two initial segments for the same `r` are nested: one is contained in the other. In particular, if they have the same size they are equal. -/ -theorem forall_lt_of_colex_lt_of_forall_lt [LinearOrder α] {A B : Finset α} (t : α) - (h₁ : A.toColex < B.toColex) (h₂ : ∀ x ∈ B, x < t) : ∀ x ∈ A, x < t := by - rw [Colex.lt_def] at h₁ - rcases h₁ with ⟨k, z, _, _⟩ - intro x hx - apply lt_of_not_ge - intro a - refine' not_lt_of_ge a (h₂ x _) - rwa [← z] - apply lt_of_lt_of_le (h₂ k ‹_›) a -#align colex.forall_lt_of_colex_lt_of_forall_lt Colex.forall_lt_of_colex_lt_of_forall_lt - -/-- `s.toColex < {r}.toColex` iff all elements of `s` are less than `r`. -/ -theorem lt_singleton_iff_mem_lt [LinearOrder α] {r : α} {s : Finset α} : - s.toColex < ({r} : Finset α).toColex ↔ ∀ x ∈ s, x < r := by - simp only [lt_def, mem_singleton, ← and_assoc, exists_eq_right] - constructor - · intro t x hx - rw [← not_le] - intro h - rcases lt_or_eq_of_le h with (h₁ | rfl) - · exact ne_of_irrefl h₁ ((t.1 h₁).1 hx).symm - · exact t.2 hx - · exact fun h => - ⟨fun {z} hz => ⟨fun i => (asymm hz (h _ i)).elim, fun i => (hz.ne' i).elim⟩, - by simpa using h r⟩ -#align colex.lt_singleton_iff_mem_lt Colex.lt_singleton_iff_mem_lt - --- Porting note: fixed the doc -/-- If `{r}` is less than or equal to s in the colexicographical sense, - then s contains an element greater than or equal to r.
-/ -theorem mem_le_of_singleton_le [LinearOrder α] {r : α} {s : Finset α} : - ({r} : Finset α).toColex ≤ s.toColex ↔ ∃ x ∈ s, r ≤ x := by - simp only [← not_lt] - simp [lt_singleton_iff_mem_lt] -#align colex.mem_le_of_singleton_le Colex.mem_le_of_singleton_le - -/-- Colex is an extension of the base ordering on α. -/ -theorem singleton_lt_iff_lt [LinearOrder α] {r s : α} : - ({r} : Finset α).toColex < ({s} : Finset α).toColex ↔ r < s := by simp [lt_singleton_iff_mem_lt] -#align colex.singleton_lt_iff_lt Colex.singleton_lt_iff_lt - -/-- Colex is an extension of the base ordering on α. -/ -theorem singleton_le_iff_le [LinearOrder α] {r s : α} : - ({r} : Finset α).toColex ≤ ({s} : Finset α).toColex ↔ r ≤ s := by - rw [le_iff_le_iff_lt_iff_lt, singleton_lt_iff_lt] -#align colex.singleton_le_iff_le Colex.singleton_le_iff_le - -/-- Colex doesn't care if you remove the other set -/ -@[simp] -theorem sdiff_lt_sdiff_iff_lt [LT α] [DecidableEq α] (A B : Finset α) : - (A \ B).toColex < (B \ A).toColex ↔ A.toColex < B.toColex := by - rw [Colex.lt_def, Colex.lt_def] - apply exists_congr - intro k - simp only [mem_sdiff, not_and, not_not] - constructor - · rintro ⟨z, kAB, kB, kA⟩ - refine' ⟨_, kA, kB⟩ - · intro x hx - specialize z hx - tauto - · rintro ⟨z, kA, kB⟩ - refine' ⟨_, fun _ => kB, kB, kA⟩ - intro x hx - rw [z hx] -#align colex.sdiff_lt_sdiff_iff_lt Colex.sdiff_lt_sdiff_iff_lt - -/-- Colex doesn't care if you remove the other set -/ -@[simp] -theorem sdiff_le_sdiff_iff_le [LinearOrder α] (A B : Finset α) : - (A \ B).toColex ≤ (B \ A).toColex ↔ A.toColex ≤ B.toColex := by - rw [le_iff_le_iff_lt_iff_lt, sdiff_lt_sdiff_iff_lt] -#align colex.sdiff_le_sdiff_iff_le Colex.sdiff_le_sdiff_iff_le - -theorem empty_toColex_lt [LinearOrder α] {A : Finset α} (hA : A.Nonempty) : - (∅ : Finset α).toColex < A.toColex := by - rw [Colex.lt_def] - refine' ⟨max' _ hA, _, by simp, max'_mem _ _⟩ - simp only [false_iff_iff, not_mem_empty] - intro x hx t - apply not_le_of_lt hx (le_max' _ _ t) -#align colex.empty_to_colex_lt Colex.empty_toColex_lt - -/-- If `A ⊂ B`, then `A` is less than `B` in the colex order. Note the converse does not hold, as -`⊆` is not a linear order. -/ -theorem colex_lt_of_ssubset [LinearOrder α] {A B : Finset α} (h : A ⊂ B) : - A.toColex < B.toColex := by - rw [← sdiff_lt_sdiff_iff_lt, sdiff_eq_empty_iff_subset.2 h.1] - exact empty_toColex_lt (by simpa [Finset.Nonempty] using exists_of_ssubset h) -#align colex.colex_lt_of_ssubset Colex.colex_lt_of_ssubset +lemma IsInitSeg.total (h₁ : IsInitSeg 𝒜₁ r) (h₂ : IsInitSeg 𝒜₂ r) : 𝒜₁ ⊆ 𝒜₂ ∨ 𝒜₂ ⊆ 𝒜₁ := by + classical + simp_rw [←sdiff_eq_empty_iff_subset, ←not_nonempty_iff_eq_empty] + by_contra' h + have ⟨⟨s, hs⟩, t, ht⟩ := h + rw [mem_sdiff] at hs ht + obtain hst | hst | hts := trichotomous_of (α := Colex α) (· < ·) (toColex s) (toColex t) + · exact hs.2 <| h₂.2 ht.1 ⟨hst, h₁.1 hs.1⟩ + · simp only [toColex.injEq] at hst + exact ht.2 <| hst ▸ hs.1 + · exact ht.2 <| h₁.2 hs.1 ⟨hts, h₂.1 ht.1⟩ + +variable [Fintype α] + +/-- The initial segment of the colexicographic order on sets with `s.card` elements and ending at +`s`. -/ +def initSeg (s : Finset α) : Finset (Finset α) := + univ.filter fun t ↦ s.card = t.card ∧ toColex t ≤ toColex s @[simp] -theorem empty_toColex_le [LinearOrder α] {A : Finset α} : (∅ : Finset α).toColex ≤ A.toColex := by - rcases A.eq_empty_or_nonempty with (rfl | hA) - · simp - · apply (empty_toColex_lt hA).le -#align colex.empty_to_colex_le Colex.empty_toColex_le - -/-- If `A ⊆ B`, then `A ≤ B` in the colex order. 
Note the converse does not hold, as `⊆` is not a -linear order. -/ -theorem colex_le_of_subset [LinearOrder α] {A B : Finset α} (h : A ⊆ B) : - A.toColex ≤ B.toColex := by - rw [← sdiff_le_sdiff_iff_le, sdiff_eq_empty_iff_subset.2 h] - apply empty_toColex_le -#align colex.colex_le_of_subset Colex.colex_le_of_subset - -/-- The function from finsets to finsets with the colex order is a relation homomorphism. -/ -@[simps] -def toColexRelHom [LinearOrder α] : - ((· ⊆ ·) : Finset α → Finset α → Prop) →r ((· ≤ ·) : Finset.Colex α → Finset.Colex α → Prop) - where - toFun := Finset.toColex - map_rel' {_ _} := colex_le_of_subset -#align colex.to_colex_rel_hom Colex.toColexRelHom - -instance [LinearOrder α] : OrderBot (Finset.Colex α) where - bot := (∅ : Finset α).toColex - bot_le _ := empty_toColex_le - -instance [LinearOrder α] [Fintype α] : OrderTop (Finset.Colex α) where - top := Finset.univ.toColex - le_top _ := colex_le_of_subset (subset_univ _) - -instance [LinearOrder α] : Lattice (Finset.Colex α) := - { inferInstanceAs (SemilatticeSup (Finset.Colex α)), - inferInstanceAs (SemilatticeInf (Finset.Colex α)) with } - -instance [LinearOrder α] [Fintype α] : BoundedOrder (Finset.Colex α) := - { inferInstanceAs (OrderTop (Finset.Colex α)), - inferInstanceAs (OrderBot (Finset.Colex α)) with } - -/-- For subsets of ℕ, we can show that colex is equivalent to binary. -/ -theorem sum_two_pow_lt_iff_lt (A B : Finset ℕ) : - ((∑ i in A, 2 ^ i) < ∑ i in B, 2 ^ i) ↔ A.toColex < B.toColex := by - have z : ∀ A B : Finset ℕ, A.toColex < B.toColex → ∑ i in A, 2 ^ i < ∑ i in B, 2 ^ i := by - intro A B - rw [← sdiff_lt_sdiff_iff_lt, Colex.lt_def] - rintro ⟨k, z, kA, kB⟩ - rw [← sdiff_union_inter A B] - conv_rhs => rw [← sdiff_union_inter B A] - rw [sum_union (disjoint_sdiff_inter _ _), sum_union (disjoint_sdiff_inter _ _), inter_comm, - add_lt_add_iff_right] - apply lt_of_lt_of_le (@Nat.sum_two_pow_lt k (A \ B) _) - · apply single_le_sum (fun _ _ => Nat.zero_le _) kB - intro x hx - apply lt_of_le_of_ne (le_of_not_lt _) - · apply ne_of_mem_of_not_mem hx kA - -- Porting note: `intro` required because `apply` behaves differently - intro kx - have := (z kx).1 hx - rw [mem_sdiff] at this hx - exact hx.2 this.1 - refine' - ⟨fun h => (lt_trichotomy A B).resolve_right fun h₁ => h₁.elim _ (not_lt_of_gt h ∘ z _ _), z A B⟩ - rintro rfl - apply irrefl _ h -#align colex.sum_two_pow_lt_iff_lt Colex.sum_two_pow_lt_iff_lt - -/-- For subsets of ℕ, we can show that colex is equivalent to binary. 
-/ -theorem sum_two_pow_le_iff_lt (A B : Finset ℕ) : - ((∑ i in A, 2 ^ i) ≤ ∑ i in B, 2 ^ i) ↔ A.toColex ≤ B.toColex := by - rw [le_iff_le_iff_lt_iff_lt, sum_two_pow_lt_iff_lt] -#align colex.sum_two_pow_le_iff_lt Colex.sum_two_pow_le_iff_lt +lemma mem_initSeg : t ∈ initSeg s ↔ s.card = t.card ∧ toColex t ≤ toColex s := by simp [initSeg] + +lemma mem_initSeg_self : s ∈ initSeg s := by simp +@[simp] lemma initSeg_nonempty : (initSeg s).Nonempty := ⟨s, mem_initSeg_self⟩ + +lemma isInitSeg_initSeg : IsInitSeg (initSeg s) s.card := by + refine ⟨fun t ht => (mem_initSeg.1 ht).1.symm, fun t₁ t₂ ht₁ ht₂ ↦ mem_initSeg.2 ⟨ht₂.2.symm, ?_⟩⟩ + rw [mem_initSeg] at ht₁ + exact ht₂.1.le.trans ht₁.2 + +lemma IsInitSeg.exists_initSeg (h𝒜 : IsInitSeg 𝒜 r) (h𝒜₀ : 𝒜.Nonempty) : + ∃ s : Finset α, s.card = r ∧ 𝒜 = initSeg s := by + have hs := sup'_mem (ofColex ⁻¹' 𝒜) (LinearOrder.supClosed _) 𝒜 h𝒜₀ toColex + (fun a ha ↦ by simpa using ha) + refine' ⟨_, h𝒜.1 hs, _⟩ + ext t + rw [mem_initSeg] + refine' ⟨fun p ↦ _, _⟩ + · rw [h𝒜.1 p, h𝒜.1 hs] + exact ⟨rfl, le_sup' _ p⟩ + rintro ⟨cards, le⟩ + obtain p | p := le.eq_or_lt + · rwa [toColex_inj.1 p] + · exact h𝒜.2 hs ⟨p, cards ▸ h𝒜.1 hs⟩ + +/-- Being a nonempty initial segment of colex is equivalent to being an `initSeg`. -/ +lemma isInitSeg_iff_exists_initSeg : + IsInitSeg 𝒜 r ∧ 𝒜.Nonempty ↔ ∃ s : Finset α, s.card = r ∧ 𝒜 = initSeg s := by + refine ⟨fun h𝒜 ↦ h𝒜.1.exists_initSeg h𝒜.2, ?_⟩ + rintro ⟨s, rfl, rfl⟩ + exact ⟨isInitSeg_initSeg, initSeg_nonempty⟩ end Colex + +open Colex + +/-! +### Colex on `ℕ` + +The colexicographic order agrees with the order induced by interpreting a set of naturals as a +`n`-ary expansion. +-/ + +section Nat +variable {s t : Finset ℕ} {n : ℕ} + +lemma geomSum_ofColex_strictMono (hn : 2 ≤ n) : StrictMono fun s ↦ ∑ k in ofColex s, n ^ k := by + rintro ⟨s⟩ ⟨t⟩ hst + rw [toColex_lt_toColex_iff_exists_forall_lt] at hst + obtain ⟨a, hat, has, ha⟩ := hst + rw [←sum_sdiff_lt_sum_sdiff] + exact (Nat.geomSum_lt hn $ by simpa).trans_le <| single_le_sum (fun _ _ ↦ by positivity) <| + mem_sdiff.2 ⟨hat, has⟩ + +/-- For finsets of naturals of naturals, the colexicographic order is equivalent to the order +induced by the `n`-ary expansion. -/ +lemma geomSum_le_geomSum_iff_toColex_le_toColex (hn : 2 ≤ n) : + ∑ k in s, n ^ k ≤ ∑ k in t, n ^ k ↔ toColex s ≤ toColex t := + (geomSum_ofColex_strictMono hn).le_iff_le + +/-- For finsets of naturals of naturals, the colexicographic order is equivalent to the order +induced by the `n`-ary expansion. -/ +lemma geomSum_lt_geomSum_iff_toColex_lt_toColex (hn : 2 ≤ n) : + ∑ i in s, n ^ i < ∑ i in t, n ^ i ↔ toColex s < toColex t := + (geomSum_ofColex_strictMono hn).lt_iff_lt + +-- TODO: Package the above in the `n = 2` case as an order isomorphism `Colex ℕ ≃o ℕ` + +end Nat +end Finset diff --git a/Mathlib/Combinatorics/Optimization/ValuedCSP.lean b/Mathlib/Combinatorics/Optimization/ValuedCSP.lean index 5f36f2258e048..0ca7be351031a 100644 --- a/Mathlib/Combinatorics/Optimization/ValuedCSP.lean +++ b/Mathlib/Combinatorics/Optimization/ValuedCSP.lean @@ -30,8 +30,8 @@ General-Valued CSP subsumes Min-Cost-Hom (including 3-SAT for example) and Finit /-- A template for a valued CSP problem over a domain `D` with costs in `C`. Regarding `C` we want to support `Bool`, `Nat`, `ENat`, `Int`, `Rat`, `NNRat`, `Real`, `NNReal`, `EReal`, `ENNReal`, and tuples made of any of those types. 
-/ -@[reducible, nolint unusedArguments] -def ValuedCsp (D C : Type*) [OrderedAddCommMonoid C] := +@[nolint unusedArguments] +abbrev ValuedCsp (D C : Type*) [OrderedAddCommMonoid C] := Set (Σ (n : ℕ), (Fin n → D) → C) -- Cost functions `D^n → C` for any `n` variable {D C : Type*} [OrderedAddCommMonoid C] @@ -53,7 +53,7 @@ def ValuedCsp.Term.evalSolution {Γ : ValuedCsp D C} {ι : Type*} t.f (x ∘ t.app) /-- A valued CSP instance over the template `Γ` with variables indexed by `ι`.-/ -def ValuedCsp.Instance (Γ : ValuedCsp D C) (ι : Type*) : Type _ := +abbrev ValuedCsp.Instance (Γ : ValuedCsp D C) (ι : Type*) : Type _ := Multiset (Γ.Term ι) /-- Evaluation of a `Γ` instance `I` for given solution `x`. -/ diff --git a/Mathlib/Computability/AkraBazzi/AkraBazzi.lean b/Mathlib/Computability/AkraBazzi/AkraBazzi.lean new file mode 100644 index 0000000000000..191629749c7b8 --- /dev/null +++ b/Mathlib/Computability/AkraBazzi/AkraBazzi.lean @@ -0,0 +1,1467 @@ +/- +Copyright (c) 2023 Frédéric Dupuis. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Frédéric Dupuis +-/ + +import Mathlib.Computability.AkraBazzi.GrowsPolynomially +import Mathlib.Analysis.Calculus.MeanValue +import Mathlib.Analysis.SpecialFunctions.Pow.Deriv + +/-! +# Divide-and-conquer recurrences and the Akra-Bazzi theorem + +A divide-and-conquer recurrence is a function `T : ℕ → ℝ` that satisfies a recurrence relation of +the form `T(n) = ∑_{i=0}^{k-1} a_i T(r_i(n)) + g(n)` for large enough `n`, where `r_i(n)` is some +function where `‖r_i(n) - b_i n‖ ∈ o(n / (log n)^2)` for every `i`, the `a_i`'s are some positive +coefficients, and the `b_i`'s are reals `∈ (0,1)`. (Note that this can be improved to +`O(n / (log n)^(1+ε))`; this is left as future work.) These recurrences arise mainly in the +analysis of divide-and-conquer algorithms such as mergesort or Strassen's algorithm for matrix +multiplication. This class of algorithms works by dividing an instance of the problem of size `n` +into `k` smaller instances, where the `i`'th instance is of size roughly `b_i n`, and calling itself +recursively on those smaller instances. `T(n)` then represents the running time of the algorithm, +and `g(n)` represents the running time required to actually divide up the instance and process the +answers that come out of the recursive calls. Since virtually all such algorithms produce instances +that are only approximately of size `b_i n` (they have to round up or down at the very least), we +allow the instance sizes to be given by some function `r_i(n)` that approximates `b_i n`. + +The Akra-Bazzi theorem gives the asymptotic order of such a recurrence: it states that +`T(n) ∈ Θ(n^p (1 + ∑_{u=0}^{n-1} g(u) / u^{p+1}))`, +where `p` is the unique real number such that `∑ a_i b_i^p = 1`. + +## Main definitions and results + +* `AkraBazziRecurrence T g a b r`: the predicate stating that `T : ℕ → ℝ` satisfies an Akra-Bazzi + recurrence with parameters `g`, `a`, `b` and `r` as above. +* `GrowsPolynomially`: The growth condition that `g` must satisfy for the theorem to apply. + It roughly states that + `c₁ g(n) ≤ g(u) ≤ c₂ g(n)`, for `u` between `b*n` and `n`, for any constant `b ∈ (0,1)`. +* `sumTransform`: The transformation which turns a function `g` into + `n^p * ∑ u in Finset.Ico n₀ n, g u / u^(p+1)`.
+* `asympBound`: The asymptotic bound satisfied by an Akra-Bazzi recurrence, namely + `n^p (1 + ∑ g(u) / u^(p+1))` +* `isTheta_asympBound`: The main result stating that + `T(n) ∈ Θ(n^p (1 + ∑_{u=0}^{n-1} g(n) / u^{p+1}))` + +## Implementation + +Note that the original version of the theorem has an integral rather than a sum in the above +expression, and first considers the `T : ℝ → ℝ` case before moving on to `ℕ → ℝ`. We prove the +above version with a sum, as it is simpler and more relevant for algorithms. + +## TODO + +* Specialize this theorem to the very common case where the recurrence is of the form +`T(n) = ℓT(r_i(n)) + g(n)` +where `g(n) ∈ Θ(n^t)` for some `t`. (This is often called the "master theorem" in the literature.) +* Add the original version of the theorem with an integral instead of a sum. + +## References + +* Mohamad Akra and Louay Bazzi, On the solution of linear recurrence equations +* Tom Leighton, Notes on better master theorems for divide-and-conquer recurrences +* Manuel Eberl, Asymptotic reasoning in a proof assistant + +-/ + +open Finset Real Filter Asymptotics BigOperators +open scoped Topology + +/-! +#### Definition of Akra-Bazzi recurrences + +This section defines the predicate `AkraBazziRecurrence T g a b r` which states that `T` +satisfies the recurrence +`T(n) = ∑_{i=0}^{k-1} a_i T(r_i(n)) + g(n)` +with appropriate conditions on the various parameters. +-/ + +variable {α : Type*} [Fintype α] [Nonempty α] + +/-- An Akra-Bazzi recurrence is a function that satisfies the recurrence +`T n = (∑ i, a i * T (r i n)) + g n`. -/ +structure AkraBazziRecurrence (T : ℕ → ℝ) (g : ℝ → ℝ) (a : α → ℝ) + (b : α → ℝ) (r : α → ℕ → ℕ) where + /-- Point below which the recurrence is in the base case -/ + n₀ : ℕ + /-- `n₀` is always `> 0` -/ + n₀_gt_zero : 0 < n₀ + /-- The `a`'s are nonzero -/ + a_pos : ∀ i, 0 < a i + /-- The `b`'s are nonzero -/ + b_pos : ∀ i, 0 < b i + /-- The b's are less than 1 -/ + b_lt_one : ∀ i, b i < 1 + /-- `g` is nonnegative -/ + g_nonneg : ∀ x ≥ 0, 0 ≤ g x + /-- `g` grows polynomially -/ + g_grows_poly : AkraBazziRecurrence.GrowsPolynomially g + /-- The actual recurrence -/ + h_rec (n : ℕ) (hn₀ : n₀ ≤ n) : T n = (∑ i, a i * T (r i n)) + g n + /-- Base case: `T(n) > 0` whenever `n < n₀` -/ + T_gt_zero' (n : ℕ) (hn : n < n₀) : 0 < T n + /-- The `r`'s always reduce `n` -/ + r_lt_n : ∀ i n, n₀ ≤ n → r i n < n + /-- The `r`'s approximate the `b`'s -/ + dist_r_b : ∀ i, (fun n => (r i n : ℝ) - b i * n) =o[atTop] fun n => n / (log n) ^ 2 + +namespace AkraBazziRecurrence + +variable {T : ℕ → ℝ} {g : ℝ → ℝ} {a b : α → ℝ} {r : α → ℕ → ℕ} + (R : AkraBazziRecurrence T g a b r) + +/-- Smallest `b i` -/ +noncomputable def min_bi (b : α → ℝ) : α := + Classical.choose <| Finite.exists_min b + +/-- Largest `b i` -/ +noncomputable def max_bi (b : α → ℝ) : α := + Classical.choose <| Finite.exists_max b + +@[aesop safe apply] +lemma min_bi_le : ∀ i, b (min_bi b) ≤ b i := + Classical.choose_spec (Finite.exists_min b) + +@[aesop safe apply] +lemma max_bi_le : ∀ i, b i ≤ b (max_bi b) := + Classical.choose_spec (Finite.exists_max b) + +lemma dist_r_b' : ∀ᶠ n in atTop, ∀ i, ‖(r i n : ℝ) - b i * n‖ ≤ n / log n ^ 2 := by + rw [Filter.eventually_all] + intro i + simpa using IsLittleO.eventuallyLE (R.dist_r_b i) + +lemma isLittleO_self_div_log_id : (fun (n:ℕ) => n / log n ^ 2) =o[atTop] (fun (n:ℕ) => (n:ℝ)) := by + calc (fun (n:ℕ) => (n:ℝ) / log n ^ 2) = fun (n:ℕ) => (n:ℝ) * ((log n) ^ 2)⁻¹ := by + simp_rw [div_eq_mul_inv] + _ =o[atTop] fun (n:ℕ) => (n:ℝ) * 1⁻¹ := by + refine 
IsBigO.mul_isLittleO (isBigO_refl _ _) ?_ + refine IsLittleO.inv_rev ?main ?zero + case zero => simp + case main => calc + _ = (fun (_:ℕ) => ((1:ℝ) ^ 2)) := by simp + _ =o[atTop] (fun (n:ℕ) => (log n)^2) := + IsLittleO.pow (IsLittleO.nat_cast_atTop + <| isLittleO_const_log_atTop) (by norm_num) + _ = (fun (n:ℕ) => (n:ℝ)) := by ext; simp + +lemma eventually_b_le_r : ∀ᶠ (n:ℕ) in atTop, ∀ i, (b i : ℝ) * n - (n / log n ^ 2) ≤ r i n := by + filter_upwards [R.dist_r_b'] with n hn + intro i + have h₁ : 0 ≤ b i := le_of_lt <| R.b_pos _ + rw [sub_le_iff_le_add, add_comm, ← sub_le_iff_le_add] + calc (b i : ℝ) * n - r i n = ‖b i * n‖ - ‖(r i n : ℝ)‖ := by + simp only [norm_mul, IsROrC.norm_natCast, sub_left_inj, + Nat.cast_eq_zero, Real.norm_of_nonneg h₁] + _ ≤ ‖(b i * n : ℝ) - r i n‖ := norm_sub_norm_le _ _ + _ = ‖(r i n : ℝ) - b i * n‖ := norm_sub_rev _ _ + _ ≤ n / log n ^ 2 := hn i + +lemma eventually_r_le_b : ∀ᶠ (n:ℕ) in atTop, ∀ i, r i n ≤ (b i : ℝ) * n + (n / log n ^ 2) := by + filter_upwards [R.dist_r_b'] with n hn + intro i + calc r i n = b i * n + (r i n - b i * n) := by ring + _ ≤ b i * n + ‖r i n - b i * n‖ := by gcongr; exact Real.le_norm_self _ + _ ≤ b i * n + n / log n ^ 2 := by gcongr; exact hn i + +lemma eventually_r_lt_n : ∀ᶠ (n:ℕ) in atTop, ∀ i, r i n < n := by + filter_upwards [eventually_ge_atTop R.n₀] with n hn + exact fun i => R.r_lt_n i n hn + +lemma eventually_bi_mul_le_r : ∀ᶠ (n:ℕ) in atTop, ∀ i, (b (min_bi b) / 2) * n ≤ r i n := by + have gt_zero : 0 < b (min_bi b) := R.b_pos (min_bi b) + have hlo := isLittleO_self_div_log_id + rw [Asymptotics.isLittleO_iff] at hlo + have hlo' := hlo (by positivity : 0 < b (min_bi b) / 2) + filter_upwards [hlo', R.eventually_b_le_r] with n hn hn' + intro i + simp only [Real.norm_of_nonneg (by positivity : 0 ≤ (n : ℝ))] at hn + calc b (min_bi b) / 2 * n = b (min_bi b) * n - b (min_bi b) / 2 * n := by ring + _ ≤ b (min_bi b) * n - ‖n / log n ^ 2‖ := by gcongr + _ ≤ b i * n - ‖n / log n ^ 2‖ := by gcongr; aesop + _ = b i * n - n / log n ^ 2 := by + congr + exact Real.norm_of_nonneg <| by positivity + _ ≤ r i n := hn' i + +lemma bi_min_div_two_lt_one : b (min_bi b) / 2 < 1 := by + have gt_zero : 0 < b (min_bi b) := R.b_pos (min_bi b) + calc b (min_bi b) / 2 < b (min_bi b) := by aesop (add safe apply div_two_lt_of_pos) + _ < 1 := R.b_lt_one _ + +lemma bi_min_div_two_pos : 0 < b (min_bi b) / 2 := div_pos (R.b_pos _) (by norm_num) + +lemma exists_eventually_const_mul_le_r : + ∃ c ∈ Set.Ioo (0:ℝ) 1, ∀ᶠ (n:ℕ) in atTop, ∀ i, c * n ≤ r i n := by + have gt_zero : 0 < b (min_bi b) := R.b_pos (min_bi b) + exact ⟨b (min_bi b) / 2, ⟨⟨by positivity, R.bi_min_div_two_lt_one⟩, R.eventually_bi_mul_le_r⟩⟩ + +lemma eventually_r_ge (C : ℝ) : ∀ᶠ (n:ℕ) in atTop, ∀ i, C ≤ r i n := by + obtain ⟨c, hc_mem, hc⟩ := R.exists_eventually_const_mul_le_r + filter_upwards [eventually_ge_atTop ⌈C / c⌉₊, hc] with n hn₁ hn₂ + have h₁ := hc_mem.1 + intro i + calc C = c * (C / c) := by + rw [← mul_div_assoc] + exact (mul_div_cancel_left _ (by positivity)).symm + _ ≤ c * ⌈C / c⌉₊ := by gcongr; simp [Nat.le_ceil] + _ ≤ c * n := by gcongr + _ ≤ r i n := hn₂ i + +lemma tendsto_atTop_r (i : α) : Tendsto (r i) atTop atTop := by + rw [tendsto_atTop] + intro b + have := R.eventually_r_ge b + rw [Filter.eventually_all] at this + exact_mod_cast this i + +lemma tendsto_atTop_r_real (i : α) : Tendsto (fun n => (r i n : ℝ)) atTop atTop := + Tendsto.comp tendsto_nat_cast_atTop_atTop (R.tendsto_atTop_r i) + +lemma exists_eventually_r_le_const_mul : + ∃ c ∈ Set.Ioo (0:ℝ) 1, ∀ᶠ (n:ℕ) in atTop, ∀ i, 
r i n ≤ c * n := by + let c := b (max_bi b) + (1 - b (max_bi b)) / 2 + have h_max_bi_pos : 0 < b (max_bi b) := R.b_pos _ + have h_max_bi_lt_one : 0 < 1 - b (max_bi b) := by + have : b (max_bi b) < 1 := R.b_lt_one _ + linarith + have hc_pos : 0 < c := by positivity + have h₁ : 0 < (1 - b (max_bi b)) / 2 := by positivity + have hc_lt_one : c < 1 := + calc b (max_bi b) + (1 - b (max_bi b)) / 2 = b (max_bi b) * (1 / 2) + 1 / 2 := by ring + _ < 1 * (1 / 2) + 1 / 2 := by + gcongr + exact R.b_lt_one _ + _ = 1 := by norm_num + refine ⟨c, ⟨hc_pos, hc_lt_one⟩, ?_⟩ + have hlo := isLittleO_self_div_log_id + rw [Asymptotics.isLittleO_iff] at hlo + have hlo' := hlo h₁ + filter_upwards [hlo', R.eventually_r_le_b] with n hn hn' + intro i + rw [Real.norm_of_nonneg (by positivity)] at hn + simp only [Real.norm_of_nonneg (by positivity : 0 ≤ (n : ℝ))] at hn + calc r i n ≤ b i * n + n / log n ^ 2 := by exact hn' i + _ ≤ b i * n + (1 - b (max_bi b)) / 2 * n := by gcongr + _ = (b i + (1 - b (max_bi b)) / 2) * n := by ring + _ ≤ (b (max_bi b) + (1 - b (max_bi b)) / 2) * n := by gcongr; exact max_bi_le _ + +lemma eventually_r_pos : ∀ᶠ (n:ℕ) in atTop, ∀ i, 0 < r i n := by + rw [Filter.eventually_all] + exact fun i => (R.tendsto_atTop_r i).eventually_gt_atTop 0 + +lemma eventually_log_b_mul_pos : ∀ᶠ (n:ℕ) in atTop, ∀ i, 0 < log (b i * n) := by + rw [Filter.eventually_all] + intro i + have h : Tendsto (fun (n:ℕ) => log (b i * n)) atTop atTop := + Tendsto.comp tendsto_log_atTop + <| Tendsto.const_mul_atTop (b_pos R i) tendsto_nat_cast_atTop_atTop + exact h.eventually_gt_atTop 0 + +@[aesop safe apply] lemma T_pos (n : ℕ) : 0 < T n := by + induction n using Nat.strongInductionOn + case ind n h_ind => + rcases lt_or_le n R.n₀ with hn|hn + case inl => exact R.T_gt_zero' n hn -- n < R.n₀ + case inr => -- R.n₀ ≤ n + rw [R.h_rec n hn] + have := R.g_nonneg + refine add_pos_of_pos_of_nonneg (Finset.sum_pos ?sum_elems univ_nonempty) (by aesop) + exact fun i _ => mul_pos (R.a_pos i) <| h_ind _ (R.r_lt_n i _ hn) + +@[aesop safe apply] +lemma T_nonneg (n : ℕ) : 0 ≤ T n := le_of_lt <| R.T_pos n + +/-! +#### Smoothing function + +We define `ε` as the "smoothing function" `fun n => 1 / log n`, which will be used in the form of a +factor of `1 ± ε n` needed to make the induction step go through. + +This is its own definition to make it easier to switch to a different smoothing function. +For example, choosing `1 / log n ^ δ` for a suitable choice of `δ` leads to a slightly tighter +theorem at the price of a more complicated proof. + +This part of the file then proves several properties of this function that will be needed later in +the proof. +-/ + +/-- The "smoothing function" is defined as `1 / log n`. This is defined as an `ℝ → ℝ` function +as opposed to `ℕ → ℝ` since this is more convenient for the proof, where we need to e.g. take +derivatives. 
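+For instance, `ε (exp 1) = 1` and `ε (exp 2) = 1 / 2`; more generally `ε x` tends to `0` as
+`x → ∞`, so the correction factors `1 - ε x` and `1 + ε x` both tend to `1`.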
-/ +noncomputable def smoothingFn (n : ℝ) : ℝ := 1 / log n + +local notation "ε" => smoothingFn + +lemma one_add_smoothingFn_le_two {x : ℝ} (hx : exp 1 ≤ x) : 1 + ε x ≤ 2 := by + simp only [smoothingFn, ← one_add_one_eq_two] + gcongr + have : 1 < x := by + calc 1 = exp 0 := by simp + _ < exp 1 := by simp + _ ≤ x := hx + rw [div_le_one (log_pos this)] + calc 1 = log (exp 1) := by simp + _ ≤ log x := log_le_log' (exp_pos _) hx + +lemma isLittleO_smoothingFn_one : ε =o[atTop] (fun _ => (1:ℝ)) := by + unfold smoothingFn + refine isLittleO_of_tendsto (fun _ h => False.elim <| one_ne_zero h) ?_ + simp only [one_div, div_one] + exact Tendsto.inv_tendsto_atTop Real.tendsto_log_atTop + +lemma isEquivalent_one_add_smoothingFn_one : (fun x => 1 + ε x) ~[atTop] (fun _ => (1:ℝ)) := + IsEquivalent.add_isLittleO IsEquivalent.refl isLittleO_smoothingFn_one + +lemma isEquivalent_one_sub_smoothingFn_one : (fun x => 1 - ε x) ~[atTop] (fun _ => (1:ℝ)) := + IsEquivalent.sub_isLittleO IsEquivalent.refl isLittleO_smoothingFn_one + +lemma growsPolynomially_one_sub_smoothingFn : GrowsPolynomially fun x => 1 - ε x := + GrowsPolynomially.of_isEquivalent_const isEquivalent_one_sub_smoothingFn_one + +lemma growsPolynomially_one_add_smoothingFn : GrowsPolynomially fun x => 1 + ε x := + GrowsPolynomially.of_isEquivalent_const isEquivalent_one_add_smoothingFn_one + +lemma eventually_one_sub_smoothingFn_gt_const_real (c : ℝ) (hc : c < 1) : + ∀ᶠ (x:ℝ) in atTop, c < 1 - ε x := by + have h₁ : Tendsto (fun x => 1 - ε x) atTop (𝓝 1) := by + rw [← isEquivalent_const_iff_tendsto one_ne_zero] + exact isEquivalent_one_sub_smoothingFn_one + rw [tendsto_order] at h₁ + exact h₁.1 c hc + +lemma eventually_one_sub_smoothingFn_gt_const (c : ℝ) (hc : c < 1) : + ∀ᶠ (n:ℕ) in atTop, c < 1 - ε n := + Eventually.nat_cast_atTop (p := fun n => c < 1 - ε n) + <| eventually_one_sub_smoothingFn_gt_const_real c hc + +lemma eventually_one_sub_smoothingFn_pos_real : ∀ᶠ (x:ℝ) in atTop, 0 < 1 - ε x := + eventually_one_sub_smoothingFn_gt_const_real 0 zero_lt_one + +lemma eventually_one_sub_smoothingFn_pos : ∀ᶠ (n:ℕ) in atTop, 0 < 1 - ε n := + (eventually_one_sub_smoothingFn_pos_real).nat_cast_atTop + +lemma eventually_one_sub_smoothingFn_nonneg : ∀ᶠ (n:ℕ) in atTop, 0 ≤ 1 - ε n := by + filter_upwards [eventually_one_sub_smoothingFn_pos] with n hn; exact le_of_lt hn + +lemma eventually_one_sub_smoothingFn_r_pos : ∀ᶠ (n:ℕ) in atTop, ∀ i, 0 < 1 - ε (r i n) := by + rw [Filter.eventually_all] + exact fun i => (R.tendsto_atTop_r_real i).eventually eventually_one_sub_smoothingFn_pos_real + +@[aesop safe apply] +lemma differentiableAt_smoothingFn {x : ℝ} (hx : 1 < x) : DifferentiableAt ℝ ε x := by + have : log x ≠ 0 := Real.log_ne_zero_of_pos_of_ne_one (by positivity) (ne_of_gt hx) + show DifferentiableAt ℝ (fun z => 1 / log z) x + simp_rw [one_div] + exact DifferentiableAt.inv (differentiableAt_log (by positivity)) this + +@[aesop safe apply] +lemma differentiableAt_one_sub_smoothingFn {x : ℝ} (hx : 1 < x) : + DifferentiableAt ℝ (fun z => 1 - ε z) x := + DifferentiableAt.sub (differentiableAt_const _) <| differentiableAt_smoothingFn hx + +lemma differentiableOn_one_sub_smoothingFn : DifferentiableOn ℝ (fun z => 1 - ε z) (Set.Ioi 1) := + fun _ hx => (differentiableAt_one_sub_smoothingFn hx).differentiableWithinAt + +@[aesop safe apply] +lemma differentiableAt_one_add_smoothingFn {x : ℝ} (hx : 1 < x) : + DifferentiableAt ℝ (fun z => 1 + ε z) x := + DifferentiableAt.add (differentiableAt_const _) <| differentiableAt_smoothingFn hx + +lemma 
differentiableOn_one_add_smoothingFn : DifferentiableOn ℝ (fun z => 1 + ε z) (Set.Ioi 1) := + fun _ hx => (differentiableAt_one_add_smoothingFn hx).differentiableWithinAt + +lemma deriv_smoothingFn {x : ℝ} (hx : 1 < x) : deriv ε x = -x⁻¹ / (log x ^ 2) := by + have : log x ≠ 0 := Real.log_ne_zero_of_pos_of_ne_one (by positivity) (ne_of_gt hx) + show deriv (fun z => 1 / log z) x = -x⁻¹ / (log x ^ 2) + rw [deriv_div] <;> aesop + +lemma isLittleO_deriv_smoothingFn : deriv ε =o[atTop] fun x => x⁻¹ := calc + deriv ε =ᶠ[atTop] fun x => -x⁻¹ / (log x ^ 2) := by + filter_upwards [eventually_gt_atTop 1] with x hx + rw [deriv_smoothingFn hx] + _ = fun x => (-x * log x ^ 2)⁻¹ := by + simp_rw [neg_div, div_eq_mul_inv, ← mul_inv, neg_inv, neg_mul] + _ =o[atTop] fun x => (x * 1)⁻¹ := by + refine IsLittleO.inv_rev ?_ ?_ + · refine IsBigO.mul_isLittleO + (by rw [isBigO_neg_right]; aesop (add safe isBigO_refl)) ?_ + rw [isLittleO_one_left_iff] + exact Tendsto.comp tendsto_norm_atTop_atTop + <| Tendsto.comp (tendsto_pow_atTop (by norm_num)) tendsto_log_atTop + · exact Filter.eventually_of_forall (fun x hx => by rw [mul_one] at hx; simp [hx]) + _ = fun x => x⁻¹ := by simp + +lemma eventually_deriv_one_sub_smoothingFn : + deriv (fun x => 1 - ε x) =ᶠ[atTop] fun x => x⁻¹ / (log x ^ 2) := calc + deriv (fun x => 1 - ε x) =ᶠ[atTop] -(deriv ε) := by + filter_upwards [eventually_gt_atTop 1] with x hx; rw [deriv_sub] <;> aesop + _ =ᶠ[atTop] fun x => x⁻¹ / (log x ^ 2) := by + filter_upwards [eventually_gt_atTop 1] with x hx + simp [deriv_smoothingFn hx, neg_div] + +lemma eventually_deriv_one_add_smoothingFn : + deriv (fun x => 1 + ε x) =ᶠ[atTop] fun x => -x⁻¹ / (log x ^ 2) := calc + deriv (fun x => 1 + ε x) =ᶠ[atTop] deriv ε := by + filter_upwards [eventually_gt_atTop 1] with x hx; rw [deriv_add] <;> aesop + _ =ᶠ[atTop] fun x => -x⁻¹ / (log x ^ 2) := by + filter_upwards [eventually_gt_atTop 1] with x hx + simp [deriv_smoothingFn hx] + +lemma isLittleO_deriv_one_sub_smoothingFn : + deriv (fun x => 1 - ε x) =o[atTop] fun (x:ℝ) => x⁻¹ := calc + deriv (fun x => 1 - ε x) =ᶠ[atTop] fun z => -(deriv ε z) := by + filter_upwards [eventually_gt_atTop 1] with x hx; rw [deriv_sub] <;> aesop + _ =o[atTop] fun x => x⁻¹ := by rw [isLittleO_neg_left]; exact isLittleO_deriv_smoothingFn + +lemma isLittleO_deriv_one_add_smoothingFn : + deriv (fun x => 1 + ε x) =o[atTop] fun (x:ℝ) => x⁻¹ := calc + deriv (fun x => 1 + ε x) =ᶠ[atTop] fun z => deriv ε z := by + filter_upwards [eventually_gt_atTop 1] with x hx; rw [deriv_add] <;> aesop + _ =o[atTop] fun x => x⁻¹ := isLittleO_deriv_smoothingFn + +lemma eventually_one_add_smoothingFn_pos : ∀ᶠ (n:ℕ) in atTop, 0 < 1 + ε n := by + have h₁ := isLittleO_smoothingFn_one + rw [isLittleO_iff] at h₁ + refine Eventually.nat_cast_atTop (p := fun n => 0 < 1 + ε n) ?_ + filter_upwards [h₁ (by norm_num : (0:ℝ) < 1/2), eventually_gt_atTop 1] with x _ hx' + have : 0 < log x := Real.log_pos hx' + show 0 < 1 + 1 / log x + positivity + +lemma eventually_one_add_smoothingFn_r_pos : ∀ᶠ (n:ℕ) in atTop, ∀ i, 0 < 1 + ε (r i n) := by + rw [Filter.eventually_all] + exact fun i => (R.tendsto_atTop_r i).eventually (f := r i) eventually_one_add_smoothingFn_pos + +lemma eventually_one_add_smoothingFn_nonneg : ∀ᶠ (n:ℕ) in atTop, 0 ≤ 1 + ε n := by + filter_upwards [eventually_one_add_smoothingFn_pos] with n hn; exact le_of_lt hn + +lemma strictAntiOn_smoothingFn : StrictAntiOn ε (Set.Ioi 1) := by + show StrictAntiOn (fun x => 1 / log x) (Set.Ioi 1) + simp_rw [one_div] + refine StrictAntiOn.comp_strictMonoOn inv_strictAntiOn 
?log fun _ hx => log_pos hx + refine StrictMonoOn.mono strictMonoOn_log (fun x (hx : 1 < x) => ?_) + show 0 < x + linarith + +lemma strictMonoOn_one_sub_smoothingFn : StrictMonoOn (fun (x:ℝ) => (1:ℝ) - ε x) (Set.Ioi 1) := by + simp_rw [sub_eq_add_neg] + exact StrictMonoOn.const_add (StrictAntiOn.neg <| strictAntiOn_smoothingFn) 1 + +lemma strictAntiOn_one_add_smoothingFn : StrictAntiOn (fun (x:ℝ) => (1:ℝ) + ε x) (Set.Ioi 1) := + StrictAntiOn.const_add strictAntiOn_smoothingFn 1 + +lemma isEquivalent_smoothingFn_sub_self (i : α) : + (fun (n:ℕ) => ε (b i * n) - ε n) ~[atTop] fun n => -log (b i) / (log n)^2 := by + calc (fun (n:ℕ) => 1 / log (b i * n) - 1 / log n) + =ᶠ[atTop] fun (n:ℕ) => (log n - log (b i * n)) / (log (b i * n) * log n) := by + filter_upwards [eventually_gt_atTop 1, R.eventually_log_b_mul_pos] with n hn hn' + have h_log_pos : 0 < log n := Real.log_pos <| by aesop + simp only [one_div] + rw [inv_sub_inv (by have := hn' i; positivity) (by aesop)] + _ =ᶠ[atTop] (fun (n:ℕ) => (log n - log (b i) - log n) / ((log (b i) + log n) * log n)) := by + filter_upwards [eventually_ne_atTop 0] with n hn + have : 0 < b i := R.b_pos i + rw [log_mul (by positivity) (by aesop), sub_add_eq_sub_sub] + _ = (fun (n:ℕ) => -log (b i) / ((log (b i) + log n) * log n)) := by ext; congr; ring + _ ~[atTop] (fun (n:ℕ) => -log (b i) / (log n * log n)) := by + refine IsEquivalent.div (IsEquivalent.refl) <| IsEquivalent.mul ?_ (IsEquivalent.refl) + have : (fun (n:ℕ) => log (b i) + log n) = fun (n:ℕ) => log n + log (b i) := by + ext; simp [add_comm] + rw [this] + exact IsEquivalent.add_isLittleO IsEquivalent.refl + <| IsLittleO.nat_cast_atTop (f := fun (_:ℝ) => log (b i)) + isLittleO_const_log_atTop + _ = (fun (n:ℕ) => -log (b i) / (log n)^2) := by ext; congr 1; rw [← pow_two] + +lemma isTheta_smoothingFn_sub_self (i : α) : + (fun (n : ℕ) => ε (b i * n) - ε n) =Θ[atTop] fun n => 1 / (log n)^2 := by + calc (fun (n : ℕ) => ε (b i * n) - ε n) =Θ[atTop] fun n => (-log (b i)) / (log n)^2 := by + exact (R.isEquivalent_smoothingFn_sub_self i).isTheta + _ = fun (n:ℕ) => (-log (b i)) * 1 / (log n)^2 := by simp only [mul_one] + _ = fun (n:ℕ) => -log (b i) * (1 / (log n)^2) := by simp_rw [← mul_div_assoc] + _ =Θ[atTop] fun (n:ℕ) => 1 / (log n)^2 := by + have : -log (b i) ≠ 0 := by + rw [neg_ne_zero] + exact Real.log_ne_zero_of_pos_of_ne_one + (R.b_pos i) (ne_of_lt <| R.b_lt_one i) + rw [← isTheta_const_mul_right this] + + +/-! +#### Akra-Bazzi exponent `p` + +Every Akra-Bazzi recurrence has an associated exponent, denoted by `p : ℝ`, such that +`∑ a_i b_i^p = 1`. This section shows the existence and uniqueness of this exponent `p` for any +`R : AkraBazziRecurrence`, and defines `R.asympBound` to be the asymptotic bound satisfied by `R`, +namely `n^p (1 + ∑_{u < n} g(u) / u^(p+1))`. 
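+
+As an illustration (this worked example is not part of the formal development below), for a
+mergesort-style recurrence `T n = 2 * T (n / 2) + n` there are two terms with `a_i = 1` and
+`b_i = 1 / 2`, so `∑ a_i b_i^p = 2 * (1 / 2)^p`, which equals `1` exactly when `p = 1`. For a
+Strassen-style recurrence with seven subproblems of half the size, `7 * (1 / 2)^p = 1` gives
+`p = log 7 / log 2 ≈ 2.81`.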
-/ + +@[continuity] +lemma continuous_sumCoeffsExp : Continuous (fun (p : ℝ) => ∑ i, a i * (b i) ^ p) := by + refine continuous_finset_sum Finset.univ <| fun i _ => Continuous.mul (by continuity) ?_ + exact Continuous.rpow continuous_const continuous_id (fun x => Or.inl (ne_of_gt (R.b_pos i))) + +lemma strictAnti_sumCoeffsExp : StrictAnti (fun (p : ℝ) => ∑ i, a i * (b i) ^ p) := by + rw [← Finset.sum_fn] + refine Finset.sum_induction_nonempty _ _ (fun _ _ => StrictAnti.add) univ_nonempty ?terms + refine fun i _ => StrictAnti.const_mul ?_ (R.a_pos i) + exact Real.strictAnti_rpow_of_base_lt_one (R.b_pos i) (R.b_lt_one i) + +lemma tendsto_zero_sumCoeffsExp : Tendsto (fun (p : ℝ) => ∑ i, a i * (b i) ^ p) atTop (𝓝 0) := by + have h₁ : Finset.univ.sum (fun _ : α => (0:ℝ)) = 0 := by simp + rw [← h₁] + refine tendsto_finset_sum (univ : Finset α) (fun i _ => ?_) + have h₂ : (0:ℝ) = (a i : ℝ) * 0 := by simp + show Tendsto (fun z => (a i : ℝ) * b i ^ z) atTop (𝓝 0) + rw [h₂] + refine Tendsto.mul (by simp) <| tendsto_rpow_atTop_of_base_lt_one _ ?_ (R.b_lt_one i) + have := R.b_pos i + linarith + +lemma tendsto_atTop_sumCoeffsExp : Tendsto (fun (p : ℝ) => ∑ i, a i * (b i) ^ p) atBot atTop := by + have h₁ : Tendsto (fun p => (a (max_bi b) : ℝ) * b (max_bi b) ^ p) atBot atTop := + Tendsto.mul_atTop (R.a_pos (max_bi b)) (by simp) + <| tendsto_rpow_atBot_of_base_lt_one _ + (by have := R.b_pos (max_bi b); linarith) (R.b_lt_one _) + refine tendsto_atTop_mono (fun p => ?_) h₁ + let f := fun i => (a i : ℝ) * b i ^ p + show f (max_bi b) ≤ Finset.univ.sum f + refine Finset.single_le_sum (fun i _ => ?_) (mem_univ _) + have h₁ : 0 < a i := R.a_pos i + have h₂ : 0 < b i := R.b_pos i + positivity + +lemma one_mem_range_sumCoeffsExp : 1 ∈ Set.range (fun (p : ℝ) => ∑ i, a i * (b i) ^ p) := by + refine mem_range_of_exists_le_of_exists_ge R.continuous_sumCoeffsExp ?le_one ?ge_one + case le_one => + exact Eventually.exists <| eventually_le_of_tendsto_lt zero_lt_one R.tendsto_zero_sumCoeffsExp + case ge_one => + exact Eventually.exists <| R.tendsto_atTop_sumCoeffsExp.eventually_ge_atTop _ + +/-- The function x ↦ ∑ a_i b_i^x is injective. This implies the uniqueness of `p`. -/ +lemma injective_sumCoeffsExp : Function.Injective (fun (p : ℝ) => ∑ i, a i * (b i) ^ p) := + R.strictAnti_sumCoeffsExp.injective + +variable (a) (b) +/-- The exponent `p` associated with a particular Akra-Bazzi recurrence. -/ +noncomputable irreducible_def p : ℝ := Function.invFun (fun (p : ℝ) => ∑ i, a i * (b i) ^ p) 1 + +variable {a} {b} + +@[simp] +lemma sumCoeffsExp_p_eq_one : ∑ i, a i * (b i) ^ p a b = 1 := by + simp only [p] + exact Function.invFun_eq (by rw [← Set.mem_range]; exact R.one_mem_range_sumCoeffsExp) + +/-! +#### The sum transform + +This section defines the "sum transform" of a function `g` as +`∑ u in Finset.Ico n₀ n, g u / u^(p+1)`, +and uses it to define `asympBound` as the bound satisfied by an Akra-Bazzi recurrence. + +Several properties of the sum transform are then proven. +-/ + +/-- The transformation which turns a function `g` into +`n^p * ∑ u in Finset.Ico n₀ n, g u / u^(p+1)`. -/ +noncomputable def sumTransform (p : ℝ) (g : ℝ → ℝ) (n₀ n : ℕ) := + n^p * ∑ u in Finset.Ico n₀ n, g u / u^(p + 1) + +lemma sumTransform_def {p : ℝ} {g : ℝ → ℝ} {n₀ n : ℕ} : + sumTransform p g n₀ n = n^p * ∑ u in Finset.Ico n₀ n, g u / u^(p + 1) := rfl + + +variable (g) (a) (b) +/-- The asymptotic bound satisfied by an Akra-Bazzi recurrence, namely +`n^p (1 + ∑_{u < n} g(u) / u^(p+1))`. 
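+For example, with `p = 1` and `g u = u` (as in a mergesort-style recurrence), this is
+`n * (1 + ∑_{u < n} 1 / u)`, which grows like `n * log n`.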
-/ +noncomputable def asympBound (n : ℕ) : ℝ := n ^ p a b + sumTransform (p a b) g 0 n + +lemma asympBound_def {n : ℕ} : asympBound g a b n = n ^ p a b + sumTransform (p a b) g 0 n := rfl + +variable {g} {a} {b} + +lemma asympBound_def' {n : ℕ} : + asympBound g a b n = n ^ p a b * (1 + (∑ u in range n, g u / u ^ (p a b + 1))) := by + simp [asympBound_def, sumTransform, mul_add, mul_one, Finset.sum_Ico_eq_sum_range] + +lemma asympBound_pos (n : ℕ) (hn : 0 < n) : 0 < asympBound g a b n := by + calc 0 < (n:ℝ) ^ p a b * (1 + 0) := by aesop (add safe Real.rpow_pos_of_pos) + _ ≤ asympBound g a b n := by + simp only [asympBound_def'] + gcongr n^p a b * (1 + ?_) + have := R.g_nonneg + aesop (add safe Real.rpow_nonneg_of_nonneg, + safe div_nonneg, + safe Finset.sum_nonneg) + +lemma eventually_asympBound_pos : ∀ᶠ (n:ℕ) in atTop, 0 < asympBound g a b n := by + filter_upwards [eventually_gt_atTop 0] with n hn + exact R.asympBound_pos n hn + +lemma eventually_asympBound_r_pos : ∀ᶠ (n:ℕ) in atTop, ∀ i, 0 < asympBound g a b (r i n) := by + rw [Filter.eventually_all] + exact fun i => (R.tendsto_atTop_r i).eventually R.eventually_asympBound_pos + +lemma eventually_atTop_sumTransform_le : + ∃ c > 0, ∀ᶠ (n:ℕ) in atTop, ∀ i, sumTransform (p a b) g (r i n) n ≤ c * g n := by + obtain ⟨c₁, hc₁_mem, hc₁⟩ := R.exists_eventually_const_mul_le_r + obtain ⟨c₂, hc₂_mem, hc₂⟩ := R.g_grows_poly.eventually_atTop_le_nat hc₁_mem + have hc₁_pos : 0 < c₁ := hc₁_mem.1 + refine ⟨max c₂ (c₂ / c₁ ^ (p a b + 1)), by positivity, ?_⟩ + filter_upwards [hc₁, hc₂, R.eventually_r_pos, R.eventually_r_lt_n, eventually_gt_atTop 0] + with n hn₁ hn₂ hrpos hr_lt_n hn_pos + intro i + have hrpos_i := hrpos i + have g_nonneg : 0 ≤ g n := R.g_nonneg n (by positivity) + rcases le_or_lt 0 (p a b + 1) with hp|hp + case h.inl => -- 0 ≤ p a b + 1 + calc sumTransform (p a b) g (r i n) n + = n ^ (p a b) * (∑ u in Finset.Ico (r i n) n, g u / u ^ ((p a b) + 1)) := by rfl + _ ≤ n ^ (p a b) * (∑ u in Finset.Ico (r i n) n, c₂ * g n / u ^ ((p a b) + 1)) := by + gcongr with u hu + rw [Finset.mem_Ico] at hu + have hu' : u ∈ Set.Icc (r i n) n := ⟨hu.1, by linarith⟩ + refine hn₂ u ?_ + rw [Set.mem_Icc] + refine ⟨?_, by norm_cast; linarith⟩ + calc c₁ * n ≤ r i n := by exact hn₁ i + _ ≤ u := by exact_mod_cast hu'.1 + _ ≤ n ^ (p a b) * (∑ _u in Finset.Ico (r i n) n, c₂ * g n / (r i n) ^ ((p a b) + 1)) := by + gcongr with u hu; rw [Finset.mem_Ico] at hu; exact hu.1 + _ ≤ n ^ (p a b) * (Finset.Ico (r i n) n).card • (c₂ * g n / (r i n) ^ ((p a b) + 1)) := by + gcongr; exact Finset.sum_le_card_nsmul _ _ _ (fun x _ => by rfl) + _ = n ^ (p a b) * (Finset.Ico (r i n) n).card * (c₂ * g n / (r i n) ^ ((p a b) + 1)) := by + rw [nsmul_eq_mul, mul_assoc] + _ = n ^ (p a b) * (n - r i n) * (c₂ * g n / (r i n) ^ ((p a b) + 1)) := by + congr; rw [Nat.card_Ico, Nat.cast_sub (le_of_lt <| hr_lt_n i)] + _ ≤ n ^ (p a b) * n * (c₂ * g n / (r i n) ^ ((p a b) + 1)) := by + gcongr; simp only [tsub_le_iff_right, le_add_iff_nonneg_right, Nat.cast_nonneg] + _ ≤ n ^ (p a b) * n * (c₂ * g n / (c₁ * n) ^ ((p a b) + 1)) := by + gcongr; exact hn₁ i + _ = c₂ * g n * n ^ ((p a b) + 1) / (c₁ * n) ^ ((p a b) + 1) := by + rw [← Real.rpow_add_one (by positivity) (p a b)]; ring + _ = c₂ * g n * n ^ ((p a b) + 1) / (n ^ ((p a b) + 1) * c₁ ^ ((p a b) + 1)) := by + rw [mul_comm c₁, Real.mul_rpow (by positivity) (by positivity)] + _ = c₂ * g n * (n ^ ((p a b) + 1) / (n ^ ((p a b) + 1))) / c₁ ^ ((p a b) + 1) := by ring + _ = c₂ * g n / c₁ ^ ((p a b) + 1) := by rw [div_self (by positivity), mul_one] + _ = (c₂ / 
c₁ ^ ((p a b) + 1)) * g n := by ring + _ ≤ max c₂ (c₂ / c₁ ^ ((p a b) + 1)) * g n := by gcongr; exact le_max_right _ _ + case h.inr => -- p a b + 1 < 0 + calc sumTransform (p a b) g (r i n) n + = n ^ (p a b) * (∑ u in Finset.Ico (r i n) n, g u / u ^ ((p a b) + 1)) := by rfl + _ ≤ n ^ (p a b) * (∑ u in Finset.Ico (r i n) n, c₂ * g n / u ^ ((p a b) + 1)) := by + gcongr with u hu + rw [Finset.mem_Ico] at hu + have hu' : u ∈ Set.Icc (r i n) n := ⟨hu.1, by linarith⟩ + refine hn₂ u ?_ + rw [Set.mem_Icc] + refine ⟨?_, by norm_cast; linarith⟩ + calc c₁ * n ≤ r i n := by exact hn₁ i + _ ≤ u := by exact_mod_cast hu'.1 + _ ≤ n ^ (p a b) * (∑ _u in Finset.Ico (r i n) n, c₂ * g n / n ^ ((p a b) + 1)) := by + gcongr n ^ (p a b) * (Finset.Ico (r i n) n).sum (fun _ => c₂ * g n / ?_) with u hu + rw [Finset.mem_Ico] at hu + have : 0 < u := calc + 0 < r i n := by exact hrpos_i + _ ≤ u := by exact hu.1 + exact rpow_le_rpow_of_exponent_nonpos (by positivity) + (by exact_mod_cast (le_of_lt hu.2)) (le_of_lt hp) + _ ≤ n ^ (p a b) * (Finset.Ico (r i n) n).card • (c₂ * g n / n ^ ((p a b) + 1)) := by + gcongr; exact Finset.sum_le_card_nsmul _ _ _ (fun x _ => by rfl) + _ = n ^ (p a b) * (Finset.Ico (r i n) n).card * (c₂ * g n / n ^ ((p a b) + 1)) := by + rw [nsmul_eq_mul, mul_assoc] + _ = n ^ (p a b) * (n - r i n) * (c₂ * g n / n ^ ((p a b) + 1)) := by + congr; rw [Nat.card_Ico, Nat.cast_sub (le_of_lt <| hr_lt_n i)] + _ ≤ n ^ (p a b) * n * (c₂ * g n / n ^ ((p a b) + 1)) := by + gcongr; simp only [tsub_le_iff_right, le_add_iff_nonneg_right, Nat.cast_nonneg] + _ = c₂ * (n^((p a b) + 1) / n ^ ((p a b) + 1)) * g n := by + rw [← Real.rpow_add_one (by positivity) (p a b)]; ring + _ = c₂ * g n := by rw [div_self (by positivity), mul_one] + _ ≤ max c₂ (c₂ / c₁ ^ ((p a b) + 1)) * g n := by gcongr; exact le_max_left _ _ + +lemma eventually_atTop_sumTransform_ge : + ∃ c > 0, ∀ᶠ (n:ℕ) in atTop, ∀ i, c * g n ≤ sumTransform (p a b) g (r i n) n := by + obtain ⟨c₁, hc₁_mem, hc₁⟩ := R.exists_eventually_const_mul_le_r + obtain ⟨c₂, hc₂_mem, hc₂⟩ := R.g_grows_poly.eventually_atTop_ge_nat hc₁_mem + obtain ⟨c₃, hc₃_mem, hc₃⟩ := R.exists_eventually_r_le_const_mul + have hc₁_pos : 0 < c₁ := hc₁_mem.1 + have hc₃' : 0 < (1 - c₃) := by have := hc₃_mem.2; linarith + refine ⟨min (c₂ * (1 - c₃)) ((1 - c₃) * c₂ / c₁^((p a b) + 1)), by positivity, ?_⟩ + filter_upwards [hc₁, hc₂, hc₃, R.eventually_r_pos, R.eventually_r_lt_n, eventually_gt_atTop 0] + with n hn₁ hn₂ hn₃ hrpos hr_lt_n hn_pos + intro i + have hrpos_i := hrpos i + have g_nonneg : 0 ≤ g n := R.g_nonneg n (by positivity) + rcases le_or_gt 0 (p a b + 1) with hp|hp + case h.inl => -- 0 ≤ (p a b) + 1 + calc sumTransform (p a b) g (r i n) n + = n ^ (p a b) * (∑ u in Finset.Ico (r i n) n, g u / u ^ ((p a b) + 1)) := by rfl + _ ≥ n ^ (p a b) * (∑ u in Finset.Ico (r i n) n, c₂ * g n / u^((p a b) + 1)) := by + gcongr with u hu + rw [Finset.mem_Ico] at hu + have hu' : u ∈ Set.Icc (r i n) n := ⟨hu.1, by linarith⟩ + refine hn₂ u ?_ + rw [Set.mem_Icc] + refine ⟨?_, by norm_cast; linarith⟩ + calc c₁ * n ≤ r i n := by exact hn₁ i + _ ≤ u := by exact_mod_cast hu'.1 + _ ≥ n ^ (p a b) * (∑ _u in Finset.Ico (r i n) n, c₂ * g n / n ^ ((p a b) + 1)) := by + gcongr with u hu + · rw [Finset.mem_Ico] at hu + have := calc 0 < r i n := hrpos_i + _ ≤ u := hu.1 + positivity + · rw [Finset.mem_Ico] at hu + exact le_of_lt hu.2 + _ ≥ n ^ (p a b) * (Finset.Ico (r i n) n).card • (c₂ * g n / n ^ ((p a b) + 1)) := by + gcongr; exact Finset.card_nsmul_le_sum _ _ _ (fun x _ => by rfl) + _ = n ^ (p a b) * (Finset.Ico (r i n) 
n).card * (c₂ * g n / n ^ ((p a b) + 1)) := by + rw [nsmul_eq_mul, mul_assoc] + _ = n ^ (p a b) * (n - r i n) * (c₂ * g n / n ^ ((p a b) + 1)) := by + congr; rw [Nat.card_Ico, Nat.cast_sub (le_of_lt <| hr_lt_n i)] + _ ≥ n ^ (p a b) * (n - c₃ * n) * (c₂ * g n / n ^ ((p a b) + 1)) := by + gcongr; exact hn₃ i + _ = n ^ (p a b) * n * (1 - c₃) * (c₂ * g n / n ^ ((p a b) + 1)) := by ring + _ = c₂ * (1 - c₃) * g n * (n ^ ((p a b) + 1) / n ^ ((p a b) + 1)) := by + rw [← Real.rpow_add_one (by positivity) (p a b)]; ring + _ = c₂ * (1 - c₃) * g n := by rw [div_self (by positivity), mul_one] + _ ≥ min (c₂ * (1 - c₃)) ((1 - c₃) * c₂ / c₁ ^ ((p a b) + 1)) * g n := by + gcongr; exact min_le_left _ _ + case h.inr => -- (p a b) + 1 < 0 + calc sumTransform (p a b) g (r i n) n + = n ^ (p a b) * (∑ u in Finset.Ico (r i n) n, g u / u^((p a b) + 1)) := by rfl + _ ≥ n ^ (p a b) * (∑ u in Finset.Ico (r i n) n, c₂ * g n / u ^ ((p a b) + 1)) := by + gcongr with u hu + rw [Finset.mem_Ico] at hu + have hu' : u ∈ Set.Icc (r i n) n := ⟨hu.1, by linarith⟩ + refine hn₂ u ?_ + rw [Set.mem_Icc] + refine ⟨?_, by norm_cast; linarith⟩ + calc c₁ * n ≤ r i n := by exact hn₁ i + _ ≤ u := by exact_mod_cast hu'.1 + _ ≥ n ^ (p a b) * (∑ _u in Finset.Ico (r i n) n, c₂ * g n / (r i n) ^ ((p a b) + 1)) := by + gcongr n^(p a b) * (Finset.Ico (r i n) n).sum (fun _ => c₂ * g n / ?_) with u hu + · rw [Finset.mem_Ico] at hu + have := calc 0 < r i n := hrpos_i + _ ≤ u := hu.1 + positivity + · rw [Finset.mem_Ico] at hu + exact rpow_le_rpow_of_exponent_nonpos (by positivity) + (by exact_mod_cast hu.1) (le_of_lt hp) + _ ≥ n ^ (p a b) * (Finset.Ico (r i n) n).card • (c₂ * g n / (r i n) ^ ((p a b) + 1)) := by + gcongr; exact Finset.card_nsmul_le_sum _ _ _ (fun x _ => by rfl) + _ = n ^ (p a b) * (Finset.Ico (r i n) n).card * (c₂ * g n / (r i n) ^ ((p a b) + 1)) := by + rw [nsmul_eq_mul, mul_assoc] + _ ≥ n ^ (p a b) * (Finset.Ico (r i n) n).card * (c₂ * g n / (c₁ * n) ^ ((p a b) + 1)) := by + gcongr n^(p a b) * (Finset.Ico (r i n) n).card * (c₂ * g n / ?_) + exact rpow_le_rpow_of_exponent_nonpos (by positivity) (hn₁ i) (le_of_lt hp) + _ = n ^ (p a b) * (n - r i n) * (c₂ * g n / (c₁ * n) ^ ((p a b) + 1)) := by + congr; rw [Nat.card_Ico, Nat.cast_sub (le_of_lt <| hr_lt_n i)] + _ ≥ n ^ (p a b) * (n - c₃ * n) * (c₂ * g n / (c₁ * n) ^ ((p a b) + 1)) := by + gcongr; exact hn₃ i + _ = n ^ (p a b) * n * (1 - c₃) * (c₂ * g n / (c₁ * n) ^ ((p a b) + 1)) := by ring + _ = n ^ (p a b) * n * (1 - c₃) * (c₂ * g n / (c₁ ^ ((p a b) + 1) * n ^ ((p a b) + 1))) := by + rw [Real.mul_rpow (by positivity) (by positivity)] + _ = (n ^ ((p a b) + 1) / n ^ ((p a b) + 1)) * (1 - c₃) * c₂ * g n / c₁ ^ ((p a b) + 1) := by + rw [← Real.rpow_add_one (by positivity) (p a b)]; ring + _ = (1 - c₃) * c₂ / c₁ ^ ((p a b) + 1) * g n := by + rw [div_self (by positivity), one_mul]; ring + _ ≥ min (c₂ * (1 - c₃)) ((1 - c₃) * c₂ / c₁ ^ ((p a b) + 1)) * g n := by + gcongr; exact min_le_right _ _ + +/-! +#### Technical lemmas + +The next several lemmas are technical lemmas leading up to `rpow_p_mul_one_sub_smoothingFn_le` and +`rpow_p_mul_one_add_smoothingFn_ge`, which are key steps in the main proof. 
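+
+Informally, `rpow_p_mul_one_sub_smoothingFn_le` says that, eventually,
+`(r i n) ^ p * (1 - ε (r i n)) ≤ (b i) ^ p * n ^ p * (1 - ε n)` for every `i`, i.e. the
+`o(n / (log n) ^ 2)` error in `r i n ≈ b i * n` is absorbed by the smoothing factor, and
+`rpow_p_mul_one_add_smoothingFn_ge` provides the corresponding lower bound for the factor `1 + ε`.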
+-/ + +lemma isBigO_apply_r_sub_b (q : ℝ → ℝ) (hq_diff : DifferentiableOn ℝ q (Set.Ioi 1)) + (hq_poly : GrowsPolynomially fun x => ‖deriv q x‖) (i : α): + (fun n => q (r i n) - q (b i * n)) =O[atTop] fun n => (deriv q n) * (r i n - b i * n) := by + let b' := b (min_bi b) / 2 + have hb_pos : 0 < b' := by have := R.b_pos (min_bi b); positivity + have hb_lt_one : b' < 1 := calc + b (min_bi b) / 2 < b (min_bi b) := by exact div_two_lt_of_pos (R.b_pos (min_bi b)) + _ < 1 := R.b_lt_one (min_bi b) + have hb : b' ∈ Set.Ioo 0 1 := ⟨hb_pos, hb_lt_one⟩ + have hb' : ∀ i, b' ≤ b i := fun i => calc + b (min_bi b) / 2 ≤ b i / 2 := by gcongr; aesop + _ ≤ b i := by exact le_of_lt <| div_two_lt_of_pos (R.b_pos i) + obtain ⟨c₁, _, c₂, _, hq_poly⟩ := hq_poly b' hb + rw [isBigO_iff] + refine ⟨c₂, ?_⟩ + have h_tendsto : Tendsto (fun x => b' * x) atTop atTop := + Tendsto.const_mul_atTop hb_pos tendsto_id + filter_upwards [hq_poly.nat_cast_atTop, R.eventually_bi_mul_le_r, eventually_ge_atTop R.n₀, + eventually_gt_atTop 0, (h_tendsto.eventually_gt_atTop 1).nat_cast_atTop] with + n hn h_bi_le_r h_ge_n₀ h_n_pos h_bn + rw [norm_mul, ← mul_assoc] + refine Convex.norm_image_sub_le_of_norm_deriv_le + (s := Set.Icc (b'*n) n) (fun z hz => ?diff) (fun z hz => (hn z hz).2) + (convex_Icc _ _) ?mem_Icc <| ⟨h_bi_le_r i, by exact_mod_cast (le_of_lt (R.r_lt_n i n h_ge_n₀))⟩ + case diff => + refine hq_diff.differentiableAt (Ioi_mem_nhds ?_) + calc 1 < b' * n := by exact h_bn + _ ≤ z := hz.1 + case mem_Icc => + refine ⟨by gcongr; exact hb' i, ?_⟩ + calc b i * n ≤ 1 * n := by gcongr; exact le_of_lt <| R.b_lt_one i + _ = n := by simp + +lemma eventually_deriv_rpow_p_mul_one_sub_smoothingFn (p : ℝ) : + deriv (fun z => z ^ p * (1 - ε z)) + =ᶠ[atTop] fun z => p * z ^ (p-1) * (1 - ε z) + z ^ (p-1) / (log z ^ 2) := calc + deriv (fun x => x ^ p * (1 - ε x)) + =ᶠ[atTop] fun x => deriv (· ^ p) x * (1 - ε x) + x ^ p * deriv (1 - ε ·) x := by + filter_upwards [eventually_gt_atTop 1] with x hx + rw [deriv_mul] + · exact differentiableAt_rpow_const_of_ne _ (by positivity) + · exact differentiableAt_one_sub_smoothingFn hx + _ =ᶠ[atTop] fun x => p * x ^ (p-1) * (1 - ε x) + x ^ p * (x⁻¹ / (log x ^ 2)) := by + filter_upwards [eventually_gt_atTop 1, eventually_deriv_one_sub_smoothingFn] + with x hx hderiv + rw [hderiv, Real.deriv_rpow_const (Or.inl <| by positivity)] + _ =ᶠ[atTop] fun x => p * x ^ (p-1) * (1 - ε x) + x ^ (p-1) / (log x ^ 2) := by + filter_upwards [eventually_gt_atTop 0] with x hx + rw [mul_div, ← Real.rpow_neg_one, ← Real.rpow_add (by positivity), sub_eq_add_neg] + +lemma eventually_deriv_rpow_p_mul_one_add_smoothingFn (p : ℝ) : + deriv (fun z => z ^ p * (1 + ε z)) + =ᶠ[atTop] fun z => p * z ^ (p-1) * (1 + ε z) - z ^ (p-1) / (log z ^ 2) := calc + deriv (fun x => x ^ p * (1 + ε x)) + =ᶠ[atTop] fun x => deriv (· ^ p) x * (1 + ε x) + x ^ p * deriv (1 + ε ·) x := by + filter_upwards [eventually_gt_atTop 1] with x hx + rw [deriv_mul] + · exact differentiableAt_rpow_const_of_ne _ (by positivity) + · exact differentiableAt_one_add_smoothingFn hx + _ =ᶠ[atTop] fun x => p * x ^ (p-1) * (1 + ε x) - x ^ p * (x⁻¹ / (log x ^ 2)) := by + filter_upwards [eventually_gt_atTop 1, eventually_deriv_one_add_smoothingFn] + with x hx hderiv + simp [hderiv, Real.deriv_rpow_const (Or.inl <| by positivity), neg_div, sub_eq_add_neg] + _ =ᶠ[atTop] fun x => p * x ^ (p-1) * (1 + ε x) - x ^ (p-1) / (log x ^ 2) := by + filter_upwards [eventually_gt_atTop 0] with x hx + simp [mul_div, ← Real.rpow_neg_one, ← Real.rpow_add (by positivity), sub_eq_add_neg] + +lemma 
isEquivalent_deriv_rpow_p_mul_one_sub_smoothingFn {p : ℝ} (hp : p ≠ 0) : + deriv (fun z => z ^ p * (1 - ε z)) ~[atTop] fun z => p * z ^ (p-1) := calc + deriv (fun z => z ^ p * (1 - ε z)) + =ᶠ[atTop] fun z => p * z ^ (p-1) * (1 - ε z) + z^(p-1) / (log z ^ 2) := + eventually_deriv_rpow_p_mul_one_sub_smoothingFn p + _ ~[atTop] fun z => p * z ^ (p-1) := by + refine IsEquivalent.add_isLittleO ?one ?two + case one => calc + (fun z => p * z ^ (p-1) * (1 - ε z)) ~[atTop] fun z => p * z ^ (p-1) * 1 := + IsEquivalent.mul IsEquivalent.refl isEquivalent_one_sub_smoothingFn_one + _ = fun z => p * z ^ (p-1) := by ext; ring + case two => calc + (fun z => z ^ (p-1) / (log z ^ 2)) =o[atTop] fun z => z ^ (p-1) / 1 := by + simp_rw [div_eq_mul_inv] + refine IsBigO.mul_isLittleO (isBigO_refl _ _) + (IsLittleO.inv_rev ?_ (by aesop (add safe eventually_of_forall))) + rw [isLittleO_const_left] + refine Or.inr <| Tendsto.comp tendsto_norm_atTop_atTop ?_ + exact Tendsto.comp (g := fun z => z ^ 2) + (tendsto_pow_atTop (by norm_num)) tendsto_log_atTop + _ = fun z => z ^ (p-1) := by ext; simp + _ =Θ[atTop] fun z => p * z ^ (p-1) := by + exact IsTheta.const_mul_right hp <| isTheta_refl _ _ + +lemma isEquivalent_deriv_rpow_p_mul_one_add_smoothingFn {p : ℝ} (hp : p ≠ 0) : + deriv (fun z => z ^ p * (1 + ε z)) ~[atTop] fun z => p * z ^ (p-1) := calc + deriv (fun z => z ^ p * (1 + ε z)) + =ᶠ[atTop] fun z => p * z ^ (p-1) * (1 + ε z) - z ^ (p-1) / (log z ^ 2) := + eventually_deriv_rpow_p_mul_one_add_smoothingFn p + _ ~[atTop] fun z => p * z ^ (p-1) := by + refine IsEquivalent.add_isLittleO ?one ?two + case one => calc + (fun z => p * z ^ (p-1) * (1 + ε z)) ~[atTop] fun z => p * z ^ (p-1) * 1 := + IsEquivalent.mul IsEquivalent.refl isEquivalent_one_add_smoothingFn_one + _ = fun z => p * z ^ (p-1) := by ext; ring + case two => calc + (fun z => -(z ^ (p-1) / (log z ^ 2))) =o[atTop] fun z => z ^ (p-1) / 1 := by + simp_rw [isLittleO_neg_left, div_eq_mul_inv] + refine IsBigO.mul_isLittleO (isBigO_refl _ _) + (IsLittleO.inv_rev ?_ (by aesop (add safe eventually_of_forall))) + rw [isLittleO_const_left] + refine Or.inr <| Tendsto.comp tendsto_norm_atTop_atTop ?_ + exact Tendsto.comp (g := fun z => z ^ 2) + (tendsto_pow_atTop (by norm_num)) tendsto_log_atTop + _ = fun z => z ^ (p-1) := by ext; simp + _ =Θ[atTop] fun z => p * z ^ (p-1) := by + exact IsTheta.const_mul_right hp <| isTheta_refl _ _ + +lemma isTheta_deriv_rpow_p_mul_one_sub_smoothingFn {p : ℝ} (hp : p ≠ 0) : + (fun x => ‖deriv (fun z => z ^ p * (1 - ε z)) x‖) =Θ[atTop] fun z => z ^ (p-1) := by + refine IsTheta.norm_left ?_ + calc (fun x => deriv (fun z => z ^ p * (1 - ε z)) x) =Θ[atTop] fun z => p * z ^ (p-1) := + (isEquivalent_deriv_rpow_p_mul_one_sub_smoothingFn hp).isTheta + _ =Θ[atTop] fun z => z ^ (p-1) := + IsTheta.const_mul_left hp <| isTheta_refl _ _ + +lemma isTheta_deriv_rpow_p_mul_one_add_smoothingFn {p : ℝ} (hp : p ≠ 0) : + (fun x => ‖deriv (fun z => z ^ p * (1 + ε z)) x‖) =Θ[atTop] fun z => z ^ (p-1) := by + refine IsTheta.norm_left ?_ + calc (fun x => deriv (fun z => z ^ p * (1 + ε z)) x) =Θ[atTop] fun z => p * z ^ (p-1) := + (isEquivalent_deriv_rpow_p_mul_one_add_smoothingFn hp).isTheta + _ =Θ[atTop] fun z => z ^ (p-1) := + IsTheta.const_mul_left hp <| isTheta_refl _ _ + +lemma growsPolynomially_deriv_rpow_p_mul_one_sub_smoothingFn (p : ℝ) : + GrowsPolynomially fun x => ‖deriv (fun z => z ^ p * (1 - ε z)) x‖ := by + rcases eq_or_ne p 0 with hp|hp + case inl => -- p = 0 + have h₁ : (fun x => ‖deriv (fun z => z ^ p * (1 - ε z)) x‖) + =ᶠ[atTop] fun z => z⁻¹ / 
(log z ^ 2) := by + filter_upwards [eventually_deriv_one_sub_smoothingFn, eventually_gt_atTop 1] with x hx hx_pos + have : 0 ≤ x⁻¹ / (log x ^ 2) := by + have hlog : 0 < log x := Real.log_pos hx_pos + positivity + simp only [hp, Real.rpow_zero, one_mul, differentiableAt_const, hx, Real.norm_of_nonneg this] + refine GrowsPolynomially.congr_of_eventuallyEq h₁ ?_ + refine GrowsPolynomially.div (GrowsPolynomially.inv growsPolynomially_id) + (GrowsPolynomially.pow 2 growsPolynomially_log ?_) + filter_upwards [eventually_ge_atTop 1] with _ hx + exact log_nonneg hx + case inr => -- p ≠ 0 + refine GrowsPolynomially.of_isTheta (growsPolynomially_rpow (p-1)) + (isTheta_deriv_rpow_p_mul_one_sub_smoothingFn hp) ?_ + filter_upwards [eventually_gt_atTop 0] with _ _ + positivity + +lemma growsPolynomially_deriv_rpow_p_mul_one_add_smoothingFn (p : ℝ) : + GrowsPolynomially fun x => ‖deriv (fun z => z ^ p * (1 + ε z)) x‖ := by + rcases eq_or_ne p 0 with hp|hp + case inl => -- p = 0 + have h₁ : (fun x => ‖deriv (fun z => z ^ p * (1 + ε z)) x‖) + =ᶠ[atTop] fun z => z⁻¹ / (log z ^ 2) := by + filter_upwards [eventually_deriv_one_add_smoothingFn, eventually_gt_atTop 1] with x hx hx_pos + have : 0 ≤ x⁻¹ / (log x ^ 2) := by + have hlog : 0 < log x := Real.log_pos hx_pos + positivity + simp only [neg_div, norm_neg, hp, Real.rpow_zero, + one_mul, differentiableAt_const, hx, Real.norm_of_nonneg this] + refine GrowsPolynomially.congr_of_eventuallyEq h₁ ?_ + refine GrowsPolynomially.div (GrowsPolynomially.inv growsPolynomially_id) + (GrowsPolynomially.pow 2 growsPolynomially_log ?_) + filter_upwards [eventually_ge_atTop 1] with x hx + exact log_nonneg hx + case inr => -- p ≠ 0 + refine GrowsPolynomially.of_isTheta ((growsPolynomially_rpow (p-1))) + (isTheta_deriv_rpow_p_mul_one_add_smoothingFn hp) ?_ + filter_upwards [eventually_gt_atTop 0] with _ _ + positivity + +lemma rpow_p_mul_one_sub_smoothingFn_le : + ∀ᶠ (n : ℕ) in atTop, ∀ i, (r i n) ^ (p a b) * (1 - ε (r i n)) + ≤ (b i) ^ (p a b) * n ^ (p a b) * (1 - ε n) := by + rw [Filter.eventually_all] + intro i + let q : ℝ → ℝ := fun x => x ^ (p a b) * (1 - ε x) + have h_diff_q : DifferentiableOn ℝ q (Set.Ioi 1) := by + refine DifferentiableOn.mul + (DifferentiableOn.mono (differentiableOn_rpow_const _) fun z hz => ?_) + differentiableOn_one_sub_smoothingFn + rw [Set.mem_compl_singleton_iff] + exact ne_of_gt <| zero_lt_one.trans hz + have h_deriv_q : deriv q =O[atTop] fun x => x ^ ((p a b) - 1) := calc + deriv q = deriv fun x => (fun z => z ^ (p a b)) x * (fun z => 1 - ε z) x := by rfl + _ =ᶠ[atTop] fun x => deriv (fun z => z ^ (p a b)) x * (1 - ε x) + + x ^ (p a b) * deriv (fun z => 1 - ε z) x := by + filter_upwards [eventually_ne_atTop 0, eventually_gt_atTop 1] with x hx hx' + rw [deriv_mul] <;> aesop + _ =O[atTop] fun x => x ^ ((p a b) - 1) := by + refine IsBigO.add ?left ?right + case left => calc + (fun x => deriv (fun z => z ^ (p a b)) x * (1 - ε x)) + =O[atTop] fun x => x ^ ((p a b) - 1) * (1 - ε x) := by + exact IsBigO.mul (isBigO_deriv_rpow_const_atTop (p a b)) (isBigO_refl _ _) + _ =O[atTop] fun x => x ^ ((p a b) - 1) * 1 := by + refine IsBigO.mul (isBigO_refl _ _) + isEquivalent_one_sub_smoothingFn_one.isBigO + _ = fun x => x ^ ((p a b) - 1) := by ext; rw [mul_one] + case right => calc + (fun x => x ^ (p a b) * deriv (fun z => 1 - ε z) x) + =O[atTop] (fun x => x ^ (p a b) * x⁻¹) := by + exact IsBigO.mul (isBigO_refl _ _) isLittleO_deriv_one_sub_smoothingFn.isBigO + _ =ᶠ[atTop] fun x => x ^ ((p a b) - 1) := by + filter_upwards [eventually_gt_atTop 0] with x hx + 
rw [← Real.rpow_neg_one, ← Real.rpow_add hx, ← sub_eq_add_neg] + have h_main_norm : (fun (n:ℕ) => ‖q (r i n) - q (b i * n)‖) + ≤ᶠ[atTop] fun (n:ℕ) => ‖(b i) ^ (p a b) * n ^ (p a b) * (ε (b i * n) - ε n)‖ := by + refine IsLittleO.eventuallyLE ?_ + calc + (fun (n:ℕ) => q (r i n) - q (b i * n)) + =O[atTop] fun n => (deriv q n) * (r i n - b i * n) := by + exact R.isBigO_apply_r_sub_b q h_diff_q + (growsPolynomially_deriv_rpow_p_mul_one_sub_smoothingFn (p a b)) i + _ =o[atTop] fun n => (deriv q n) * (n / log n ^ 2) := by + exact IsBigO.mul_isLittleO (isBigO_refl _ _) (R.dist_r_b i) + _ =O[atTop] fun n => n^((p a b) - 1) * (n / log n ^ 2) := by + exact IsBigO.mul (IsBigO.nat_cast_atTop h_deriv_q) (isBigO_refl _ _) + _ =ᶠ[atTop] fun n => n^(p a b) / (log n) ^ 2 := by + filter_upwards [eventually_ne_atTop 0] with n hn + have hn' : (n:ℝ) ≠ 0 := by positivity + simp [← mul_div_assoc, ← Real.rpow_add_one hn'] + _ = fun (n:ℕ) => (n:ℝ) ^ (p a b) * (1 / (log n)^2) := by + simp_rw [mul_div, mul_one] + _ =Θ[atTop] fun (n:ℕ) => (b i) ^ (p a b) * n ^ (p a b) * (1 / (log n)^2) := by + refine IsTheta.symm ?_ + simp_rw [mul_assoc] + refine IsTheta.const_mul_left ?_ (isTheta_refl _ _) + have := R.b_pos i; positivity + _ =Θ[atTop] fun (n:ℕ) => (b i)^(p a b) * n^(p a b) * (ε (b i * n) - ε n) := by + exact IsTheta.symm <| IsTheta.mul (isTheta_refl _ _) + <| R.isTheta_smoothingFn_sub_self i + have h_main : (fun (n:ℕ) => q (r i n) - q (b i * n)) + ≤ᶠ[atTop] fun (n:ℕ) => (b i) ^ (p a b) * n ^ (p a b) * (ε (b i * n) - ε n) := by + calc (fun (n:ℕ) => q (r i n) - q (b i * n)) + ≤ᶠ[atTop] fun (n:ℕ) => ‖q (r i n) - q (b i * n)‖ := by + filter_upwards with _; exact le_norm_self _ + _ ≤ᶠ[atTop] fun (n:ℕ) => ‖(b i) ^ (p a b) * n ^ (p a b) * (ε (b i * n) - ε n)‖ := + h_main_norm + _ =ᶠ[atTop] fun (n:ℕ) => (b i) ^ (p a b) * n ^ (p a b) * (ε (b i * n) - ε n) := by + filter_upwards [eventually_gt_atTop ⌈(b i)⁻¹⌉₊, eventually_gt_atTop 1] with n hn hn' + refine norm_of_nonneg ?_ + have h₁ := R.b_pos i + have h₂ : 0 ≤ ε (b i * n) - ε n := by + refine sub_nonneg_of_le <| + (strictAntiOn_smoothingFn.le_iff_le ?n_gt_one ?bn_gt_one).mpr ?le + case n_gt_one => + show 1 < (n:ℝ) + rw [Nat.one_lt_cast] + exact hn' + case bn_gt_one => + calc 1 = b i * (b i)⁻¹ := by rw [mul_inv_cancel (by positivity)] + _ ≤ b i * ⌈(b i)⁻¹⌉₊ := by gcongr; exact Nat.le_ceil _ + _ < b i * n := by gcongr; rw [Nat.cast_lt]; exact hn + case le => calc b i * n ≤ 1 * n := by have := R.b_lt_one i; gcongr + _ = n := by rw [one_mul] + positivity + filter_upwards [h_main] with n hn + have h₁ : q (b i * n) + (b i) ^ (p a b) * n ^ (p a b) * (ε (b i * n) - ε n) + = (b i) ^ (p a b) * n ^ (p a b) * (1 - ε n) := by + have := R.b_pos i + simp only [mul_rpow (by positivity : (0:ℝ) ≤ b i) (by positivity : (0:ℝ) ≤ n)] + ring + show q (r i n) ≤ (b i) ^ (p a b) * n ^ (p a b) * (1 - ε n) + rw [← h₁, ← sub_le_iff_le_add'] + exact hn + +lemma rpow_p_mul_one_add_smoothingFn_ge : + ∀ᶠ (n : ℕ) in atTop, ∀ i, (b i) ^ (p a b) * n ^ (p a b) * (1 + ε n) + ≤ (r i n) ^ (p a b) * (1 + ε (r i n)) := by + rw [Filter.eventually_all] + intro i + let q : ℝ → ℝ := fun x => x ^ (p a b) * (1 + ε x) + have h_diff_q : DifferentiableOn ℝ q (Set.Ioi 1) := by + refine DifferentiableOn.mul + (DifferentiableOn.mono (differentiableOn_rpow_const _) fun z hz => ?_) + differentiableOn_one_add_smoothingFn + rw [Set.mem_compl_singleton_iff] + exact ne_of_gt <| zero_lt_one.trans hz + have h_deriv_q : deriv q =O[atTop] fun x => x ^ ((p a b) - 1) := calc + deriv q = deriv fun x => (fun z => z ^ (p a b)) x * (fun z => 
1 + ε z) x := by rfl + _ =ᶠ[atTop] fun x => deriv (fun z => z ^ (p a b)) x * (1 + ε x) + + x ^ (p a b) * deriv (fun z => 1 + ε z) x := by + filter_upwards [eventually_ne_atTop 0, eventually_gt_atTop 1] with x hx hx' + rw [deriv_mul] <;> aesop + _ =O[atTop] fun x => x ^ ((p a b) - 1) := by + refine IsBigO.add ?left ?right + case left => calc + (fun x => deriv (fun z => z ^ (p a b)) x * (1 + ε x)) + =O[atTop] fun x => x ^ ((p a b) - 1) * (1 + ε x) := by + exact IsBigO.mul (isBigO_deriv_rpow_const_atTop (p a b)) (isBigO_refl _ _) + _ =O[atTop] fun x => x ^ ((p a b) - 1) * 1 := + IsBigO.mul (isBigO_refl _ _) isEquivalent_one_add_smoothingFn_one.isBigO + _ = fun x => x ^ ((p a b) - 1) := by ext; rw [mul_one] + case right => calc + (fun x => x ^ (p a b) * deriv (fun z => 1 + ε z) x) + =O[atTop] (fun x => x ^ (p a b) * x⁻¹) := by + exact IsBigO.mul (isBigO_refl _ _) + isLittleO_deriv_one_add_smoothingFn.isBigO + _ =ᶠ[atTop] fun x => x ^ ((p a b) - 1) := by + filter_upwards [eventually_gt_atTop 0] with x hx + rw [← Real.rpow_neg_one, ← Real.rpow_add hx, ← sub_eq_add_neg] + have h_main_norm : (fun (n:ℕ) => ‖q (r i n) - q (b i * n)‖) + ≤ᶠ[atTop] fun (n:ℕ) => ‖(b i) ^ (p a b) * n ^ (p a b) * (ε (b i * n) - ε n)‖ := by + refine IsLittleO.eventuallyLE ?_ + calc + (fun (n:ℕ) => q (r i n) - q (b i * n)) + =O[atTop] fun n => (deriv q n) * (r i n - b i * n) := by + exact R.isBigO_apply_r_sub_b q h_diff_q + (growsPolynomially_deriv_rpow_p_mul_one_add_smoothingFn (p a b)) i + _ =o[atTop] fun n => (deriv q n) * (n / log n ^ 2) := by + exact IsBigO.mul_isLittleO (isBigO_refl _ _) (R.dist_r_b i) + _ =O[atTop] fun n => n ^ ((p a b) - 1) * (n / log n ^ 2) := by + exact IsBigO.mul (IsBigO.nat_cast_atTop h_deriv_q) (isBigO_refl _ _) + _ =ᶠ[atTop] fun n => n ^ (p a b) / (log n) ^ 2 := by + filter_upwards [eventually_ne_atTop 0] with n hn + have hn' : (n:ℝ) ≠ 0 := by positivity + simp [← mul_div_assoc, ← Real.rpow_add_one hn'] + _ = fun (n:ℕ) => (n:ℝ) ^ (p a b) * (1 / (log n) ^ 2) := by simp_rw [mul_div, mul_one] + _ =Θ[atTop] fun (n:ℕ) => (b i) ^ (p a b) * n ^ (p a b) * (1 / (log n) ^ 2) := by + refine IsTheta.symm ?_ + simp_rw [mul_assoc] + refine IsTheta.const_mul_left ?_ (isTheta_refl _ _) + have := R.b_pos i; positivity + _ =Θ[atTop] fun (n:ℕ) => (b i) ^ (p a b) * n ^ (p a b) * (ε (b i * n) - ε n) := by + exact IsTheta.symm <| IsTheta.mul (isTheta_refl _ _) + <| R.isTheta_smoothingFn_sub_self i + have h_main : (fun (n:ℕ) => q (b i * n) - q (r i n)) + ≤ᶠ[atTop] fun (n:ℕ) => (b i) ^ (p a b) * n ^ (p a b) * (ε (b i * n) - ε n) := by + calc (fun (n:ℕ) => q (b i * n) - q (r i n)) + ≤ᶠ[atTop] fun (n:ℕ) => ‖q (r i n) - q (b i * n)‖ := by + filter_upwards with _; rw [norm_sub_rev]; exact le_norm_self _ + _ ≤ᶠ[atTop] fun (n:ℕ) => ‖(b i) ^ (p a b) * n ^ (p a b) * (ε (b i * n) - ε n)‖ := + h_main_norm + _ =ᶠ[atTop] fun (n:ℕ) => (b i) ^ (p a b) * n ^ (p a b) * (ε (b i * n) - ε n) := by + filter_upwards [eventually_gt_atTop ⌈(b i)⁻¹⌉₊, eventually_gt_atTop 1] with n hn hn' + refine norm_of_nonneg ?_ + have h₁ := R.b_pos i + have h₂ : 0 ≤ ε (b i * n) - ε n := by + refine sub_nonneg_of_le <| + (strictAntiOn_smoothingFn.le_iff_le ?n_gt_one ?bn_gt_one).mpr ?le + case n_gt_one => + show 1 < (n:ℝ) + rw [Nat.one_lt_cast] + exact hn' + case bn_gt_one => + calc 1 = b i * (b i)⁻¹ := by rw [mul_inv_cancel (by positivity)] + _ ≤ b i * ⌈(b i)⁻¹⌉₊ := by gcongr; exact Nat.le_ceil _ + _ < b i * n := by gcongr; rw [Nat.cast_lt]; exact hn + case le => calc b i * n ≤ 1 * n := by have := R.b_lt_one i; gcongr + _ = n := by rw [one_mul] + 
positivity + filter_upwards [h_main] with n hn + have h₁ : q (b i * n) - (b i) ^ (p a b) * n ^ (p a b) * (ε (b i * n) - ε n) + = (b i) ^ (p a b) * n ^ (p a b) * (1 + ε n) := by + have := R.b_pos i + simp only [mul_rpow (by positivity : (0:ℝ) ≤ b i) (by positivity : (0:ℝ) ≤ n)] + ring + show (b i) ^ (p a b) * n ^ (p a b) * (1 + ε n) ≤ q (r i n) + rw [← h₁, sub_le_iff_le_add', ← sub_le_iff_le_add] + exact hn + +/-! +#### Main proof + +This final section proves the Akra-Bazzi theorem. +-/ + +lemma base_nonempty {n : ℕ} (hn : 0 < n) : (Finset.Ico (⌊b (min_bi b) / 2 * n⌋₊) n).Nonempty := by + let b' := b (min_bi b) + have hb_pos : 0 < b' := R.b_pos _ + simp_rw [Finset.nonempty_Ico] + exact_mod_cast calc ⌊b' / 2 * n⌋₊ ≤ b' / 2 * n := by exact Nat.floor_le (by positivity) + _ < 1 / 2 * n := by gcongr; exact R.b_lt_one (min_bi b) + _ ≤ 1 * n := by gcongr; norm_num + _ = n := by simp + +/-- The main proof of the upper bound part of the Akra-Bazzi theorem. The factor +`1 - ε n` does not change the asymptotic order, but is needed for the induction step to go +through. -/ +lemma T_isBigO_smoothingFn_mul_asympBound : + T =O[atTop] (fun n => (1 - ε n) * asympBound g a b n) := by + let b' := b (min_bi b) / 2 + have hb_pos : 0 < b' := R.bi_min_div_two_pos + rw [isBigO_atTop_iff_eventually_exists] + obtain ⟨c₁, hc₁, h_sumTransform_aux⟩ := R.eventually_atTop_sumTransform_ge + filter_upwards [eventually_ge_atTop R.n₀, -- n₀_ge_Rn₀ + eventually_forall_ge_atTop.mpr eventually_one_sub_smoothingFn_pos, -- h_smoothing_pos + eventually_forall_ge_atTop.mpr + <| eventually_one_sub_smoothingFn_gt_const (1/2) (by norm_num), -- h_smoothing_gt_half + eventually_forall_ge_atTop.mpr R.eventually_asympBound_pos, -- h_asympBound_pos + eventually_forall_ge_atTop.mpr R.eventually_asympBound_r_pos, -- h_asympBound_r_pos + (tendsto_nat_floor_mul_atTop b' hb_pos).eventually_forall_ge_atTop + R.eventually_asympBound_pos, -- h_asympBound_floor + eventually_gt_atTop 0, -- n₀_pos + eventually_forall_ge_atTop.mpr R.eventually_one_sub_smoothingFn_r_pos, -- h_smoothing_r_pos + eventually_forall_ge_atTop.mpr R.rpow_p_mul_one_sub_smoothingFn_le, -- bound1 + (tendsto_nat_floor_mul_atTop b' hb_pos).eventually_forall_ge_atTop + eventually_one_sub_smoothingFn_pos, -- h_smoothingFn_floor + eventually_forall_ge_atTop.mpr h_sumTransform_aux, -- h_sumTransform + eventually_forall_ge_atTop.mpr R.eventually_bi_mul_le_r] -- h_bi_le_r + with n₀ n₀_ge_Rn₀ h_smoothing_pos h_smoothing_gt_half + h_asympBound_pos h_asympBound_r_pos h_asympBound_floor n₀_pos h_smoothing_r_pos + bound1 h_smoothingFn_floor h_sumTransform h_bi_le_r + -- Max of the ratio `T(n) / asympBound(n)` over the base case `n ∈ [b * n₀, n₀)` + have h_base_nonempty := R.base_nonempty n₀_pos + let base_max : ℝ := + (Finset.Ico (⌊b' * n₀⌋₊) n₀).sup' h_base_nonempty + fun n => T n / ((1 - ε n) * asympBound g a b n) + -- The big-O constant we are aiming for: max of the base case ratio and what we need to + -- cancel out the `g(n)` term in the calculation below + set C := max (2 * c₁⁻¹) base_max with hC + refine ⟨C, fun n hn => ?_⟩ + -- Base case: statement is true for `b' * n₀ ≤ n < n₀` + have h_base : ∀ n ∈ Finset.Ico (⌊b' * n₀⌋₊) n₀, T n ≤ C * ((1 - ε n) * asympBound g a b n) := by + intro n hn + rw [Finset.mem_Ico] at hn + have htmp1 : 0 < 1 - ε n := h_smoothingFn_floor n hn.1 + have htmp2 : 0 < asympBound g a b n := h_asympBound_floor n hn.1 + rw [← _root_.div_le_iff (by positivity)] + rw [← Finset.mem_Ico] at hn + calc T n / ((1 - ε ↑n) * asympBound g a b n) + ≤ (Finset.Ico (⌊b' * n₀⌋₊) 
n₀).sup' h_base_nonempty + (fun z => T z / ((1 - ε z) * asympBound g a b z)) := + Finset.le_sup'_of_le _ (b := n) hn <| le_refl _ + _ ≤ C := le_max_right _ _ + have h_asympBound_pos' : 0 < asympBound g a b n := h_asympBound_pos n hn + have h_one_sub_smoothingFn_pos' : 0 < 1 - ε n := h_smoothing_pos n hn + rw [Real.norm_of_nonneg (R.T_nonneg n), Real.norm_of_nonneg (by positivity)] + -- We now prove all other cases by induction + induction n using Nat.strongInductionOn + case ind n h_ind => + have b_mul_n₀_le_ri i : ⌊b' * ↑n₀⌋₊ ≤ r i n := by + exact_mod_cast calc ⌊b' * (n₀ : ℝ)⌋₊ ≤ b' * n₀ := Nat.floor_le <| by positivity + _ ≤ b' * n := by gcongr + _ ≤ r i n := h_bi_le_r n hn i + have g_pos : 0 ≤ g n := R.g_nonneg n (by positivity) + calc + T n = (∑ i, a i * T (r i n)) + g n := by exact R.h_rec n <| n₀_ge_Rn₀.trans hn + _ ≤ (∑ i, a i * (C * ((1 - ε (r i n)) * asympBound g a b (r i n)))) + g n := by + -- Apply the induction hypothesis, or use the base case depending on how large n is + gcongr (∑ i, a i * ?_) + g n with i _ + · exact le_of_lt <| R.a_pos _ + · by_cases ri_lt_n₀ : r i n < n₀ + case pos => exact h_base _ <| by aesop + case neg => + push_neg at ri_lt_n₀ + exact h_ind (r i n) (R.r_lt_n _ _ (n₀_ge_Rn₀.trans hn)) ri_lt_n₀ + (h_asympBound_r_pos _ hn _) (h_smoothing_r_pos n hn i) + _ = (∑ i, a i * (C * ((1 - ε (r i n)) * ((r i n) ^ (p a b) + * (1 + (∑ u in range (r i n), g u / u ^ ((p a b) + 1))))))) + g n := by + simp_rw [asympBound_def'] + _ = (∑ i, C * a i * ((r i n) ^ (p a b) * (1 - ε (r i n)) + * ((1 + (∑ u in range (r i n), g u / u ^ ((p a b) + 1)))))) + g n := by + congr; ext; ring + _ ≤ (∑ i, C * a i * ((b i) ^ (p a b) * n ^ (p a b) * (1 - ε n) + * ((1 + (∑ u in range (r i n), g u / u ^ ((p a b) + 1)))))) + g n := by + gcongr (∑ i, C * a i * (?_ + * ((1 + (∑ u in range (r i n), g u / u ^ ((p a b) + 1)))))) + g n with i + · have := R.a_pos i + positivity + · refine add_nonneg zero_le_one <| Finset.sum_nonneg fun j _ => ?_ + rw [div_nonneg_iff] + exact Or.inl ⟨R.g_nonneg j (by positivity), by positivity⟩ + · exact bound1 n hn i + _ = (∑ i, C * a i * ((b i) ^ (p a b) * n ^ (p a b) * (1 - ε n) + * ((1 + ((∑ u in range n, g u / u ^ ((p a b) + 1)) + - (∑ u in Finset.Ico (r i n) n, g u / u ^ ((p a b) + 1))))))) + g n := by + congr; ext i; congr + refine eq_sub_of_add_eq ?_ + rw [add_comm] + exact add_eq_of_eq_sub <| Finset.sum_Ico_eq_sub _ + <| le_of_lt <| R.r_lt_n i n <| n₀_ge_Rn₀.trans hn + _ = (∑ i, C * a i * ((b i) ^ (p a b) * (1 - ε n) * ((n ^ (p a b) + * (1 + (∑ u in range n, g u / u ^ ((p a b) + 1))) + - n ^ (p a b) * (∑ u in Finset.Ico (r i n) n, g u / u ^ ((p a b) + 1)))))) + + g n := by + congr; ext; ring + _ = (∑ i, C * a i * ((b i) ^ (p a b) * (1 - ε n) + * ((asympBound g a b n - sumTransform (p a b) g (r i n) n)))) + g n := by + simp_rw [asympBound_def', sumTransform_def] + _ ≤ (∑ i, C * a i * ((b i) ^ (p a b) * (1 - ε n) + * ((asympBound g a b n - c₁ * g n)))) + g n := by + gcongr with i + · have := R.a_pos i + positivity + · have := R.b_pos i + positivity + · exact h_sumTransform n hn i + _ = (∑ i, C * (1 - ε n) * ((asympBound g a b n - c₁ * g n)) + * (a i * (b i) ^ (p a b))) + g n := by + congr; ext; ring + _ = C * (1 - ε n) * (asympBound g a b n - c₁ * g n) + g n := by + rw [← Finset.mul_sum, R.sumCoeffsExp_p_eq_one, mul_one] + _ = C * (1 - ε n) * asympBound g a b n + (1 - C * c₁ * (1 - ε n)) * g n := by ring + _ ≤ C * (1 - ε n) * asympBound g a b n + 0 := by + gcongr + refine mul_nonpos_of_nonpos_of_nonneg ?_ g_pos + rw [sub_nonpos] + calc 1 ≤ 2 * (c₁⁻¹ * c₁) * 
(1/2) := by + rw [inv_mul_cancel (by positivity : c₁ ≠ 0)]; norm_num + _ = (2 * c₁⁻¹) * c₁ * (1/2) := by ring + _ ≤ C * c₁ * (1 - ε n) := by gcongr + · rw [hC]; exact le_max_left _ _ + · exact le_of_lt <| h_smoothing_gt_half n hn + _ = C * ((1 - ε n) * asympBound g a b n) := by ring + +/-- The main proof of the lower bound part of the Akra-Bazzi theorem. The factor +`1 + ε n` does not change the asymptotic order, but is needed for the induction step to go +through. -/ +lemma smoothingFn_mul_asympBound_isBigO_T : + (fun (n : ℕ) => (1 + ε n) * asympBound g a b n) =O[atTop] T := by + let b' := b (min_bi b) / 2 + have hb_pos : 0 < b' := R.bi_min_div_two_pos + rw [isBigO_atTop_iff_eventually_exists_pos] + obtain ⟨c₁, hc₁, h_sumTransform_aux⟩ := R.eventually_atTop_sumTransform_le + filter_upwards [eventually_ge_atTop R.n₀, -- n₀_ge_Rn₀ + (tendsto_nat_floor_mul_atTop b' hb_pos).eventually_gt_atTop 0, -- h_b_floor + eventually_forall_ge_atTop.mpr eventually_one_add_smoothingFn_pos, -- h_smoothing_pos + (tendsto_nat_floor_mul_atTop b' hb_pos).eventually_forall_ge_atTop + eventually_one_add_smoothingFn_pos, -- h_smoothing_pos' + eventually_forall_ge_atTop.mpr R.eventually_asympBound_pos, -- h_asympBound_pos + eventually_forall_ge_atTop.mpr R.eventually_asympBound_r_pos, -- h_asympBound_r_pos + (tendsto_nat_floor_mul_atTop b' hb_pos).eventually_forall_ge_atTop + R.eventually_asympBound_pos, -- h_asympBound_floor + eventually_gt_atTop 0, -- n₀_pos + eventually_forall_ge_atTop.mpr R.eventually_one_add_smoothingFn_r_pos, -- h_smoothing_r_pos + eventually_forall_ge_atTop.mpr R.rpow_p_mul_one_add_smoothingFn_ge, -- bound2 + (tendsto_nat_floor_mul_atTop b' hb_pos).eventually_forall_ge_atTop + eventually_one_add_smoothingFn_pos, -- h_smoothingFn_floor + eventually_forall_ge_atTop.mpr h_sumTransform_aux, -- h_sumTransform + eventually_forall_ge_atTop.mpr R.eventually_bi_mul_le_r, -- h_bi_le_r + eventually_forall_ge_atTop.mpr (eventually_ge_atTop ⌈exp 1⌉₊)] -- h_exp + with n₀ n₀_ge_Rn₀ h_b_floor h_smoothing_pos h_smoothing_pos' h_asympBound_pos h_asympBound_r_pos + h_asympBound_floor n₀_pos h_smoothing_r_pos bound2 h_smoothingFn_floor h_sumTransform + h_bi_le_r h_exp + have h_base_nonempty := R.base_nonempty n₀_pos + -- Min of the ratio T(n) / asympBound(n) over the base case n ∈ [b * n₀, n₀) + set base_min : ℝ := + (Finset.Ico (⌊b' * n₀⌋₊) n₀).inf' h_base_nonempty + (fun n => T n / ((1 + ε n) * asympBound g a b n)) with base_min_def + -- The big-O constant we are aiming for: min of the base case ratio and what we need to cancel + -- out the g(n) term in the calculation below + let C := min (2 * c₁)⁻¹ base_min + have hC_pos : 0 < C := by + refine lt_min (by positivity) ?_ + obtain ⟨m, hm_mem, hm⟩ := + Finset.exists_mem_eq_inf' h_base_nonempty (fun n => T n / ((1 + ε n) * asympBound g a b n)) + calc 0 < T m / ((1 + ε m) * asympBound g a b m) := by + have H₁ : 0 < T m := by exact R.T_pos _ + have H₂ : 0 < 1 + ε m := by rw [Finset.mem_Ico] at hm_mem + exact h_smoothing_pos' m hm_mem.1 + have H₃ : 0 < asympBound g a b m := by + refine R.asympBound_pos m ?_ + calc 0 < ⌊b' * n₀⌋₊ := by exact h_b_floor + _ ≤ m := by rw [Finset.mem_Ico] at hm_mem; exact hm_mem.1 + positivity + _ = base_min := by rw [base_min_def, hm] + refine ⟨C, hC_pos, fun n hn => ?_⟩ + -- Base case: statement is true for `b' * n₀ ≤ n < n₀` + have h_base : ∀ n ∈ Finset.Ico (⌊b' * n₀⌋₊) n₀, C * ((1 + ε n) * asympBound g a b n) ≤ T n := by + intro n hn + rw [Finset.mem_Ico] at hn + have htmp1 : 0 < 1 + ε n := h_smoothingFn_floor n hn.1 + have htmp2 : 
0 < asympBound g a b n := h_asympBound_floor n hn.1 + rw [← _root_.le_div_iff (by positivity)] + rw [← Finset.mem_Ico] at hn + calc T n / ((1 + ε ↑n) * asympBound g a b n) + ≥ (Finset.Ico (⌊b' * n₀⌋₊) n₀).inf' h_base_nonempty + fun z => T z / ((1 + ε z) * asympBound g a b z) := + Finset.inf'_le_of_le _ (b := n) hn <| le_refl _ + _ ≥ C := min_le_right _ _ + have h_asympBound_pos' : 0 < asympBound g a b n := h_asympBound_pos n hn + have h_one_sub_smoothingFn_pos' : 0 < 1 + ε n := h_smoothing_pos n hn + rw [Real.norm_of_nonneg (R.T_nonneg n), Real.norm_of_nonneg (by positivity)] + -- We now prove all other cases by induction + induction n using Nat.strongInductionOn + case ind n h_ind => + have b_mul_n₀_le_ri i : ⌊b' * ↑n₀⌋₊ ≤ r i n := by + exact_mod_cast calc ⌊b' * ↑n₀⌋₊ ≤ b' * n₀ := Nat.floor_le <| by positivity + _ ≤ b' * n := by gcongr + _ ≤ r i n := h_bi_le_r n hn i + have g_pos : 0 ≤ g n := R.g_nonneg n (by positivity) + calc + T n = (∑ i, a i * T (r i n)) + g n := by exact R.h_rec n <| n₀_ge_Rn₀.trans hn + _ ≥ (∑ i, a i * (C * ((1 + ε (r i n)) * asympBound g a b (r i n)))) + g n := by + -- Apply the induction hypothesis, or use the base case depending on how large `n` is + gcongr (∑ i, a i * ?_) + g n with i _ + · exact le_of_lt <| R.a_pos _ + · rcases lt_or_le (r i n) n₀ with ri_lt_n₀ | n₀_le_ri + case inl => exact h_base _ <| Finset.mem_Ico.mpr ⟨b_mul_n₀_le_ri i, ri_lt_n₀⟩ + case inr => + exact h_ind (r i n) (R.r_lt_n _ _ (n₀_ge_Rn₀.trans hn)) n₀_le_ri + (h_asympBound_r_pos _ hn _) (h_smoothing_r_pos n hn i) + _ = (∑ i, a i * (C * ((1 + ε (r i n)) * ((r i n) ^ (p a b) + * (1 + (∑ u in range (r i n), g u / u ^ ((p a b) + 1))))))) + g n := by + simp_rw [asympBound_def'] + _ = (∑ i, C * a i * ((r i n)^(p a b) * (1 + ε (r i n)) + * ((1 + (∑ u in range (r i n), g u / u ^ ((p a b) + 1)))))) + g n := by + congr; ext; ring + _ ≥ (∑ i, C * a i * ((b i) ^ (p a b) * n ^ (p a b) * (1 + ε n) + * ((1 + (∑ u in range (r i n), g u / u ^ ((p a b) + 1)))))) + g n := by + gcongr (∑ i, C * a i * (?_ * + ((1 + (∑ u in range (r i n), g u / u ^ ((p a b) + 1)))))) + g n with i + · have := R.a_pos i + positivity + · refine add_nonneg zero_le_one <| Finset.sum_nonneg fun j _ => ?_ + rw [div_nonneg_iff] + exact Or.inl ⟨R.g_nonneg j (by positivity), by positivity⟩ + · exact bound2 n hn i + _ = (∑ i, C * a i * ((b i) ^ (p a b) * n ^ (p a b) * (1 + ε n) + * ((1 + ((∑ u in range n, g u / u ^ ((p a b) + 1)) + - (∑ u in Finset.Ico (r i n) n, g u / u ^ ((p a b) + 1))))))) + g n := by + congr; ext i; congr + refine eq_sub_of_add_eq ?_ + rw [add_comm] + exact add_eq_of_eq_sub <| Finset.sum_Ico_eq_sub _ + <| le_of_lt <| R.r_lt_n i n <| n₀_ge_Rn₀.trans hn + _ = (∑ i, C * a i * ((b i) ^ (p a b) * (1 + ε n) + * ((n ^ (p a b) * (1 + (∑ u in range n, g u / u ^ ((p a b) + 1))) + - n ^ (p a b) * (∑ u in Finset.Ico (r i n) n, g u / u ^ ((p a b) + 1)))))) + + g n := by + congr; ext; ring + _ = (∑ i, C * a i * ((b i) ^ (p a b) * (1 + ε n) + * ((asympBound g a b n - sumTransform (p a b) g (r i n) n)))) + g n := by + simp_rw [asympBound_def', sumTransform_def] + _ ≥ (∑ i, C * a i * ((b i) ^ (p a b) * (1 + ε n) + * ((asympBound g a b n - c₁ * g n)))) + g n := by + gcongr with i + · have := R.a_pos i + positivity + · have := R.b_pos i + positivity + · exact h_sumTransform n hn i + _ = (∑ i, C * (1 + ε n) * ((asympBound g a b n - c₁ * g n)) + * (a i * (b i) ^ (p a b))) + g n := by congr; ext; ring + _ = C * (1 + ε n) * (asympBound g a b n - c₁ * g n) + g n := by + rw [← Finset.mul_sum, R.sumCoeffsExp_p_eq_one, mul_one] + _ = C * (1 + 
ε n) * asympBound g a b n + (1 - C * c₁ * (1 + ε n)) * g n := by ring + _ ≥ C * (1 + ε n) * asympBound g a b n + 0 := by + gcongr + refine mul_nonneg ?_ g_pos + rw [sub_nonneg] + calc C * c₁ * (1 + ε n) ≤ C * c₁ * 2 := by + gcongr + refine one_add_smoothingFn_le_two ?_ + calc exp 1 ≤ ⌈exp 1⌉₊ := by exact Nat.le_ceil _ + _ ≤ n := by exact_mod_cast h_exp n hn + _ = C * (2 * c₁) := by ring + _ ≤ (2 * c₁)⁻¹ * (2 * c₁) := by gcongr; exact min_le_left _ _ + _ = c₁⁻¹ * c₁ := by ring + _ = 1 := inv_mul_cancel (by positivity) + _ = C * ((1 + ε n) * asympBound g a b n) := by ring + +/-- The **Akra-Bazzi theorem**: `T ∈ O(n^p (1 + ∑_u^n g(u) / u^{p+1}))` -/ +theorem isBigO_asympBound : T =O[atTop] asympBound g a b := by + calc T =O[atTop] (fun n => (1 - ε n) * asympBound g a b n) := by + exact R.T_isBigO_smoothingFn_mul_asympBound + _ =O[atTop] (fun n => 1 * asympBound g a b n) := by + refine IsBigO.mul (isBigO_const_of_tendsto (y := 1) ?_ one_ne_zero) + (isBigO_refl _ _) + show Tendsto ((fun n => 1 - ε n) ∘ (Nat.cast : ℕ → ℝ)) atTop (𝓝 1) + exact Tendsto.comp isEquivalent_one_sub_smoothingFn_one.tendsto_const + tendsto_nat_cast_atTop_atTop + _ = asympBound g a b := by simp + +/-- The **Akra-Bazzi theorem**: `T ∈ Ω(n^p (1 + ∑_u^n g(u) / u^{p+1}))` -/ +theorem isBigO_symm_asympBound : asympBound g a b =O[atTop] T := by + calc asympBound g a b = (fun n => 1 * asympBound g a b n) := by simp + _ ~[atTop] (fun n => (1 + ε n) * asympBound g a b n) := by + refine IsEquivalent.mul (IsEquivalent.symm ?_) IsEquivalent.refl + show (fun (n:ℕ) => 1 + ε n) ~[atTop] Function.const ℕ (1:ℝ) + rw [isEquivalent_const_iff_tendsto one_ne_zero] + show Tendsto ((fun n => 1 + ε n) ∘ (Nat.cast : ℕ → ℝ)) atTop (𝓝 1) + exact Tendsto.comp isEquivalent_one_add_smoothingFn_one.tendsto_const + tendsto_nat_cast_atTop_atTop + _ =O[atTop] T := R.smoothingFn_mul_asympBound_isBigO_T + +/-- The **Akra-Bazzi theorem**: `T ∈ Θ(n^p (1 + ∑_u^n g(u) / u^{p+1}))` -/ +theorem isTheta_asympBound : T =Θ[atTop] asympBound g a b := + ⟨R.isBigO_asympBound, R.isBigO_symm_asympBound⟩ + +end AkraBazziRecurrence diff --git a/Mathlib/Computability/AkraBazzi/GrowsPolynomially.lean b/Mathlib/Computability/AkraBazzi/GrowsPolynomially.lean new file mode 100644 index 0000000000000..24fd536352dc4 --- /dev/null +++ b/Mathlib/Computability/AkraBazzi/GrowsPolynomially.lean @@ -0,0 +1,721 @@ +/- +Copyright (c) 2023 Frédéric Dupuis. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Frédéric Dupuis +-/ + +import Mathlib.Analysis.Asymptotics.AsymptoticEquivalent +import Mathlib.Analysis.SpecialFunctions.Pow.Real +import Mathlib.Order.Filter.EventuallyConst +import Mathlib.Algebra.Order.ToIntervalMod +import Mathlib.Analysis.SpecialFunctions.Log.Base + +/-! +# Akra-Bazzi theorem: The polynomial growth condition + +This file defines and develops an API for the polynomial growth condition that appears in the +statement of the Akra-Bazzi theorem: for the Akra-Bazzi theorem to hold, the function `g` must +satisfy the condition that `c₁ g(n) ≤ g(u) ≤ c₂ g(n)`, for u between b*n and n for any constant +`b ∈ (0,1)`. + +## Implementation notes + +Our definition states that the condition must hold for any `b ∈ (0,1)`. This is equivalent to +only requiring it for `b = 1/2` or any other particular value between 0 and 1. While this +could in principle make it harder to prove that a particular function grows polynomially, +this issue doesn't seem to arise in practice. 
+ +-/ + +open Finset Real Filter Asymptotics BigOperators +open scoped Topology + +namespace AkraBazziRecurrence + +/-- The growth condition that the function `g` must satisfy for the Akra-Bazzi theorem to apply. +It roughly states that `c₁ g(n) ≤ g(u) ≤ c₂ g(n)`, for `u` between `b*n` and `n` for any +constant `b ∈ (0,1)`. -/ +def GrowsPolynomially (f : ℝ → ℝ) : Prop := + ∀ b ∈ Set.Ioo 0 1, ∃ c₁ > 0, ∃ c₂ > 0, + ∀ᶠ x in atTop, ∀ u ∈ Set.Icc (b * x) x, f u ∈ Set.Icc (c₁ * (f x)) (c₂ * f x) + +namespace GrowsPolynomially + +lemma congr_of_eventuallyEq {f g : ℝ → ℝ} (hfg : f =ᶠ[atTop] g) (hg : GrowsPolynomially g) : + GrowsPolynomially f := by + intro b hb + have hg' := hg b hb + obtain ⟨c₁, hc₁_mem, c₂, hc₂_mem, hg'⟩ := hg' + refine ⟨c₁, hc₁_mem, c₂, hc₂_mem, ?_⟩ + filter_upwards [hg', (tendsto_id.const_mul_atTop hb.1).eventually_forall_ge_atTop hfg, hfg] + with x hx₁ hx₂ hx₃ + intro u hu + rw [hx₂ u hu.1, hx₃] + exact hx₁ u hu + +lemma iff_eventuallyEq {f g : ℝ → ℝ} (h : f =ᶠ[atTop] g) : + GrowsPolynomially f ↔ GrowsPolynomially g := + ⟨fun hf => congr_of_eventuallyEq h.symm hf, fun hg => congr_of_eventuallyEq h hg⟩ + +variable {f : ℝ → ℝ} + +lemma eventually_atTop_le {b : ℝ} (hb : b ∈ Set.Ioo 0 1) (hf : GrowsPolynomially f) : + ∃ c > 0, ∀ᶠ x in atTop, ∀ u ∈ Set.Icc (b * x) x, f u ≤ c * f x := by + obtain ⟨c₁, _, c₂, hc₂, h⟩ := hf b hb + refine ⟨c₂, hc₂, ?_⟩ + filter_upwards [h] + exact fun _ H u hu => (H u hu).2 + +lemma eventually_atTop_le_nat {b : ℝ} (hb : b ∈ Set.Ioo 0 1) (hf : GrowsPolynomially f) : + ∃ c > 0, ∀ᶠ (n:ℕ) in atTop, ∀ u ∈ Set.Icc (b * n) n, f u ≤ c * f n := by + obtain ⟨c, hc_mem, hc⟩ := hf.eventually_atTop_le hb + exact ⟨c, hc_mem, hc.nat_cast_atTop⟩ + +lemma eventually_atTop_ge {b : ℝ} (hb : b ∈ Set.Ioo 0 1) (hf : GrowsPolynomially f) : + ∃ c > 0, ∀ᶠ x in atTop, ∀ u ∈ Set.Icc (b * x) x, c * f x ≤ f u := by + obtain ⟨c₁, hc₁, c₂, _, h⟩ := hf b hb + refine ⟨c₁, hc₁, ?_⟩ + filter_upwards [h] + exact fun _ H u hu => (H u hu).1 + +lemma eventually_atTop_ge_nat {b : ℝ} (hb : b ∈ Set.Ioo 0 1) (hf : GrowsPolynomially f) : + ∃ c > 0, ∀ᶠ (n:ℕ) in atTop, ∀ u ∈ Set.Icc (b * n) n, c * f n ≤ f u := by + obtain ⟨c, hc_mem, hc⟩ := hf.eventually_atTop_ge hb + exact ⟨c, hc_mem, hc.nat_cast_atTop⟩ + +lemma eventually_zero_of_frequently_zero (hf : GrowsPolynomially f) (hf' : ∃ᶠ x in atTop, f x = 0) : + ∀ᶠ x in atTop, f x = 0 := by + obtain ⟨c₁, hc₁_mem, c₂, hc₂_mem, hf⟩ := hf (1/2) (by norm_num) + rw [frequently_atTop] at hf' + filter_upwards [eventually_forall_ge_atTop.mpr hf, eventually_gt_atTop 0] with x hx hx_pos + obtain ⟨x₀, hx₀_ge, hx₀⟩ := hf' (max x 1) + have x₀_pos := calc + 0 < 1 := by norm_num + _ ≤ x₀ := le_of_max_le_right hx₀_ge + have hmain : ∀ (m : ℕ) (z : ℝ), x ≤ z → + z ∈ Set.Icc ((2:ℝ)^(-(m:ℤ) -1) * x₀) ((2:ℝ)^(-(m:ℤ)) * x₀) → f z = 0 := by + intro m + induction m + case zero => + simp only [Nat.zero_eq, CharP.cast_eq_zero, neg_zero, zero_sub, zpow_zero, one_mul] at * + specialize hx x₀ (le_of_max_le_left hx₀_ge) + simp only [hx₀, mul_zero, Set.Icc_self, Set.mem_singleton_iff] at hx + refine fun z _ hz => hx _ ?_ + simp only [zpow_neg, zpow_one] at hz + simp only [one_div, hz] + case succ k ih => + intro z hxz hz + simp only [Nat.succ_eq_add_one, Nat.cast_add, Nat.cast_one] at * + have hx' : x ≤ (2:ℝ)^(-(k:ℤ) - 1) * x₀ := by + calc x ≤ z := hxz + _ ≤ _ := by simp only [neg_add, ← sub_eq_add_neg] at hz; exact hz.2 + specialize hx ((2:ℝ)^(-(k:ℤ) - 1) * x₀) hx' z + specialize ih ((2:ℝ)^(-(k:ℤ) - 1) * x₀) hx' ?ineq + case ineq => + rw [Set.left_mem_Icc] + gcongr + · norm_num 
+ · linarith + simp only [ih, mul_zero, Set.Icc_self, Set.mem_singleton_iff] at hx + refine hx ⟨?lb₁, ?ub₁⟩ + case lb₁ => + rw [one_div, ← zpow_neg_one, ← mul_assoc, ← zpow_add₀ (by norm_num)] + have h₁ : (-1 : ℤ) + (-k - 1) = -k - 2 := by ring + have h₂ : -(k + (1:ℤ)) - 1 = -k - 2 := by ring + rw [h₁] + rw [h₂] at hz + exact hz.1 + case ub₁ => + have := hz.2 + simp only [neg_add, ← sub_eq_add_neg] at this + exact this + refine hmain ⌊-logb 2 (x / x₀)⌋₊ x le_rfl ⟨?lb, ?ub⟩ + case lb => + rw [← le_div_iff x₀_pos] + refine (logb_le_logb (b := 2) (by norm_num) (zpow_pos_of_pos (by norm_num) _) + (by positivity)).mp ?_ + rw [← rpow_int_cast, logb_rpow (by norm_num) (by norm_num), ← neg_le_neg_iff] + simp only [Int.cast_sub, Int.cast_neg, Int.cast_ofNat, Int.cast_one, neg_sub, sub_neg_eq_add] + calc -logb 2 (x/x₀) ≤ ⌈-logb 2 (x/x₀)⌉₊ := Nat.le_ceil (-logb 2 (x / x₀)) + _ ≤ _ := by rw [add_comm]; exact_mod_cast Nat.ceil_le_floor_add_one _ + case ub => + rw [← div_le_iff x₀_pos] + refine (logb_le_logb (b := 2) (by norm_num) (by positivity) + (zpow_pos_of_pos (by norm_num) _)).mp ?_ + rw [← rpow_int_cast, logb_rpow (by norm_num) (by norm_num), ← neg_le_neg_iff] + simp only [Int.cast_neg, Int.cast_ofNat, neg_neg] + have : 0 ≤ -logb 2 (x / x₀) := by + rw [neg_nonneg] + refine logb_nonpos (by norm_num) (by positivity) ?_ + rw [div_le_one x₀_pos] + exact le_of_max_le_left hx₀_ge + exact_mod_cast Nat.floor_le this + +lemma eventually_atTop_nonneg_or_nonpos (hf : GrowsPolynomially f) : + (∀ᶠ x in atTop, 0 ≤ f x) ∨ (∀ᶠ x in atTop, f x ≤ 0) := by + obtain ⟨c₁, _, c₂, _, h⟩ := hf (1/2) (by norm_num) + rcases lt_trichotomy c₁ c₂ with hlt|heq|hgt + case inl => -- c₁ < c₂ + left + filter_upwards [h, eventually_ge_atTop 0] with x hx hx_nonneg + have h' : 3 / 4 * x ∈ Set.Icc (1 / 2 * x) x := by + rw [Set.mem_Icc] + exact ⟨by gcongr ?_ * x; norm_num, by linarith⟩ + have hu := hx (3/4 * x) h' + have hu := Set.nonempty_of_mem hu + rw [Set.nonempty_Icc] at hu + have hu' : 0 ≤ (c₂ - c₁) * f x := by linarith + exact nonneg_of_mul_nonneg_right hu' (by linarith) + case inr.inr => -- c₂ < c₁ + right + filter_upwards [h, eventually_ge_atTop 0] with x hx hx_nonneg + have h' : 3 / 4 * x ∈ Set.Icc (1 / 2 * x) x := by + rw [Set.mem_Icc] + exact ⟨by gcongr ?_ * x; norm_num, by linarith⟩ + have hu := hx (3/4 * x) h' + have hu := Set.nonempty_of_mem hu + rw [Set.nonempty_Icc] at hu + have hu' : (c₁ - c₂) * f x ≤ 0 := by linarith + exact nonpos_of_mul_nonpos_right hu' (by linarith) + case inr.inl => -- c₁ = c₂ + have hmain : ∃ c, ∀ᶠ x in atTop, f x = c := by + simp only [heq, Set.Icc_self, Set.mem_singleton_iff, one_mul] at h + rw [eventually_atTop] at h + obtain ⟨n₀, hn₀⟩ := h + refine ⟨f (max n₀ 2), ?_⟩ + rw [eventually_atTop] + refine ⟨max n₀ 2, ?_⟩ + refine Real.induction_Ico_mul _ 2 (by norm_num) (by positivity) ?base ?step + case base => + intro x ⟨hxlb, hxub⟩ + have h₁ := calc n₀ ≤ 1 * max n₀ 2 := by simp + _ ≤ 2 * max n₀ 2 := by gcongr; norm_num + have h₂ := hn₀ (2 * max n₀ 2) h₁ (max n₀ 2) ⟨by simp [-max_le_iff, hxlb], by linarith⟩ + rw [h₂] + exact hn₀ (2 * max n₀ 2) h₁ x ⟨by simp [-max_le_iff, hxlb], le_of_lt hxub⟩ + case step => + intro n hn hyp_ind z hz + have z_nonneg : 0 ≤ z := by + calc (0:ℝ) ≤ (2:ℝ)^n * max n₀ 2 := by + exact mul_nonneg (pow_nonneg (by norm_num) _) (by norm_num) + _ ≤ z := by exact_mod_cast hz.1 + have le_2n : max n₀ 2 ≤ (2:ℝ)^n * max n₀ 2 := by + nth_rewrite 1 [← one_mul (max n₀ 2)] + gcongr + exact one_le_pow_of_one_le (by norm_num : (1:ℝ) ≤ 2) _ + have n₀_le_z : n₀ ≤ z := by + calc n₀ ≤ max 
n₀ 2 := by simp + _ ≤ (2:ℝ)^n * max n₀ 2 := le_2n + _ ≤ _ := by exact_mod_cast hz.1 + have fz_eq_c₂fz : f z = c₂ * f z := hn₀ z n₀_le_z z ⟨by linarith, le_rfl⟩ + have z_to_half_z' : f (1/2 * z) = c₂ * f z := hn₀ z n₀_le_z (1/2 * z) ⟨le_rfl, by linarith⟩ + have z_to_half_z : f (1/2 * z) = f z := by rwa [← fz_eq_c₂fz] at z_to_half_z' + have half_z_to_base : f (1/2 * z) = f (max n₀ 2) := by + refine hyp_ind (1/2 * z) ⟨?lb, ?ub⟩ + case lb => + calc max n₀ 2 ≤ ((1:ℝ)/(2:ℝ)) * (2:ℝ) ^ 1 * max n₀ 2 := by simp + _ ≤ ((1:ℝ)/(2:ℝ)) * (2:ℝ) ^ n * max n₀ 2 := by gcongr; norm_num + _ ≤ _ := by rw [mul_assoc]; gcongr; exact_mod_cast hz.1 + case ub => + have h₁ : (2:ℝ)^n = ((1:ℝ)/(2:ℝ)) * (2:ℝ)^(n+1) := by + rw [one_div, pow_add, pow_one] + ring + rw [h₁, mul_assoc] + gcongr + exact_mod_cast hz.2 + rw [← z_to_half_z, half_z_to_base] + obtain ⟨c, hc⟩ := hmain + rcases le_or_lt 0 c with hpos|hneg + case inl => + exact Or.inl <| by filter_upwards [hc] with _ hc; simpa only [hc] + case inr => + right + filter_upwards [hc] with x hc + exact le_of_lt <| by simpa only [hc] + +lemma eventually_atTop_zero_or_pos_or_neg (hf : GrowsPolynomially f) : + (∀ᶠ x in atTop, f x = 0) ∨ (∀ᶠ x in atTop, 0 < f x) ∨ (∀ᶠ x in atTop, f x < 0) := by + by_cases h : ∃ᶠ x in atTop, f x = 0 + case pos => exact Or.inl <| eventually_zero_of_frequently_zero hf h + case neg => + rw [not_frequently] at h + push_neg at h + rcases eventually_atTop_nonneg_or_nonpos hf with h'|h' + case inl => + refine Or.inr (Or.inl ?_) + simp only [lt_iff_le_and_ne] + rw [eventually_and] + exact ⟨h', by filter_upwards [h] with x hx; exact hx.symm⟩ + case inr => + refine Or.inr (Or.inr ?_) + simp only [lt_iff_le_and_ne] + rw [eventually_and] + exact ⟨h', h⟩ + +protected lemma neg {f : ℝ → ℝ} (hf : GrowsPolynomially f) : GrowsPolynomially (-f) := by + intro b hb + obtain ⟨c₁, hc₁_mem, c₂, hc₂_mem, hf⟩ := hf b hb + refine ⟨c₂, hc₂_mem, c₁, hc₁_mem, ?_⟩ + filter_upwards [hf] with x hx + intro u hu + simp only [Pi.neg_apply, Set.neg_mem_Icc_iff, neg_mul_eq_mul_neg, neg_neg] + exact hx u hu + +protected lemma neg_iff {f : ℝ → ℝ} : GrowsPolynomially f ↔ GrowsPolynomially (-f) := + ⟨fun hf => hf.neg, fun hf => by rw [← neg_neg f]; exact hf.neg⟩ + +protected lemma abs (hf : GrowsPolynomially f) : GrowsPolynomially (fun x => |f x|) := by + rcases eventually_atTop_nonneg_or_nonpos hf with hf'|hf' + case inl => + have hmain : f =ᶠ[atTop] fun x => |f x| := by + filter_upwards [hf'] with x hx + rw [abs_of_nonneg hx] + rw [← iff_eventuallyEq hmain] + exact hf + case inr => + have hmain : -f =ᶠ[atTop] fun x => |f x| := by + filter_upwards [hf'] with x hx + simp only [Pi.neg_apply, abs_of_nonpos hx] + + rw [← iff_eventuallyEq hmain] + exact hf.neg + +protected lemma norm (hf : GrowsPolynomially f) : GrowsPolynomially (fun x => ‖f x‖) := by + simp only [norm_eq_abs] + exact hf.abs + +end GrowsPolynomially + +variable {f : ℝ → ℝ} + +lemma growsPolynomially_const {c : ℝ} : GrowsPolynomially (fun _ => c) := by + refine fun _ _ => ⟨1, by norm_num, 1, by norm_num, ?_⟩ + filter_upwards [] with x + simp + +lemma growsPolynomially_id : GrowsPolynomially (fun x => x) := by + intro b hb + refine ⟨b, hb.1, ?_⟩ + refine ⟨1, by norm_num, ?_⟩ + refine eventually_of_forall fun x u hu => ?_ + simp only [one_mul, ge_iff_le, gt_iff_lt, not_le, Set.mem_Icc] + exact ⟨hu.1, hu.2⟩ + +protected lemma GrowsPolynomially.mul {f g : ℝ → ℝ} (hf : GrowsPolynomially f) + (hg : GrowsPolynomially g) : GrowsPolynomially fun x => f x * g x := by + suffices : GrowsPolynomially fun x => |f x| * |g x| + · 
rcases eventually_atTop_nonneg_or_nonpos hf with hf'|hf' + case inl => + rcases eventually_atTop_nonneg_or_nonpos hg with hg'|hg' + case inl => + have hmain : (fun x => f x * g x) =ᶠ[atTop] fun x => |f x| * |g x| := by + filter_upwards [hf', hg'] with x hx₁ hx₂ + rw [abs_of_nonneg hx₁, abs_of_nonneg hx₂] + rwa [iff_eventuallyEq hmain] + case inr => + have hmain : (fun x => f x * g x) =ᶠ[atTop] fun x => -|f x| * |g x| := by + filter_upwards [hf', hg'] with x hx₁ hx₂ + simp [abs_of_nonneg hx₁, abs_of_nonpos hx₂] + simp only [iff_eventuallyEq hmain, neg_mul] + exact this.neg + case inr => + rcases eventually_atTop_nonneg_or_nonpos hg with hg'|hg' + case inl => + have hmain : (fun x => f x * g x) =ᶠ[atTop] fun x => -|f x| * |g x| := by + filter_upwards [hf', hg'] with x hx₁ hx₂ + rw [abs_of_nonpos hx₁, abs_of_nonneg hx₂, neg_neg] + simp only [iff_eventuallyEq hmain, neg_mul] + exact this.neg + case inr => + have hmain : (fun x => f x * g x) =ᶠ[atTop] fun x => |f x| * |g x| := by + filter_upwards [hf', hg'] with x hx₁ hx₂ + simp [abs_of_nonpos hx₁, abs_of_nonpos hx₂] + simp only [iff_eventuallyEq hmain, neg_mul] + exact this + · intro b hb + have hf := hf.abs b hb + have hg := hg.abs b hb + obtain ⟨c₁, hc₁_mem, c₂, hc₂_mem, hf⟩ := hf + obtain ⟨c₃, hc₃_mem, c₄, hc₄_mem, hg⟩ := hg + refine ⟨c₁ * c₃, by show 0 < c₁ * c₃; positivity, ?_⟩ + refine ⟨c₂ * c₄, by show 0 < c₂ * c₄; positivity, ?_⟩ + filter_upwards [hf, hg] with x hf hg + intro u hu + refine ⟨?lb, ?ub⟩ + case lb => calc + c₁ * c₃ * (|f x| * |g x|) = (c₁ * |f x|) * (c₃ * |g x|) := by ring + _ ≤ |f u| * |g u| := by + gcongr + · exact (hf u hu).1 + · exact (hg u hu).1 + case ub => calc + |f u| * |g u| ≤ (c₂ * |f x|) * (c₄ * |g x|) := by + gcongr + · exact (hf u hu).2 + · exact (hg u hu).2 + _ = c₂ * c₄ * (|f x| * |g x|) := by ring + +lemma GrowsPolynomially.const_mul {f : ℝ → ℝ} {c : ℝ} (hf : GrowsPolynomially f) : + GrowsPolynomially fun x => c * f x := + GrowsPolynomially.mul growsPolynomially_const hf + +protected lemma GrowsPolynomially.add {f g : ℝ → ℝ} (hf : GrowsPolynomially f) + (hg : GrowsPolynomially g) (hf' : 0 ≤ᶠ[atTop] f) (hg' : 0 ≤ᶠ[atTop] g) : + GrowsPolynomially fun x => f x + g x := by + intro b hb + have hf := hf b hb + have hg := hg b hb + obtain ⟨c₁, hc₁_mem, c₂, hc₂_mem, hf⟩ := hf + obtain ⟨c₃, hc₃_mem, c₄, _, hg⟩ := hg + refine ⟨min c₁ c₃, by show 0 < min c₁ c₃; positivity, ?_⟩ + refine ⟨max c₂ c₄, by show 0 < max c₂ c₄; positivity, ?_⟩ + filter_upwards [hf, hg, + (tendsto_id.const_mul_atTop hb.1).eventually_forall_ge_atTop hf', + (tendsto_id.const_mul_atTop hb.1).eventually_forall_ge_atTop hg', + eventually_ge_atTop 0] with x hf hg hf' hg' hx_pos + intro u hu + have hbx : b * x ≤ x := calc + b * x ≤ 1 * x := by gcongr; exact le_of_lt hb.2 + _ = x := by ring + have fx_nonneg : 0 ≤ f x := hf' x hbx + have gx_nonneg : 0 ≤ g x := hg' x hbx + refine ⟨?lb, ?ub⟩ + case lb => calc + min c₁ c₃ * (f x + g x) = min c₁ c₃ * f x + min c₁ c₃ * g x := by simp only [mul_add] + _ ≤ c₁ * f x + c₃ * g x := by + gcongr + · exact min_le_left _ _ + · exact min_le_right _ _ + _ ≤ f u + g u := by + gcongr + · exact (hf u hu).1 + · exact (hg u hu).1 + case ub => calc + max c₂ c₄ * (f x + g x) = max c₂ c₄ * f x + max c₂ c₄ * g x := by simp only [mul_add] + _ ≥ c₂ * f x + c₄ * g x := by gcongr + · exact le_max_left _ _ + · exact le_max_right _ _ + _ ≥ f u + g u := by gcongr + · exact (hf u hu).2 + · exact (hg u hu).2 + +lemma GrowsPolynomially.add_isLittleO {f g : ℝ → ℝ} (hf : GrowsPolynomially f) + (hfg : g =o[atTop] f) : GrowsPolynomially fun 
x => f x + g x := by + intro b hb + have hb_ub := hb.2 + rw [isLittleO_iff] at hfg + rcases hf.eventually_atTop_nonneg_or_nonpos with hf'|hf' + case inl => -- f is eventually nonneg + have hf := hf b hb + obtain ⟨c₁, hc₁_mem : 0 < c₁, c₂, hc₂_mem : 0 < c₂, hf⟩ := hf + specialize hfg (c := 1/2) (by norm_num) + refine ⟨c₁ / 3, by positivity, 3*c₂, by positivity, ?_⟩ + filter_upwards [hf, + (tendsto_id.const_mul_atTop hb.1).eventually_forall_ge_atTop hfg, + (tendsto_id.const_mul_atTop hb.1).eventually_forall_ge_atTop hf', + eventually_ge_atTop 0] with x hf₁ hfg' hf₂ hx_nonneg + have hbx : b * x ≤ x := by nth_rewrite 2 [← one_mul x]; gcongr + have hfg₂ : ‖g x‖ ≤ 1/2 * f x := by + calc ‖g x‖ ≤ 1/2 * ‖f x‖ := hfg' x hbx + _ = 1/2 * f x := by congr; exact norm_of_nonneg (hf₂ _ hbx) + have hx_ub : f x + g x ≤ 3/2 * f x := by + calc _ ≤ f x + ‖g x‖ := by gcongr; exact le_norm_self (g x) + _ ≤ f x + 1/2 * f x := by gcongr + _ = 3/2 * f x := by ring + have hx_lb : 1/2 * f x ≤ f x + g x := by + calc f x + g x ≥ f x - ‖g x‖ := by + rw [sub_eq_add_neg, norm_eq_abs]; gcongr; exact neg_abs_le_self (g x) + _ ≥ f x - 1/2 * f x := by gcongr + _ = 1/2 * f x := by ring + intro u ⟨hu_lb, hu_ub⟩ + have hfu_nonneg : 0 ≤ f u := hf₂ _ hu_lb + have hfg₃ : ‖g u‖ ≤ 1/2 * f u := by + calc ‖g u‖ ≤ 1/2 * ‖f u‖ := hfg' _ hu_lb + _ = 1/2 * f u := by congr; simp only [norm_eq_abs, abs_eq_self, hfu_nonneg] + refine ⟨?lb, ?ub⟩ + case lb => + calc f u + g u ≥ f u - ‖g u‖ := by + rw [sub_eq_add_neg, norm_eq_abs]; gcongr; exact neg_abs_le_self _ + _ ≥ f u - 1/2 * f u := by gcongr + _ = 1/2 * f u := by ring + _ ≥ 1/2 * (c₁ * f x) := by gcongr; exact (hf₁ u ⟨hu_lb, hu_ub⟩).1 + _ = c₁/3 * (3/2 * f x) := by ring + _ ≥ c₁/3 * (f x + g x) := by gcongr + case ub => + calc _ ≤ f u + ‖g u‖ := by gcongr; exact le_norm_self (g u) + _ ≤ f u + 1/2 * f u := by gcongr + _ = 3/2 * f u := by ring + _ ≤ 3/2 * (c₂ * f x) := by gcongr; exact (hf₁ u ⟨hu_lb, hu_ub⟩).2 + _ = 3*c₂ * (1/2 * f x) := by ring + _ ≤ 3*c₂ * (f x + g x) := by gcongr + case inr => -- f is eventually nonpos + have hf := hf b hb + obtain ⟨c₁, hc₁_mem : 0 < c₁, c₂, hc₂_mem : 0 < c₂, hf⟩ := hf + specialize hfg (c := 1/2) (by norm_num) + refine ⟨3*c₁, by positivity, c₂/3, by positivity, ?_⟩ + filter_upwards [hf, + (tendsto_id.const_mul_atTop hb.1).eventually_forall_ge_atTop hfg, + (tendsto_id.const_mul_atTop hb.1).eventually_forall_ge_atTop hf', + eventually_ge_atTop 0] with x hf₁ hfg' hf₂ hx_nonneg + have hbx : b * x ≤ x := by nth_rewrite 2 [← one_mul x]; gcongr + have hfg₂ : ‖g x‖ ≤ -1/2 * f x := by + calc ‖g x‖ ≤ 1/2 * ‖f x‖ := hfg' x hbx + _ = 1/2 * (-f x) := by congr; exact norm_of_nonpos (hf₂ x hbx) + _ = _ := by ring + have hx_ub : f x + g x ≤ 1/2 * f x := by + calc _ ≤ f x + ‖g x‖ := by gcongr; exact le_norm_self (g x) + _ ≤ f x + (-1/2 * f x) := by gcongr + _ = 1/2 * f x := by ring + have hx_lb : 3/2 * f x ≤ f x + g x := by + calc f x + g x ≥ f x - ‖g x‖ := by + rw [sub_eq_add_neg, norm_eq_abs]; gcongr; exact neg_abs_le_self (g x) + _ ≥ f x + 1/2 * f x := by + rw [sub_eq_add_neg] + gcongr + refine le_of_neg_le_neg ?bc.a + rwa [neg_neg, ← neg_mul, ← neg_div] + _ = 3/2 * f x := by ring + intro u ⟨hu_lb, hu_ub⟩ + have hfu_nonpos : f u ≤ 0:= hf₂ _ hu_lb + have hfg₃ : ‖g u‖ ≤ -1/2 * f u := by + calc ‖g u‖ ≤ 1/2 * ‖f u‖ := hfg' _ hu_lb + _ = 1/2 * (-f u) := by congr; exact norm_of_nonpos hfu_nonpos + _ = -1/2 * f u := by ring + refine ⟨?lb, ?ub⟩ + case lb => + calc f u + g u ≥ f u - ‖g u‖ := by + rw [sub_eq_add_neg, norm_eq_abs]; gcongr; exact neg_abs_le_self _ + _ ≥ f u + 1/2 * 
f u := by + rw [sub_eq_add_neg] + gcongr + refine le_of_neg_le_neg ?_ + rwa [neg_neg, ← neg_mul, ← neg_div] + _ = 3/2 * f u := by ring + _ ≥ 3/2 * (c₁ * f x) := by gcongr; exact (hf₁ u ⟨hu_lb, hu_ub⟩).1 + _ = 3*c₁ * (1/2 * f x) := by ring + _ ≥ 3*c₁ * (f x + g x) := by gcongr + case ub => + calc _ ≤ f u + ‖g u‖ := by gcongr; exact le_norm_self (g u) + _ ≤ f u - 1/2 * f u := by + rw [sub_eq_add_neg] + gcongr + rwa [← neg_mul, ← neg_div] + _ = 1/2 * f u := by ring + _ ≤ 1/2 * (c₂ * f x) := by gcongr; exact (hf₁ u ⟨hu_lb, hu_ub⟩).2 + _ = c₂/3 * (3/2 * f x) := by ring + _ ≤ c₂/3 * (f x + g x) := by gcongr + +protected lemma GrowsPolynomially.inv {f : ℝ → ℝ} (hf : GrowsPolynomially f) : + GrowsPolynomially fun x => (f x)⁻¹ := by + rcases hf.eventually_atTop_zero_or_pos_or_neg with hf'|hf_pos_or_neg + case inl => + refine fun b hb => ⟨1, by simp, 1, by simp, ?_⟩ + have hb_pos := hb.1 + filter_upwards [hf', (tendsto_id.const_mul_atTop hb_pos).eventually_forall_ge_atTop hf'] + with x hx hx' + intro u hu + simp only [hx, inv_zero, mul_zero, Set.Icc_self, Set.mem_singleton_iff, hx' u hu.1] + case inr => + suffices : GrowsPolynomially fun x => |(f x)⁻¹| + · rcases hf_pos_or_neg with hf'|hf' + case inl => + have hmain : (fun x => (f x)⁻¹) =ᶠ[atTop] fun x => |(f x)⁻¹| := by + filter_upwards [hf'] with x hx₁ + rw [abs_of_nonneg (inv_nonneg_of_nonneg (le_of_lt hx₁))] + rwa [iff_eventuallyEq hmain] + case inr => + have hmain : (fun x => (f x)⁻¹) =ᶠ[atTop] fun x => -|(f x)⁻¹| := by + filter_upwards [hf'] with x hx₁ + simp [abs_of_nonpos (inv_nonpos.mpr (le_of_lt hx₁))] + rw [iff_eventuallyEq hmain] + exact this.neg + have hf' : ∀ᶠ x in atTop, f x ≠ 0 := by + cases hf_pos_or_neg with + | inl H => filter_upwards [H] with _ hx; exact (ne_of_lt hx).symm + | inr H => filter_upwards [H] with _ hx; exact (ne_of_gt hx).symm + simp only [abs_inv] + have hf := hf.abs + intro b hb + have hb_pos := hb.1 + obtain ⟨c₁, hc₁_mem, c₂, hc₂_mem, hf⟩ := hf b hb + refine ⟨c₂⁻¹, by show 0 < c₂⁻¹; positivity, ?_⟩ + refine ⟨c₁⁻¹, by show 0 < c₁⁻¹; positivity, ?_⟩ + filter_upwards [hf, hf', (tendsto_id.const_mul_atTop hb_pos).eventually_forall_ge_atTop hf'] + with x hx hx' hx'' + intro u hu + have h₁ : 0 < |f u| := by rw [abs_pos]; exact hx'' u hu.1 + refine ⟨?lb, ?ub⟩ + case lb => + rw [← mul_inv] + gcongr + exact (hx u hu).2 + case ub => + rw [← mul_inv] + gcongr + exact (hx u hu).1 + +protected lemma GrowsPolynomially.div {f g : ℝ → ℝ} (hf : GrowsPolynomially f) + (hg : GrowsPolynomially g) : GrowsPolynomially fun x => f x / g x := by + have : (fun x => f x / g x) = fun x => f x * (g x)⁻¹ := by ext; rw [div_eq_mul_inv] + rw [this] + exact GrowsPolynomially.mul hf (GrowsPolynomially.inv hg) + +protected lemma GrowsPolynomially.rpow (p : ℝ) (hf : GrowsPolynomially f) + (hf_nonneg : ∀ᶠ x in atTop, 0 ≤ f x) : GrowsPolynomially fun x => (f x) ^ p := by + intro b hb + obtain ⟨c₁, (hc₁_mem : 0 < c₁), c₂, hc₂_mem, hfnew⟩ := hf b hb + have hc₁p : 0 < c₁ ^ p := Real.rpow_pos_of_pos hc₁_mem _ + have hc₂p : 0 < c₂ ^ p := Real.rpow_pos_of_pos hc₂_mem _ + obtain _ | hp := le_or_lt 0 p + case inl => -- 0 ≤ p + refine ⟨c₁^p, hc₁p, ?_⟩ + refine ⟨c₂^p, hc₂p, ?_⟩ + filter_upwards [eventually_gt_atTop 0, hfnew, hf_nonneg, + (tendsto_id.const_mul_atTop hb.1).eventually_forall_ge_atTop hf_nonneg] + with x _ hf₁ hf_nonneg hf_nonneg₂ + intro u hu + have fu_nonneg : 0 ≤ f u := hf_nonneg₂ u hu.1 + refine ⟨?lb, ?ub⟩ + case lb => calc + c₁^p * (f x)^p = (c₁ * f x)^p := by rw [mul_rpow (le_of_lt hc₁_mem) hf_nonneg] + _ ≤ _ := by gcongr; exact (hf₁ u hu).1 
+ case ub => calc + (f u)^p ≤ (c₂ * f x)^p := by gcongr; exact (hf₁ u hu).2 + _ = _ := by rw [← mul_rpow (le_of_lt hc₂_mem) hf_nonneg] + case inr => -- p < 0 + rcases hf.eventually_atTop_zero_or_pos_or_neg with hzero|hpos|hneg + case inl => -- eventually zero + refine ⟨1, by norm_num, 1, by norm_num, ?_⟩ + filter_upwards [hzero, hfnew] with x hx hx' + intro u hu + simp only [hx, ne_eq, zero_rpow (ne_of_lt hp), mul_zero, le_refl, not_true, lt_self_iff_false, + Set.Icc_self, Set.mem_singleton_iff] + simp only [hx, mul_zero, Set.Icc_self, Set.mem_singleton_iff] at hx' + rw [hx' u hu, zero_rpow (ne_of_lt hp)] + case inr.inl => -- eventually positive + refine ⟨c₂^p, hc₂p, ?_⟩ + refine ⟨c₁^p, hc₁p, ?_⟩ + filter_upwards [eventually_gt_atTop 0, hfnew, hpos, + (tendsto_id.const_mul_atTop hb.1).eventually_forall_ge_atTop hpos] + with x _ hf₁ hf_pos hf_pos₂ + intro u hu + refine ⟨?lb, ?ub⟩ + case lb => calc + c₂^p * (f x)^p = (c₂ * f x)^p := by rw [mul_rpow (le_of_lt hc₂_mem) (le_of_lt hf_pos)] + _ ≤ _ := rpow_le_rpow_of_exponent_nonpos (hf_pos₂ u hu.1) (hf₁ u hu).2 (le_of_lt hp) + case ub => calc + (f u)^p ≤ (c₁ * f x)^p := by + exact rpow_le_rpow_of_exponent_nonpos (by positivity) (hf₁ u hu).1 (le_of_lt hp) + _ = _ := by rw [← mul_rpow (le_of_lt hc₁_mem) (le_of_lt hf_pos)] + case inr.inr => -- eventually negative (which is impossible) + have : ∀ᶠ (_:ℝ) in atTop, False := by filter_upwards [hf_nonneg, hneg] with x hx hx'; linarith + rw [Filter.eventually_false_iff_eq_bot] at this + exact False.elim <| (atTop_neBot).ne this + +protected lemma GrowsPolynomially.pow (p : ℕ) (hf : GrowsPolynomially f) + (hf_nonneg : ∀ᶠ x in atTop, 0 ≤ f x) : GrowsPolynomially fun x => (f x) ^ p := by + simp_rw [← rpow_nat_cast] + exact hf.rpow p hf_nonneg + +protected lemma GrowsPolynomially.zpow (p : ℤ) (hf : GrowsPolynomially f) + (hf_nonneg : ∀ᶠ x in atTop, 0 ≤ f x) : GrowsPolynomially fun x => (f x) ^ p := by + simp_rw [← rpow_int_cast] + exact hf.rpow p hf_nonneg + +lemma growsPolynomially_rpow (p : ℝ) : GrowsPolynomially fun x => x ^ p := + (growsPolynomially_id).rpow p (eventually_ge_atTop 0) + +lemma growsPolynomially_pow (p : ℕ) : GrowsPolynomially fun x => x ^ p := + (growsPolynomially_id).pow p (eventually_ge_atTop 0) + +lemma growsPolynomially_zpow (p : ℤ) : GrowsPolynomially fun x => x ^ p := + (growsPolynomially_id).zpow p (eventually_ge_atTop 0) + +lemma growsPolynomially_log : GrowsPolynomially Real.log := by + intro b hb + have hb₀ : 0 < b := hb.1 + refine ⟨1 / 2, by norm_num, ?_⟩ + refine ⟨1, by norm_num, ?_⟩ + have h_tendsto : Tendsto (fun x => 1 / 2 * Real.log x) atTop atTop := + Tendsto.const_mul_atTop (by norm_num) Real.tendsto_log_atTop + filter_upwards [eventually_gt_atTop 1, + (tendsto_id.const_mul_atTop hb.1).eventually_forall_ge_atTop + <| h_tendsto.eventually (eventually_gt_atTop (-Real.log b)) ] with x hx_pos hx + intro u hu + refine ⟨?lb, ?ub⟩ + case lb => calc + 1 / 2 * Real.log x = Real.log x + (-1 / 2) * Real.log x := by ring + _ ≤ Real.log x + Real.log b := by + gcongr + rw [neg_div, neg_mul, ←neg_le] + refine le_of_lt (hx x ?_) + calc b * x ≤ 1 * x := by gcongr; exact le_of_lt hb.2 + _ = x := by rw [one_mul] + _ = Real.log (b * x) := by rw [←Real.log_mul (by positivity) (by positivity), mul_comm] + _ ≤ Real.log u := by gcongr; exact hu.1 + case ub => + rw [one_mul] + gcongr + · calc 0 < b * x := by positivity + _ ≤ u := by exact hu.1 + · exact hu.2 + +lemma GrowsPolynomially.of_isTheta {f g : ℝ → ℝ} (hg : GrowsPolynomially g) (hf : f =Θ[atTop] g) + (hf' : ∀ᶠ x in atTop, 0 ≤ f x) : 
GrowsPolynomially f := by + intro b hb + have hb_pos := hb.1 + have hf_lb := isBigO_iff''.mp hf.isBigO_symm + have hf_ub := isBigO_iff'.mp hf.isBigO + obtain ⟨c₁, hc₁_pos : 0 < c₁, hf_lb⟩ := hf_lb + obtain ⟨c₂, hc₂_pos : 0 < c₂, hf_ub⟩ := hf_ub + have hg := hg.norm b hb + obtain ⟨c₃, hc₃_pos : 0 < c₃, hg⟩ := hg + obtain ⟨c₄, hc₄_pos : 0 < c₄, hg⟩ := hg + have h_lb_pos : 0 < c₁ * c₂⁻¹ * c₃ := by positivity + have h_ub_pos : 0 < c₂ * c₄ * c₁⁻¹ := by positivity + refine ⟨c₁ * c₂⁻¹ * c₃, h_lb_pos, ?_⟩ + refine ⟨c₂ * c₄ * c₁⁻¹, h_ub_pos, ?_⟩ + have c₂_cancel : c₂⁻¹ * c₂ = 1 := inv_mul_cancel (by positivity) + have c₁_cancel : c₁⁻¹ * c₁ = 1 := inv_mul_cancel (by positivity) + filter_upwards [(tendsto_id.const_mul_atTop hb_pos).eventually_forall_ge_atTop hf', + (tendsto_id.const_mul_atTop hb_pos).eventually_forall_ge_atTop hf_lb, + (tendsto_id.const_mul_atTop hb_pos).eventually_forall_ge_atTop hf_ub, + (tendsto_id.const_mul_atTop hb_pos).eventually_forall_ge_atTop hg, + eventually_ge_atTop 0] + with x hf_pos h_lb h_ub hg_bound hx_pos + intro u hu + have hbx : b * x ≤ x := + calc b * x ≤ 1 * x := by gcongr; exact le_of_lt hb.2 + _ = x := by rw [one_mul] + have hg_bound := hg_bound x hbx + refine ⟨?lb, ?ub⟩ + case lb => calc + c₁ * c₂⁻¹ * c₃ * f x ≤ c₁ * c₂⁻¹ * c₃ * (c₂ * ‖g x‖) := by + rw [←Real.norm_of_nonneg (hf_pos x hbx)]; gcongr; exact h_ub x hbx + _ = (c₂⁻¹ * c₂) * c₁ * (c₃ * ‖g x‖) := by ring + _ = c₁ * (c₃ * ‖g x‖) := by simp [c₂_cancel] + _ ≤ c₁ * ‖g u‖ := by gcongr; exact (hg_bound u hu).1 + _ ≤ f u := by + rw [←Real.norm_of_nonneg (hf_pos u hu.1)] + exact h_lb u hu.1 + case ub => calc + f u ≤ c₂ * ‖g u‖ := by rw [←Real.norm_of_nonneg (hf_pos u hu.1)]; exact h_ub u hu.1 + _ ≤ c₂ * (c₄ * ‖g x‖) := by gcongr; exact (hg_bound u hu).2 + _ = c₂ * c₄ * (c₁⁻¹ * c₁) * ‖g x‖ := by simp [c₁_cancel]; ring + _ = c₂ * c₄ * c₁⁻¹ * (c₁ * ‖g x‖) := by ring + _ ≤ c₂ * c₄ * c₁⁻¹ * f x := by + gcongr + rw [←Real.norm_of_nonneg (hf_pos x hbx)] + exact h_lb x hbx + +lemma GrowsPolynomially.of_isEquivalent {f g : ℝ → ℝ} (hg : GrowsPolynomially g) + (hf : f ~[atTop] g) : GrowsPolynomially f := by + have : f = g + (f - g) := by ext; simp + rw [this] + exact add_isLittleO hg hf + +lemma GrowsPolynomially.of_isEquivalent_const {f : ℝ → ℝ} {c : ℝ} (hf : f ~[atTop] fun _ => c) : + GrowsPolynomially f := + of_isEquivalent growsPolynomially_const hf + +end AkraBazziRecurrence diff --git a/Mathlib/Computability/Halting.lean b/Mathlib/Computability/Halting.lean index 9f60ef77c8412..751a3f513767f 100644 --- a/Mathlib/Computability/Halting.lean +++ b/Mathlib/Computability/Halting.lean @@ -194,6 +194,7 @@ theorem to_re {p : α → Prop} (hp : ComputablePred p) : RePred p := by cases a; cases f n <;> simp #align computable_pred.to_re ComputablePred.to_re +/-- **Rice's Theorem** -/ theorem rice (C : Set (ℕ →. ℕ)) (h : ComputablePred fun c => eval c ∈ C) {f g} (hf : Nat.Partrec f) (hg : Nat.Partrec g) (fC : f ∈ C) : g ∈ C := by cases' h with _ h; skip diff --git a/Mathlib/Computability/TuringMachine.lean b/Mathlib/Computability/TuringMachine.lean index 608e047588d20..dc62e3f4ede3e 100644 --- a/Mathlib/Computability/TuringMachine.lean +++ b/Mathlib/Computability/TuringMachine.lean @@ -504,7 +504,7 @@ instance Tape.inhabited {Γ} [Inhabited Γ] : Inhabited (Tape Γ) := ⟨by constructor <;> apply default⟩ #align turing.tape.inhabited Turing.Tape.inhabited -/-- A direction for the turing machine `move` command, either +/-- A direction for the Turing machine `move` command, either left or right. 
-/ inductive Dir | left @@ -986,7 +986,7 @@ theorem tr_eval' {σ₁ σ₂} (f₁ : σ₁ → Option σ₁) (f₂ : σ₂ → /-! ## The TM0 model -A TM0 turing machine is essentially a Post-Turing machine, adapted for type theory. +A TM0 Turing machine is essentially a Post-Turing machine, adapted for type theory. A Post-Turing machine with symbol type `Γ` and label type `Λ` is a function `Λ → Γ → Option (Λ × Stmt)`, where a `Stmt` can be either `move left`, `move right` or `write a` @@ -1349,7 +1349,7 @@ theorem stmts₁_supportsStmt_mono {S : Finset Λ} {q₁ q₂ : Stmt₁} (h : q case halt => subst h; trivial #align turing.TM1.stmts₁_supports_stmt_mono Turing.TM1.stmts₁_supportsStmt_mono -/-- The set of all statements in a turing machine, plus one extra value `none` representing the +/-- The set of all statements in a Turing machine, plus one extra value `none` representing the halt state. This is used in the TM1 to TM0 reduction. -/ noncomputable def stmts (M : Λ → Stmt₁) (S : Finset Λ) : Finset (Option Stmt₁) := Finset.insertNone (S.biUnion fun q ↦ stmts₁ (M q)) diff --git a/Mathlib/Condensed/Equivalence.lean b/Mathlib/Condensed/Equivalence.lean index a16348d3e2ed1..7424208af47fa 100644 --- a/Mathlib/Condensed/Equivalence.lean +++ b/Mathlib/Condensed/Equivalence.lean @@ -23,7 +23,7 @@ construction of the equivalence of the categories of sheaves, given these three ## Main theorems -* `Condensed.StoneanCompHaus.coverDense`, `Condensed.StoneanCompHaus.coverPreserving`, +* `Condensed.StoneanCompHaus.isCoverDense`, `Condensed.StoneanCompHaus.coverPreserving`, `Condensed.StoneanCompHaus.coverLifting`: the three conditions needed to guarantee the equivalence of the categories of sheaves on the coherent site on `Stonean` on the one hand and `CompHaus` on the other. @@ -64,7 +64,7 @@ lemma generate_singleton_mem_coherentTopology (B : CompHaus) : exact ⟨(), (CompHaus.epi_iff_surjective (presentation.π B)).mp (presentation.epi_π B) b⟩ open CompHaus in -lemma coverDense : CoverDense (coherentTopology _) Stonean.toCompHaus := by +instance isCoverDense : Stonean.toCompHaus.IsCoverDense (coherentTopology _) := by constructor intro B convert generate_singleton_mem_coherentTopology B @@ -82,7 +82,7 @@ lemma coverDense : CoverDense (coherentTopology _) Stonean.toCompHaus := by theorem coverDense.inducedTopology_Sieve_iff_EffectiveEpiFamily (X : Stonean) (S : Sieve X) : (∃ (α : Type) (_ : Fintype α) (Y : α → Stonean) (π : (a : α) → (Y a ⟶ X)), EffectiveEpiFamily Y π ∧ (∀ a : α, (S.arrows) (π a)) ) ↔ - (S ∈ coverDense.inducedTopology X) := by + (S ∈ Stonean.toCompHaus.inducedTopologyOfIsCoverDense (coherentTopology _) X) := by refine ⟨fun ⟨α, _, Y, π, ⟨H₁, H₂⟩⟩ ↦ ?_, fun hS ↦ ?_⟩ · apply (coherentTopology.mem_sieves_iff_hasEffectiveEpiFamily (Sieve.functorPushforward _ S)).mpr refine ⟨α, inferInstance, fun i => Stonean.toCompHaus.obj (Y i), @@ -106,7 +106,8 @@ theorem coverDense.inducedTopology_Sieve_iff_EffectiveEpiFamily (X : Stonean) (S rfl lemma coherentTopology_is_induced : - coherentTopology Stonean.{u} = coverDense.inducedTopology := by + coherentTopology Stonean.{u} = + Stonean.toCompHaus.inducedTopologyOfIsCoverDense (coherentTopology _) := by ext X S rw [← coverDense.inducedTopology_Sieve_iff_EffectiveEpiFamily X] rw [← coherentTopology.mem_sieves_iff_hasEffectiveEpiFamily S] @@ -114,18 +115,22 @@ lemma coherentTopology_is_induced : lemma coverPreserving : CoverPreserving (coherentTopology _) (coherentTopology _) Stonean.toCompHaus := by rw [coherentTopology_is_induced] - exact 
LocallyCoverDense.inducedTopology_coverPreserving (CoverDense.locallyCoverDense coverDense) + apply LocallyCoverDense.inducedTopology_coverPreserving -lemma coverLifting : CoverLifting (coherentTopology _) (coherentTopology _) Stonean.toCompHaus := by +instance coverLifting : + Stonean.toCompHaus.IsCocontinuous (coherentTopology _) (coherentTopology _) := by rw [coherentTopology_is_induced] - exact LocallyCoverDense.inducedTopology_coverLifting (CoverDense.locallyCoverDense coverDense) + apply LocallyCoverDense.inducedTopology_isCocontinuous + +instance : Stonean.toCompHaus.IsContinuous (coherentTopology _) (coherentTopology _) := + Functor.IsCoverDense.isContinuous _ _ _ coverPreserving /-- The equivalence from coherent sheaves on `Stonean` to coherent sheaves on `CompHaus` (i.e. condensed sets). -/ noncomputable def equivalence (A : Type _) [Category.{u+1} A] [HasLimits A] : Sheaf (coherentTopology Stonean) A ≌ Condensed.{u} A := - CoverDense.sheafEquivOfCoverPreservingCoverLifting coverDense coverPreserving coverLifting + Functor.IsCoverDense.sheafEquivOfCoverPreservingCoverLifting Stonean.toCompHaus _ _ _ end StoneanCompHaus @@ -155,7 +160,7 @@ lemma generate_singleton_mem_coherentTopology (B : Profinite) : exact ⟨(), (Profinite.epi_iff_surjective (presentation.π B)).mp (presentation.epi_π B) b⟩ open Profinite in -lemma coverDense : CoverDense (coherentTopology _) Stonean.toProfinite := by +instance coverDense : Stonean.toProfinite.IsCoverDense (coherentTopology _) := by constructor intro B convert generate_singleton_mem_coherentTopology B @@ -170,7 +175,7 @@ lemma coverDense : CoverDense (coherentTopology _) Stonean.toProfinite := by theorem coverDense.inducedTopology_Sieve_iff_EffectiveEpiFamily (X : Stonean) (S : Sieve X) : (∃ (α : Type) (_ : Fintype α) (Y : α → Stonean) (π : (a : α) → (Y a ⟶ X)), EffectiveEpiFamily Y π ∧ (∀ a : α, (S.arrows) (π a)) ) ↔ - (S ∈ coverDense.inducedTopology X) := by + (S ∈ Stonean.toProfinite.inducedTopologyOfIsCoverDense (coherentTopology _) X) := by refine ⟨fun ⟨α, _, Y, π, ⟨H₁, H₂⟩⟩ ↦ ?_, fun hS ↦ ?_⟩ · apply (coherentTopology.mem_sieves_iff_hasEffectiveEpiFamily (Sieve.functorPushforward _ S)).mpr refine ⟨α, inferInstance, fun i => Stonean.toProfinite.obj (Y i), @@ -194,7 +199,8 @@ theorem coverDense.inducedTopology_Sieve_iff_EffectiveEpiFamily (X : Stonean) (S rfl lemma coherentTopology_is_induced : - coherentTopology Stonean.{u} = coverDense.inducedTopology := by + coherentTopology Stonean.{u} = + Stonean.toProfinite.inducedTopologyOfIsCoverDense (coherentTopology _) := by ext X S rw [← coverDense.inducedTopology_Sieve_iff_EffectiveEpiFamily X, ← coherentTopology.mem_sieves_iff_hasEffectiveEpiFamily S] @@ -202,18 +208,21 @@ lemma coherentTopology_is_induced : lemma coverPreserving : CoverPreserving (coherentTopology _) (coherentTopology _) Stonean.toProfinite := by rw [coherentTopology_is_induced] - exact LocallyCoverDense.inducedTopology_coverPreserving (CoverDense.locallyCoverDense coverDense) + apply LocallyCoverDense.inducedTopology_coverPreserving -lemma coverLifting : - CoverLifting (coherentTopology _) (coherentTopology _) Stonean.toProfinite := by +instance isCocontinuous : + Stonean.toProfinite.IsCocontinuous (coherentTopology _) (coherentTopology _) := by rw [coherentTopology_is_induced] - exact LocallyCoverDense.inducedTopology_coverLifting (CoverDense.locallyCoverDense coverDense) + apply LocallyCoverDense.inducedTopology_isCocontinuous + +instance : Stonean.toProfinite.IsContinuous (coherentTopology _) (coherentTopology _) := + 
Functor.IsCoverDense.isContinuous _ _ _ coverPreserving /-- The equivalence from coherent sheaves on `Stonean` to coherent sheaves on `Profinite`. -/ noncomputable def equivalence (A : Type _) [Category.{u+1} A] [HasLimits A] : Sheaf (coherentTopology Stonean) A ≌ Sheaf (coherentTopology Profinite) A := - CoverDense.sheafEquivOfCoverPreservingCoverLifting coverDense coverPreserving coverLifting + Functor.IsCoverDense.sheafEquivOfCoverPreservingCoverLifting Stonean.toProfinite _ _ _ end StoneanProfinite diff --git a/Mathlib/Control/Random.lean b/Mathlib/Control/Random.lean index 86a8005839d48..b09710502ceb9 100644 --- a/Mathlib/Control/Random.lean +++ b/Mathlib/Control/Random.lean @@ -104,7 +104,7 @@ instance : BoundedRandom Nat where let z ← rand (Fin (hi - lo).succ) pure ⟨ lo + z.val, Nat.le_add_right _ _, - Nat.add_le_of_le_sub_left h (Nat.le_of_succ_le_succ z.isLt) + Nat.add_le_of_le_sub' h (Nat.le_of_succ_le_succ z.isLt) ⟩ instance : BoundedRandom Int where diff --git a/Mathlib/Data/Bool/Basic.lean b/Mathlib/Data/Bool/Basic.lean index da6887305cd23..451b1f5470010 100644 --- a/Mathlib/Data/Bool/Basic.lean +++ b/Mathlib/Data/Bool/Basic.lean @@ -81,8 +81,14 @@ theorem eq_iff_eq_true_iff {a b : Bool} : a = b ↔ ((a = true) ↔ (b = true)) cases a <;> cases b <;> simp -- Porting note: new theorem -theorem beq_eq_decide_eq {α} [DecidableEq α] - (a b : α) : (a == b) = decide (a = b) := rfl +/- Even though `DecidableEq α` implies an instance of (`Lawful`)`BEq α`, we keep the seemingly +redundant typeclass assumptions so that the theorem is also applicable for types that have +overridden this default instance of `LawfulBEq α` -/ +theorem beq_eq_decide_eq {α} [BEq α] [LawfulBEq α] [DecidableEq α] + (a b : α) : (a == b) = decide (a = b) := by + cases h : a == b + · simp [ne_of_beq_false h] + · simp [eq_of_beq h] -- Porting note: new theorem theorem beq_comm {α} [BEq α] [LawfulBEq α] {a b : α} : (a == b) = (b == a) := diff --git a/Mathlib/Data/Fin/Basic.lean b/Mathlib/Data/Fin/Basic.lean index 58ae46afaeba9..404b0f960a7f6 100644 --- a/Mathlib/Data/Fin/Basic.lean +++ b/Mathlib/Data/Fin/Basic.lean @@ -1256,7 +1256,7 @@ lemma eq_one_of_neq_zero (i : Fin 2) (hi : i ≠ 0) : i = 1 := theorem coe_neg_one : ↑(-1 : Fin (n + 1)) = n := by cases n · simp - rw [Fin.coe_neg, Fin.val_one, Nat.succ_sub_one, Nat.mod_eq_of_lt] + rw [Fin.coe_neg, Fin.val_one, Nat.add_one_sub_one, Nat.mod_eq_of_lt] constructor #align fin.coe_neg_one Fin.coe_neg_one diff --git a/Mathlib/Data/Fin/Interval.lean b/Mathlib/Data/Fin/Interval.lean index cc54b4dff12c4..4759ca985de07 100644 --- a/Mathlib/Data/Fin/Interval.lean +++ b/Mathlib/Data/Fin/Interval.lean @@ -219,7 +219,7 @@ theorem card_Ici : (Ici a).card = n - a := by cases n with | zero => exact Fin.elim0 a | succ => - rw [← card_map, map_valEmbedding_Ici, Nat.card_Icc, Nat.succ_sub_one] + rw [← card_map, map_valEmbedding_Ici, Nat.card_Icc, Nat.add_one_sub_one] #align fin.card_Ici Fin.card_Ici @[simp] diff --git a/Mathlib/Data/Finset/Basic.lean b/Mathlib/Data/Finset/Basic.lean index c25a86c9e5b06..a27b38172e0ae 100644 --- a/Mathlib/Data/Finset/Basic.lean +++ b/Mathlib/Data/Finset/Basic.lean @@ -194,6 +194,9 @@ instance decidableMem [_h : DecidableEq α] (a : α) (s : Finset α) : Decidable Multiset.decidableMem _ _ #align finset.decidable_mem Finset.decidableMem +@[simp] lemma forall_mem_not_eq {s : Finset α} {a : α} : (∀ b ∈ s, ¬ a = b) ↔ a ∉ s := by aesop +@[simp] lemma forall_mem_not_eq' {s : Finset α} {a : α} : (∀ b ∈ s, ¬ b = a) ↔ a ∉ s := by aesop + /-! 
### set coercion -/ --Porting note: new definition diff --git a/Mathlib/Data/Finset/Card.lean b/Mathlib/Data/Finset/Card.lean index 12e6f055a9999..27b99d7d3fda3 100644 --- a/Mathlib/Data/Finset/Card.lean +++ b/Mathlib/Data/Finset/Card.lean @@ -205,6 +205,13 @@ theorem Multiset.toFinset_card_of_nodup {m : Multiset α} (h : m.Nodup) : congr_arg card <| Multiset.dedup_eq_self.mpr h #align multiset.to_finset_card_of_nodup Multiset.toFinset_card_of_nodup +theorem Multiset.dedup_card_eq_card_iff_nodup {m : Multiset α} : + card m.dedup = card m ↔ m.Nodup := + .trans ⟨fun h ↦ eq_of_le_of_card_le (dedup_le m) h.ge, congr_arg _⟩ dedup_eq_self + +theorem Multiset.toFinset_card_eq_card_iff_nodup {m : Multiset α} : + m.toFinset.card = card m ↔ m.Nodup := dedup_card_eq_card_iff_nodup + theorem List.card_toFinset : l.toFinset.card = l.dedup.length := rfl #align list.card_to_finset List.card_toFinset diff --git a/Mathlib/Data/Finset/Pointwise.lean b/Mathlib/Data/Finset/Pointwise.lean index 6d21ccce88665..36eeaeb618d61 100644 --- a/Mathlib/Data/Finset/Pointwise.lean +++ b/Mathlib/Data/Finset/Pointwise.lean @@ -1869,9 +1869,9 @@ theorem op_smul_finset_mul_eq_mul_smul_finset (a : α) (s : Finset α) (t : Fins end Semigroup -section LeftCancelSemigroup +section IsLeftCancelMul -variable [LeftCancelSemigroup α] [DecidableEq α] (s t : Finset α) (a : α) +variable [Mul α] [IsLeftCancelMul α] [DecidableEq α] (s t : Finset α) (a : α) @[to_additive] theorem pairwiseDisjoint_smul_iff {s : Set α} {t : Finset α} : @@ -1898,11 +1898,11 @@ theorem card_le_card_mul_left {s : Finset α} (hs : s.Nonempty) : t.card ≤ (s #align finset.card_le_card_mul_left Finset.card_le_card_mul_left #align finset.card_le_card_add_left Finset.card_le_card_add_left -end LeftCancelSemigroup +end IsLeftCancelMul section -variable [RightCancelSemigroup α] [DecidableEq α] (s t : Finset α) (a : α) +variable [Mul α] [IsRightCancelMul α] [DecidableEq α] (s t : Finset α) (a : α) @[to_additive (attr := simp)] theorem card_mul_singleton : (s * {a}).card = s.card := diff --git a/Mathlib/Data/Finsupp/Notation.lean b/Mathlib/Data/Finsupp/Notation.lean index 5218793bd4f2e..d5358e1cd2baa 100644 --- a/Mathlib/Data/Finsupp/Notation.lean +++ b/Mathlib/Data/Finsupp/Notation.lean @@ -19,11 +19,16 @@ open Lean open Lean.Parser open Lean.Parser.Term -/-- A variant of `Lean.Parser.Term.matchAlts` with less line wrapping. -/ +-- A variant of `Lean.Parser.Term.matchAlts` with less line wrapping. +@[nolint docBlame] -- we do not want any doc hover on this notation. def fun₀.matchAlts : Parser := leading_parser withPosition $ ppRealGroup <| many1Indent (ppSpace >> ppGroup matchAlt) -@[term_parser, inherit_doc Finsupp] +/-- `fun₀ | i => a` is notation for `Finsupp.single i a`, and with multiple match arms, +`fun₀ ... | i => a` is notation for `Finsupp.update (fun₀ ...) i a`. + +As a result, if multiple match arms coincide, the last one takes precedence. 
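For example, assuming the expansion just described, `fun₀ | 1 => 3 | 2 => 3` denotes the finitely
supported function sending `1 ↦ 3` and `2 ↦ 3`, and `0` elsewhere; a minimal usage sketch:

```lean
example : ℕ →₀ ℕ := fun₀ | 1 => 3 | 2 => 3
-- later match arms take precedence on collisions:
-- this is `Finsupp.update (Finsupp.single 1 3) 1 4`
example : ℕ →₀ ℕ := fun₀ | 1 => 3 | 1 => 4
```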
-/ +@[term_parser] def fun₀ := leading_parser:maxPrec ppAllowUngrouped >> unicodeSymbol "λ₀" "fun₀" >> fun₀.matchAlts diff --git a/Mathlib/Data/Fintype/Card.lean b/Mathlib/Data/Fintype/Card.lean index d0a48eff2cf40..1ef85ff6eaf8f 100644 --- a/Mathlib/Data/Fintype/Card.lean +++ b/Mathlib/Data/Fintype/Card.lean @@ -761,12 +761,21 @@ noncomputable def Finset.equivFinOfCardEq {s : Finset α} {n : ℕ} (h : s.card Fintype.equivFinOfCardEq ((Fintype.card_coe _).trans h) #align finset.equiv_fin_of_card_eq Finset.equivFinOfCardEq +theorem Finset.card_eq_of_equiv_fin {s : Finset α} {n : ℕ} (i : s ≃ Fin n) : s.card = n := + Fin.equiv_iff_eq.1 ⟨s.equivFin.symm.trans i⟩ + +theorem Finset.card_eq_of_equiv_fintype {s : Finset α} [Fintype β] (i : s ≃ β) : + s.card = Fintype.card β := card_eq_of_equiv_fin <| i.trans <| Fintype.equivFin β + /-- Noncomputable equivalence between two finsets `s` and `t` as fintypes when there is a proof that `s.card = t.card`.-/ -noncomputable def Finset.equivOfCardEq {s t : Finset α} (h : s.card = t.card) : s ≃ t := - Fintype.equivOfCardEq ((Fintype.card_coe _).trans (h.trans (Fintype.card_coe _).symm)) +noncomputable def Finset.equivOfCardEq {s : Finset α} {t : Finset β} (h : s.card = t.card) : + s ≃ t := Fintype.equivOfCardEq ((Fintype.card_coe _).trans (h.trans (Fintype.card_coe _).symm)) #align finset.equiv_of_card_eq Finset.equivOfCardEq +theorem Finset.card_eq_of_equiv {s : Finset α} {t : Finset β} (i : s ≃ t) : s.card = t.card := + (card_eq_of_equiv_fintype i).trans (Fintype.card_coe _) + @[simp] theorem Fintype.card_prop : Fintype.card Prop = 2 := rfl diff --git a/Mathlib/Data/List/Basic.lean b/Mathlib/Data/List/Basic.lean index 84b7313a29a70..7e5e49fc49e72 100644 --- a/Mathlib/Data/List/Basic.lean +++ b/Mathlib/Data/List/Basic.lean @@ -3865,6 +3865,13 @@ theorem nthLe_enum (l : List α) (i : ℕ) (hi' : i < l.enum.length) l.enum.nthLe i hi' = (i, l.nthLe i hi) := get_enum .. #align list.nth_le_enum List.nthLe_enum +@[simp] +theorem enumFrom_eq_nil {n : ℕ} {l : List α} : List.enumFrom n l = [] ↔ l = [] := by + cases l <;> simp + +@[simp] +theorem enum_eq_nil {l : List α} : List.enum l = [] ↔ l = [] := enumFrom_eq_nil + section Choose variable (p : α → Prop) [DecidablePred p] (l : List α) diff --git a/Mathlib/Data/List/Indexes.lean b/Mathlib/Data/List/Indexes.lean index 9a2873706d4d7..aabc582dd8e33 100644 --- a/Mathlib/Data/List/Indexes.lean +++ b/Mathlib/Data/List/Indexes.lean @@ -192,6 +192,10 @@ theorem length_mapIdx {α β} (l : List α) (f : ℕ → α → β) : (l.mapIdx · simp [IH] #align list.length_map_with_index List.length_mapIdx +@[simp] +theorem mapIdx_eq_nil {α β} {f : ℕ → α → β} {l : List α} : List.mapIdx f l = [] ↔ l = [] := by + rw [List.mapIdx_eq_enum_map, List.map_eq_nil, List.enum_eq_nil] + @[simp, deprecated] theorem nthLe_mapIdx {α β} (l : List α) (f : ℕ → α → β) (i : ℕ) (h : i < l.length) (h' : i < (l.mapIdx f).length := h.trans_le (l.length_mapIdx f).ge) : diff --git a/Mathlib/Data/Matrix/Basic.lean b/Mathlib/Data/Matrix/Basic.lean index 38126f4ee0027..4fab3f1f21f1a 100644 --- a/Mathlib/Data/Matrix/Basic.lean +++ b/Mathlib/Data/Matrix/Basic.lean @@ -1081,6 +1081,11 @@ theorem smul_eq_diagonal_mul [Fintype m] [DecidableEq m] (M : Matrix m n α) (a simp #align matrix.smul_eq_diagonal_mul Matrix.smul_eq_diagonal_mul +theorem op_smul_eq_mul_diagonal [Fintype n] [DecidableEq n] (M : Matrix m n α) (a : α) : + MulOpposite.op a • M = M * (diagonal fun _ : n => a) := by + ext + simp + /-- Left multiplication by a matrix, as an `AddMonoidHom` from matrices to matrices. 
-/ @[simps] def addMonoidHomMulLeft [Fintype m] (M : Matrix l m α) : Matrix m n α →+ Matrix l n α where @@ -1152,6 +1157,14 @@ theorem map_mul [Fintype n] {L : Matrix m n α} {M : Matrix n o α} [NonAssocSem simp [mul_apply, map_sum] #align matrix.map_mul Matrix.map_mul +theorem smul_one_eq_diagonal [Fintype m] [DecidableEq m] (a : α) : + a • (1 : Matrix m m α) = diagonal fun _ => a := by + rw [smul_eq_diagonal_mul, mul_one] + +theorem op_smul_one_eq_diagonal [Fintype m] [DecidableEq m] (a : α) : + MulOpposite.op a • (1 : Matrix m m α) = diagonal fun _ => a := by + rw [op_smul_eq_mul_diagonal, one_mul] + variable (α n) /-- `Matrix.diagonal` as a `RingHom`. -/ @@ -1252,13 +1265,7 @@ theorem mul_mul_left [Fintype n] (M : Matrix m n α) (N : Matrix n o α) (a : α sending `a` to the diagonal matrix with `a` on the diagonal. -/ def scalar (n : Type u) [DecidableEq n] [Fintype n] : α →+* Matrix n n α := - { (smulAddHom α _).flip (1 : Matrix n n α) with - toFun := fun a => a • (1 : Matrix n n α) - map_one' := by simp - map_mul' := by - intros - ext - simp [mul_assoc] } + (diagonalRingHom n α).comp <| Pi.constRingHom n α #align matrix.scalar Matrix.scalar section Scalar @@ -1266,54 +1273,45 @@ section Scalar variable [DecidableEq n] [Fintype n] @[simp] -theorem coe_scalar : (scalar n : α → Matrix n n α) = fun a => a • (1 : Matrix n n α) := +theorem scalar_apply (a : α) : scalar n a = diagonal fun _ => a := rfl -#align matrix.coe_scalar Matrix.coe_scalar +#align matrix.coe_scalar Matrix.scalar_applyₓ -theorem scalar_apply_eq (a : α) (i : n) : scalar n a i i = a := by - -- Porting note: replaced `Pi.smul_apply` with the new `Matrix.smul_apply` - simp only [coe_scalar, Matrix.smul_apply, one_apply_eq, smul_eq_mul, mul_one] -#align matrix.scalar_apply_eq Matrix.scalar_apply_eq +#noalign matrix.scalar_apply_eq +#noalign matrix.scalar_apply_ne -theorem scalar_apply_ne (a : α) (i j : n) (h : i ≠ j) : scalar n a i j = 0 := by - -- Porting note: replaced `Pi.smul_apply` with the new `Matrix.smul_apply` - simp only [h, coe_scalar, one_apply_ne, Ne.def, not_false_iff, Matrix.smul_apply, smul_zero] -#align matrix.scalar_apply_ne Matrix.scalar_apply_ne - -theorem scalar_inj [Nonempty n] {r s : α} : scalar n r = scalar n s ↔ r = s := by - constructor - · intro h - inhabit n - rw [← scalar_apply_eq r (Inhabited.default (α := n)), - ← scalar_apply_eq s (Inhabited.default (α := n)), h] - · rintro rfl - rfl +theorem scalar_inj [Nonempty n] {r s : α} : scalar n r = scalar n s ↔ r = s := + (diagonal_injective.comp Function.const_injective).eq_iff #align matrix.scalar_inj Matrix.scalar_inj +theorem scalar_commute [Fintype n] [DecidableEq n] (r : α) (hr : ∀ r', Commute r r') + (M : Matrix n n α) : + Commute (scalar n r) M := by + simp_rw [Commute, SemiconjBy, scalar_apply, ←smul_eq_diagonal_mul, ←op_smul_eq_mul_diagonal] + ext i j' + exact hr _ +#align matrix.scalar.commute Matrix.scalar_commuteₓ + end Scalar end Semiring section CommSemiring -variable [CommSemiring α] [Fintype n] +variable [CommSemiring α] -theorem smul_eq_mul_diagonal [DecidableEq n] (M : Matrix m n α) (a : α) : +theorem smul_eq_mul_diagonal [Fintype n] [DecidableEq n] (M : Matrix m n α) (a : α) : a • M = M * diagonal fun _ => a := by ext simp [mul_comm] #align matrix.smul_eq_mul_diagonal Matrix.smul_eq_mul_diagonal @[simp] -theorem mul_mul_right (M : Matrix m n α) (N : Matrix n o α) (a : α) : +theorem mul_mul_right [Fintype n] (M : Matrix m n α) (N : Matrix n o α) (a : α) : (M * of fun i j => a * N i j) = a • (M * N) := mul_smul M a N #align 
matrix.mul_mul_right Matrix.mul_mul_right -theorem scalar.commute [DecidableEq n] (r : α) (M : Matrix n n α) : Commute (scalar n r) M := by - simp [Commute, SemiconjBy] -#align matrix.scalar.commute Matrix.scalar.commute - end CommSemiring section Algebra @@ -1322,12 +1320,10 @@ variable [Fintype n] [DecidableEq n] variable [CommSemiring R] [Semiring α] [Semiring β] [Algebra R α] [Algebra R β] -instance instAlgebra : Algebra R (Matrix n n α) := - { (Matrix.scalar n).comp (algebraMap R α) with - commutes' := fun r x => by - ext - simp [Matrix.scalar, Matrix.mul_apply, Matrix.one_apply, Algebra.commutes, smul_ite] - smul_def' := fun r x => by ext; simp [Matrix.scalar, Algebra.smul_def r] } +instance instAlgebra : Algebra R (Matrix n n α) where + toRingHom := (Matrix.scalar n).comp (algebraMap R α) + commutes' r x := scalar_commute _ (fun r' => Algebra.commutes _ _) _ + smul_def' r x := by ext; simp [Matrix.scalar, Algebra.smul_def r] #align matrix.algebra Matrix.instAlgebra theorem algebraMap_matrix_apply {r : R} {i j : n} : @@ -1337,18 +1333,13 @@ theorem algebraMap_matrix_apply {r : R} {i j : n} : #align matrix.algebra_map_matrix_apply Matrix.algebraMap_matrix_apply theorem algebraMap_eq_diagonal (r : R) : - algebraMap R (Matrix n n α) r = diagonal (algebraMap R (n → α) r) := - Matrix.ext fun _ _ => algebraMap_matrix_apply + algebraMap R (Matrix n n α) r = diagonal (algebraMap R (n → α) r) := rfl #align matrix.algebra_map_eq_diagonal Matrix.algebraMap_eq_diagonal -@[simp] -theorem algebraMap_eq_smul (r : R) : algebraMap R (Matrix n n R) r = r • (1 : Matrix n n R) := - rfl -#align matrix.algebra_map_eq_smul Matrix.algebraMap_eq_smul +#align matrix.algebra_map_eq_smul Algebra.algebraMap_eq_smul_one theorem algebraMap_eq_diagonalRingHom : - algebraMap R (Matrix n n α) = (diagonalRingHom n α).comp (algebraMap R _) := - RingHom.ext algebraMap_eq_diagonal + algebraMap R (Matrix n n α) = (diagonalRingHom n α).comp (algebraMap R _) := rfl #align matrix.algebra_map_eq_diagonal_ring_hom Matrix.algebraMap_eq_diagonalRingHom @[simp] diff --git a/Mathlib/Data/Nat/Basic.lean b/Mathlib/Data/Nat/Basic.lean index 98d40b50e16f6..5f999e12a50f9 100644 --- a/Mathlib/Data/Nat/Basic.lean +++ b/Mathlib/Data/Nat/Basic.lean @@ -288,11 +288,11 @@ theorem exists_eq_add_of_lt (h : m < n) : ∃ k : ℕ, n = m + k + 1 := /-! 
### `pred` -/ @[simp] -theorem add_succ_sub_one (n m : ℕ) : n + succ m - 1 = n + m := by rw [add_succ, succ_sub_one] +theorem add_succ_sub_one (n m : ℕ) : n + succ m - 1 = n + m := by rw [add_succ, Nat.add_one_sub_one] #align nat.add_succ_sub_one Nat.add_succ_sub_one @[simp] -theorem succ_add_sub_one (n m : ℕ) : succ n + m - 1 = n + m := by rw [succ_add, succ_sub_one] +theorem succ_add_sub_one (n m : ℕ) : succ n + m - 1 = n + m := by rw [succ_add, Nat.add_one_sub_one] #align nat.succ_add_sub_one Nat.succ_add_sub_one theorem pred_eq_sub_one (n : ℕ) : pred n = n - 1 := diff --git a/Mathlib/Data/Nat/Bitwise.lean b/Mathlib/Data/Nat/Bitwise.lean index 54789c59e8254..7e63942b5660b 100644 --- a/Mathlib/Data/Nat/Bitwise.lean +++ b/Mathlib/Data/Nat/Bitwise.lean @@ -214,6 +214,9 @@ theorem zero_of_testBit_eq_false {n : ℕ} (h : ∀ i, testBit n i = false) : n rw [this, bit_false, bit0_val, hn fun i => by rw [← h (i + 1), testBit_succ], mul_zero] #align nat.zero_of_test_bit_eq_ff Nat.zero_of_testBit_eq_false +theorem testBit_eq_false_of_lt {n i} (h : n < 2 ^ i) : n.testBit i = false := by + simp [testBit, shiftRight_eq_div_pow, Nat.div_eq_of_lt h] + @[simp] theorem zero_testBit (i : ℕ) : testBit 0 i = false := by simp only [testBit, zero_shiftRight, bodd_zero] @@ -350,6 +353,17 @@ theorem xor_comm (n m : ℕ) : n ^^^ m = m ^^^ n := bitwise_comm (Bool.bne_eq_xor ▸ Bool.xor_comm) n m #align nat.lxor_comm Nat.xor_comm +lemma and_two_pow (n i : ℕ) : n &&& 2 ^ i = (n.testBit i).toNat * 2 ^ i := by + refine eq_of_testBit_eq fun j => ?_ + obtain rfl | hij := Decidable.eq_or_ne i j <;> cases' h : n.testBit i + · simp [h] + · simp [h] + · simp [h, testBit_two_pow_of_ne hij] + · simp [h, testBit_two_pow_of_ne hij] + +lemma two_pow_and (n i : ℕ) : 2 ^ i &&& n = 2 ^ i * (n.testBit i).toNat := by + rw [mul_comm, land_comm, and_two_pow] + @[simp] theorem zero_xor (n : ℕ) : 0 ^^^ n = n := by simp [HXor.hXor, Xor.xor, xor] #align nat.zero_lxor Nat.zero_xor @@ -486,4 +500,34 @@ theorem lt_xor_cases {a b c : ℕ} (h : a < b ^^^ c) : a ^^^ c < b ∨ a ^^^ b < (or_iff_right fun h' => (h.asymm h').elim).1 <| xor_trichotomy h.ne #align nat.lt_lxor_cases Nat.lt_xor_cases +@[simp] theorem bit_lt_two_pow_succ_iff {b x n} : bit b x < 2 ^ (n + 1) ↔ x < 2 ^ n := by + rw [pow_succ', ←bit0_eq_two_mul] + cases b <;> simp + +/-- If `x` and `y` fit within `n` bits, then the result of any bitwise operation on `x` and `y` also +fits within `n` bits -/ +theorem bitwise_lt {f x y n} (hx : x < 2 ^ n) (hy : y < 2 ^ n) : + bitwise f x y < 2 ^ n := by + induction x using Nat.binaryRec' generalizing n y with + | z => + simp only [bitwise_zero_left] + split <;> assumption + | @f bx nx hnx ih => + cases y using Nat.binaryRec' with + | z => + simp only [bitwise_zero_right] + split <;> assumption + | f «by» ny hny => + rw [bitwise_bit' _ _ _ _ hnx hny] + cases n <;> simp_all + +lemma shiftLeft_lt {x n m : ℕ} (h : x < 2 ^ n) : x <<< m < 2 ^ (n + m) := by + simp only [pow_add, shiftLeft_eq, mul_lt_mul_right (two_pow_pos _), h] + +/-- Note that the LHS is the expression used within `Std.BitVec.append`, hence the name. 
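For instance, appending the value `y = 5 < 2 ^ 3` to the value `x = 3 < 2 ^ 4` gives
`5 <<< 4 ||| 3 = 83 < 2 ^ (4 + 3) = 128`; a minimal sketch of a concrete instance, assuming
`decide` can evaluate the bitwise operations on these literals:

```lean
example : (5 <<< 4 ||| 3 : ℕ) < 2 ^ (4 + 3) := by decide
```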
-/ +lemma append_lt {x y n m} (hx : x < 2 ^ n) (hy : y < 2 ^ m) : y <<< n ||| x < 2 ^ (n + m) := by + apply bitwise_lt + · rw [add_comm]; apply shiftLeft_lt hy + · apply lt_of_lt_of_le hx <| pow_le_pow (le_succ _) (le_add_right _ _) + end Nat diff --git a/Mathlib/Data/Nat/Factorial/Cast.lean b/Mathlib/Data/Nat/Factorial/Cast.lean index fb7730b8dcf50..77f9d8cd4e6f8 100644 --- a/Mathlib/Data/Nat/Factorial/Cast.lean +++ b/Mathlib/Data/Nat/Factorial/Cast.lean @@ -41,7 +41,7 @@ theorem cast_descFactorial : rw [← ascPochhammer_eval_cast, ascPochhammer_nat_eq_descFactorial] induction' b with b · simp - · simp_rw [add_succ, succ_sub_one] + · simp_rw [add_succ, Nat.add_one_sub_one] obtain h | h := le_total a b · rw [descFactorial_of_lt (lt_succ_of_le h), descFactorial_of_lt (lt_succ_of_le _)] rw [tsub_eq_zero_iff_le.mpr h, zero_add] diff --git a/Mathlib/Data/Nat/Fib.lean b/Mathlib/Data/Nat/Fib/Basic.lean similarity index 100% rename from Mathlib/Data/Nat/Fib.lean rename to Mathlib/Data/Nat/Fib/Basic.lean diff --git a/Mathlib/Data/Nat/Fib/Zeckendorf.lean b/Mathlib/Data/Nat/Fib/Zeckendorf.lean index d446ad1dce8dd..19afbb61cfeaf 100644 --- a/Mathlib/Data/Nat/Fib/Zeckendorf.lean +++ b/Mathlib/Data/Nat/Fib/Zeckendorf.lean @@ -3,7 +3,7 @@ Copyright (c) 2023 Yaël Dillies. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Yaël Dillies -/ -import Mathlib.Data.Nat.Fib +import Mathlib.Data.Nat.Fib.Basic /-! # Zeckendorf's Theorem diff --git a/Mathlib/Data/Nat/Order/Basic.lean b/Mathlib/Data/Nat/Order/Basic.lean index 2d108d231bc39..5148474b27966 100644 --- a/Mathlib/Data/Nat/Order/Basic.lean +++ b/Mathlib/Data/Nat/Order/Basic.lean @@ -321,7 +321,7 @@ theorem lt_mul_self_iff : ∀ {n : ℕ}, n < n * n ↔ 1 < n theorem add_sub_one_le_mul (hm : m ≠ 0) (hn : n ≠ 0) : m + n - 1 ≤ m * n := by cases m · cases hm rfl - · rw [succ_add, succ_sub_one, succ_mul] + · rw [succ_add, Nat.add_one_sub_one, succ_mul] exact add_le_add_right (le_mul_of_one_le_right' $ succ_le_iff.2 $ pos_iff_ne_zero.2 hn) _ #align nat.add_sub_one_le_mul Nat.add_sub_one_le_mul diff --git a/Mathlib/Data/Nat/Order/Lemmas.lean b/Mathlib/Data/Nat/Order/Lemmas.lean index 10ff749e6d21e..347d22795547b 100644 --- a/Mathlib/Data/Nat/Order/Lemmas.lean +++ b/Mathlib/Data/Nat/Order/Lemmas.lean @@ -83,6 +83,12 @@ protected theorem div_eq_zero_iff {a b : ℕ} (hb : 0 < b) : a / b = 0 ↔ a < b mul_zero, add_zero]⟩ #align nat.div_eq_zero_iff Nat.div_eq_zero_iff +protected lemma div_ne_zero_iff (hb : b ≠ 0) : a / b ≠ 0 ↔ b ≤ a := by + rw [ne_eq, Nat.div_eq_zero_iff hb.bot_lt, not_lt] + +protected lemma div_pos_iff (hb : b ≠ 0) : 0 < a / b ↔ b ≤ a := by + rw [pos_iff_ne_zero, Nat.div_ne_zero_iff hb] + #align nat.div_eq_zero Nat.div_eq_of_lt /-! 
### `mod`, `dvd` -/ diff --git a/Mathlib/Data/Nat/Squarefree.lean b/Mathlib/Data/Nat/Squarefree.lean index dab9f9911b948..cb49fb02d565c 100644 --- a/Mathlib/Data/Nat/Squarefree.lean +++ b/Mathlib/Data/Nat/Squarefree.lean @@ -23,6 +23,7 @@ squarefree, multiplicity -/ +open Finset namespace Nat @@ -37,12 +38,13 @@ theorem Squarefree.nodup_factors {n : ℕ} (hn : Squarefree n) : n.factors.Nodup (Nat.squarefree_iff_nodup_factors hn.ne_zero).mp hn namespace Nat +variable {s : Finset ℕ} {m n p : ℕ} theorem squarefree_iff_prime_squarefree {n : ℕ} : Squarefree n ↔ ∀ x, Prime x → ¬x * x ∣ n := squarefree_iff_irreducible_sq_not_dvd_of_exists_irreducible ⟨_, prime_two⟩ #align nat.squarefree_iff_prime_squarefree Nat.squarefree_iff_prime_squarefree -theorem Squarefree.factorization_le_one {n : ℕ} (p : ℕ) (hn : Squarefree n) : +theorem _root_.Squarefree.natFactorization_le_one {n : ℕ} (p : ℕ) (hn : Squarefree n) : n.factorization p ≤ 1 := by rcases eq_or_ne n 0 with (rfl | hn') · simp @@ -54,7 +56,11 @@ theorem Squarefree.factorization_le_one {n : ℕ} (p : ℕ) (hn : Squarefree n) exact mod_cast this · rw [factorization_eq_zero_of_non_prime _ hp] exact zero_le_one -#align nat.squarefree.factorization_le_one Nat.Squarefree.factorization_le_one +#align nat.squarefree.factorization_le_one Squarefree.natFactorization_le_one + +lemma factorization_eq_one_of_squarefree (hn : Squarefree n) (hp : p.Prime) (hpn : p ∣ n) : + factorization n p = 1 := + (hn.natFactorization_le_one _).antisymm $ (hp.dvd_iff_one_le_factorization hn.ne_zero).1 hpn theorem squarefree_of_factorization_le_one {n : ℕ} (hn : n ≠ 0) (hn' : ∀ p, n.factorization p ≤ 1) : Squarefree n := by @@ -66,7 +72,7 @@ theorem squarefree_of_factorization_le_one {n : ℕ} (hn : n ≠ 0) (hn' : ∀ p theorem squarefree_iff_factorization_le_one {n : ℕ} (hn : n ≠ 0) : Squarefree n ↔ ∀ p, n.factorization p ≤ 1 := - ⟨fun p hn => Squarefree.factorization_le_one hn p, squarefree_of_factorization_le_one hn⟩ + ⟨fun hn => hn.natFactorization_le_one, squarefree_of_factorization_le_one hn⟩ #align nat.squarefree_iff_factorization_le_one Nat.squarefree_iff_factorization_le_one theorem Squarefree.ext_iff {n m : ℕ} (hn : Squarefree n) (hm : Squarefree m) : @@ -76,8 +82,8 @@ theorem Squarefree.ext_iff {n m : ℕ} (hn : Squarefree n) (hm : Squarefree m) : · have h₁ := h _ hp rw [← not_iff_not, hp.dvd_iff_one_le_factorization hn.ne_zero, not_le, lt_one_iff, hp.dvd_iff_one_le_factorization hm.ne_zero, not_le, lt_one_iff] at h₁ - have h₂ := Squarefree.factorization_le_one p hn - have h₃ := Squarefree.factorization_le_one p hm + have h₂ := hn.natFactorization_le_one p + have h₃ := hm.natFactorization_le_one p rw [Nat.le_add_one_iff, le_zero_iff] at h₂ h₃ cases' h₂ with h₂ h₂ · rwa [h₂, eq_comm, ← h₁] @@ -386,6 +392,44 @@ theorem squarefree_mul_iff {m n : ℕ} : ⟨fun h => ⟨coprime_of_squarefree_mul h, (squarefree_mul $ coprime_of_squarefree_mul h).mp h⟩, fun h => (squarefree_mul h.1).mpr h.2⟩ +lemma coprime_div_gcd_of_squarefree (hm : Squarefree m) (hn : n ≠ 0) : Coprime (m / gcd m n) n := by + have : Coprime (m / gcd m n) (gcd m n) := + coprime_of_squarefree_mul $ by simpa [Nat.div_mul_cancel, gcd_dvd_left] + simpa [Nat.div_mul_cancel, gcd_dvd_right] using + (coprime_div_gcd_div_gcd (m := m) (gcd_ne_zero_right hn).bot_lt).mul_right this + +lemma prod_primeFactors_of_squarefree (hn : Squarefree n) : ∏ p in n.primeFactors, p = n := by + convert factorization_prod_pow_eq_self hn.ne_zero + refine prod_congr rfl fun p hp ↦ ?_ + simp only [support_factorization, toFinset_factors, 
mem_primeFactors_of_ne_zero hn.ne_zero] at hp + simp_rw [factorization_eq_one_of_squarefree hn hp.1 hp.2, pow_one] + +lemma primeFactors_prod (hs : ∀ p ∈ s, p.Prime) : primeFactors (∏ p in s, p) = s := by + have hn : ∏ p in s, p ≠ 0 := prod_ne_zero_iff.2 fun p hp ↦ (hs _ hp).ne_zero + ext p + rw [mem_primeFactors_of_ne_zero hn, and_congr_right (fun hp ↦ hp.prime.dvd_finset_prod_iff _)] + refine' ⟨_, fun hp ↦ ⟨hs _ hp, _, hp, dvd_rfl⟩⟩ + rintro ⟨hp, q, hq, hpq⟩ + rwa [←((hs _ hq).dvd_iff_eq hp.ne_one).1 hpq] + +lemma primeFactors_div_gcd (hm : Squarefree m) (hn : n ≠ 0) : + primeFactors (m / m.gcd n) = primeFactors m \ primeFactors n := by + ext p + have : m / m.gcd n ≠ 0 := + (Nat.div_ne_zero_iff $ gcd_ne_zero_right hn).2 $ gcd_le_left _ hm.ne_zero.bot_lt + simp only [mem_primeFactors, ne_eq, this, not_false_eq_true, and_true, not_and, mem_sdiff, + hm.ne_zero, hn, dvd_div_iff (gcd_dvd_left _ _)] + refine ⟨fun hp ↦ ⟨⟨hp.1, dvd_of_mul_left_dvd hp.2⟩, fun _ hpn ↦ hp.1.not_unit $ hm _ $ + (mul_dvd_mul_right (dvd_gcd (dvd_of_mul_left_dvd hp.2) hpn) _).trans hp.2⟩, fun hp ↦ + ⟨hp.1.1, Coprime.mul_dvd_of_dvd_of_dvd ?_ (gcd_dvd_left _ _) hp.1.2⟩⟩ + rw [coprime_comm, hp.1.1.coprime_iff_not_dvd] + exact fun hpn ↦ hp.2 hp.1.1 $ hpn.trans $ gcd_dvd_right _ _ + +lemma prod_primeFactors_invOn_squarefree : + Set.InvOn (fun n : ℕ ↦ (factorization n).support) (fun s ↦ ∏ p in s, p) + {s | ∀ p ∈ s, p.Prime} {n | Squarefree n} := + ⟨fun _s ↦ primeFactors_prod, fun _n ↦ prod_primeFactors_of_squarefree⟩ + theorem prod_factors_toFinset_of_squarefree {n : ℕ} (hn : Squarefree n) : ∏ p in n.factors.toFinset, p = n := by rw [List.prod_toFinset _ hn.nodup_factors, List.map_id'', Nat.prod_factors hn.ne_zero] diff --git a/Mathlib/Data/Opposite.lean b/Mathlib/Data/Opposite.lean index 3df9cdf371842..2d4e22256e5c8 100644 --- a/Mathlib/Data/Opposite.lean +++ b/Mathlib/Data/Opposite.lean @@ -110,6 +110,10 @@ theorem unop_eq_iff_eq_op {x} {y : α} : unop x = y ↔ x = op y := instance [Inhabited α] : Inhabited αᵒᵖ := ⟨op default⟩ +instance [Nonempty α] : Nonempty αᵒᵖ := Nonempty.map op ‹_› + +instance [Subsingleton α] : Subsingleton αᵒᵖ := unop_injective.subsingleton + /-- A recursor for `Opposite`. The `@[eliminator]` attribute makes it the default induction principle for `Opposite` so you don't need to use `induction x using Opposite.rec'`. 
-/ diff --git a/Mathlib/Data/Polynomial/Derivative.lean b/Mathlib/Data/Polynomial/Derivative.lean index ba62878b67843..e3c921c66158a 100644 --- a/Mathlib/Data/Polynomial/Derivative.lean +++ b/Mathlib/Data/Polynomial/Derivative.lean @@ -64,7 +64,7 @@ theorem coeff_derivative (p : R[X]) (n : ℕ) : · intros rw [Nat.cast_zero, mul_zero, zero_mul] · intro _ H - rw [Nat.succ_sub_one, if_neg (mt (congr_arg Nat.succ) H.symm), mul_zero] + rw [Nat.add_one_sub_one, if_neg (mt (congr_arg Nat.succ) H.symm), mul_zero] · rw [if_pos (add_tsub_cancel_right n 1).symm, mul_one, Nat.cast_add, Nat.cast_one, mem_support_iff] intro h @@ -314,7 +314,7 @@ theorem derivative_mul {f g : R[X]} : derivative (f * g) = derivative f * g + f cases n <;> cases m <;> simp_rw [add_smul, mul_smul_comm, smul_mul_assoc, X_pow_mul_assoc, ← mul_assoc, ← C_mul, mul_assoc, ← pow_add] <;> - simp [Nat.add_succ, Nat.succ_add, Nat.succ_sub_one, zero_smul, add_comm]) + simp [Nat.add_succ, Nat.succ_add, Nat.add_one_sub_one, zero_smul, add_comm]) _ = derivative f * g + f * derivative g := by conv => rhs @@ -468,7 +468,7 @@ theorem derivative_pow_succ (p : R[X]) (n : ℕ) : theorem derivative_pow (p : R[X]) (n : ℕ) : derivative (p ^ n) = C (n : R) * p ^ (n - 1) * derivative p := Nat.casesOn n (by rw [pow_zero, derivative_one, Nat.cast_zero, C_0, zero_mul, zero_mul]) fun n => - by rw [p.derivative_pow_succ n, n.succ_sub_one, n.cast_succ] + by rw [p.derivative_pow_succ n, Nat.add_one_sub_one, n.cast_succ] #align polynomial.derivative_pow Polynomial.derivative_pow theorem derivative_sq (p : R[X]) : derivative (p ^ 2) = C 2 * p * derivative p := by diff --git a/Mathlib/Data/Rat/Cast/CharZero.lean b/Mathlib/Data/Rat/Cast/CharZero.lean index 22be3631afb0f..8885af1a69a1f 100644 --- a/Mathlib/Data/Rat/Cast/CharZero.lean +++ b/Mathlib/Data/Rat/Cast/CharZero.lean @@ -121,7 +121,7 @@ theorem cast_mk (a b : ℤ) : (a /. b : α) = a / b := by #align rat.cast_mk Rat.cast_mk @[simp, norm_cast] -theorem cast_pow (q : ℚ) (k : ℕ) : (↑(q ^ k) : α) = (q : α) ^ k := +theorem cast_pow (q : ℚ) (k : ℕ) : ↑(q ^ k) = (q : α) ^ k := (castHom α).map_pow q k #align rat.cast_pow Rat.cast_pow diff --git a/Mathlib/Data/Real/GoldenRatio.lean b/Mathlib/Data/Real/GoldenRatio.lean index 351769456b0fb..d1b7e21d0f2cf 100644 --- a/Mathlib/Data/Real/GoldenRatio.lean +++ b/Mathlib/Data/Real/GoldenRatio.lean @@ -4,7 +4,7 @@ Released under Apache 2.0 license as described in the file LICENSE. 
Authors: Anatole Dedecker, Alexey Soloyev, Junyan Xu -/ import Mathlib.Data.Real.Irrational -import Mathlib.Data.Nat.Fib +import Mathlib.Data.Nat.Fib.Basic import Mathlib.Data.Nat.PrimeNormNum import Mathlib.Data.Fin.VecNotation import Mathlib.Algebra.LinearRecurrence diff --git a/Mathlib/Data/Set/Pointwise/SMul.lean b/Mathlib/Data/Set/Pointwise/SMul.lean index 016a7e5fa0ebc..fc7cd705c3915 100644 --- a/Mathlib/Data/Set/Pointwise/SMul.lean +++ b/Mathlib/Data/Set/Pointwise/SMul.lean @@ -857,9 +857,9 @@ theorem op_smul_set_mul_eq_mul_smul_set (a : α) (s : Set α) (t : Set α) : end Semigroup -section LeftCancelSemigroup +section IsLeftCancelMul -variable [LeftCancelSemigroup α] {s t : Set α} +variable [Mul α] [IsLeftCancelMul α] {s t : Set α} @[to_additive] theorem pairwiseDisjoint_smul_iff : @@ -868,7 +868,7 @@ theorem pairwiseDisjoint_smul_iff : #align set.pairwise_disjoint_smul_iff Set.pairwiseDisjoint_smul_iff #align set.pairwise_disjoint_vadd_iff Set.pairwiseDisjoint_vadd_iff -end LeftCancelSemigroup +end IsLeftCancelMul section Group diff --git a/Mathlib/Data/Stream/Init.lean b/Mathlib/Data/Stream/Init.lean index cc6b3eff963e7..24eaf6b30e45c 100644 --- a/Mathlib/Data/Stream/Init.lean +++ b/Mathlib/Data/Stream/Init.lean @@ -608,7 +608,7 @@ theorem get?_take_succ (n : Nat) (s : Stream' α) : @[simp] theorem dropLast_take {xs : Stream' α} : (Stream'.take n xs).dropLast = Stream'.take (n-1) xs := by cases n; case zero => simp - case succ n => rw [take_succ', List.dropLast_concat, Nat.succ_sub_one] + case succ n => rw [take_succ', List.dropLast_concat, Nat.add_one_sub_one] @[simp] theorem append_take_drop : ∀ (n : Nat) (s : Stream' α), diff --git a/Mathlib/Data/ZMod/Basic.lean b/Mathlib/Data/ZMod/Basic.lean index 67b49f60f1a95..b580604a670f5 100644 --- a/Mathlib/Data/ZMod/Basic.lean +++ b/Mathlib/Data/ZMod/Basic.lean @@ -1106,13 +1106,13 @@ theorem valMinAbs_natAbs_eq_min {n : ℕ} [hpos : NeZero n] (a : ZMod n) : · rw [Int.natAbs_ofNat] symm apply - min_eq_left (le_trans h (le_trans (Nat.half_le_of_sub_le_half _) (Nat.sub_le_sub_left n h))) + min_eq_left (le_trans h (le_trans (Nat.half_le_of_sub_le_half _) (Nat.sub_le_sub_left h n))) rw [Nat.sub_sub_self (Nat.div_le_self _ _)] · rw [← Int.natAbs_neg, neg_sub, ← Nat.cast_sub a.val_le] symm apply min_eq_right - (le_trans (le_trans (Nat.sub_le_sub_left n (lt_of_not_ge h)) (Nat.le_half_of_half_lt_sub _)) + (le_trans (le_trans (Nat.sub_le_sub_left (lt_of_not_ge h) n) (Nat.le_half_of_half_lt_sub _)) (le_of_not_ge h)) rw [Nat.sub_sub_self (Nat.div_lt_self (lt_of_le_of_ne' (Nat.zero_le _) hpos.1) one_lt_two)] apply Nat.lt_succ_self diff --git a/Mathlib/Data/ZMod/Units.lean b/Mathlib/Data/ZMod/Units.lean index bf5836c52ebc0..be63e460b092f 100644 --- a/Mathlib/Data/ZMod/Units.lean +++ b/Mathlib/Data/ZMod/Units.lean @@ -30,7 +30,7 @@ lemma unitsMap_comp {d : ℕ} (hm : n ∣ m) (hd : m ∣ d) : lemma unitsMap_self (n : ℕ) : unitsMap (dvd_refl n) = MonoidHom.id _ := by simp [unitsMap, castHom_self] -lemma IsUnit_cast_of_dvd (hm : n ∣ m) (a : Units (ZMod m)) : IsUnit ((a : ZMod m) : ZMod n) := +lemma IsUnit_cast_of_dvd (hm : n ∣ m) (a : Units (ZMod m)) : IsUnit ((a : ZMod m) : ZMod n) := Units.isUnit (unitsMap hm a) end ZMod diff --git a/Mathlib/Geometry/Euclidean/Angle/Oriented/Affine.lean b/Mathlib/Geometry/Euclidean/Angle/Oriented/Affine.lean index 4a1b712e025fb..dcaaa3b16797c 100644 --- a/Mathlib/Geometry/Euclidean/Angle/Oriented/Affine.lean +++ b/Mathlib/Geometry/Euclidean/Angle/Oriented/Affine.lean @@ -742,7 +742,7 @@ theorem 
_root_.Collinear.oangle_sign_of_sameRay_vsub {p₁ p₂ p₃ p₄ : P} ( exact smul_vsub_rev_mem_vectorSpan_pair _ _ _ have hp₁p₂s : (p₁, p₅, p₂) ∈ s := by simp_rw [Set.mem_image, Set.mem_prod, Set.mem_setOf, Set.mem_univ, true_and_iff, Prod.ext_iff] - refine' ⟨⟨⟨p₁, left_mem_affineSpan_pair _ _ _⟩, p₂ -ᵥ p₁⟩, + refine' ⟨⟨⟨p₁, left_mem_affineSpan_pair ℝ _ _⟩, p₂ -ᵥ p₁⟩, ⟨SameRay.rfl, vsub_ne_zero.2 hp₁p₂.symm⟩, _⟩ simp have hp₃p₄s : (p₃, p₅, p₄) ∈ s := by diff --git a/Mathlib/Geometry/Euclidean/Circumcenter.lean b/Mathlib/Geometry/Euclidean/Circumcenter.lean index 911510adbbc5c..30d24013b0ba2 100644 --- a/Mathlib/Geometry/Euclidean/Circumcenter.lean +++ b/Mathlib/Geometry/Euclidean/Circumcenter.lean @@ -633,7 +633,7 @@ theorem centroid_eq_affineCombination_of_pointsWithCircumcenter {n : ℕ} (s : S sum_pointsWithCircumcenter, centroidWeightsWithCircumcenter, pointsWithCircumcenter_point, zero_smul, add_zero, centroidWeights, Set.sum_indicator_subset_of_eq_zero (Function.const (Fin (n + 1)) (card fs : ℝ)⁻¹) - (fun i wi => wi • (s.points i -ᵥ Classical.choice AddTorsor.Nonempty)) fs.subset_univ fun _ => + (fun i wi => wi • (s.points i -ᵥ Classical.choice AddTorsor.nonempty)) fs.subset_univ fun _ => zero_smul ℝ _, Set.indicator_apply] congr diff --git a/Mathlib/GroupTheory/Congruence.lean b/Mathlib/GroupTheory/Congruence.lean index 6a6b88ae91f20..dfdf33111e291 100644 --- a/Mathlib/GroupTheory/Congruence.lean +++ b/Mathlib/GroupTheory/Congruence.lean @@ -121,13 +121,13 @@ instance : Inhabited (Con M) := --Porting note: upgraded to FunLike /-- A coercion from a congruence relation to its underlying binary relation. -/ @[to_additive "A coercion from an additive congruence relation to its underlying binary relation."] -instance : FunLike (Con M) M (fun _ => M → Prop) := - { coe := fun c => fun x y => @Setoid.r _ c.toSetoid x y - coe_injective' := fun x y h => by - rcases x with ⟨⟨x, _⟩, _⟩ - rcases y with ⟨⟨y, _⟩, _⟩ - have : x = y := h - subst x; rfl } +instance : FunLike (Con M) M (fun _ => M → Prop) where + coe c := c.r + coe_injective' := fun x y h => by + rcases x with ⟨⟨x, _⟩, _⟩ + rcases y with ⟨⟨y, _⟩, _⟩ + have : x = y := h + subst x; rfl @[to_additive (attr := simp)] theorem rel_eq_coe (c : Con M) : c.r = c := @@ -178,11 +178,7 @@ variable {c} /-- The map sending a congruence relation to its underlying binary relation is injective. -/ @[to_additive "The map sending an additive congruence relation to its underlying binary relation is injective."] -theorem ext' {c d : Con M} (H : c.r = d.r) : c = d := by - rcases c with ⟨⟨⟩⟩ - rcases d with ⟨⟨⟩⟩ - cases H - congr +theorem ext' {c d : Con M} (H : ⇑c = ⇑d) : c = d := FunLike.coe_injective H #align con.ext' Con.ext' #align add_con.ext' AddCon.ext' @@ -211,10 +207,9 @@ theorem ext_iff {c d : Con M} : (∀ x y, c x y ↔ d x y) ↔ c = d := /-- Two congruence relations are equal iff their underlying binary relations are equal. -/ @[to_additive "Two additive congruence relations are equal iff their underlying binary relations are equal."] -theorem ext'_iff {c d : Con M} : c.r = d.r ↔ c = d := - ⟨ext', fun h => h ▸ rfl⟩ -#align con.ext'_iff Con.ext'_iff -#align add_con.ext'_iff AddCon.ext'_iff +theorem coe_inj {c d : Con M} : ⇑c = ⇑d ↔ c = d := FunLike.coe_injective.eq_iff +#align con.ext'_iff Con.coe_inj +#align add_con.ext'_iff AddCon.coe_inj /-- The kernel of a multiplication-preserving function as a congruence relation. 
-/ @[to_additive "The kernel of an addition-preserving function as an additive congruence relation."] @@ -413,8 +408,8 @@ protected def congr {c d : Con M} (h : c = d) : c.Quotient ≃* d.Quotient := `x` is related to `y` by `d` if `x` is related to `y` by `c`. -/ @[to_additive "For additive congruence relations `c, d` on a type `M` with an addition, `c ≤ d` iff `∀ x y ∈ M`, `x` is related to `y` by `d` if `x` is related to `y` by `c`."] -instance : LE (Con M) := - ⟨fun c d => ∀ ⦃x y⦄, c x y → d x y⟩ +instance : LE (Con M) where + le c d := ∀ ⦃x y⦄, c x y → d x y /-- Definition of `≤` for congruence relations. -/ @[to_additive "Definition of `≤` for additive congruence relations."] @@ -426,12 +421,12 @@ theorem le_def {c d : Con M} : c ≤ d ↔ ∀ {x y}, c x y → d x y := /-- The infimum of a set of congruence relations on a given type with a multiplication. -/ @[to_additive "The infimum of a set of additive congruence relations on a given type with an addition."] -instance : InfSet (Con M) := - ⟨fun S => - ⟨⟨fun x y => ∀ c : Con M, c ∈ S → c x y, - ⟨fun x c _ => c.refl x, fun h c hc => c.symm <| h c hc, fun h1 h2 c hc => - c.trans (h1 c hc) <| h2 c hc⟩⟩, - fun h1 h2 c hc => c.mul (h1 c hc) <| h2 c hc⟩⟩ +instance : InfSet (Con M) where + sInf S := + { r := fun x y => ∀ c : Con M, c ∈ S → c x y + iseqv := ⟨fun x c _ => c.refl x, fun h c hc => c.symm <| h c hc, + fun h1 h2 c hc => c.trans (h1 c hc) <| h2 c hc⟩ + mul' := fun h1 h2 c hc => c.mul (h1 c hc) <| h2 c hc } /-- The infimum of a set of congruence relations is the same as the infimum of the set's image under the map to the underlying equivalence relation. -/ @@ -445,51 +440,49 @@ theorem sInf_toSetoid (S : Set (Con M)) : (sInf S).toSetoid = sInf (toSetoid '' /-- The infimum of a set of congruence relations is the same as the infimum of the set's image under the map to the underlying binary relation. -/ -@[to_additive "The infimum of a set of additive congruence relations is the same as the infimum -of the set's image under the map to the underlying binary relation."] -theorem sInf_def (S : Set (Con M)) : - ⇑(sInf S) = sInf (@Set.image (Con M) (M → M → Prop) (↑) S) := by +@[to_additive (attr := simp, norm_cast) + "The infimum of a set of additive congruence relations is the same as the infimum + of the set's image under the map to the underlying binary relation."] +theorem coe_sInf (S : Set (Con M)) : + ⇑(sInf S) = sInf ((⇑) '' S) := by ext simp only [sInf_image, iInf_apply, iInf_Prop_eq] rfl -#align con.Inf_def Con.sInf_def -#align add_con.Inf_def AddCon.sInf_def +#align con.Inf_def Con.coe_sInf +#align add_con.Inf_def AddCon.coe_sInf @[to_additive] instance : PartialOrder (Con M) where - le := (· ≤ ·) - lt c d := c ≤ d ∧ ¬d ≤ c le_refl _ _ _ := id le_trans _ _ _ h1 h2 _ _ h := h2 <| h1 h - lt_iff_le_not_le _ _ := Iff.rfl le_antisymm _ _ hc hd := ext fun _ _ => ⟨fun h => hc h, fun h => hd h⟩ /-- The complete lattice of congruence relations on a given type with a multiplication. 
-/ @[to_additive "The complete lattice of additive congruence relations on a given type with an addition."] -instance : CompleteLattice (Con M) := - { (completeLatticeOfInf (Con M)) fun s => +instance : CompleteLattice (Con M) where + __ := completeLatticeOfInf (Con M) fun s => ⟨fun r hr x y h => (h : ∀ r ∈ s, (r : Con M) x y) r hr, fun r hr x y h r' hr' => hr hr' - h⟩ with - inf := fun c d => - ⟨c.toSetoid ⊓ d.toSetoid, fun h1 h2 => ⟨c.mul h1.1 h2.1, d.mul h1.2 h2.2⟩⟩ - inf_le_left := fun _ _ _ _ h => h.1 - inf_le_right := fun _ _ _ _ h => h.2 - le_inf := fun _ _ _ hb hc _ _ h => ⟨hb h, hc h⟩ - top := { Setoid.completeLattice.top with mul' := by tauto } - le_top := fun _ _ _ _ => trivial - bot := { Setoid.completeLattice.bot with mul' := fun h1 h2 => h1 ▸ h2 ▸ rfl } - bot_le := fun c x y h => h ▸ c.refl x } + h⟩ + inf c d := ⟨c.toSetoid ⊓ d.toSetoid, fun h1 h2 => ⟨c.mul h1.1 h2.1, d.mul h1.2 h2.2⟩⟩ + inf_le_left _ _ := fun _ _ h => h.1 + inf_le_right _ _ := fun _ _ h => h.2 + le_inf _ _ _ hb hc := fun _ _ h => ⟨hb h, hc h⟩ + top := { Setoid.completeLattice.top with mul' := by tauto } + le_top _ := fun _ _ _ => trivial + bot := { Setoid.completeLattice.bot with mul' := fun h1 h2 => h1 ▸ h2 ▸ rfl } + bot_le c := fun x y h => h ▸ c.refl x /-- The infimum of two congruence relations equals the infimum of the underlying binary operations. -/ -@[to_additive "The infimum of two additive congruence relations equals the infimum of the -underlying binary operations."] -theorem inf_def {c d : Con M} : (c ⊓ d).r = c.r ⊓ d.r := +@[to_additive (attr := simp, norm_cast) + "The infimum of two additive congruence relations equals the infimum of the underlying binary + operations."] +theorem coe_inf {c d : Con M} : ⇑(c ⊓ d) = ⇑c ⊓ ⇑d := rfl -#align con.inf_def Con.inf_def -#align add_con.inf_def AddCon.inf_def +#align con.inf_def Con.coe_inf +#align add_con.inf_def AddCon.coe_inf /-- Definition of the infimum of two congruence relations. -/ @[to_additive "Definition of the infimum of two additive congruence relations."] @@ -505,9 +498,9 @@ containing a binary relation `r` equals the infimum of the set of additive congr containing `r`."] theorem conGen_eq (r : M → M → Prop) : conGen r = sInf { s : Con M | ∀ x y, r x y → s x y } := le_antisymm - (le_sInf (fun s hs x y (hxy : (conGen r).r x y) => - show s.r x y by - apply ConGen.Rel.recOn (motive := fun x y _ => s.r x y) hxy + (le_sInf (fun s hs x y (hxy : (conGen r) x y) => + show s x y by + apply ConGen.Rel.recOn (motive := fun x y _ => s x y) hxy · exact fun x y h => hs x y h · exact s.refl' · exact fun _ => s.symm' @@ -521,7 +514,7 @@ theorem conGen_eq (r : M → M → Prop) : conGen r = sInf { s : Con M | ∀ x y congruence relation containing `r`. -/ @[to_additive addConGen_le "The smallest additive congruence relation containing a binary relation `r` is contained in any additive congruence relation containing `r`."] -theorem conGen_le {r : M → M → Prop} {c : Con M} (h : ∀ x y, r x y → @Setoid.r _ c.toSetoid x y) : +theorem conGen_le {r : M → M → Prop} {c : Con M} (h : ∀ x y, r x y → c x y) : conGen r ≤ c := by rw [conGen_eq]; exact sInf_le h #align con.con_gen_le Con.conGen_le #align add_con.add_con_gen_le AddCon.addConGen_le @@ -571,7 +564,7 @@ theorem sup_eq_conGen (c d : Con M) : c ⊔ d = conGen fun x y => c x y ∨ d x the supremum of the underlying binary operations. 
-/ @[to_additive "The supremum of two additive congruence relations equals the smallest additive congruence relation containing the supremum of the underlying binary operations."] -theorem sup_def {c d : Con M} : c ⊔ d = conGen (c.r ⊔ d.r) := by rw [sup_eq_conGen]; rfl +theorem sup_def {c d : Con M} : c ⊔ d = conGen (⇑c ⊔ ⇑d) := by rw [sup_eq_conGen]; rfl #align con.sup_def Con.sup_def #align add_con.sup_def AddCon.sup_def @@ -596,7 +589,7 @@ theorem sSup_eq_conGen (S : Set (Con M)) : additive congruence relation containing the supremum of the set's image under the map to the underlying binary relation."] theorem sSup_def {S : Set (Con M)} : - sSup S = conGen (sSup (@Set.image (Con M) (M → M → Prop) ((⇑) : Con M → M → M → Prop) S)) := by + sSup S = conGen (sSup ((⇑) '' S)) := by rw [sSup_eq_conGen, sSup_image] congr with (x y) simp only [sSup_image, iSup_apply, iSup_Prop_eq, exists_prop, rel_eq_coe] diff --git a/Mathlib/GroupTheory/CoprodI.lean b/Mathlib/GroupTheory/CoprodI.lean index 89b835c227d00..8722cd4736ff6 100644 --- a/Mathlib/GroupTheory/CoprodI.lean +++ b/Mathlib/GroupTheory/CoprodI.lean @@ -152,7 +152,7 @@ def lift : (∀ i, M i →* N) ≃ (CoprodI M →* N) where toFun fi := Con.lift _ (FreeMonoid.lift fun p : Σi, M i => fi p.fst p.snd) <| Con.conGen_le <| by - simp_rw [Con.rel_eq_coe, Con.ker_rel] + simp_rw [Con.ker_rel] rintro _ _ (i | ⟨x, y⟩) · change FreeMonoid.lift _ (FreeMonoid.of _) = FreeMonoid.lift _ 1 simp only [MonoidHom.map_one, FreeMonoid.lift_eval_of] diff --git a/Mathlib/GroupTheory/GroupAction/Defs.lean b/Mathlib/GroupTheory/GroupAction/Defs.lean index 54e106c3b274b..27e155f2274ad 100644 --- a/Mathlib/GroupTheory/GroupAction/Defs.lean +++ b/Mathlib/GroupTheory/GroupAction/Defs.lean @@ -1121,12 +1121,13 @@ variable {α} This is generalized to bundled endomorphisms by: * `Equiv.Perm.applyMulAction` * `AddMonoid.End.applyDistribMulAction` +* `AddMonoid.End.applyModule` * `AddAut.applyDistribMulAction` * `MulAut.applyMulDistribMulAction` -* `RingHom.applyDistribMulAction` * `LinearEquiv.applyDistribMulAction` * `LinearMap.applyModule` * `RingHom.applyMulSemiringAction` +* `RingAut.applyMulSemiringAction` * `AlgEquiv.applyMulSemiringAction` -/ instance Function.End.applyMulAction : diff --git a/Mathlib/GroupTheory/GroupAction/SubMulAction/Pointwise.lean b/Mathlib/GroupTheory/GroupAction/SubMulAction/Pointwise.lean index 4b9a8d2134463..b6e86fff267fc 100644 --- a/Mathlib/GroupTheory/GroupAction/SubMulAction/Pointwise.lean +++ b/Mathlib/GroupTheory/GroupAction/SubMulAction/Pointwise.lean @@ -115,14 +115,14 @@ instance : Monoid (SubMulAction R M) := { SubMulAction.semiGroup, SubMulAction.mulOneClass with } -theorem coe_pow (p : SubMulAction R M) : ∀ {n : ℕ} (_ : n ≠ 0), ↑(p ^ n) = ((p : Set M) ^ n) +theorem coe_pow (p : SubMulAction R M) : ∀ {n : ℕ} (_ : n ≠ 0), ↑(p ^ n) = (p : Set M) ^ n | 0, hn => (hn rfl).elim | 1, _ => by rw [pow_one, pow_one] | n + 2, _ => by rw [pow_succ _ (n + 1), pow_succ _ (n + 1), coe_mul, coe_pow _ n.succ_ne_zero] #align sub_mul_action.coe_pow SubMulAction.coe_pow -theorem subset_coe_pow (p : SubMulAction R M) : ∀ {n : ℕ}, ((p : Set M) ^ n) ⊆ ↑(p ^ n) +theorem subset_coe_pow (p : SubMulAction R M) : ∀ {n : ℕ}, (p : Set M) ^ n ⊆ ↑(p ^ n) | 0 => by rw [pow_zero, pow_zero] exact subset_coe_one diff --git a/Mathlib/Init/Data/Nat/Lemmas.lean b/Mathlib/Init/Data/Nat/Lemmas.lean index eef918bc45fa2..abd877037e31c 100644 --- a/Mathlib/Init/Data/Nat/Lemmas.lean +++ b/Mathlib/Init/Data/Nat/Lemmas.lean @@ -416,7 +416,7 @@ Many lemmas are proven more generally in 
mathlib `algebra/order/sub` -/ #align nat.sub_one Nat.sub_one -#align nat.succ_sub_one Nat.succ_sub_one +#align nat.succ_sub_one Nat.add_one_sub_one #align nat.succ_pred_eq_of_pos Nat.succ_pred_eq_of_pos diff --git a/Mathlib/Init/Set.lean b/Mathlib/Init/Set.lean index c5faffdc85c5a..166d606428cdd 100644 --- a/Mathlib/Init/Set.lean +++ b/Mathlib/Init/Set.lean @@ -38,9 +38,9 @@ set_option autoImplicit true /-- A set is a collection of elements of some type `α`. - Although `Set` is defined as `α → Prop`, this is an implementation detail which should not be - relied on. Instead, `setOf` and membership of a set (`∈`) should be used to convert between sets - and predicates. +Although `Set` is defined as `α → Prop`, this is an implementation detail which should not be +relied on. Instead, `setOf` and membership of a set (`∈`) should be used to convert between sets +and predicates. -/ def Set (α : Type u) := α → Prop #align set Set diff --git a/Mathlib/Lean/Expr/Basic.lean b/Mathlib/Lean/Expr/Basic.lean index b46d0decd7ea3..cd63ffc56a8e5 100644 --- a/Mathlib/Lean/Expr/Basic.lean +++ b/Mathlib/Lean/Expr/Basic.lean @@ -284,6 +284,11 @@ def fvarId? : Expr → Option FVarId | .fvar n => n | _ => none +/-- If an `Expr` has the form `Type u`, then return `some u`, otherwise `none`. -/ +def type? : Expr → Option Level + | .sort u => u.dec + | _ => none + /-- `isConstantApplication e` checks whether `e` is syntactically an application of the form `(fun x₁ ⋯ xₙ => H) y₁ ⋯ yₙ` where `H` does not contain the variable `xₙ`. In other words, it does a syntactic check that the expression does not depend on `yₙ`. -/ diff --git a/Mathlib/Lean/Expr/ExtraRecognizers.lean b/Mathlib/Lean/Expr/ExtraRecognizers.lean new file mode 100644 index 0000000000000..0bf2ba6df23d1 --- /dev/null +++ b/Mathlib/Lean/Expr/ExtraRecognizers.lean @@ -0,0 +1,30 @@ +/- +Copyright (c) 2023 Kyle Miller. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Kyle Miller +-/ +import Mathlib.Lean.Expr.Basic +import Mathlib.Data.Set.Basic + +/-! +# Additional Expr recognizers needing theory imports + +-/ + +namespace Lean.Expr + +/-- If `e` is a coercion of a set to a type, return the set. +Succeeds either for `Set.Elem s` terms or `{x // x ∈ s}` subtype terms. -/ +def coeTypeSet? (e : Expr) : Option Expr := do + if e.isAppOfArity ``Set.Elem 2 then + return e.appArg! + else if e.isAppOfArity ``Subtype 2 then + let .lam _ _ body _ := e.appArg! | failure + guard <| body.isAppOfArity ``Membership.mem 5 + let #[_, _, inst, .bvar 0, s] := body.getAppArgs | failure + guard <| inst.isAppOfArity ``Set.instMembershipSet 1 + return s + else + failure + +end Lean.Expr diff --git a/Mathlib/Lean/PrettyPrinter/Delaborator.lean b/Mathlib/Lean/PrettyPrinter/Delaborator.lean index f3fe10b4c90cb..1cae05ba0a25e 100644 --- a/Mathlib/Lean/PrettyPrinter/Delaborator.lean +++ b/Mathlib/Lean/PrettyPrinter/Delaborator.lean @@ -46,3 +46,10 @@ def withBindingBodyUnusedName' {α} (d : Syntax → Expr → DelabM α) : DelabM let n ← getUnusedName (← getExpr).bindingName! (← getExpr).bindingBody! let stxN ← annotateCurPos (mkIdent n) withBindingBody' n $ d stxN + +/-- Update `OptionsPerPos` at the given position, setting the key `n` +to have the boolean value `v`. 
-/ +def OptionsPerPos.setBool (opts : OptionsPerPos) (p : SubExpr.Pos) (n : Name) (v : Bool) : + OptionsPerPos := + let e := opts.findD p {} |>.setBool n v + opts.insert p e diff --git a/Mathlib/LinearAlgebra/AffineSpace/AffineMap.lean b/Mathlib/LinearAlgebra/AffineSpace/AffineMap.lean index 394e4209c9fe0..47c7a6e92ba48 100644 --- a/Mathlib/LinearAlgebra/AffineSpace/AffineMap.lean +++ b/Mathlib/LinearAlgebra/AffineSpace/AffineMap.lean @@ -65,7 +65,7 @@ instance AffineMap.funLike (k : Type*) {V1 : Type*} (P1 : Type*) {V2 : Type*} (P where coe := AffineMap.toFun coe_injective' := fun ⟨f, f_linear, f_add⟩ ⟨g, g_linear, g_add⟩ => fun (h : f = g) => by - cases' (AddTorsor.Nonempty : Nonempty P1) with p + cases' (AddTorsor.nonempty : Nonempty P1) with p congr with v apply vadd_right_cancel (f p) erw [← f_add, h, ← g_add] @@ -198,7 +198,7 @@ theorem linear_eq_zero_iff_exists_const (f : P1 →ᵃ[k] P2) : #align affine_map.linear_eq_zero_iff_exists_const AffineMap.linear_eq_zero_iff_exists_const instance nonempty : Nonempty (P1 →ᵃ[k] P2) := - (AddTorsor.Nonempty : Nonempty P2).elim fun p => ⟨const k P1 p⟩ + (AddTorsor.nonempty : Nonempty P2).map <| const k P1 #align affine_map.nonempty AffineMap.nonempty /-- Construct an affine map by verifying the relation between the map and its linear part at one diff --git a/Mathlib/LinearAlgebra/AffineSpace/AffineSubspace.lean b/Mathlib/LinearAlgebra/AffineSpace/AffineSubspace.lean index fc306d948cbfa..076439246f681 100644 --- a/Mathlib/LinearAlgebra/AffineSpace/AffineSubspace.lean +++ b/Mathlib/LinearAlgebra/AffineSpace/AffineSubspace.lean @@ -390,7 +390,6 @@ def toAddTorsor (s : AffineSubspace k P) [Nonempty s] : AddTorsor s.direction s ext apply add_vadd vsub a b := ⟨(a : P) -ᵥ (b : P), (vsub_left_mem_direction_iff_mem a.2 _).mpr b.2⟩ - Nonempty := by infer_instance vsub_vadd' a b := by ext apply AddTorsor.vsub_vadd' @@ -767,7 +766,7 @@ variable (P) /-- The direction of `⊤` is the whole module as a submodule. -/ @[simp] theorem direction_top : (⊤ : AffineSubspace k P).direction = ⊤ := by - cases' S.Nonempty with p + cases' S.nonempty with p ext v refine' ⟨imp_intro Submodule.mem_top, fun _hv => _⟩ have hpv : (v +ᵥ p -ᵥ p : V) ∈ (⊤ : AffineSubspace k P).direction := diff --git a/Mathlib/LinearAlgebra/AffineSpace/Combination.lean b/Mathlib/LinearAlgebra/AffineSpace/Combination.lean index bc989326f7902..b8cab2b2369ce 100644 --- a/Mathlib/LinearAlgebra/AffineSpace/Combination.lean +++ b/Mathlib/LinearAlgebra/AffineSpace/Combination.lean @@ -248,7 +248,7 @@ from the given points, as a linear map on the weights. This is intended to be used when the sum of the weights is 0; that condition is specified as a hypothesis on those lemmas that require it. -/ def weightedVSub (p : ι → P) : (ι → k) →ₗ[k] V := - s.weightedVSubOfPoint p (Classical.choice S.Nonempty) + s.weightedVSubOfPoint p (Classical.choice S.nonempty) #align finset.weighted_vsub Finset.weightedVSub /-- Applying `weightedVSub` with given weights. This is for the case @@ -258,7 +258,7 @@ that base point will cancel out later); a more typical use case for `weightedVSub_eq_weightedVSubOfPoint_of_sum_eq_zero` and then using `weightedVSubOfPoint_apply`. 
-/ theorem weightedVSub_apply (w : ι → k) (p : ι → P) : - s.weightedVSub p w = ∑ i in s, w i • (p i -ᵥ Classical.choice S.Nonempty) := by + s.weightedVSub p w = ∑ i in s, w i • (p i -ᵥ Classical.choice S.nonempty) := by simp [weightedVSub, LinearMap.sum_apply] #align finset.weighted_vsub_apply Finset.weightedVSub_apply @@ -371,7 +371,7 @@ points with the given weights; that condition is specified as a hypothesis on those lemmas that require it. -/ def affineCombination (p : ι → P) : (ι → k) →ᵃ[k] P where - toFun w := s.weightedVSubOfPoint p (Classical.choice S.Nonempty) w +ᵥ Classical.choice S.Nonempty + toFun w := s.weightedVSubOfPoint p (Classical.choice S.nonempty) w +ᵥ Classical.choice S.nonempty linear := s.weightedVSub p map_vadd' w₁ w₂ := by simp_rw [vadd_vadd, weightedVSub, vadd_eq_add, LinearMap.map_add] #align finset.affine_combination Finset.affineCombination @@ -395,7 +395,7 @@ point with then using `weightedVSubOfPoint_apply`. -/ theorem affineCombination_apply (w : ι → k) (p : ι → P) : (s.affineCombination k p) w = - s.weightedVSubOfPoint p (Classical.choice S.Nonempty) w +ᵥ Classical.choice S.Nonempty := + s.weightedVSubOfPoint p (Classical.choice S.nonempty) w +ᵥ Classical.choice S.nonempty := rfl #align finset.affine_combination_apply Finset.affineCombination_apply @@ -438,8 +438,8 @@ theorem attach_affineCombination_of_injective [DecidableEq P] (s : Finset P) (w s.attach.affineCombination k f (w ∘ f) = (image f univ).affineCombination k id w := by simp only [affineCombination, weightedVSubOfPoint_apply, id.def, vadd_right_cancel_iff, Function.comp_apply, AffineMap.coe_mk] - let g₁ : s → V := fun i => w (f i) • (f i -ᵥ Classical.choice S.Nonempty) - let g₂ : P → V := fun i => w i • (i -ᵥ Classical.choice S.Nonempty) + let g₁ : s → V := fun i => w (f i) • (f i -ᵥ Classical.choice S.nonempty) + let g₂ : P → V := fun i => w i • (i -ᵥ Classical.choice S.nonempty) change univ.sum g₁ = (image f univ).sum g₂ have hgf : g₁ = g₂ ∘ f := by ext @@ -633,8 +633,8 @@ variable {k V} theorem map_affineCombination {V₂ P₂ : Type*} [AddCommGroup V₂] [Module k V₂] [AffineSpace V₂ P₂] (p : ι → P) (w : ι → k) (hw : s.sum w = 1) (f : P →ᵃ[k] P₂) : f (s.affineCombination k p w) = s.affineCombination k (f ∘ p) w := by - have b := Classical.choice (inferInstance : AffineSpace V P).Nonempty - have b₂ := Classical.choice (inferInstance : AffineSpace V₂ P₂).Nonempty + have b := Classical.choice (inferInstance : AffineSpace V P).nonempty + have b₂ := Classical.choice (inferInstance : AffineSpace V₂ P₂).nonempty rw [s.affineCombination_eq_weightedVSubOfPoint_vadd_of_sum_eq_one w p hw b, s.affineCombination_eq_weightedVSubOfPoint_vadd_of_sum_eq_one w (f ∘ p) hw b₂, ← s.weightedVSubOfPoint_vadd_eq_of_sum_eq_one w (f ∘ p) hw (f b) b₂] diff --git a/Mathlib/LinearAlgebra/AffineSpace/Independent.lean b/Mathlib/LinearAlgebra/AffineSpace/Independent.lean index 94bea196e2f64..71d355e43e595 100644 --- a/Mathlib/LinearAlgebra/AffineSpace/Independent.lean +++ b/Mathlib/LinearAlgebra/AffineSpace/Independent.lean @@ -551,7 +551,7 @@ theorem exists_subset_affineIndependent_affineSpan_eq_top {s : Set P} (h : AffineIndependent k (fun p => p : s → P)) : ∃ t : Set P, s ⊆ t ∧ AffineIndependent k (fun p => p : t → P) ∧ affineSpan k t = ⊤ := by rcases s.eq_empty_or_nonempty with (rfl | ⟨p₁, hp₁⟩) - · have p₁ : P := AddTorsor.Nonempty.some + · have p₁ : P := AddTorsor.nonempty.some let hsv := Basis.ofVectorSpace k V have hsvi := hsv.linearIndependent have hsvt := hsv.span_eq @@ -805,7 +805,7 @@ instance [Inhabited P] : 
Inhabited (Simplex k P 0) := ⟨mkOfPoint k default⟩ instance nonempty : Nonempty (Simplex k P 0) := - ⟨mkOfPoint k <| AddTorsor.Nonempty.some⟩ + ⟨mkOfPoint k <| AddTorsor.nonempty.some⟩ #align affine.simplex.nonempty Affine.Simplex.nonempty variable {k} diff --git a/Mathlib/LinearAlgebra/Charpoly/ToMatrix.lean b/Mathlib/LinearAlgebra/Charpoly/ToMatrix.lean index fb7e98829ffe5..6e43a7308660a 100644 --- a/Mathlib/LinearAlgebra/Charpoly/ToMatrix.lean +++ b/Mathlib/LinearAlgebra/Charpoly/ToMatrix.lean @@ -36,7 +36,6 @@ namespace LinearMap section Basic -set_option maxHeartbeats 250000 in /-- `charpoly f` is the characteristic polynomial of the matrix of `f` in any basis. -/ @[simp] theorem charpoly_toMatrix {ι : Type w} [DecidableEq ι] [Fintype ι] (b : Basis ι R M) : @@ -68,7 +67,8 @@ theorem charpoly_toMatrix {ι : Type w} [DecidableEq ι] [Fintype ι] (b : Basis C.mapMatrix (φ₁ P) * C.mapMatrix A' * C.mapMatrix (φ₃ Q)) := by rw [Matrix.mul_assoc ((scalar ι') X), hPQ, Matrix.mul_one] _ = det (C.mapMatrix (φ₁ P) * scalar ι' X * C.mapMatrix (φ₃ Q) - - C.mapMatrix (φ₁ P) * C.mapMatrix A' * C.mapMatrix (φ₃ Q)) := by simp + C.mapMatrix (φ₁ P) * C.mapMatrix A' * C.mapMatrix (φ₃ Q)) := by + rw [scalar_commute _ commute_X] _ = det (C.mapMatrix (φ₁ P) * (scalar ι' X - C.mapMatrix A') * C.mapMatrix (φ₃ Q)) := by rw [← Matrix.sub_mul, ← Matrix.mul_sub] _ = det (C.mapMatrix (φ₁ P)) * det (scalar ι' X - C.mapMatrix A') * det (C.mapMatrix (φ₃ Q)) := diff --git a/Mathlib/LinearAlgebra/LinearIndependent.lean b/Mathlib/LinearAlgebra/LinearIndependent.lean index b9bf2cfb15c2c..2a08a3178440e 100644 --- a/Mathlib/LinearAlgebra/LinearIndependent.lean +++ b/Mathlib/LinearAlgebra/LinearIndependent.lean @@ -9,6 +9,7 @@ import Mathlib.LinearAlgebra.Prod import Mathlib.SetTheory.Cardinal.Basic import Mathlib.Tactic.FinCases import Mathlib.Tactic.LinearCombination +import Mathlib.Lean.Expr.ExtraRecognizers #align_import linear_algebra.linear_independent from "leanprover-community/mathlib"@"9d684a893c52e1d6692a504a118bfccbae04feeb" @@ -100,6 +101,26 @@ def LinearIndependent : Prop := LinearMap.ker (Finsupp.total ι M R v) = ⊥ #align linear_independent LinearIndependent +open Lean PrettyPrinter.Delaborator SubExpr in +/-- Delaborator for `LinearIndependent` that suggests pretty printing with type hints +in case the family of vectors is over a `Set`. + +Type hints look like `LinearIndependent fun (v : ↑s) => ↑v` or `LinearIndependent (ι := ↑s) f`, +depending on whether the family is a lambda expression or not. -/ +@[delab app.LinearIndependent] +def delabLinearIndependent : Delab := + whenPPOption getPPNotation <| + whenNotPPOption getPPAnalysisSkip <| + withOptionAtCurrPos `pp.analysis.skip true do + let e ← getExpr + guard <| e.isAppOfArity ``LinearIndependent 7 + let some _ := (e.getArg! 0).coeTypeSet? | failure + let optionsPerPos ← if (e.getArg! 
3).isLambda then + withNaryArg 3 do return (← read).optionsPerPos.setBool (← getPos) pp.funBinderTypes.name true + else + withNaryArg 0 do return (← read).optionsPerPos.setBool (← getPos) `pp.analysis.namedArg true + withTheReader Context ({· with optionsPerPos}) delab + variable {R} {v} theorem linearIndependent_iff : LinearIndependent R v ↔ ∀ l, Finsupp.total ι M R v l = 0 → l = 0 := diff --git a/Mathlib/LinearAlgebra/Matrix/Charpoly/Basic.lean b/Mathlib/LinearAlgebra/Matrix/Charpoly/Basic.lean index ac7514701ceeb..4a8a4a3b438e7 100644 --- a/Mathlib/LinearAlgebra/Matrix/Charpoly/Basic.lean +++ b/Mathlib/LinearAlgebra/Matrix/Charpoly/Basic.lean @@ -49,22 +49,23 @@ def charmatrix (M : Matrix n n R) : Matrix n n R[X] := #align charmatrix charmatrix theorem charmatrix_apply (M : Matrix n n R) (i j : n) : - charmatrix M i j = X * (1 : Matrix n n R[X]) i j - C (M i j) := + charmatrix M i j = (Matrix.diagonal fun _ : n => X) i j - C (M i j) := rfl #align charmatrix_apply charmatrix_apply @[simp] theorem charmatrix_apply_eq (M : Matrix n n R) (i : n) : charmatrix M i i = (X : R[X]) - C (M i i) := by - simp only [charmatrix, RingHom.mapMatrix_apply, sub_apply, scalar_apply_eq, map_apply] + simp only [charmatrix, RingHom.mapMatrix_apply, sub_apply, scalar_apply, map_apply, + diagonal_apply_eq] #align charmatrix_apply_eq charmatrix_apply_eq @[simp] theorem charmatrix_apply_ne (M : Matrix n n R) (i j : n) (h : i ≠ j) : charmatrix M i j = -C (M i j) := by - simp only [charmatrix, RingHom.mapMatrix_apply, sub_apply, scalar_apply_ne _ _ _ h, map_apply, - sub_eq_neg_self] + simp only [charmatrix, RingHom.mapMatrix_apply, sub_apply, scalar_apply, diagonal_apply_ne _ h, + map_apply, sub_eq_neg_self] #align charmatrix_apply_ne charmatrix_apply_ne theorem matPolyEquiv_charmatrix (M : Matrix n n R) : matPolyEquiv (charmatrix M) = X - C M := by diff --git a/Mathlib/LinearAlgebra/Matrix/Charpoly/Coeff.lean b/Mathlib/LinearAlgebra/Matrix/Charpoly/Coeff.lean index 20a00e89e17db..d4e116a3051e0 100644 --- a/Mathlib/LinearAlgebra/Matrix/Charpoly/Coeff.lean +++ b/Mathlib/LinearAlgebra/Matrix/Charpoly/Coeff.lean @@ -160,7 +160,8 @@ theorem trace_eq_neg_charpoly_coeff [Nonempty n] (M : Matrix n n R) : theorem matPolyEquiv_symm_map_eval (M : (Matrix n n R)[X]) (r : R) : (matPolyEquiv.symm M).map (eval r) = M.eval (scalar n r) := by suffices ((aeval r).mapMatrix.comp matPolyEquiv.symm.toAlgHom : (Matrix n n R)[X] →ₐ[R] _) = - (eval₂AlgHom' (AlgHom.id R _) (scalar n r) fun x => (scalar.commute _ _).symm) from + (eval₂AlgHom' (AlgHom.id R _) (scalar n r) + fun x => (scalar_commute _ (Commute.all _) _).symm) from FunLike.congr_fun this M ext : 1 · ext M : 1 @@ -249,7 +250,8 @@ theorem coeff_charpoly_mem_ideal_pow {I : Ideal R} (h : ∀ i j, M i j ∈ I) (k apply coeff_prod_mem_ideal_pow_tsub rintro i - (_ | k) · rw [Nat.zero_eq] -- porting note: `rw [Nat.zero_eq]` was not present - rw [tsub_zero, pow_one, charmatrix_apply, coeff_sub, coeff_X_mul_zero, coeff_C_zero, zero_sub] + rw [tsub_zero, pow_one, charmatrix_apply, coeff_sub, ←smul_one_eq_diagonal, smul_apply, + smul_eq_mul, coeff_X_mul_zero, coeff_C_zero, zero_sub] apply neg_mem -- porting note: was `rw [neg_mem_iff]`, but Lean could not synth `NegMemClass` exact h (c i) i · rw [Nat.succ_eq_one_add, tsub_self_add, pow_zero, Ideal.one_eq_top] @@ -283,14 +285,15 @@ lemma reverse_charpoly (M : Matrix n n R) : have hp : toLaurentAlg M.charpoly = p := by simp [charpoly, charmatrix, AlgHom.map_det, map_sub, map_smul'] have hq : toLaurentAlg M.charpolyRev = q := by - simp 
[charpolyRev, AlgHom.map_det, map_sub, map_smul'] + simp [charpolyRev, AlgHom.map_det, map_sub, map_smul', smul_eq_diagonal_mul] suffices : t_inv ^ Fintype.card n * p = invert q · apply toLaurent_injective rwa [toLaurent_reverse, ← coe_toLaurentAlg, hp, hq, ← involutive_invert.injective.eq_iff, invert.map_mul, involutive_invert p, charpoly_natDegree_eq_dim, ← mul_one (Fintype.card n : ℤ), ← T_pow, invert.map_pow, invert_T, mul_comm] - rw [← det_smul, smul_sub, coe_scalar, ← smul_assoc, smul_eq_mul, ht, one_smul, invert.map_det] - simp [map_smul'] + rw [← det_smul, smul_sub, scalar_apply, ← diagonal_smul, Pi.smul_def, smul_eq_mul, ht, + diagonal_one, invert.map_det] + simp [map_smul', smul_eq_diagonal_mul] @[simp] lemma eval_charpolyRev : eval 0 M.charpolyRev = 1 := by diff --git a/Mathlib/LinearAlgebra/Matrix/Charpoly/LinearMap.lean b/Mathlib/LinearAlgebra/Matrix/Charpoly/LinearMap.lean index 16250022f6142..f11afc36fe559 100644 --- a/Mathlib/LinearAlgebra/Matrix/Charpoly/LinearMap.lean +++ b/Mathlib/LinearAlgebra/Matrix/Charpoly/LinearMap.lean @@ -146,6 +146,10 @@ theorem Matrix.Represents.smul {A : Matrix ι ι R} {f : Module.End R M} (h : A. rw [SMulHomClass.map_smul, SMulHomClass.map_smul, h] #align matrix.represents.smul Matrix.Represents.smul +theorem Matrix.Represents.algebraMap (r : R) : + (algebraMap _ (Matrix ι ι R) r).Represents b (algebraMap _ (Module.End R M) r) := by + simpa only [Algebra.algebraMap_eq_smul_one] using Matrix.Represents.one.smul r + theorem Matrix.Represents.eq {A : Matrix ι ι R} {f f' : Module.End R M} (h : A.Represents b f) (h' : A.Represents b f') : f = f' := PiToModule.fromEnd_injective R b hb (h.symm.trans h') @@ -161,7 +165,7 @@ def Matrix.isRepresentation : Subalgebra R (Matrix ι ι R) where one_mem' := ⟨1, Matrix.Represents.one⟩ add_mem' := fun ⟨f₁, e₁⟩ ⟨f₂, e₂⟩ => ⟨f₁ + f₂, e₁.add e₂⟩ zero_mem' := ⟨0, Matrix.Represents.zero⟩ - algebraMap_mem' r := ⟨r • (1 : Module.End R M), Matrix.Represents.one.smul r⟩ + algebraMap_mem' r := ⟨algebraMap _ _ r, .algebraMap _⟩ #align matrix.is_representation Matrix.isRepresentation /-- The map sending a matrix to the endomorphism it represents. This is an `R`-algebra morphism. -/ @@ -173,7 +177,7 @@ noncomputable def Matrix.isRepresentation.toEnd : Matrix.isRepresentation R b map_zero' := (0 : Matrix.isRepresentation R b).2.choose_spec.eq hb Matrix.Represents.zero map_add' A₁ A₂ := (A₁ + A₂).2.choose_spec.eq hb (A₁.2.choose_spec.add A₂.2.choose_spec) commutes' r := - (r • (1 : Matrix.isRepresentation R b)).2.choose_spec.eq hb (Matrix.Represents.one.smul r) + (algebraMap _ (Matrix.isRepresentation R b) r).2.choose_spec.eq hb (.algebraMap r) #align matrix.is_representation.to_End Matrix.isRepresentation.toEnd theorem Matrix.isRepresentation.toEnd_represents (A : Matrix.isRepresentation R b) : diff --git a/Mathlib/LinearAlgebra/Matrix/Circulant.lean b/Mathlib/LinearAlgebra/Matrix/Circulant.lean index 1ed889e2fc160..6f27198c5f261 100644 --- a/Mathlib/LinearAlgebra/Matrix/Circulant.lean +++ b/Mathlib/LinearAlgebra/Matrix/Circulant.lean @@ -171,7 +171,7 @@ theorem circulant_single_one (α n) [Zero α] [One α] [DecidableEq n] [AddGroup theorem circulant_single (n) [Semiring α] [DecidableEq n] [AddGroup n] [Fintype n] (a : α) : circulant (Pi.single 0 a : n → α) = scalar n a := by ext i j - simp [Pi.single_apply, one_apply, sub_eq_zero] + simp [Pi.single_apply, diagonal_apply, sub_eq_zero] #align matrix.circulant_single Matrix.circulant_single /-- Note we use `↑i = 0` instead of `i = 0` as `Fin 0` has no `0`. 
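The `Charpoly/Basic`, `Charpoly/Coeff`, and `Circulant` hunks above (and the `ToLin` hunk just below) all adapt to one underlying change: the `X`-part of `charmatrix` and scalar multiples of matrices are now expressed through `Matrix.diagonal`, so the proofs switch from `scalar_apply_eq`/`scalar_apply_ne` and `coe_scalar` to `scalar_apply`, `diagonal_apply_eq`/`diagonal_apply_ne`, `smul_eq_diagonal_mul`, and `smul_one_eq_diagonal`. As a worked restatement of the rewritten `charmatrix_apply` lemmas (a sketch of what those statements already say, nothing beyond them):

```latex
% Entrywise description of charmatrix after the diagonal-based rewrite.
(\operatorname{charmatrix} M)_{ij}
  = \bigl(\operatorname{diagonal}(\_ \mapsto X)\bigr)_{ij} - C(M_{ij})
  = \begin{cases} X - C(M_{ii}) & \text{if } i = j, \\ -\,C(M_{ij}) & \text{if } i \neq j. \end{cases}
```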
diff --git a/Mathlib/LinearAlgebra/Matrix/ToLin.lean b/Mathlib/LinearAlgebra/Matrix/ToLin.lean index 13cc00a9731a5..e85545fb8f914 100644 --- a/Mathlib/LinearAlgebra/Matrix/ToLin.lean +++ b/Mathlib/LinearAlgebra/Matrix/ToLin.lean @@ -410,7 +410,7 @@ theorem LinearMap.toMatrix'_mul [Fintype m] [DecidableEq m] (f g : (m → R) → @[simp] theorem LinearMap.toMatrix'_algebraMap (x : R) : LinearMap.toMatrix' (algebraMap R (Module.End R (n → R)) x) = scalar n x := by - simp [Module.algebraMap_end_eq_smul_id] + simp [Module.algebraMap_end_eq_smul_id, smul_eq_diagonal_mul] #align linear_map.to_matrix'_algebra_map LinearMap.toMatrix'_algebraMap theorem Matrix.ker_toLin'_eq_bot_iff {M : Matrix n n R} : @@ -661,7 +661,7 @@ lemma LinearMap.toMatrix_pow (f : M₁ →ₗ[R] M₁) (k : ℕ) : @[simp] theorem LinearMap.toMatrix_algebraMap (x : R) : LinearMap.toMatrix v₁ v₁ (algebraMap R (Module.End R M₁) x) = scalar n x := by - simp [Module.algebraMap_end_eq_smul_id, LinearMap.toMatrix_id] + simp [Module.algebraMap_end_eq_smul_id, LinearMap.toMatrix_id, smul_eq_diagonal_mul] #align linear_map.to_matrix_algebra_map LinearMap.toMatrix_algebraMap theorem LinearMap.toMatrix_mulVec_repr (f : M₁ →ₗ[R] M₂) (x : M₁) : diff --git a/Mathlib/LinearAlgebra/TensorProduct.lean b/Mathlib/LinearAlgebra/TensorProduct.lean index f16d5d672c804..844a1c2ec67bf 100644 --- a/Mathlib/LinearAlgebra/TensorProduct.lean +++ b/Mathlib/LinearAlgebra/TensorProduct.lean @@ -88,11 +88,6 @@ namespace TensorProduct section Module --- porting note: This is added as a local instance for `SMul.aux`. --- For some reason type-class inference in Lean 3 unfolded this definition. -def addMonoid : AddMonoid (M ⊗[R] N) := - { (addConGen (TensorProduct.Eqv R M N)).addMonoid with } - protected instance add : Add (M ⊗[R] N) := (addConGen (TensorProduct.Eqv R M N)).hasAdd @@ -143,6 +138,40 @@ protected theorem induction_on {motive : M ⊗[R] N → Prop} (z : M ⊗[R] N) exact add _ _ (tmul ..) ih #align tensor_product.induction_on TensorProduct.induction_on +/-- Lift a map that is additive in both arguments to the tensor product, provided scalar +multiplication in either argument is equivalent. + +Note that strictly the first action should be a right-action by `R`, but for now `R` is commutative +so it doesn't matter. -/ +-- TODO: use this to implement `lift` and `SMul.aux`. For now we do not do this as it causes +-- performance issues elsewhere. 
+def liftAddHom (f : M →+ N →+ P) + (hf : ∀ (r : R) (m : M) (n : N), f (r • m) n = f m (r • n)) : + M ⊗[R] N →+ P := + (addConGen (TensorProduct.Eqv R M N)).lift (FreeAddMonoid.lift (fun mn : M × N => f mn.1 mn.2)) <| + AddCon.addConGen_le fun x y hxy => + match x, y, hxy with + | _, _, .of_zero_left n => + (AddCon.ker_rel _).2 <| by simp_rw [map_zero, FreeAddMonoid.lift_eval_of, map_zero, + AddMonoidHom.zero_apply] + | _, _, .of_zero_right m => + (AddCon.ker_rel _).2 <| by simp_rw [map_zero, FreeAddMonoid.lift_eval_of, map_zero] + | _, _, .of_add_left m₁ m₂ n => + (AddCon.ker_rel _).2 <| by simp_rw [map_add, FreeAddMonoid.lift_eval_of, map_add, + AddMonoidHom.add_apply] + | _, _, .of_add_right m n₁ n₂ => + (AddCon.ker_rel _).2 <| by simp_rw [map_add, FreeAddMonoid.lift_eval_of, map_add] + | _, _, .of_smul s m n => + (AddCon.ker_rel _).2 <| by rw [FreeAddMonoid.lift_eval_of, FreeAddMonoid.lift_eval_of, hf] + | _, _, .add_comm x y => + (AddCon.ker_rel _).2 <| by simp_rw [map_add, add_comm] + +@[simp] +theorem liftAddHom_tmul (f : M →+ N →+ P) + (hf : ∀ (r : R) (m : M) (n : N), f (r • m) n = f m (r • n)) (m : M) (n : N) : + liftAddHom f hf (m ⊗ₜ n) = f m n := + rfl + variable (M) @[simp] @@ -216,12 +245,16 @@ theorem smul_tmul [DistribMulAction R' N] [CompatibleSMul R R' M N] (r : R') (m CompatibleSMul.smul_tmul _ _ _ #align tensor_product.smul_tmul TensorProduct.smul_tmul -attribute [local instance] addMonoid +-- porting note: This is added as a local instance for `SMul.aux`. +-- For some reason type-class inference in Lean 3 unfolded this definition. +private def addMonoidWithWrongNSMul : AddMonoid (M ⊗[R] N) := + { (addConGen (TensorProduct.Eqv R M N)).addMonoid with } + +attribute [local instance] addMonoidWithWrongNSMul in /-- Auxiliary function to defining scalar multiplication on tensor product. -/ def SMul.aux {R' : Type*} [SMul R' M] (r : R') : FreeAddMonoid (M × N) →+ M ⊗[R] N := FreeAddMonoid.lift fun p : M × N => (r • p.1) ⊗ₜ p.2 #align tensor_product.smul.aux TensorProduct.SMul.aux -attribute [-instance] addMonoid theorem SMul.aux_of {R' : Type*} [SMul R' M] (r : R') (m : M) (n : N) : SMul.aux r (.of (m, n)) = (r • m) ⊗ₜ[R] n := diff --git a/Mathlib/LinearAlgebra/TensorProduct/Tower.lean b/Mathlib/LinearAlgebra/TensorProduct/Tower.lean index 7bb911dafa240..eadb31e280940 100644 --- a/Mathlib/LinearAlgebra/TensorProduct/Tower.lean +++ b/Mathlib/LinearAlgebra/TensorProduct/Tower.lean @@ -450,3 +450,54 @@ end CommSemiring end AlgebraTensorModule end TensorProduct + +namespace Submodule + +open TensorProduct + +variable {R M : Type*} (A : Type*) [CommSemiring R] [Semiring A] [Algebra R A] + [AddCommMonoid M] [Module R M] (p : Submodule R M) + +/-- If `A` is an `R`-algebra, any `R`-submodule `p` of an `R`-module `M` may be pushed forward to +an `A`-submodule of `A ⊗ M`. + +This "base change" operation is also known as "extension of scalars". 
-/ +def baseChange : Submodule A (A ⊗[R] M) := + span A <| p.map (TensorProduct.mk R A M 1) + +@[simp] +lemma baseChange_bot : (⊥ : Submodule R M).baseChange A = ⊥ := by simp [baseChange] + +@[simp] +lemma baseChange_top : (⊤ : Submodule R M).baseChange A = ⊤ := by + rw [baseChange, map_top, eq_top_iff'] + intro x + refine x.induction_on (by simp) (fun a y ↦ ?_) (fun _ _ ↦ Submodule.add_mem _) + rw [← mul_one a, ← smul_eq_mul, ← smul_tmul'] + refine smul_mem _ _ (subset_span ?_) + simp + +variable {A p} in +lemma tmul_mem_baseChange_of_mem (a : A) {m : M} (hm : m ∈ p) : + a ⊗ₜ[R] m ∈ p.baseChange A := by + rw [← mul_one a, ← smul_eq_mul, ← smul_tmul'] + exact smul_mem (baseChange A p) a (subset_span ⟨m, hm, rfl⟩) + +@[simp] +lemma baseChange_span (s : Set M) : + (span R s).baseChange A = span A (TensorProduct.mk R A M 1 '' s) := by + simp only [baseChange, map_coe] + refine le_antisymm (span_le.mpr ?_) (span_mono <| Set.image_subset _ subset_span) + rintro - ⟨m : M, hm : m ∈ span R s, rfl⟩ + apply span_induction (p := fun m' ↦ (1 : A) ⊗ₜ[R] m' ∈ span A (TensorProduct.mk R A M 1 '' s)) hm + · intro m hm + exact subset_span ⟨m, hm, rfl⟩ + · simp + · intro m₁ m₂ hm₁ hm₂ + rw [tmul_add] + exact Submodule.add_mem _ hm₁ hm₂ + · intro r m' hm' + rw [tmul_smul, ← one_smul A ((1 : A) ⊗ₜ[R] m'), ← smul_assoc] + exact smul_mem _ (r • 1) hm' + +end Submodule diff --git a/Mathlib/LinearAlgebra/Trace.lean b/Mathlib/LinearAlgebra/Trace.lean index 6364bca1a57d6..9a3d8b6b8a8f4 100644 --- a/Mathlib/LinearAlgebra/Trace.lean +++ b/Mathlib/LinearAlgebra/Trace.lean @@ -328,6 +328,15 @@ lemma trace_comp_eq_mul_of_commute_of_isNilpotent [IsReduced R] {f g : Module.En have : f ∘ₗ algebraMap R _ μ = μ • f := by ext; simp -- TODO Surely exists? rw [hμ, comp_add, map_add, hg, add_zero, this, LinearMap.map_smul, smul_eq_mul] +@[simp] +lemma trace_baseChange [Module.Free R M] [Module.Finite R M] + (f : M →ₗ[R] M) (A : Type*) [CommRing A] [Algebra R A] : + trace A _ (f.baseChange A) = algebraMap R A (trace R _ f) := by + let b := Module.Free.chooseBasis R M + let b' := Algebra.TensorProduct.basis A b + change _ = (algebraMap R A : R →+ A) _ + simp [trace_eq_matrix_trace R b, trace_eq_matrix_trace A b', AddMonoidHom.map_trace] + end end LinearMap diff --git a/Mathlib/Logic/Basic.lean b/Mathlib/Logic/Basic.lean index dc01311e30963..dfb637a0dd3ca 100644 --- a/Mathlib/Logic/Basic.lean +++ b/Mathlib/Logic/Basic.lean @@ -790,6 +790,12 @@ theorem exists_apply_eq (a : α) (b : β) : ∃ f : α → β, f a = b := ⟨fun ⟨fun ⟨_, ⟨a, ha⟩, hb⟩ ↦ ⟨a, ha.symm ▸ hb⟩, fun ⟨a, ha⟩ ↦ ⟨f a, ⟨a, rfl⟩, ha⟩⟩ #align exists_exists_eq_and exists_exists_eq_and +@[simp] theorem exists_exists_and_exists_and_eq_and {α β γ : Type*} + {f : α → β → γ} {p : α → Prop} {q : β → Prop} {r : γ → Prop} : + (∃ c, (∃ a, p a ∧ ∃ b, q b ∧ f a b = c) ∧ r c) ↔ ∃ a, p a ∧ ∃ b, q b ∧ r (f a b) := + ⟨fun ⟨_, ⟨a, ha, b, hb, hab⟩, hc⟩ ↦ ⟨a, ha, b, hb, hab.symm ▸ hc⟩, + fun ⟨a, ha, b, hb, hab⟩ ↦ ⟨f a b, ⟨a, ha, b, hb, rfl⟩, hab⟩⟩ + @[simp] theorem exists_or_eq_left (y : α) (p : α → Prop) : ∃ x : α, x = y ∨ p x := ⟨y, .inl rfl⟩ #align exists_or_eq_left exists_or_eq_left diff --git a/Mathlib/Logic/Small/Basic.lean b/Mathlib/Logic/Small/Basic.lean index 5a5fb6ac09305..7ae8cda23a4a5 100644 --- a/Mathlib/Logic/Small/Basic.lean +++ b/Mathlib/Logic/Small/Basic.lean @@ -3,21 +3,15 @@ Copyright (c) 2021 Scott Morrison. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. 
Authors: Scott Morrison -/ +import Mathlib.Logic.Small.Defs import Mathlib.Logic.Equiv.Set -import Mathlib.Tactic.PPWithUniv #align_import logic.small.basic from "leanprover-community/mathlib"@"d012cd09a9b256d870751284dd6a29882b0be105" /-! -# Small types +# Instances and theorems for `Small`. -A type is `w`-small if there exists an equivalence to some `S : Type w`. - -We provide a noncomputable model `Shrink α : Type w`, and `equivShrink α : α ≃ Shrink α`. - -A subsingleton type is `w`-small for any `w`. - -If `α ≃ β`, then `Small.{w} α ↔ Small.{w} β`. +In particular we prove `small_of_injective` and `small_of_surjective`. -/ set_option autoImplicit true @@ -25,80 +19,10 @@ set_option autoImplicit true universe u w v v' -/-- A type is `Small.{w}` if there exists an equivalence to some `S : Type w`. --/ -@[mk_iff, pp_with_univ] -class Small (α : Type v) : Prop where - /-- If a type is `Small.{w}`, then there exists an equivalence with some `S : Type w` -/ - equiv_small : ∃ S : Type w, Nonempty (α ≃ S) -#align small Small - -/-- Constructor for `Small α` from an explicit witness type and equivalence. --/ -theorem Small.mk' {α : Type v} {S : Type w} (e : α ≃ S) : Small.{w} α := - ⟨⟨S, ⟨e⟩⟩⟩ -#align small.mk' Small.mk' - -/-- An arbitrarily chosen model in `Type w` for a `w`-small type. --/ -def Shrink (α : Type v) [Small.{w} α] : Type w := - Classical.choose (@Small.equiv_small α _) -#align shrink Shrink - -/-- The noncomputable equivalence between a `w`-small type and a model. --/ -noncomputable def equivShrink (α : Type v) [Small.{w} α] : α ≃ Shrink α := - Nonempty.some (Classical.choose_spec (@Small.equiv_small α _)) -#align equiv_shrink equivShrink - -@[ext] -theorem Shrink.ext {α : Type v} [Small.{w} α] {x y : Shrink α} - (w : (equivShrink _).symm x = (equivShrink _).symm y) : x = y := by - simpa using w - --- It would be nice to mark this as `aesop cases` if --- https://github.com/JLimperg/aesop/issues/59 --- is resolved. -@[eliminator] -protected noncomputable def Shrink.rec [Small.{w} α] {F : Shrink α → Sort v} - (h : ∀ X, F (equivShrink _ X)) : ∀ X, F X := - fun X => ((equivShrink _).apply_symm_apply X) ▸ (h _) - ---Porting note: Priority changed to 101 -instance (priority := 101) small_self (α : Type v) : Small.{v} α := - Small.mk' <| Equiv.refl α -#align small_self small_self - -theorem small_map {α : Type*} {β : Type*} [hβ : Small.{w} β] (e : α ≃ β) : Small.{w} α := - let ⟨_, ⟨f⟩⟩ := hβ.equiv_small - Small.mk' (e.trans f) -#align small_map small_map - -theorem small_lift (α : Type u) [hα : Small.{v} α] : Small.{max v w} α := - let ⟨⟨_, ⟨f⟩⟩⟩ := hα - Small.mk' <| f.trans (Equiv.ulift.{w}).symm -#align small_lift small_lift - -instance (priority := 100) small_max (α : Type v) : Small.{max w v} α := - small_lift.{v, w} α -#align small_max small_max - -instance small_ulift (α : Type u) [Small.{v} α] : Small.{v} (ULift.{w} α) := - small_map Equiv.ulift -#align small_ulift small_ulift - -theorem small_type : Small.{max (u + 1) v} (Type u) := - small_max.{max (u + 1) v} _ -#align small_type small_type - section open Classical -theorem small_congr {α : Type*} {β : Type*} (e : α ≃ β) : Small.{w} α ↔ Small.{w} β := - ⟨fun h => @small_map _ _ h e.symm, fun h => @small_map _ _ h e⟩ -#align small_congr small_congr - instance small_subtype (α : Type v) [Small.{w} α] (P : α → Prop) : Small.{w} { x // P x } := small_map (equivShrink α).subtypeEquivOfSubtype' #align small_subtype small_subtype @@ -139,22 +63,15 @@ theorem small_of_injective_of_exists {α : Type v} {β : Type w} {γ : Type v'} /-! 
We don't define `small_of_fintype` or `small_of_countable` in this file, -to keep imports to `logic` to a minimum. +to keep imports to `Logic` to a minimum. -/ - instance small_Pi {α} (β : α → Type*) [Small.{w} α] [∀ a, Small.{w} (β a)] : Small.{w} (∀ a, β a) := ⟨⟨∀ a' : Shrink α, Shrink (β ((equivShrink α).symm a')), ⟨Equiv.piCongr (equivShrink α) fun a => by simpa using equivShrink (β a)⟩⟩⟩ #align small_Pi small_Pi -instance small_sigma {α} (β : α → Type*) [Small.{w} α] [∀ a, Small.{w} (β a)] : - Small.{w} (Σa, β a) := - ⟨⟨Σa' : Shrink α, Shrink (β ((equivShrink α).symm a')), - ⟨Equiv.sigmaCongr (equivShrink α) fun a => by simpa using equivShrink (β a)⟩⟩⟩ -#align small_sigma small_sigma - instance small_prod {α β} [Small.{w} α] [Small.{w} β] : Small.{w} (α × β) := ⟨⟨Shrink α × Shrink β, ⟨Equiv.prodCongr (equivShrink α) (equivShrink β)⟩⟩⟩ #align small_prod small_prod @@ -177,12 +94,4 @@ instance small_image {α : Type v} {β : Type w} (f : α → β) (S : Set α) [S small_of_surjective Set.surjective_onto_image #align small_image small_image -theorem not_small_type : ¬Small.{u} (Type max u v) - | ⟨⟨S, ⟨e⟩⟩⟩ => - @Function.cantor_injective (Σα, e.symm α) (fun a => ⟨_, cast (e.3 _).symm a⟩) fun a b e => by - dsimp at e - injection e with h₁ h₂ - simpa using h₂ -#align not_small_type not_small_type - end diff --git a/Mathlib/Logic/Small/Defs.lean b/Mathlib/Logic/Small/Defs.lean new file mode 100644 index 0000000000000..7aa4431008634 --- /dev/null +++ b/Mathlib/Logic/Small/Defs.lean @@ -0,0 +1,126 @@ +/- +Copyright (c) 2021 Scott Morrison. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Scott Morrison +-/ +import Mathlib.Logic.Equiv.Defs +import Mathlib.Tactic.MkIffOfInductiveProp +import Mathlib.Tactic.PPWithUniv + +#align_import logic.small.basic from "leanprover-community/mathlib"@"d012cd09a9b256d870751284dd6a29882b0be105" + +/-! +# Small types + +A type is `w`-small if there exists an equivalence to some `S : Type w`. + +We provide a noncomputable model `Shrink α : Type w`, and `equivShrink α : α ≃ Shrink α`. + +A subsingleton type is `w`-small for any `w`. + +If `α ≃ β`, then `Small.{w} α ↔ Small.{w} β`. + +See `Mathlib.Logic.Small.Basic` for further instances and theorems. +-/ + +set_option autoImplicit true + + +universe u w v v' + +/-- A type is `Small.{w}` if there exists an equivalence to some `S : Type w`. +-/ +@[mk_iff, pp_with_univ] +class Small (α : Type v) : Prop where + /-- If a type is `Small.{w}`, then there exists an equivalence with some `S : Type w` -/ + equiv_small : ∃ S : Type w, Nonempty (α ≃ S) +#align small Small + +/-- Constructor for `Small α` from an explicit witness type and equivalence. +-/ +theorem Small.mk' {α : Type v} {S : Type w} (e : α ≃ S) : Small.{w} α := + ⟨⟨S, ⟨e⟩⟩⟩ +#align small.mk' Small.mk' + +/-- An arbitrarily chosen model in `Type w` for a `w`-small type. +-/ +@[pp_with_univ] +def Shrink (α : Type v) [Small.{w} α] : Type w := + Classical.choose (@Small.equiv_small α _) +#align shrink Shrink + +/-- The noncomputable equivalence between a `w`-small type and a model. 
+-/ +noncomputable def equivShrink (α : Type v) [Small.{w} α] : α ≃ Shrink α := + Nonempty.some (Classical.choose_spec (@Small.equiv_small α _)) +#align equiv_shrink equivShrink + +@[ext] +theorem Shrink.ext {α : Type v} [Small.{w} α] {x y : Shrink α} + (w : (equivShrink _).symm x = (equivShrink _).symm y) : x = y := by + simpa using w + +-- It would be nice to mark this as `aesop cases` if +-- https://github.com/JLimperg/aesop/issues/59 +-- is resolved. +@[eliminator] +protected noncomputable def Shrink.rec [Small.{w} α] {F : Shrink α → Sort v} + (h : ∀ X, F (equivShrink _ X)) : ∀ X, F X := + fun X => ((equivShrink _).apply_symm_apply X) ▸ (h _) + +--Porting note: Priority changed to 101 +instance (priority := 101) small_self (α : Type v) : Small.{v} α := + Small.mk' <| Equiv.refl α +#align small_self small_self + +theorem small_map {α : Type*} {β : Type*} [hβ : Small.{w} β] (e : α ≃ β) : Small.{w} α := + let ⟨_, ⟨f⟩⟩ := hβ.equiv_small + Small.mk' (e.trans f) +#align small_map small_map + +theorem small_lift (α : Type u) [hα : Small.{v} α] : Small.{max v w} α := + let ⟨⟨_, ⟨f⟩⟩⟩ := hα + Small.mk' <| f.trans (Equiv.ulift.{w}).symm +#align small_lift small_lift + +/- This was an instance but useless due to https://github.com/leanprover/lean4/issues/2297. -/ +lemma small_max (α : Type v) : Small.{max w v} α := + small_lift.{v, w} α +#align small_max small_max + +instance small_zero (α : Type) : Small.{w} α := small_max α + +instance (priority := 100) small_succ (α : Type v) : Small.{v+1} α := + small_lift.{v, v+1} α + +instance small_ulift (α : Type u) [Small.{v} α] : Small.{v} (ULift.{w} α) := + small_map Equiv.ulift +#align small_ulift small_ulift + +theorem small_type : Small.{max (u + 1) v} (Type u) := + small_max.{max (u + 1) v} _ +#align small_type small_type + +section + +open Classical + +theorem small_congr {α : Type*} {β : Type*} (e : α ≃ β) : Small.{w} α ↔ Small.{w} β := + ⟨fun h => @small_map _ _ h e.symm, fun h => @small_map _ _ h e⟩ +#align small_congr small_congr + +instance small_sigma {α} (β : α → Type*) [Small.{w} α] [∀ a, Small.{w} (β a)] : + Small.{w} (Σa, β a) := + ⟨⟨Σa' : Shrink α, Shrink (β ((equivShrink α).symm a')), + ⟨Equiv.sigmaCongr (equivShrink α) fun a => by simpa using equivShrink (β a)⟩⟩⟩ +#align small_sigma small_sigma + +theorem not_small_type : ¬Small.{u} (Type max u v) + | ⟨⟨S, ⟨e⟩⟩⟩ => + @Function.cantor_injective (Σα, e.symm α) (fun a => ⟨_, cast (e.3 _).symm a⟩) fun a b e => by + dsimp at e + injection e with h₁ h₂ + simpa using h₂ +#align not_small_type not_small_type + +end diff --git a/Mathlib/Logic/Small/Group.lean b/Mathlib/Logic/Small/Group.lean index 27eefebeee222..ca65dc37c13f1 100644 --- a/Mathlib/Logic/Small/Group.lean +++ b/Mathlib/Logic/Small/Group.lean @@ -3,7 +3,7 @@ Copyright (c) 2021 Scott Morrison. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Scott Morrison -/ -import Mathlib.Logic.Small.Basic +import Mathlib.Logic.Small.Defs import Mathlib.Logic.Equiv.TransferInstance /-! diff --git a/Mathlib/Logic/Small/Ring.lean b/Mathlib/Logic/Small/Ring.lean index e0c64d1f0c392..6abade1ce4c3a 100644 --- a/Mathlib/Logic/Small/Ring.lean +++ b/Mathlib/Logic/Small/Ring.lean @@ -3,7 +3,7 @@ Copyright (c) 2021 Scott Morrison. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Scott Morrison -/ -import Mathlib.Logic.Small.Basic +import Mathlib.Logic.Small.Defs import Mathlib.Logic.Equiv.TransferInstance /-! 
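The `Logic/Small` hunks above split the core `Small`/`Shrink` API out of `Logic/Small/Basic.lean` into the new `Logic/Small/Defs.lean`, demote `small_max` from an instance to a plain lemma (because of leanprover/lean4#2297), and add `small_zero` and `small_succ` as instances in its place. A minimal usage sketch, assuming only the declarations visible in the new `Defs` file above:

```lean
import Mathlib.Logic.Small.Defs

universe u v

-- These are still closed by instance search, via `small_self`/`small_zero` and the new
-- `small_succ` instance respectively.
example (α : Type) : Small.{0} α := inferInstance
example (α : Type 3) : Small.{4} α := inferInstance

-- `small_max` is no longer an instance, so goals of this shape have to be discharged by
-- hand; this mirrors the proof of `univLE_max` in the `Logic/UnivLE.lean` hunk below.
example (α : Type u) : Small.{max u v} α := small_max.{v} α
```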
diff --git a/Mathlib/Logic/UnivLE.lean b/Mathlib/Logic/UnivLE.lean index 7bf39d4553306..022ac1934516c 100644 --- a/Mathlib/Logic/UnivLE.lean +++ b/Mathlib/Logic/UnivLE.lean @@ -3,18 +3,15 @@ Copyright (c) 2023 Scott Morrison. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Scott Morrison -/ -import Mathlib.Logic.Small.Basic +import Mathlib.Logic.Small.Defs /-! # UnivLE A proposition expressing a universe inequality. `UnivLE.{u, v}` expresses that `u ≤ v`, -in the form `∀ α : Type max u v, Small.{v} α`. +in the form `∀ α : Type u, Small.{v} α`. -See the doc-string for the comparison with an alternative weaker definition. - -See also `Mathlib.CategoryTheory.UnivLE` for the statement -`UnivLE.{u,v} ↔ EssSurj (uliftFunctor : Type v ⥤ Type max u v)`. +See the doc-string for the comparison with an alternative stronger definition. -/ set_option autoImplicit true @@ -24,30 +21,42 @@ noncomputable section /-- A class expressing a universe inequality. `UnivLE.{u, v}` expresses that `u ≤ v`. -There are (at least) two plausible definitions for `u ≤ v`: -* strong: `∀ α : Type max u v, Small.{v} α` -* weak: `∀ α : Type u, Small.{v} α` - -The weak definition has the advantage of being transitive. -However only under the strong definition do we have `Small.{v} ((α : Type u) → (β : Type v))`, -which is essential for proving that `Type v` has `Type u`-indexed limits when `u ≤ v`. +There used to be a stronger definition `∀ α : Type max u v, Small.{v} α` that immediately implies +`Small.{v} ((α : Type u) → (β : Type v))` which is essential for proving that `Type v` has +`Type u`-indexed limits when `u ≤ v`. However the current weaker condition +`∀ α : Type u, Small.{v} α` also implies the same, so we switched to use it for +its simplicity and transitivity. -The strong definition implies the weaker definition (see below), +The strong definition easily implies the weaker definition (see below), but we can not prove the reverse implication. This is because in Lean's type theory, while `max u v` is at least at big as `u` and `v`, it could be bigger than both! +See also `Mathlib.CategoryTheory.UnivLE` for the statement that the stronger definition is +equivalent to `EssSurj (uliftFunctor : Type v ⥤ Type max u v)`. -/ @[pp_with_univ] -abbrev UnivLE.{u, v} : Prop := ∀ α : Type max u v, Small.{v} α +abbrev UnivLE.{u, v} : Prop := ∀ α : Type u, Small.{v} α example : UnivLE.{u, u} := inferInstance example : UnivLE.{u, u+1} := inferInstance example : UnivLE.{0, u} := inferInstance -example : UnivLE.{u, max u v} := inferInstance +/- This is useless as an instance due to https://github.com/leanprover/lean4/issues/2297 -/ +theorem univLE_max : UnivLE.{u, max u v} := fun α ↦ small_max.{v} α + +theorem Small.trans_univLE.{u, v} (α : Type w) [hα : Small.{u} α] [h : UnivLE.{u, v}] : + Small.{v} α := + let ⟨β, ⟨f⟩⟩ := hα.equiv_small + let ⟨_, ⟨g⟩⟩ := (h β).equiv_small + ⟨_, ⟨f.trans g⟩⟩ + +theorem UnivLE.trans [UnivLE.{u, v}] [UnivLE.{v, w}] : UnivLE.{u, w} := + fun α ↦ Small.trans_univLE α + +/- This is the crucial instance that subsumes `univLE_max`. 
-/ +instance univLE_of_max [UnivLE.{max u v, v}] : UnivLE.{u, v} := @UnivLE.trans univLE_max.{v,u} ‹_› -instance [UnivLE.{u, v}] (α : Type u) : Small.{v} α := - ⟨Shrink.{v, max u v} (ULift.{v} α), - ⟨Equiv.ulift.symm.trans (equivShrink (ULift α))⟩⟩ +/- When `small_Pi` from `Mathlib.Logic.Small.Basic` is imported, we have : -/ +-- example (α : Type u) (β : Type v) [UnivLE.{u, v}] : Small.{v} (α → β) := inferInstance example : ¬ UnivLE.{u+1, u} := by simp only [Small_iff, not_forall, not_exists, not_nonempty_iff] diff --git a/Mathlib/Mathport/Rename.lean b/Mathlib/Mathport/Rename.lean index 0b3aa8c79294a..14c2ce5471dc1 100644 --- a/Mathlib/Mathport/Rename.lean +++ b/Mathlib/Mathport/Rename.lean @@ -54,11 +54,17 @@ def RenameMap.insert (m : RenameMap) (e : NameEntry) : RenameMap := /-- Look up a lean 4 name from the lean 3 name. Also return the `dubious` error message. -/ def RenameMap.find? (m : RenameMap) : Name → Option (String × Name) := m.toLean4.find? +-- TODO: upstream into core/std +instance [Inhabited α] : Inhabited (Thunk α) where + default := .pure default + /-- This extension stores the lookup data generated from `#align` commands. -/ -initialize renameExtension : SimplePersistentEnvExtension NameEntry RenameMap ← +-- wrap state in `Thunk` as downstream projects rarely actually query it; it only +-- gets queried when new `#align`s are added. +initialize renameExtension : SimplePersistentEnvExtension NameEntry (Thunk RenameMap) ← registerSimplePersistentEnvExtension { - addEntryFn := (·.insert) - addImportedFn := mkStateFromImportedEntries (·.insert) {} + addEntryFn := fun t e => t.map (·.insert e) + addImportedFn := fun es => ⟨fun _ => mkStateFromImportedEntries (·.insert) {} es⟩ } /-- Insert a new name alignment into the rename extension. -/ @@ -121,7 +127,7 @@ syntax (name := align) "#align " ident ppSpace ident : command /-- Checks that `id` has not already been `#align`ed or `#noalign`ed. -/ def ensureUnused [Monad m] [MonadEnv m] [MonadError m] (id : Name) : m Unit := do - if let some (_, n) := (renameExtension.getState (← getEnv)).toLean4.find? id then + if let some (_, n) := (renameExtension.getState (← getEnv)).get.toLean4.find? id then if n.isAnonymous then throwError "{id} has already been no-aligned" else @@ -191,7 +197,7 @@ syntax (name := lookup3) "#lookup3 " ident : command @[command_elab lookup3] def elabLookup3 : CommandElab | `(#lookup3%$tk $id3:ident) => do let n3 := id3.getId - let m := renameExtension.getState (← getEnv) + let m := renameExtension.getState (← getEnv) |>.get match m.find? 
n3 with | none => logInfoAt tk s!"name `{n3} not found" | some (dubious, n4) => do diff --git a/Mathlib/MeasureTheory/Constructions/BorelSpace/Basic.lean b/Mathlib/MeasureTheory/Constructions/BorelSpace/Basic.lean index d9ab3bae5a555..73de6ff1a4a1e 100644 --- a/Mathlib/MeasureTheory/Constructions/BorelSpace/Basic.lean +++ b/Mathlib/MeasureTheory/Constructions/BorelSpace/Basic.lean @@ -283,6 +283,12 @@ instance Subtype.borelSpace {α : Type*} [TopologicalSpace α] [MeasurableSpace ⟨by borelize α; symm; apply borel_comap⟩ #align subtype.borel_space Subtype.borelSpace +instance Countable.instBorelSpace [Countable α] [MeasurableSpace α] [MeasurableSingletonClass α] + [TopologicalSpace α] [DiscreteTopology α] : BorelSpace α := by + have : ∀ s, @MeasurableSet α inferInstance s := fun s ↦ s.to_countable.measurableSet + have : ∀ s, @MeasurableSet α (borel α) s := fun s ↦ measurableSet_generateFrom (isOpen_discrete s) + exact ⟨by aesop⟩ + instance Subtype.opensMeasurableSpace {α : Type*} [TopologicalSpace α] [MeasurableSpace α] [h : OpensMeasurableSpace α] (s : Set α) : OpensMeasurableSpace s := ⟨by diff --git a/Mathlib/MeasureTheory/Constructions/BorelSpace/Metrizable.lean b/Mathlib/MeasureTheory/Constructions/BorelSpace/Metrizable.lean index 2f8aa0b9e2d98..4c5345d85f132 100644 --- a/Mathlib/MeasureTheory/Constructions/BorelSpace/Metrizable.lean +++ b/Mathlib/MeasureTheory/Constructions/BorelSpace/Metrizable.lean @@ -5,6 +5,7 @@ Authors: Floris van Doorn -/ import Mathlib.MeasureTheory.Constructions.BorelSpace.Basic import Mathlib.Topology.Metrizable.Basic +import Mathlib.Topology.IndicatorConstPointwise #align_import measure_theory.constructions.borel_space.metrizable from "leanprover-community/mathlib"@"bf6a01357ff5684b1ebcd0f1a13be314fc82c0bf" @@ -12,7 +13,6 @@ import Mathlib.Topology.Metrizable.Basic # Measurable functions in (pseudo-)metrizable Borel spaces -/ - open Filter MeasureTheory TopologicalSpace open Classical Topology NNReal ENNReal MeasureTheory @@ -182,19 +182,20 @@ variable {ι : Type*} (L : Filter ι) [IsCountablyGenerated L] {As : ι → Set /-- If the indicator functions of measurable sets `Aᵢ` converge to the indicator function of a set `A` along a nontrivial countably generated filter, then `A` is also measurable. -/ lemma measurableSet_of_tendsto_indicator [NeBot L] (As_mble : ∀ i, MeasurableSet (As i)) - (h_lim : Tendsto (fun i ↦ (As i).indicator (1 : α → ℝ≥0∞)) L (𝓝 (A.indicator 1))) : + (h_lim : ∀ x, ∀ᶠ i in L, x ∈ As i ↔ x ∈ A) : MeasurableSet A := by simp_rw [← measurable_indicator_const_iff (1 : ℝ≥0∞)] at As_mble ⊢ - exact measurable_of_tendsto_ennreal' L As_mble h_lim + exact measurable_of_tendsto_ennreal' L As_mble + ((tendsto_indicator_const_iff_forall_eventually L (1 : ℝ≥0∞)).mpr h_lim) /-- If the indicator functions of a.e.-measurable sets `Aᵢ` converge a.e. to the indicator function of a set `A` along a nontrivial countably generated filter, then `A` is also a.e.-measurable. 
-/ lemma nullMeasurableSet_of_tendsto_indicator [NeBot L] {μ : Measure α} (As_mble : ∀ i, NullMeasurableSet (As i) μ) - (h_lim : ∀ᵐ x ∂μ, Tendsto (fun i ↦ (As i).indicator (1 : α → ℝ≥0∞) x) - L (𝓝 (A.indicator 1 x))) : + (h_lim : ∀ᵐ x ∂μ, ∀ᶠ i in L, x ∈ As i ↔ x ∈ A) : NullMeasurableSet A μ := by simp_rw [← aemeasurable_indicator_const_iff (1 : ℝ≥0∞)] at As_mble ⊢ - exact aemeasurable_of_tendsto_metrizable_ae L As_mble h_lim + apply aemeasurable_of_tendsto_metrizable_ae L As_mble + simpa [tendsto_indicator_const_apply_iff_eventually] using h_lim end TendstoIndicator diff --git a/Mathlib/MeasureTheory/Integral/Indicator.lean b/Mathlib/MeasureTheory/Integral/Indicator.lean index 723879d828e9e..0c43ee10dd403 100644 --- a/Mathlib/MeasureTheory/Integral/Indicator.lean +++ b/Mathlib/MeasureTheory/Integral/Indicator.lean @@ -42,21 +42,20 @@ tend to the measure of `A`. -/ lemma tendsto_measure_of_tendsto_indicator [NeBot L] {μ : Measure α} (As_mble : ∀ i, MeasurableSet (As i)) {B : Set α} (B_mble : MeasurableSet B) (B_finmeas : μ B ≠ ∞) (As_le_B : ∀ᶠ i in L, As i ⊆ B) - (h_lim : Tendsto (fun i ↦ (As i).indicator (1 : α → ℝ≥0∞)) L (𝓝 (A.indicator 1))) : + (h_lim : ∀ x, ∀ᶠ i in L, x ∈ As i ↔ x ∈ A) : Tendsto (fun i ↦ μ (As i)) L (𝓝 (μ A)) := by apply tendsto_measure_of_ae_tendsto_indicator L ?_ As_mble B_mble B_finmeas As_le_B - · exact eventually_of_forall (by simpa only [tendsto_pi_nhds] using h_lim) - · exact measurableSet_of_tendsto_indicator L As_mble h_lim + (ae_of_all μ h_lim) + exact measurableSet_of_tendsto_indicator L As_mble h_lim /-- If `μ` is a finite measure and the indicators of measurable sets `Aᵢ` tend pointwise to the indicator of a set `A`, then the measures `μ Aᵢ` tend to the measure `μ A`. -/ lemma tendsto_measure_of_tendsto_indicator_of_isFiniteMeasure [NeBot L] (μ : Measure α) [IsFiniteMeasure μ] (As_mble : ∀ i, MeasurableSet (As i)) - (h_lim : Tendsto (fun i ↦ (As i).indicator (1 : α → ℝ≥0∞)) L (𝓝 (A.indicator 1))) : + (h_lim : ∀ x, ∀ᶠ i in L, x ∈ As i ↔ x ∈ A) : Tendsto (fun i ↦ μ (As i)) L (𝓝 (μ A)) := by - apply tendsto_measure_of_ae_tendsto_indicator_of_isFiniteMeasure L ?_ As_mble - · exact eventually_of_forall (by simpa only [tendsto_pi_nhds] using h_lim) - · exact measurableSet_of_tendsto_indicator L As_mble h_lim + apply tendsto_measure_of_ae_tendsto_indicator_of_isFiniteMeasure L ?_ As_mble (ae_of_all μ h_lim) + exact measurableSet_of_tendsto_indicator L As_mble h_lim end TendstoIndicator -- section diff --git a/Mathlib/MeasureTheory/Integral/Lebesgue.lean b/Mathlib/MeasureTheory/Integral/Lebesgue.lean index 90f61f43a650d..45e3226467c92 100644 --- a/Mathlib/MeasureTheory/Integral/Lebesgue.lean +++ b/Mathlib/MeasureTheory/Integral/Lebesgue.lean @@ -7,6 +7,7 @@ import Mathlib.Dynamics.Ergodic.MeasurePreserving import Mathlib.MeasureTheory.Function.SimpleFunc import Mathlib.MeasureTheory.Measure.MutuallySingular import Mathlib.MeasureTheory.Measure.Count +import Mathlib.Topology.IndicatorConstPointwise #align_import measure_theory.integral.lebesgue from "leanprover-community/mathlib"@"c14c8fcde993801fca8946b0d80131a1a81d1520" @@ -1827,26 +1828,25 @@ the measures of `Aᵢ` tend to the measure of `A`. 
-/ lemma tendsto_measure_of_ae_tendsto_indicator {μ : Measure α} (A_mble : MeasurableSet A) (As_mble : ∀ i, MeasurableSet (As i)) {B : Set α} (B_mble : MeasurableSet B) (B_finmeas : μ B ≠ ∞) (As_le_B : ∀ᶠ i in L, As i ⊆ B) - (h_lim : ∀ᵐ x ∂μ, Tendsto (fun i ↦ (As i).indicator (1 : α → ℝ≥0∞) x) - L (𝓝 (A.indicator 1 x))) : + (h_lim : ∀ᵐ x ∂μ, ∀ᶠ i in L, x ∈ As i ↔ x ∈ A) : Tendsto (fun i ↦ μ (As i)) L (𝓝 (μ A)) := by simp_rw [← MeasureTheory.lintegral_indicator_one A_mble, ← MeasureTheory.lintegral_indicator_one (As_mble _)] refine tendsto_lintegral_filter_of_dominated_convergence (B.indicator (1 : α → ℝ≥0∞)) - (eventually_of_forall ?_) ?_ ?_ h_lim + (eventually_of_forall ?_) ?_ ?_ ?_ · exact fun i ↦ Measurable.indicator measurable_const (As_mble i) · filter_upwards [As_le_B] with i hi exact eventually_of_forall (fun x ↦ indicator_le_indicator_of_subset hi (by simp) x) · rwa [← lintegral_indicator_one B_mble] at B_finmeas + · simpa only [show (OfNat.ofNat 1 : α → ℝ≥0∞) = (fun _ ↦ 1) by rfl, + tendsto_indicator_const_apply_iff_eventually] using h_lim /-- If `μ` is a finite measure and the indicators of measurable sets `Aᵢ` tend pointwise almost everywhere to the indicator of a measurable set `A`, then the measures `μ Aᵢ` tend to the measure `μ A`. -/ lemma tendsto_measure_of_ae_tendsto_indicator_of_isFiniteMeasure [IsCountablyGenerated L] {μ : Measure α} [IsFiniteMeasure μ] (A_mble : MeasurableSet A) - (As_mble : ∀ i, MeasurableSet (As i)) - (h_lim : ∀ᵐ x ∂μ, Tendsto (fun i ↦ (As i).indicator (1 : α → ℝ≥0∞) x) - L (𝓝 (A.indicator 1 x))) : + (As_mble : ∀ i, MeasurableSet (As i)) (h_lim : ∀ᵐ x ∂μ, ∀ᶠ i in L, x ∈ As i ↔ x ∈ A) : Tendsto (fun i ↦ μ (As i)) L (𝓝 (μ A)) := tendsto_measure_of_ae_tendsto_indicator L A_mble As_mble MeasurableSet.univ (measure_ne_top μ univ) (eventually_of_forall (fun i ↦ subset_univ (As i))) h_lim diff --git a/Mathlib/MeasureTheory/Measure/Portmanteau.lean b/Mathlib/MeasureTheory/Measure/Portmanteau.lean index 16f7861445864..20e1b1dc848f0 100644 --- a/Mathlib/MeasureTheory/Measure/Portmanteau.lean +++ b/Mathlib/MeasureTheory/Measure/Portmanteau.lean @@ -550,7 +550,7 @@ implies -/ -variable {Ω : Type} [MeasurableSpace Ω] [TopologicalSpace Ω] [OpensMeasurableSpace Ω] +variable {Ω : Type*} [MeasurableSpace Ω] [TopologicalSpace Ω] [OpensMeasurableSpace Ω] lemma lintegral_le_liminf_lintegral_of_forall_isOpen_measure_le_liminf_measure {μ : Measure Ω} {μs : ℕ → Measure Ω} {f : Ω → ℝ} (f_cont : Continuous f) (f_nn : 0 ≤ f) diff --git a/Mathlib/ModelTheory/Satisfiability.lean b/Mathlib/ModelTheory/Satisfiability.lean index 540e51fbdcbc3..13fc3262995d1 100644 --- a/Mathlib/ModelTheory/Satisfiability.lean +++ b/Mathlib/ModelTheory/Satisfiability.lean @@ -228,8 +228,8 @@ section -- Porting note: This instance interrupts synthesizing instances. attribute [-instance] FirstOrder.Language.withConstants_expansion -/-- The Upward Löwenheim–Skolem Theorem: If `κ` is a cardinal greater than the cardinalities of `L` -and an infinite `L`-structure `M`, then `M` has an elementary extension of cardinality `κ`. -/ +/-- The **Upward Löwenheim–Skolem Theorem**: If `κ` is a cardinal greater than the cardinalities of +`L` and an infinite `L`-structure `M`, then `M` has an elementary extension of cardinality `κ`. 
-/ theorem exists_elementaryEmbedding_card_eq_of_ge (M : Type w') [L.Structure M] [iM : Infinite M] (κ : Cardinal.{w}) (h1 : Cardinal.lift.{w} L.card ≤ Cardinal.lift.{max u v} κ) (h2 : Cardinal.lift.{w} #M ≤ Cardinal.lift.{w'} κ) : diff --git a/Mathlib/ModelTheory/Skolem.lean b/Mathlib/ModelTheory/Skolem.lean index 7e2910278fb18..0d3ac622dd087 100644 --- a/Mathlib/ModelTheory/Skolem.lean +++ b/Mathlib/ModelTheory/Skolem.lean @@ -124,7 +124,7 @@ theorem exists_small_elementarySubstructure : ∃ S : L.ElementarySubstructure M variable {M} -/-- The Downward Löwenheim–Skolem theorem : +/-- The **Downward Löwenheim–Skolem theorem** : If `s` is a set in an `L`-structure `M` and `κ` an infinite cardinal such that `max (#s, L.card) ≤ κ` and `κ ≤ # M`, then `M` has an elementary substructure containing `s` of cardinality `κ`. -/ diff --git a/Mathlib/NumberTheory/Cyclotomic/Discriminant.lean b/Mathlib/NumberTheory/Cyclotomic/Discriminant.lean index 0f3b2a7f9517b..2b2ebcd67a95f 100644 --- a/Mathlib/NumberTheory/Cyclotomic/Discriminant.lean +++ b/Mathlib/NumberTheory/Cyclotomic/Discriminant.lean @@ -70,7 +70,7 @@ theorem discr_prime_pow_ne_two [IsCyclotomicExtension {p ^ (k + 1)} K L] [hp : F haveI se : IsSeparable K L := (isGalois (p ^ (k + 1)) K L).to_isSeparable rw [discr_powerBasis_eq_norm, finrank L hirr, hζ.powerBasis_gen _, ← hζ.minpoly_eq_cyclotomic_of_irreducible hirr, PNat.pow_coe, - totient_prime_pow hp.out (succ_pos k), succ_sub_one] + totient_prime_pow hp.out (succ_pos k), Nat.add_one_sub_one] have coe_two : ((2 : ℕ+) : ℕ) = 2 := rfl have hp2 : p = 2 → k ≠ 0 := by rintro rfl rfl diff --git a/Mathlib/NumberTheory/DirichletCharacter/Basic.lean b/Mathlib/NumberTheory/DirichletCharacter/Basic.lean index 0f0036b22508c..6dcbd5bf14dcb 100644 --- a/Mathlib/NumberTheory/DirichletCharacter/Basic.lean +++ b/Mathlib/NumberTheory/DirichletCharacter/Basic.lean @@ -25,7 +25,7 @@ Main definitions: ## TODO -- reduction, even, odd +- even, odd - add `lemma unitsMap_surjective {n m : ℕ} (h : n ∣ m) (hm : m ≠ 0) : Function.Surjective (unitsMap h)` and then @@ -45,10 +45,10 @@ dirichlet character, multiplicative character -/ /-- The type of Dirichlet characters of level `n`. -/ -abbrev DirichletCharacter (R : Type) [CommMonoidWithZero R] (n : ℕ) := MulChar (ZMod n) R +abbrev DirichletCharacter (R : Type*) [CommMonoidWithZero R] (n : ℕ) := MulChar (ZMod n) R open MulChar -variable {R : Type} [CommMonoidWithZero R] {n : ℕ} (χ : DirichletCharacter R n) +variable {R : Type*} [CommMonoidWithZero R] {n : ℕ} (χ : DirichletCharacter R n) namespace DirichletCharacter lemma toUnitHom_eq_char' {a : ZMod n} (ha : IsUnit a) : χ a = χ.toUnitHom ha.unit := by simp @@ -64,7 +64,7 @@ lemma periodic {m : ℕ} (hm : n ∣ m) : Function.Periodic χ m := by /-- A function that modifies the level of a Dirichlet character to some multiple of its original level. 
-/ -noncomputable def changeLevel {R : Type} [CommMonoidWithZero R] {n m : ℕ} (hm : n ∣ m) : +noncomputable def changeLevel {R : Type*} [CommMonoidWithZero R] {n m : ℕ} (hm : n ∣ m) : DirichletCharacter R n →* DirichletCharacter R m := { toFun := fun ψ ↦ MulChar.ofUnitHom (ψ.toUnitHom.comp (ZMod.unitsMap hm)), map_one' := by ext; simp, @@ -182,6 +182,15 @@ lemma conductor_eq_zero_iff_level_eq_zero : conductor χ = 0 ↔ n = 0 := by rintro rfl exact Nat.sInf_eq_zero.mpr <| Or.inl <| level_mem_conductorSet χ +lemma conductor_le_conductor_mem_conductorSet {d : ℕ} (hd : d ∈ conductorSet χ) : + χ.conductor ≤ (Classical.choose hd.2).conductor := by + refine Nat.sInf_le <| (mem_conductorSet_iff χ).mpr <| + ⟨dvd_trans (conductor_dvd_level _) hd.1, + (factorsThrough_conductor (Classical.choose hd.2)).2.choose, ?_⟩ + rw [changeLevel_trans _ (conductor_dvd_level _) (FactorsThrough.dvd _ hd), + ← (factorsThrough_conductor (Classical.choose hd.2)).2.choose_spec] + exact FactorsThrough.eq_changeLevel χ hd + variable (χ) /-- A character is primitive if its level is equal to its conductor. -/ @@ -199,4 +208,21 @@ lemma conductor_one_dvd (n : ℕ) : conductor (1 : DirichletCharacter R 1) ∣ n rw [(isPrimitive_def _).mp isPrimitive_one_level_one] apply one_dvd _ +/-- The primitive character associated to a Dirichlet character. -/ +noncomputable def primitiveCharacter : DirichletCharacter R χ.conductor := + Classical.choose (factorsThrough_conductor χ).choose_spec + +lemma primitiveCharacter_isPrimitive : isPrimitive (χ.primitiveCharacter) := by + by_cases h : χ.conductor = 0 + · rw [isPrimitive_def] + convert conductor_eq_zero_iff_level_eq_zero.mpr h + · exact le_antisymm (Nat.le_of_dvd (Nat.pos_of_ne_zero h) (conductor_dvd_level _)) <| + conductor_le_conductor_mem_conductorSet <| conductor_mem_conductorSet χ + +lemma primitiveCharacter_one (hn : n ≠ 0) : + (1 : DirichletCharacter R n).primitiveCharacter = 1 := by + rw [eq_one_iff_conductor_eq_one <| (@conductor_one R _ _ hn) ▸ Nat.one_ne_zero, + (isPrimitive_def _).1 (1 : DirichletCharacter R n).primitiveCharacter_isPrimitive, + conductor_one hn] + end DirichletCharacter diff --git a/Mathlib/NumberTheory/LegendreSymbol/GaussEisensteinLemmas.lean b/Mathlib/NumberTheory/LegendreSymbol/GaussEisensteinLemmas.lean index 5be06f3e6cced..fa2ec2014e92d 100644 --- a/Mathlib/NumberTheory/LegendreSymbol/GaussEisensteinLemmas.lean +++ b/Mathlib/NumberTheory/LegendreSymbol/GaussEisensteinLemmas.lean @@ -67,7 +67,7 @@ private theorem gauss_lemma_aux₁ (p : ℕ) [Fact p.Prime] {a : ℤ} calc (a ^ (p / 2) * (p / 2)! 
: ZMod p) = ∏ x in Ico 1 (p / 2).succ, a * x := by rw [prod_mul_distrib, ← prod_natCast, prod_Ico_id_eq_factorial, prod_const, card_Ico, - succ_sub_one]; simp + Nat.add_one_sub_one]; simp _ = ∏ x in Ico 1 (p / 2).succ, ↑((a * x : ZMod p).val) := by simp _ = ∏ x in Ico 1 (p / 2).succ, (if (a * x : ZMod p).val ≤ p / 2 then (1 : ZMod p) else -1) * (a * x : ZMod p).valMinAbs.natAbs := diff --git a/Mathlib/NumberTheory/SumTwoSquares.lean b/Mathlib/NumberTheory/SumTwoSquares.lean index 4829dcc69b667..11f672e903dc2 100644 --- a/Mathlib/NumberTheory/SumTwoSquares.lean +++ b/Mathlib/NumberTheory/SumTwoSquares.lean @@ -231,7 +231,7 @@ theorem Nat.eq_sq_add_sq_iff {n : ℕ} : refine' ⟨a, b, hab.symm, (ZMod.isSquare_neg_one_iff hb).mpr fun {q} hqp hqb hq4 => _⟩ refine' Nat.odd_iff_not_even.mp _ (H hqp hq4) have hqb' : padicValNat q b = 1 := - b.factorization_def hqp ▸ le_antisymm (Nat.Squarefree.factorization_le_one _ hb) + b.factorization_def hqp ▸ le_antisymm (hb.natFactorization_le_one _) ((hqp.dvd_iff_one_le_factorization hb₀.ne').mp hqb) haveI hqi : Fact q.Prime := ⟨hqp⟩ simp_rw [← hab, padicValNat.mul (pow_ne_zero 2 ha₀.ne') hb₀.ne', hqb', diff --git a/Mathlib/NumberTheory/Wilson.lean b/Mathlib/NumberTheory/Wilson.lean index 47c6172a17b90..2405518e79d5d 100644 --- a/Mathlib/NumberTheory/Wilson.lean +++ b/Mathlib/NumberTheory/Wilson.lean @@ -54,7 +54,7 @@ theorem wilsons_lemma : ((p - 1)! : ZMod p) = -1 := by symm refine' prod_bij (fun a _ => (a : ZMod p).val) _ _ _ _ · intro a ha - rw [mem_Ico, ← Nat.succ_sub hp, Nat.succ_sub_one] + rw [mem_Ico, ← Nat.succ_sub hp, Nat.add_one_sub_one] constructor · apply Nat.pos_of_ne_zero; rw [← @val_zero p] intro h; apply Units.ne_zero a (val_injective p h) @@ -62,7 +62,7 @@ theorem wilsons_lemma : ((p - 1)! : ZMod p) = -1 := by · rintro a -; simp only [cast_id, nat_cast_val] · intro _ _ _ _ h; rw [Units.ext_iff]; exact val_injective p h · intro b hb - rw [mem_Ico, Nat.succ_le_iff, ← succ_sub hp, succ_sub_one, pos_iff_ne_zero] at hb + rw [mem_Ico, Nat.succ_le_iff, ← succ_sub hp, Nat.add_one_sub_one, pos_iff_ne_zero] at hb refine' ⟨Units.mk0 b _, Finset.mem_univ _, _⟩ · intro h; apply hb.1; apply_fun val at h simpa only [val_cast_of_lt hb.right, val_zero] using h @@ -75,7 +75,7 @@ theorem prod_Ico_one_prime : ∏ x in Ico 1 p, (x : ZMod p) = -1 := by conv => congr congr - rw [← succ_sub_one p, succ_sub (Fact.out (p := p.Prime)).pos] + rw [← Nat.add_one_sub_one p, succ_sub (Fact.out (p := p.Prime)).pos] rw [← prod_natCast, Finset.prod_Ico_id_eq_factorial, wilsons_lemma] #align zmod.prod_Ico_one_prime ZMod.prod_Ico_one_prime diff --git a/Mathlib/Order/BooleanAlgebra.lean b/Mathlib/Order/BooleanAlgebra.lean index cbd23365dd416..37d38f5edf533 100644 --- a/Mathlib/Order/BooleanAlgebra.lean +++ b/Mathlib/Order/BooleanAlgebra.lean @@ -417,6 +417,15 @@ theorem sdiff_sdiff_sup_sdiff' : z \ (x \ y ⊔ y \ x) = z ⊓ x ⊓ y ⊔ z \ x _ = z ⊓ x ⊓ y ⊔ z \ x ⊓ z \ y := by ac_rfl #align sdiff_sdiff_sup_sdiff' sdiff_sdiff_sup_sdiff' +lemma sdiff_sdiff_sdiff_cancel_left (hca : z ≤ x) : (x \ y) \ (x \ z) = z \ y := + sdiff_sdiff_sdiff_le_sdiff.antisymm <| + (disjoint_sdiff_self_right.mono_left sdiff_le).le_sdiff_of_le_left <| sdiff_le_sdiff_right hca + +lemma sdiff_sdiff_sdiff_cancel_right (hcb : z ≤ y) : (x \ z) \ (y \ z) = x \ y := by + rw [le_antisymm_iff, sdiff_le_comm] + exact ⟨sdiff_sdiff_sdiff_le_sdiff, + (disjoint_sdiff_self_left.mono_right sdiff_le).le_sdiff_of_le_left <| sdiff_le_sdiff_left hcb⟩ + theorem inf_sdiff : (x ⊓ y) \ z = x \ z ⊓ y \ z := sdiff_unique (calc diff --git 
a/Mathlib/Order/JordanHolder.lean b/Mathlib/Order/JordanHolder.lean index 9caa6428dd51e..a4db51542741a 100644 --- a/Mathlib/Order/JordanHolder.lean +++ b/Mathlib/Order/JordanHolder.lean @@ -287,7 +287,7 @@ theorem length_ofList (l : List X) (hl : l ≠ []) (hc : List.Chain' IsMaximal l theorem ofList_toList (s : CompositionSeries X) : ofList s.toList s.toList_ne_nil s.chain'_toList = s := by refine' ext_fun _ _ - · rw [length_ofList, length_toList, Nat.succ_sub_one] + · rw [length_ofList, length_toList, Nat.add_one_sub_one] · rintro ⟨i, hi⟩ -- Porting note: Was `dsimp [ofList, toList]; rw [List.nthLe_ofFn']`. simp [ofList, toList, -List.ofFn_succ] @@ -411,7 +411,8 @@ theorem mem_eraseTop_of_ne_of_mem {s : CompositionSeries X} {x : X} (hx : x ≠ x ∈ s.eraseTop := by rcases hxs with ⟨i, rfl⟩ have hi : (i : ℕ) < (s.length - 1).succ := by - conv_rhs => rw [← Nat.succ_sub (length_pos_of_mem_ne ⟨i, rfl⟩ s.top_mem hx), Nat.succ_sub_one] + conv_rhs => rw [← Nat.succ_sub (length_pos_of_mem_ne ⟨i, rfl⟩ s.top_mem hx), + Nat.add_one_sub_one] exact lt_of_le_of_ne (Nat.le_of_lt_succ i.2) (by simpa [top, s.inj, Fin.ext_iff] using hx) refine' ⟨Fin.castSucc (n := s.length + 1) i, _⟩ simp [Fin.ext_iff, Nat.mod_eq_of_lt hi] @@ -424,7 +425,7 @@ theorem mem_eraseTop {s : CompositionSeries X} {x : X} (h : 0 < s.length) : constructor · rintro ⟨i, rfl⟩ have hi : (i : ℕ) < s.length := by - conv_rhs => rw [← Nat.succ_sub_one s.length, Nat.succ_sub h] + conv_rhs => rw [← Nat.add_one_sub_one s.length, Nat.succ_sub h] exact i.2 -- Porting note: Was `simp [top, Fin.ext_iff, ne_of_lt hi]`. simp [top, Fin.ext_iff, ne_of_lt hi, -Set.mem_range, Set.mem_range_self] @@ -440,7 +441,7 @@ theorem lt_top_of_mem_eraseTop {s : CompositionSeries X} {x : X} (h : 0 < s.leng theorem isMaximal_eraseTop_top {s : CompositionSeries X} (h : 0 < s.length) : IsMaximal s.eraseTop.top s.top := by have : s.length - 1 + 1 = s.length := by - conv_rhs => rw [← Nat.succ_sub_one s.length]; rw [Nat.succ_sub h] + conv_rhs => rw [← Nat.add_one_sub_one s.length]; rw [Nat.succ_sub h] rw [top_eraseTop, top] convert s.step ⟨s.length - 1, Nat.sub_lt h zero_lt_one⟩; ext; simp [this] #align composition_series.is_maximal_erase_top_top CompositionSeries.isMaximal_eraseTop_top diff --git a/Mathlib/Probability/Distributions/Exponential.lean b/Mathlib/Probability/Distributions/Exponential.lean new file mode 100644 index 0000000000000..be513cc6f7e84 --- /dev/null +++ b/Mathlib/Probability/Distributions/Exponential.lean @@ -0,0 +1,235 @@ +/- +Copyright (c) 2023 Claus Clausen. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Claus Clausen, Patrick Massot +-/ + +import Mathlib.Analysis.SpecialFunctions.Gaussian +import Mathlib.Probability.Notation +import Mathlib.Probability.Cdf + +/-! # Exponential distributions over ℝ + +Define the Exponential Measure over the Reals + +## Main definitions +* `exponentialPdfReal`: the function `r x ↦ r * (Real.exp (-(r * ↑x))` for `0 ≤ x` + or `0` else, which is the probability density function of a exponential distribution with + rate `r` (when `hr : 0 < r`). +* `exponentialPdf`: `ℝ≥0∞`-valued pdf, + `exponentialPdf r = ENNReal.ofReal (exponentialPdfReal r)`. +* `expMeasure`: an exponential measure on `ℝ`, parametrized by its rate `r`. 
+* `exponentialCdfReal`: the CDF given by the definition of the CDF in `ProbabilityTheory.Cdf` on + `expMeasure`. + +## Main results +* `exponentialCdfReal_eq`: Proof that the `exponentialCdfReal` given by the definition equals the + known function given as `r x ↦ 1 - (Real.exp (-(r * ↑x)))` for `0 ≤ x` or `0` else. +-/ + +open scoped ENNReal NNReal Real + +open MeasureTheory Real Set Filter Topology + + /-- A Lebesgue integral from -∞ to y can be expressed as the sum of one from -∞ to z and one from z to y, when z ≤ y. -/ +lemma lintegral_Iic_eq_lintegral_Iio_add_Icc {y z : ℝ} (f : ℝ → ℝ≥0∞) (hzy : z ≤ y) : + ∫⁻ x in Iic y, f x = (∫⁻ x in Iio z, f x) + ∫⁻ x in Icc z y, f x := by + rw [← Iio_union_Icc_eq_Iic hzy, lintegral_union measurableSet_Icc] + rw [Set.disjoint_iff] + rintro x ⟨h1 : x < _, h2, _⟩ + linarith + +lemma lintegral_eq_lintegral_Ici_add_Iio (f : ℝ → ℝ≥0∞) (c : ℝ) : + ∫⁻ x, f x = (∫⁻ x in Ici c, f x) + ∫⁻ x in Iio c, f x := by + have union : univ = {x: ℝ | x ≥ c} ∪ {x : ℝ | x < c} := by + ext x; simp [le_or_lt] + have : IsOpen {x : ℝ | x < c} := by exact isOpen_gt' c + calc + ∫⁻ x, f x = ∫⁻ x in univ, f x ∂volume := (set_lintegral_univ f).symm + _ = ∫⁻ x in {x | x ≥ c} ∪ {x | x < c} , f x ∂volume := by rw [← union] + _ = _ := by + apply lintegral_union this.measurableSet + rw [Set.disjoint_iff] + rintro x ⟨hxge : x ≥ _, hxlt : x < _⟩ + linarith + +namespace ProbabilityTheory + +section ExponentialPdf + +/-- The PDF of the exponential distribution, depending on its rate. -/ +noncomputable +def exponentialPdfReal (r x : ℝ) : ℝ := + if 0 ≤ x then r * exp (-(r * x)) else 0 + +/-- The PDF of the exponential distribution, valued in the extended nonnegative reals. -/ +noncomputable +def exponentialPdf (r x : ℝ) : ℝ≥0∞ := + ENNReal.ofReal (exponentialPdfReal r x) + +lemma exponentialPdf_eq (r x : ℝ) : + exponentialPdf r x = ENNReal.ofReal (if 0 ≤ x then r * exp (-(r * x)) else 0) := rfl + +lemma exponentialPdf_of_neg {r x : ℝ} (hx : x < 0) : exponentialPdf r x = 0 := by + simp only [exponentialPdf_eq, if_neg (not_le.mpr hx), ENNReal.ofReal_zero] + +lemma exponentialPdf_of_nonneg {r x : ℝ} (hx : 0 ≤ x) : + exponentialPdf r x = ENNReal.ofReal (r * rexp (-(r * x))) := by + simp only [exponentialPdf_eq, if_pos hx] + +lemma hasDerivAt_exp_neg {r x : ℝ} (hr : 0 < r) : + HasDerivAt (fun a ↦ -1/r * exp (-(r * a))) (exp (-(r * x))) x := by + convert (((hasDerivAt_id x).const_mul (-r)).exp.const_mul (-1/r)) using 1 <;> field_simp + +lemma hasDerivAt_neg_exp_mul_exp {r x : ℝ} : + HasDerivAt (fun a ↦ -exp (-(r * a))) (r * exp (-(r * x))) x := by + convert (((hasDerivAt_id x).const_mul (-r)).exp.const_mul (-1)) using 1 + · simp only [one_mul, id_eq, neg_mul] + simp only [id_eq, neg_mul, mul_one, mul_neg, one_mul, neg_neg, mul_comm] + +/-- The Lebesgue integral of the exponential PDF over the nonpositive reals equals 0. -/ +lemma lintegral_exponentialPdf_of_nonpos {x r : ℝ} (hx : x ≤ 0) : + ∫⁻ y in Iio x, exponentialPdf r y = 0 := by + rw [set_lintegral_congr_fun (g := fun _ ↦ 0) measurableSet_Iio] + · rw [lintegral_zero, ← ENNReal.ofReal_zero] + · simp only [exponentialPdf_eq, ge_iff_le, ENNReal.ofReal_eq_zero] + apply ae_of_all _ fun a (_ : a < _) ↦ by rw [if_neg (by linarith)] + +/-- The exponential pdf is measurable. 
-/ +lemma measurable_exponentialPdfReal (r : ℝ) : + Measurable (exponentialPdfReal r) := by + refine Measurable.ite ?hp ((measurable_id'.const_mul r).neg.exp.const_mul r) ?hg + · exact measurableSet_Ici + · exact measurable_const + +/-- The exponential Pdf is strongly measurable -/ +lemma stronglyMeasurable_exponentialPdfReal (r : ℝ) : + StronglyMeasurable (exponentialPdfReal r) := + (measurable_exponentialPdfReal r).stronglyMeasurable + +/-- the exponential Pdf is positive for all positive reals-/ +lemma exponentialPdfReal_pos {x r : ℝ} {hr : 0 < r} (hx : 0 < x) : + 0 < exponentialPdfReal r x := by + simp only [exponentialPdfReal, if_pos hx.le] + positivity + +/-- The exponential Pdf is nonnegative-/ +lemma exponentialPdfReal_nonneg {r : ℝ} (hr : 0 < r) (x : ℝ) : + 0 ≤ exponentialPdfReal r x := by + unfold exponentialPdfReal; split_ifs <;> positivity + +/-- A negative exponential function is integrable on Intervals in R≥0 -/ +lemma exp_neg_integrableOn_Ioc {b x : ℝ} (hb : 0 < b) : + IntegrableOn (fun x ↦ rexp (-(b * x))) (Ioc 0 x) := by + simp only [neg_mul_eq_neg_mul] + exact (exp_neg_integrableOn_Ioi _ hb).mono_set Ioc_subset_Ioi_self + +open Measure + +/-- The Pdf of the exponential Distribution integrates to 1-/ +@[simp] +lemma lintegral_exponentialPdf_eq_one (r : ℝ) (hr : 0 < r) : ∫⁻ x, exponentialPdf r x = 1 := by + rw [lintegral_eq_lintegral_Ici_add_Iio (exponentialPdf r) 0, ← ENNReal.toReal_eq_one_iff] + have leftSide : ∫⁻ x in Iio 0, exponentialPdf r x = 0 := by + rw [set_lintegral_congr_fun measurableSet_Iio + (ae_of_all _ (fun x (hx : x < 0) ↦ exponentialPdf_of_neg hx)), lintegral_zero] + have rightSide : + ∫⁻ x in Ici 0, exponentialPdf r x = ∫⁻ x in Ici 0, ENNReal.ofReal (r * rexp (-(r * x))) := by + exact set_lintegral_congr_fun isClosed_Ici.measurableSet + (ae_of_all _ (fun x (hx : 0 ≤ x) ↦ exponentialPdf_of_nonneg hx)) + simp only [leftSide, add_zero] + rw [rightSide, ENNReal.toReal_eq_one_iff, ←ENNReal.toReal_eq_one_iff] + rw [← integral_eq_lintegral_of_nonneg_ae (ae_of_all _ (fun _ ↦ by positivity))] + · have IntegrOn : IntegrableOn (fun x ↦ rexp (-(r * x))) (Ioi 0) := by + simp only [← neg_mul, exp_neg_integrableOn_Ioi 0 hr] + rw [integral_mul_left, integral_Ici_eq_integral_Ioi, + integral_Ioi_of_hasDerivAt_of_tendsto' (m:=0) (fun _ _ ↦ hasDerivAt_exp_neg hr) IntegrOn] + · field_simp + · rw [← mul_zero (-1/r)] + exact tendsto_const_nhds.mul (tendsto_exp_neg_atTop_nhds_0.comp + ((tendsto_const_mul_atTop_of_pos hr).2 tendsto_id)) + · exact ((measurable_id'.const_mul r).neg.exp.const_mul r).stronglyMeasurable.aestronglyMeasurable + +end ExponentialPdf + +open MeasureTheory + +/-- Measure defined by the exponential Distribution -/ +noncomputable +def expMeasure (r : ℝ) : Measure ℝ := + volume.withDensity (exponentialPdf r) + +instance instIsProbabilityMeasureExponential (r : ℝ) [Fact (0 < r)] : + IsProbabilityMeasure (expMeasure r) where + measure_univ := by simp [expMeasure, lintegral_exponentialPdf_eq_one r Fact.out] + +section ExponentialCdf + +/-- CDF of the exponential Distribution -/ +noncomputable +def exponentialCdfReal (r : ℝ) : StieltjesFunction := + cdf (expMeasure r) + +lemma exponentialCdfReal_eq_integral (r x : ℝ) [Fact (0 < r)] : + exponentialCdfReal r x = ∫ x in Iic x, exponentialPdfReal r x := by + rw [exponentialCdfReal,cdf_eq_toReal] + simp only [expMeasure, measurableSet_Iic, withDensity_apply] + rw [integral_eq_lintegral_of_nonneg_ae]; exact rfl + · exact ae_of_all _ fun a ↦ by simp [Pi.zero_apply, exponentialPdfReal_nonneg Fact.out a] + · exact 
(Measurable.aestronglyMeasurable (measurable_exponentialPdfReal r)).restrict + +lemma exponentialCdfReal_eq_lintegral (r x : ℝ) [Fact (0 < r)] : + exponentialCdfReal r x = ENNReal.toReal (∫⁻ x in Iic x, exponentialPdf r x) := by + simp only [exponentialPdf, exponentialCdfReal, cdf_eq_toReal] + simp only [expMeasure, measurableSet_Iic, withDensity_apply] + rfl + +open Topology + +lemma lintegral_exponentialPdf_eq_antiDeriv (r x : ℝ) (hr : 0 < r) : + ∫⁻ y in Iic x, exponentialPdf r y + = ENNReal.ofReal (if 0 ≤ x then 1 - exp (-(r * x)) else 0) := by + split_ifs with h + case neg => + simp only [exponentialPdf_eq] + rw [set_lintegral_congr_fun measurableSet_Iic, lintegral_zero, ENNReal.ofReal_zero] + exact ae_of_all _ fun a (_ : a ≤ _) ↦ by rw [if_neg (by linarith), ENNReal.ofReal_eq_zero] + case pos => + rw [lintegral_Iic_eq_lintegral_Iio_add_Icc _ h, lintegral_exponentialPdf_of_nonpos (le_refl 0), + zero_add] + simp only [exponentialPdf_eq] + rw[set_lintegral_congr_fun measurableSet_Icc (ae_of_all _ + (by intro a ⟨(hle : _ ≤ a), _⟩; rw [if_pos hle]))] + rw [←ENNReal.toReal_eq_toReal _ ENNReal.ofReal_ne_top, ←integral_eq_lintegral_of_nonneg_ae + (eventually_of_forall fun _ ↦ le_of_lt (mul_pos hr (exp_pos _)))] + have : ∫ a in uIoc 0 x, r * rexp (-(r * a)) = ∫ a in (0)..x, r * rexp (-(r * a)) := by + rw [intervalIntegral.intervalIntegral_eq_integral_uIoc, smul_eq_mul, if_pos h, one_mul] + rw [integral_Icc_eq_integral_Ioc, ← uIoc_of_le h, this] + rw [intervalIntegral.integral_eq_sub_of_hasDeriv_right_of_le h + (f := fun a ↦ -1 * rexp (-(r * a))) _ _] + rw [ENNReal.toReal_ofReal_eq_iff.2 (by norm_num; positivity)] + · norm_num; ring + · simp only [intervalIntegrable_iff, uIoc_of_le h] + exact Integrable.const_mul (exp_neg_integrableOn_Ioc hr) _ + · have : Continuous (fun a ↦ rexp (-(r * a))) := by + simp only [← neg_mul]; exact (continuous_mul_left (-r)).exp + exact Continuous.continuousOn (Continuous.comp' (continuous_mul_left (-1)) this) + · simp only [neg_mul, one_mul] + exact fun _ _ ↦ HasDerivAt.hasDerivWithinAt hasDerivAt_neg_exp_mul_exp + · apply Integrable.aestronglyMeasurable (Integrable.const_mul _ _) + rw [← integrableOn_def, integrableOn_Icc_iff_integrableOn_Ioc] + exact exp_neg_integrableOn_Ioc hr + · refine ne_of_lt (IntegrableOn.set_lintegral_lt_top ?_) + rw [integrableOn_Icc_iff_integrableOn_Ioc] + exact Integrable.const_mul (exp_neg_integrableOn_Ioc hr) _ + +/-- The definition of the CDF equals the known formula ``1 - exp (-(r * x))`` for `0 ≤ x`, and `0` otherwise. -/ +lemma exponentialCdfReal_eq {r : ℝ} [Fact (0 < r)] (x : ℝ) : + exponentialCdfReal r x = if 0 ≤ x then 1 - exp (-(r * x)) else 0 := by + simp only [exponentialCdfReal_eq_lintegral, lintegral_exponentialPdf_eq_antiDeriv _ _ Fact.out, + ENNReal.toReal_ofReal_eq_iff] + split_ifs with h + · simp only [sub_nonneg, exp_le_one_iff, Left.neg_nonpos_iff, gt_iff_lt, ge_iff_le] + exact mul_nonneg (le_of_lt Fact.out) h + · simp + +end ExponentialCdf diff --git a/Mathlib/RepresentationTheory/GroupCohomology/Resolution.lean b/Mathlib/RepresentationTheory/GroupCohomology/Resolution.lean index 0b51992e29c79..d6d44c7768ac7 100644 --- a/Mathlib/RepresentationTheory/GroupCohomology/Resolution.lean +++ b/Mathlib/RepresentationTheory/GroupCohomology/Resolution.lean @@ -446,7 +446,7 @@ cover of the classifying space of `G` as a simplicial set. 
-/ def cechNerveTerminalFromIsoCompForget : cechNerveTerminalFrom G ≅ classifyingSpaceUniversalCover G ⋙ forget _ := NatIso.ofComponents (fun _ => Types.productIso _) fun _ => - Matrix.ext fun _ _ => Types.Limit.lift_π_apply _ _ _ _ + Matrix.ext fun _ _ => Types.Limit.lift_π_apply (Discrete.functor fun _ ↦ G) _ _ _ #align classifying_space_universal_cover.cech_nerve_terminal_from_iso_comp_forget classifyingSpaceUniversalCover.cechNerveTerminalFromIsoCompForget variable (k) diff --git a/Mathlib/RingTheory/Polynomial/Eisenstein/IsIntegral.lean b/Mathlib/RingTheory/Polynomial/Eisenstein/IsIntegral.lean index 8570397b2f578..914e39021dc72 100644 --- a/Mathlib/RingTheory/Polynomial/Eisenstein/IsIntegral.lean +++ b/Mathlib/RingTheory/Polynomial/Eisenstein/IsIntegral.lean @@ -95,7 +95,7 @@ theorem cyclotomic_prime_pow_comp_X_add_one_isEisensteinAt [hp : Fact p.Prime] ( · simp only [ite_eq_right_iff] rintro ⟨k, hk⟩ rw [natDegree_comp, show (X + 1 : ℤ[X]) = X + C 1 by simp, natDegree_X_add_C, mul_one, - natDegree_cyclotomic, Nat.totient_prime_pow hp.out (Nat.succ_pos _), Nat.succ_sub_one] + natDegree_cyclotomic, Nat.totient_prime_pow hp.out (Nat.succ_pos _), Nat.add_one_sub_one] at hn hi rw [hk, pow_succ, mul_assoc] at hi rw [hk, mul_comm, Nat.mul_div_cancel _ hp.out.pos] diff --git a/Mathlib/RingTheory/Polynomial/Hermite/Basic.lean b/Mathlib/RingTheory/Polynomial/Hermite/Basic.lean index a9439a14dd0c0..5eb12b5e3804c 100644 --- a/Mathlib/RingTheory/Polynomial/Hermite/Basic.lean +++ b/Mathlib/RingTheory/Polynomial/Hermite/Basic.lean @@ -156,7 +156,7 @@ theorem coeff_hermite_explicit : | n + 1, 0 => by convert coeff_hermite_succ_zero (2 * n + 1) using 1 -- porting note: ring_nf did not solve the goal on line 165 - rw [coeff_hermite_explicit n 1, (by rw [Nat.left_distrib, mul_one, Nat.succ_sub_one] : + rw [coeff_hermite_explicit n 1, (by rw [Nat.left_distrib, mul_one, Nat.add_one_sub_one] : 2 * (n + 1) - 1 = 2 * n + 1), Nat.doubleFactorial_add_one, Nat.choose_zero_right, Nat.choose_one_right, pow_succ] push_cast @@ -178,7 +178,7 @@ theorem coeff_hermite_explicit : -- Factor out double factorials. norm_cast -- porting note: ring_nf did not solve the goal on line 186 - rw [(by rw [Nat.left_distrib, mul_one, Nat.succ_sub_one] : 2 * (n + 1) - 1 = 2 * n + 1), + rw [(by rw [Nat.left_distrib, mul_one, Nat.add_one_sub_one] : 2 * (n + 1) - 1 = 2 * n + 1), Nat.doubleFactorial_add_one, mul_comm (2 * n + 1)] simp only [mul_assoc, ← mul_add] congr 1 diff --git a/Mathlib/RingTheory/Polynomial/Vieta.lean b/Mathlib/RingTheory/Polynomial/Vieta.lean index af37463a4db49..fc49834d5d851 100644 --- a/Mathlib/RingTheory/Polynomial/Vieta.lean +++ b/Mathlib/RingTheory/Polynomial/Vieta.lean @@ -68,7 +68,7 @@ theorem prod_X_add_C_coeff (s : Multiset R) {k : ℕ} (h : k ≤ Multiset.card s rw [hn, Nat.sub_sub_self (Nat.lt_succ_iff.mp (Finset.mem_range.mp hj1))] at hj2 exact Ne.irrefl hj2 · rw [Finset.mem_range] - exact Nat.sub_lt_succ (Multiset.card s) k + exact Nat.lt_succ_of_le (Nat.sub_le (Multiset.card s) k) set_option linter.uppercaseLean3 false in #align multiset.prod_X_add_C_coeff Multiset.prod_X_add_C_coeff diff --git a/Mathlib/RingTheory/TensorProduct.lean b/Mathlib/RingTheory/TensorProduct.lean index dcc947d7990a2..133f63a7e3aeb 100644 --- a/Mathlib/RingTheory/TensorProduct.lean +++ b/Mathlib/RingTheory/TensorProduct.lean @@ -64,7 +64,9 @@ variable (r : R) (f g : M →ₗ[R] N) variable (A) -/-- `baseChange A f` for `f : M →ₗ[R] N` is the `A`-linear map `A ⊗[R] M →ₗ[A] A ⊗[R] N`. 
-/ +/-- `baseChange A f` for `f : M →ₗ[R] N` is the `A`-linear map `A ⊗[R] M →ₗ[A] A ⊗[R] N`. + +This "base change" operation is also known as "extension of scalars". -/ def baseChange (f : M →ₗ[R] N) : A ⊗[R] M →ₗ[A] A ⊗[R] N := AlgebraTensorModule.map (LinearMap.id : A →ₗ[A] A) f #align linear_map.base_change LinearMap.baseChange @@ -99,6 +101,10 @@ theorem baseChange_smul : (r • f).baseChange A = r • f.baseChange A := by simp [baseChange_tmul] #align linear_map.base_change_smul LinearMap.baseChange_smul +lemma baseChange_comp {P : Type*} [AddCommMonoid P] [Module R P] (g : N →ₗ[R] P) : + (g ∘ₗ f).baseChange A = g.baseChange A ∘ₗ f.baseChange A := by + ext; simp + variable (R A M N) /-- `baseChange` as a linear map. -/ @@ -1084,8 +1090,11 @@ theorem basis_repr_symm_apply (a : A) (i : ι) : rw [basis, LinearEquiv.coe_symm_mk] -- porting note: `coe_symm_mk` isn't firing in `simp` simp [Equiv.uniqueProd_symm_apply, basisAux] --- Porting note: simpNF linter failed on `basis_repr_symm_apply` @[simp] +theorem basis_apply (i : ι) : + Algebra.TensorProduct.basis A b i = 1 ⊗ₜ b i := + Algebra.TensorProduct.basis_repr_symm_apply b 1 i + theorem basis_repr_symm_apply' (a : A) (i : ι) : a • Algebra.TensorProduct.basis A b i = a ⊗ₜ b i := by simpa using basis_repr_symm_apply b a i @@ -1096,6 +1105,23 @@ end TensorProduct end Algebra +namespace LinearMap + +open Algebra.TensorProduct + +variable {R M₁ M₂ ι ι₂ : Type*} (A : Type*) + [Fintype ι] [Fintype ι₂] [DecidableEq ι] [DecidableEq ι₂] + [CommRing R] [CommRing A] [Algebra R A] + [AddCommGroup M₁] [Module R M₁] [AddCommGroup M₂] [Module R M₂] + +@[simp] +lemma toMatrix_baseChange (f : M₁ →ₗ[R] M₂) (b₁ : Basis ι R M₁) (b₂ : Basis ι₂ R M₂) : + toMatrix (basis A b₁) (basis A b₂) (f.baseChange A) = + (toMatrix b₁ b₂ f).map (algebraMap R A) := by + ext; simp [toMatrix_apply] + +end LinearMap + namespace Module variable {R S A M N : Type*} [CommSemiring R] [CommSemiring S] [Semiring A] diff --git a/Mathlib/RingTheory/Trace.lean b/Mathlib/RingTheory/Trace.lean index f807313dfe4eb..b7716c358f008 100644 --- a/Mathlib/RingTheory/Trace.lean +++ b/Mathlib/RingTheory/Trace.lean @@ -118,8 +118,8 @@ theorem trace_algebraMap_of_basis (x : R) : trace R S (algebraMap R S x) = Finty haveI := Classical.decEq ι rw [trace_apply, LinearMap.trace_eq_matrix_trace R b, Matrix.trace] convert Finset.sum_const x --- Porting note: was `simp [-coe_lmul_eq_mul]`. - simp only [AlgHom.commutes, toMatrix_algebraMap, diag_apply, Matrix.scalar_apply_eq] + -- Porting note: was `simp [-coe_lmul_eq_mul]`. + simp only [AlgHom.commutes, toMatrix_algebraMap, diag_apply, scalar_apply, diagonal_apply_eq] #align algebra.trace_algebra_map_of_basis Algebra.trace_algebraMap_of_basis diff --git a/Mathlib/SetTheory/Cardinal/UnivLE.lean b/Mathlib/SetTheory/Cardinal/UnivLE.lean new file mode 100644 index 0000000000000..9d6110d04cd1c --- /dev/null +++ b/Mathlib/SetTheory/Cardinal/UnivLE.lean @@ -0,0 +1,31 @@ +/- +Copyright (c) 2023 Junyan Xu. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Junyan Xu +-/ +import Mathlib.Logic.UnivLE +import Mathlib.SetTheory.Ordinal.Basic + +/-! 
+# UnivLE and cardinals +-/ + +set_option autoImplicit true + +noncomputable section + +open Cardinal + +theorem univLE_iff_cardinal_le : UnivLE.{u, v} ↔ univ.{u, v+1} ≤ univ.{v, u+1} := by + rw [← not_iff_not, UnivLE]; simp_rw [small_iff_lift_mk_lt_univ]; push_neg + -- strange: simp_rw [univ_umax.{v,u}] doesn't work + refine ⟨fun ⟨α, le⟩ ↦ ?_, fun h ↦ ?_⟩ + · rw [univ_umax.{v,u}, ← lift_le.{u+1}, lift_univ, lift_lift] at le + exact le.trans_lt (lift_lt_univ'.{u,v+1} #α) + · obtain ⟨⟨α⟩, h⟩ := lt_univ'.mp h; use α + rw [univ_umax.{v,u}, ← lift_le.{u+1}, lift_univ, lift_lift] + exact h.le + +/-- Together with transitivity, this shows UnivLE "IsTotalPreorder". -/ +theorem univLE_total : UnivLE.{u, v} ∨ UnivLE.{v, u} := by + simp_rw [univLE_iff_cardinal_le]; apply le_total diff --git a/Mathlib/SetTheory/Ordinal/Basic.lean b/Mathlib/SetTheory/Ordinal/Basic.lean index af02590d43ff3..9990ead9707ff 100644 --- a/Mathlib/SetTheory/Ordinal/Basic.lean +++ b/Mathlib/SetTheory/Ordinal/Basic.lean @@ -1224,7 +1224,7 @@ theorem enum_zero_eq_bot {o : Ordinal} (ho : 0 < o) : -- intended to be used with explicit universe parameters /-- `univ.{u v}` is the order type of the ordinals of `Type u` as a member of `Ordinal.{v}` (when `u < v`). It is an inaccessible cardinal. -/ - @[nolint checkUnivs] +@[pp_with_univ, nolint checkUnivs] def univ : Ordinal.{max (u + 1) v} := lift.{v, u + 1} (@type Ordinal (· < ·) _) #align ordinal.univ Ordinal.univ @@ -1475,7 +1475,7 @@ theorem ord.orderEmbedding_coe : (ord.orderEmbedding : Cardinal → Ordinal) = o /-- The cardinal `univ` is the cardinality of ordinal `univ`, or equivalently the cardinal of `Ordinal.{u}`, or `Cardinal.{u}`, as an element of `Cardinal.{v}` (when `u < v`). -/ -@[nolint checkUnivs] +@[pp_with_univ, nolint checkUnivs] def univ := lift.{v, u + 1} #Ordinal #align cardinal.univ Cardinal.univ diff --git a/Mathlib/Tactic.lean b/Mathlib/Tactic.lean index a78a971811000..79d9f59c83cbc 100644 --- a/Mathlib/Tactic.lean +++ b/Mathlib/Tactic.lean @@ -97,13 +97,13 @@ import Mathlib.Tactic.NormNum import Mathlib.Tactic.NormNum.Basic import Mathlib.Tactic.NormNum.BigOperators import Mathlib.Tactic.NormNum.Core +import Mathlib.Tactic.NormNum.DivMod import Mathlib.Tactic.NormNum.Eq import Mathlib.Tactic.NormNum.GCD import Mathlib.Tactic.NormNum.Ineq import Mathlib.Tactic.NormNum.Inv import Mathlib.Tactic.NormNum.IsCoprime import Mathlib.Tactic.NormNum.LegendreSymbol -import Mathlib.Tactic.NormNum.Mod import Mathlib.Tactic.NormNum.NatFib import Mathlib.Tactic.NormNum.NatSqrt import Mathlib.Tactic.NormNum.OfScientific @@ -119,6 +119,7 @@ import Mathlib.Tactic.Polyrith import Mathlib.Tactic.Positivity import Mathlib.Tactic.Positivity.Basic import Mathlib.Tactic.Positivity.Core +import Mathlib.Tactic.ProdAssoc import Mathlib.Tactic.ProjectionNotation import Mathlib.Tactic.Propose import Mathlib.Tactic.ProxyType diff --git a/Mathlib/Tactic/Core.lean b/Mathlib/Tactic/Core.lean index 37bcc3a92702a..d0b89a09f5755 100644 --- a/Mathlib/Tactic/Core.lean +++ b/Mathlib/Tactic/Core.lean @@ -247,7 +247,7 @@ open Lean /-- Returns the root directory which contains the package root file, e.g. `Mathlib.lean`. -/ def getPackageDir (pkg : String) : IO System.FilePath := do - let sp ← initSrcSearchPath (← findSysroot) + let sp ← initSrcSearchPath let root? ← sp.findM? fun p => (p / pkg).isDir <||> ((p / pkg).withExtension "lean").pathExists if let some root := root? 
then return root diff --git a/Mathlib/Tactic/FailIfNoProgress.lean b/Mathlib/Tactic/FailIfNoProgress.lean index 96aaf76cf0e14..04e85daed6574 100644 --- a/Mathlib/Tactic/FailIfNoProgress.lean +++ b/Mathlib/Tactic/FailIfNoProgress.lean @@ -35,29 +35,45 @@ or the local context at reducible transparency. -/ syntax (name := failIfNoProgress) "fail_if_no_progress " tacticSeq : tactic /-- `lctxIsDefEq l₁ l₂` compares two lists of `Option LocalDecl`s (as returned from e.g. -`(← (← getMainGoal).getDecl).lctx.decls.toList`). It returns `true` if they contain expressions of -the same type in the same order (up to defeq), and `false` otherwise. -/ +`(← (← getMainGoal).getDecl).lctx.decls.toList`). It returns `true` if they have the same +local declarations in the same order (up to defeq, without setting mvars), and `false` otherwise. + +Assumption: this function is run with one of the local contexts as the current `MetaM` local +context, and one of the two lists consists of the `LocalDecl`s of that context. -/ def lctxIsDefEq : (l₁ l₂ : List (Option LocalDecl)) → MetaM Bool + | none :: l₁, l₂ => lctxIsDefEq l₁ l₂ + | l₁, none :: l₂ => lctxIsDefEq l₁ l₂ | some d₁ :: l₁, some d₂ :: l₂ => do + unless d₁.isLet == d₂.isLet do + return false + unless d₁.fvarId == d₂.fvarId do + -- Without compatible fvarids, `isDefEq` checks for later entries will not make sense + return false unless (← withNewMCtxDepth <| isDefEq d₁.type d₂.type) do return false + if d₁.isLet then + unless (← withNewMCtxDepth <| isDefEq d₁.value d₂.value) do + return false lctxIsDefEq l₁ l₂ - | none :: l₁, none :: l₂ => lctxIsDefEq l₁ l₂ | [], [] => return true | _, _ => return false +termination_by _ l₁ l₂ => l₁.length + l₂.length /-- Run `tacs : TacticM Unit` on `goal`, and fail if no progress is made. -/ def runAndFailIfNoProgress (goal : MVarId) (tacs : TacticM Unit) : TacticM (List MVarId) := do let l ← run goal tacs try let [newGoal] := l | failure - guard <|← withNewMCtxDepth <| withReducible <| isDefEq (← newGoal.getType) (← goal.getType) - let ctxDecls := (← goal.getDecl).lctx.decls.toList - let newCtxDecls := (← newGoal.getDecl).lctx.decls.toList - guard <|← withNewMCtxDepth <| withReducible <| lctxIsDefEq ctxDecls newCtxDecls + goal.withContext do + -- Check that the local contexts are compatible + let ctxDecls := (← goal.getDecl).lctx.decls.toList + let newCtxDecls := (← newGoal.getDecl).lctx.decls.toList + guard <|← withNewMCtxDepth <| withReducible <| lctxIsDefEq ctxDecls newCtxDecls + -- They are compatible, so now we can check that the goals are equivalent + guard <|← withNewMCtxDepth <| withReducible <| isDefEq (← newGoal.getType) (← goal.getType) catch _ => return l - throwError "no progress made on {goal}" + throwError "no progress made on\n{goal}" elab_rules : tactic | `(tactic| fail_if_no_progress $tacs) => do diff --git a/Mathlib/Tactic/Linarith/Frontend.lean b/Mathlib/Tactic/Linarith/Frontend.lean index d12feba9190e3..04b01eadfbbe6 100644 --- a/Mathlib/Tactic/Linarith/Frontend.lean +++ b/Mathlib/Tactic/Linarith/Frontend.lean @@ -221,7 +221,7 @@ If `prefType` is given, it will first use the class of proofs of comparisons ove -- If it succeeds, the passed metavariable should have been assigned. 
def runLinarith (cfg : LinarithConfig) (prefType : Option Expr) (g : MVarId) (hyps : List Expr) : MetaM Unit := do - let singleProcess (g : MVarId) (hyps : List Expr) : MetaM Expr := do + let singleProcess (g : MVarId) (hyps : List Expr) : MetaM Expr := g.withContext do linarithTraceProofs s!"after preprocessing, linarith has {hyps.length} facts:" hyps let hyp_set ← partitionByType hyps trace[linarith] "hypotheses appear in {hyp_set.size} different types" @@ -230,10 +230,11 @@ def runLinarith (cfg : LinarithConfig) (prefType : Option Expr) (g : MVarId) proveFalseByLinarith cfg g vs <|> findLinarithContradiction cfg g ((hyp_set.eraseIdx i).toList.map (·.2)) else findLinarithContradiction cfg g (hyp_set.toList.map (·.2)) - let preprocessors := - (if cfg.splitHypotheses then [Linarith.splitConjunctions.globalize.branching] else []) ++ - cfg.preprocessors.getD defaultPreprocessors - let preprocessors := if cfg.splitNe then Linarith.removeNe::preprocessors else preprocessors + let mut preprocessors := cfg.preprocessors.getD defaultPreprocessors + if cfg.splitNe then + preprocessors := Linarith.removeNe :: preprocessors + if cfg.splitHypotheses then + preprocessors := Linarith.splitConjunctions.globalize.branching :: preprocessors let branches ← preprocess preprocessors g hyps for (g, es) in branches do let r ← singleProcess g es @@ -263,7 +264,7 @@ if it does not succeed at doing this. it will unfold semireducible definitions when trying to match atomic expressions. -/ partial def linarith (only_on : Bool) (hyps : List Expr) (cfg : LinarithConfig := {}) - (g : MVarId) : MetaM Unit := do + (g : MVarId) : MetaM Unit := g.withContext do -- if the target is an equality, we run `linarith` twice, to prove ≤ and ≥. if (← whnfR (← instantiateMVars (← g.getType))).isEq then trace[linarith] "target is an equality: splitting" @@ -389,18 +390,23 @@ syntax (name := nlinarith) "nlinarith" "!"? linarithArgsRest : tactic @[inherit_doc nlinarith] macro "nlinarith!" rest:linarithArgsRest : tactic => `(tactic| nlinarith ! $rest:linarithArgsRest) +/-- Elaborate `t` in a way that is suitable for linarith. -/ +def elabLinarithArg (tactic : Name) (t : Term) : TacticM Expr := Term.withoutErrToSorry do + let (e, mvars) ← elabTermWithHoles t none tactic + unless mvars.isEmpty do + throwErrorAt t "Argument passed to {tactic} has metavariables:{indentD e}" + return e + /-- Allow elaboration of `LinarithConfig` arguments to tactics. -/ declare_config_elab elabLinarithConfig Linarith.LinarithConfig elab_rules : tactic - | `(tactic| linarith $[!%$bang]? $[$cfg]? $[only%$o]? $[[$args,*]]?) => - withMainContext do commitIfNoEx do - liftMetaFinishingTactic <| - Linarith.linarith o.isSome - (← ((args.map (TSepArray.getElems)).getD {}).mapM (elabTerm ·.raw none)).toList - ((← elabLinarithConfig (mkOptionalNode cfg)).updateReducibility bang.isSome) + | `(tactic| linarith $[!%$bang]? $[$cfg]? $[only%$o]? $[[$args,*]]?) => withMainContext do + let args ← ((args.map (TSepArray.getElems)).getD {}).mapM (elabLinarithArg `linarith) + let cfg := (← elabLinarithConfig (mkOptionalNode cfg)).updateReducibility bang.isSome + commitIfNoEx do liftMetaFinishingTactic <| Linarith.linarith o.isSome args.toList cfg -- TODO restore this when `add_tactic_doc` is ported -- add_tactic_doc @@ -413,15 +419,12 @@ open Linarith elab_rules : tactic | `(tactic| nlinarith $[!%$bang]? $[$cfg]? $[only%$o]? $[[$args,*]]?) 
=> withMainContext do - let cfg ← elabLinarithConfig (mkOptionalNode cfg) - let cfg := - { cfg with + let args ← ((args.map (TSepArray.getElems)).getD {}).mapM (elabLinarithArg `nlinarith) + let cfg := (← elabLinarithConfig (mkOptionalNode cfg)).updateReducibility bang.isSome + let cfg := { cfg with preprocessors := some (cfg.preprocessors.getD defaultPreprocessors ++ [(nlinarithExtras : GlobalBranchingPreprocessor)]) } - liftMetaFinishingTactic <| - Linarith.linarith o.isSome - (← ((args.map (TSepArray.getElems)).getD {}).mapM (elabTerm ·.raw none)).toList - (cfg.updateReducibility bang.isSome) + commitIfNoEx do liftMetaFinishingTactic <| Linarith.linarith o.isSome args.toList cfg -- TODO restore this when `add_tactic_doc` is ported -- add_tactic_doc diff --git a/Mathlib/Tactic/Linarith/Preprocessing.lean b/Mathlib/Tactic/Linarith/Preprocessing.lean index 6deb920c81354..0f1be1888fdad 100644 --- a/Mathlib/Tactic/Linarith/Preprocessing.lean +++ b/Mathlib/Tactic/Linarith/Preprocessing.lean @@ -379,7 +379,7 @@ This produces `2^n` branches when there are `n` such hypotheses in the input. -/ partial def removeNe_aux : MVarId → List Expr → MetaM (List Branch) := fun g hs => do let some (e, α, a, b) ← hs.findSomeM? (fun e : Expr => do - let some (α, a, b) := (← inferType e).ne?' | return none + let some (α, a, b) := (← instantiateMVars (← inferType e)).ne?' | return none return some (e, α, a, b)) | return [(g, hs)] let [ng1, ng2] ← g.apply (← mkAppOptM ``Or.elim #[none, none, ← g.getType, ← mkAppOptM ``lt_or_gt_of_ne #[α, none, a, b, e]]) | failure @@ -416,7 +416,7 @@ Note that a preprocessor may produce multiple or no expressions from each input so the size of the list may change. -/ def preprocess (pps : List GlobalBranchingPreprocessor) (g : MVarId) (l : List Expr) : - MetaM (List Branch) := + MetaM (List Branch) := g.withContext <| pps.foldlM (fun ls pp => return (← ls.mapM fun (g, l) => do pp.process g l).join) [(g, l)] end Linarith diff --git a/Mathlib/Tactic/NormNum.lean b/Mathlib/Tactic/NormNum.lean index a849528599190..9f697e5a8180a 100644 --- a/Mathlib/Tactic/NormNum.lean +++ b/Mathlib/Tactic/NormNum.lean @@ -4,4 +4,5 @@ import Mathlib.Tactic.NormNum.Eq import Mathlib.Tactic.NormNum.Ineq import Mathlib.Tactic.NormNum.Pow import Mathlib.Tactic.NormNum.Inv +import Mathlib.Tactic.NormNum.DivMod import Mathlib.Data.Rat.Cast.Order diff --git a/Mathlib/Tactic/NormNum/Basic.lean b/Mathlib/Tactic/NormNum/Basic.lean index e50141c4cf4de..d2ed3b65d55e5 100644 --- a/Mathlib/Tactic/NormNum/Basic.lean +++ b/Mathlib/Tactic/NormNum/Basic.lean @@ -31,6 +31,8 @@ open Meta namespace Meta.NormNum open Qq +theorem IsInt.raw_refl (n : ℤ) : IsInt n n := ⟨rfl⟩ + /-! 
# Constructors and constants -/ theorem isNat_zero (α) [AddMonoidWithOne α] : IsNat (Zero.zero : α) (nat_lit 0) := @@ -72,8 +74,8 @@ theorem isNat_intOfNat : {n n' : ℕ} → IsNat n n' → IsNat (Int.ofNat n) n' @[norm_num Int.ofNat _] def evalIntOfNat : NormNumExt where eval {u α} e := do let .app (.const ``Int.ofNat _) (n : Q(ℕ)) ← whnfR e | failure haveI' : u =QL 0 := ⟨⟩; haveI' : $α =Q Int := ⟨⟩ - let sℕ : Q(AddMonoidWithOne ℕ) := q(AddCommMonoidWithOne.toAddMonoidWithOne) - let sℤ : Q(AddMonoidWithOne ℤ) := q(AddGroupWithOne.toAddMonoidWithOne) + let sℕ : Q(AddMonoidWithOne ℕ) := q(instAddMonoidWithOneNat) + let sℤ : Q(AddMonoidWithOne ℤ) := q(instAddMonoidWithOne) let ⟨n', p⟩ ← deriveNat n sℕ haveI' x : $e =Q Int.ofNat $n := ⟨⟩ return .isNat sℤ n' q(isNat_intOfNat $p) @@ -496,7 +498,7 @@ such that `norm_num` successfully recognises both `a` and `b`. -/ @[norm_num (_ : ℕ) - _, Sub.sub (_ : ℕ) _, Nat.sub _ _] def evalNatSub : NormNumExt where eval {u α} e := do let .app (.app f (a : Q(ℕ))) (b : Q(ℕ)) ← whnfR e | failure - -- We trust that the default instance for `HSub` is `Nat.sub` when the first parameter is `ℕ`. + -- We assert that the default instance for `HSub` is `Nat.sub` when the first parameter is `ℕ`. guard <|← withNewMCtxDepth <| isDefEq f q(HSub.hSub (α := ℕ)) haveI' : u =QL 0 := ⟨⟩; haveI' : $α =Q ℕ := ⟨⟩ haveI' : $e =Q $a - $b := ⟨⟩ @@ -517,7 +519,7 @@ such that `norm_num` successfully recognises both `a` and `b`. -/ let .app (.app f (a : Q(ℕ))) (b : Q(ℕ)) ← whnfR e | failure haveI' : u =QL 0 := ⟨⟩; haveI' : $α =Q ℕ := ⟨⟩ haveI' : $e =Q $a % $b := ⟨⟩ - -- We trust that the default instance for `HMod` is `Nat.mod` when the first parameter is `ℕ`. + -- We assert that the default instance for `HMod` is `Nat.mod` when the first parameter is `ℕ`. guard <|← withNewMCtxDepth <| isDefEq f q(HMod.hMod (α := ℕ)) let sℕ : Q(AddMonoidWithOne ℕ) := q(instAddMonoidWithOneNat) let ⟨na, pa⟩ ← deriveNat a sℕ; let ⟨nb, pb⟩ ← deriveNat b sℕ @@ -531,15 +533,40 @@ theorem isNat_natDiv : {a b : ℕ} → {a' b' c : ℕ} → /-- The `norm_num` extension which identifies expressions of the form `Nat.div a b`, such that `norm_num` successfully recognises both `a` and `b`. -/ -@[norm_num (_ : ℕ) / _, Div.div (_ : ℕ) _, Nat.div _ _] def evalNatDiv : - NormNumExt where eval {u α} e := do +@[norm_num (_ : ℕ) / _, Div.div (_ : ℕ) _, Nat.div _ _] +def evalNatDiv : NormNumExt where eval {u α} e := do let .app (.app f (a : Q(ℕ))) (b : Q(ℕ)) ← whnfR e | failure haveI' : u =QL 0 := ⟨⟩; haveI' : $α =Q ℕ := ⟨⟩ haveI' : $e =Q $a / $b := ⟨⟩ - -- We trust that the default instance for `HDiv` is `Nat.div` when the first parameter is `ℕ`. + -- We assert that the default instance for `HDiv` is `Nat.div` when the first parameter is `ℕ`. guard <|← withNewMCtxDepth <| isDefEq f q(HDiv.hDiv (α := ℕ)) let sℕ : Q(AddMonoidWithOne ℕ) := q(instAddMonoidWithOneNat) let ⟨na, pa⟩ ← deriveNat a sℕ; let ⟨nb, pb⟩ ← deriveNat b sℕ have nc : Q(ℕ) := mkRawNatLit (na.natLit! / nb.natLit!) 
haveI' : Nat.div $na $nb =Q $nc := ⟨⟩ return .isNat sℕ nc q(isNat_natDiv $pa $pb (.refl $nc)) + +theorem isNat_dvd_true : {a b : ℕ} → {a' b' : ℕ} → + IsNat a a' → IsNat b b' → Nat.mod b' a' = nat_lit 0 → a ∣ b + | _, _, _, _, ⟨rfl⟩, ⟨rfl⟩, e => Nat.dvd_of_mod_eq_zero e + +theorem isNat_dvd_false : {a b : ℕ} → {a' b' c : ℕ} → + IsNat a a' → IsNat b b' → Nat.mod b' a' = Nat.succ c → ¬a ∣ b + | _, _, _, _, c, ⟨rfl⟩, ⟨rfl⟩, e => mt Nat.mod_eq_zero_of_dvd (e.symm ▸ Nat.succ_ne_zero c :) + +/-- The `norm_num` extension which identifies expressions of the form `(a : ℕ) | b`, +such that `norm_num` successfully recognises both `a` and `b`. -/ +@[norm_num (_ : ℕ) ∣ _] def evalNatDvd : NormNumExt where eval {u α} e := do + let .app (.app f (a : Q(ℕ))) (b : Q(ℕ)) ← whnfR e | failure + -- We assert that the default instance for `Dvd` is `Nat.dvd` when the first parameter is `ℕ`. + guard <|← withNewMCtxDepth <| isDefEq f q(Dvd.dvd (α := ℕ)) + let sℕ : Q(AddMonoidWithOne ℕ) := q(instAddMonoidWithOneNat) + let ⟨na, pa⟩ ← deriveNat a sℕ; let ⟨nb, pb⟩ ← deriveNat b sℕ + match nb.natLit! % na.natLit! with + | 0 => + have : Q(Nat.mod $nb $na = nat_lit 0) := (q(Eq.refl (nat_lit 0)) : Expr) + return .isTrue q(isNat_dvd_true $pa $pb $this) + | c+1 => + have nc : Q(ℕ) := mkRawNatLit c + have : Q(Nat.mod $nb $na = Nat.succ $nc) := (q(Eq.refl (Nat.succ $nc)) : Expr) + return .isFalse q(isNat_dvd_false $pa $pb $this) diff --git a/Mathlib/Tactic/NormNum/DivMod.lean b/Mathlib/Tactic/NormNum/DivMod.lean new file mode 100644 index 0000000000000..a1ec060f31f45 --- /dev/null +++ b/Mathlib/Tactic/NormNum/DivMod.lean @@ -0,0 +1,175 @@ +/- +Copyright (c) 2023 Anne Baanen. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Anne Baanen, Mario Carneiro +-/ +import Mathlib.Tactic.NormNum.Basic +import Mathlib.Tactic.NormNum.Ineq + +/-! +# `norm_num` extension for integer div/mod and divides + +This file adds support for the `%`, `/`, and `∣` (divisibility) operators on `ℤ` +to the `norm_num` tactic. +-/ + +set_option autoImplicit true + +namespace Mathlib +open Lean hiding Rat mkRat +open Meta + +namespace Meta.NormNum +open Qq + +lemma isInt_ediv_zero : ∀ {a b r : ℤ}, IsInt a r → IsNat b (nat_lit 0) → IsNat (a / b) (nat_lit 0) + | _, _, _, ⟨rfl⟩, ⟨rfl⟩ => ⟨by simp [Int.ediv_zero]⟩ + +lemma isInt_ediv {a b q m a' : ℤ} {b' r : ℕ} + (ha : IsInt a a') (hb : IsNat b b') + (hm : q * b' = m) (h : r + m = a') (h₂ : Nat.blt r b' = true) : + IsInt (a / b) q := ⟨by + obtain ⟨⟨rfl⟩, ⟨rfl⟩⟩ := ha, hb + simp only [Nat.blt_eq] at h₂; simp only [← h, ← hm, Int.cast_id] + rw [Int.add_mul_ediv_right _ _ (Int.ofNat_ne_zero.2 ((Nat.zero_le ..).trans_lt h₂).ne')] + rw [Int.ediv_eq_zero_of_lt, zero_add] <;> [simp; simpa using h₂]⟩ + +lemma isInt_ediv_neg {a b q : ℤ} (h : IsInt (a / -b) q) (hq : -q = q') : IsInt (a / b) q' := + ⟨by rw [Int.cast_id, ← hq, ← @Int.cast_id q, ← h.out, ← Int.ediv_neg, Int.neg_neg]⟩ + +lemma isNat_neg_of_isNegNat {a : ℤ} {b : ℕ} (h : IsInt a (.negOfNat b)) : IsNat (-a) b := + ⟨by simp [h.out]⟩ + +/-- The `norm_num` extension which identifies expressions of the form `Int.ediv a b`, +such that `norm_num` successfully recognises both `a` and `b`. -/ +@[norm_num (_ : ℤ) / _, Div.div (_ : ℤ) _, Int.ediv _ _] +partial def evalIntDiv : NormNumExt where eval {u α} e := do + let .app (.app f (a : Q(ℤ))) (b : Q(ℤ)) ← whnfR e | failure + -- We assert that the default instance for `HDiv` is `Int.div` when the first parameter is `ℤ`. 
+ guard <|← withNewMCtxDepth <| isDefEq f q(HDiv.hDiv (α := ℤ)) + haveI' : u =QL 0 := ⟨⟩; haveI' : $α =Q ℤ := ⟨⟩ + haveI' : $e =Q ($a / $b) := ⟨⟩ + let rℤ : Q(Ring ℤ) := q(Int.instRingInt) + let ⟨za, na, pa⟩ ← (← derive a).toInt rℤ + match ← derive (u := .zero) b with + | .isNat inst nb pb => + assumeInstancesCommute + if nb.natLit! == 0 then + have _ : $nb =Q nat_lit 0 := ⟨⟩ + return .isNat q(instAddMonoidWithOne) q(nat_lit 0) q(isInt_ediv_zero $pa $pb) + else + let ⟨zq, q, p⟩ := core a na za pa b nb pb + return .isInt rℤ q zq p + | .isNegNat _ nb pb => + assumeInstancesCommute + let ⟨zq, q, p⟩ := core a na za pa q(-$b) nb q(isNat_neg_of_isNegNat $pb) + have q' := mkRawIntLit (-zq) + have : Q(-$q = $q') := (q(Eq.refl $q') :) + return .isInt rℤ q' (-zq) q(isInt_ediv_neg $p $this) + | _ => failure +where + /-- Given a result for evaluating `a b` in `ℤ` where `b > 0`, evaluate `a / b`. -/ + core (a na : Q(ℤ)) (za : ℤ) (pa : Q(IsInt $a $na)) + (b : Q(ℤ)) (nb : Q(ℕ)) (pb : Q(IsNat $b $nb)) : + ℤ × (q : Q(ℤ)) × Q(IsInt ($a / $b) $q) := + let b := nb.natLit! + let q := za / b + have nq := mkRawIntLit q + let r := za.natMod b + have nr : Q(ℕ) := mkRawNatLit r + let m := q * b + have nm := mkRawIntLit m + have pf₁ : Q($nq * $nb = $nm) := (q(Eq.refl $nm) :) + have pf₂ : Q($nr + $nm = $na) := (q(Eq.refl $na) :) + have pf₃ : Q(Nat.blt $nr $nb = true) := (q(Eq.refl true) :) + ⟨q, nq, q(isInt_ediv $pa $pb $pf₁ $pf₂ $pf₃)⟩ + +lemma isInt_emod_zero : ∀ {a b r : ℤ}, IsInt a r → IsNat b (nat_lit 0) → IsInt (a % b) r + | _, _, _, e, ⟨rfl⟩ => by simp [e] + +lemma isInt_emod {a b q m a' : ℤ} {b' r : ℕ} + (ha : IsInt a a') (hb : IsNat b b') + (hm : q * b' = m) (h : r + m = a') (h₂ : Nat.blt r b' = true) : + IsNat (a % b) r := ⟨by + obtain ⟨⟨rfl⟩, ⟨rfl⟩⟩ := ha, hb + simp only [← h, ← hm, Int.add_mul_emod_self] + rw [Int.emod_eq_of_lt] <;> [simp; simpa using h₂]⟩ + +lemma isInt_emod_neg {a b : ℤ} {r : ℕ} (h : IsNat (a % -b) r) : IsNat (a % b) r := + ⟨by rw [← Int.emod_neg, h.out]⟩ + +/-- The `norm_num` extension which identifies expressions of the form `Int.emod a b`, +such that `norm_num` successfully recognises both `a` and `b`. -/ +@[norm_num (_ : ℤ) % _, Mod.mod (_ : ℤ) _, Int.emod _ _] +partial def evalIntMod : NormNumExt where eval {u α} e := do + let .app (.app f (a : Q(ℤ))) (b : Q(ℤ)) ← whnfR e | failure + -- We assert that the default instance for `HMod` is `Int.mod` when the first parameter is `ℤ`. + guard <|← withNewMCtxDepth <| isDefEq f q(HMod.hMod (α := ℤ)) + haveI' : u =QL 0 := ⟨⟩; haveI' : $α =Q ℤ := ⟨⟩ + haveI' : $e =Q ($a % $b) := ⟨⟩ + let rℤ : Q(Ring ℤ) := q(Int.instRingInt) + let some ⟨za, na, pa⟩ := (← derive a).toInt rℤ | failure + go a na za pa b (← derive (u := .zero) b) +where + /-- Given a result for evaluating `a b` in `ℤ`, evaluate `a % b`. -/ + go (a na : Q(ℤ)) (za : ℤ) (pa : Q(IsInt $a $na)) + (b : Q(ℤ)) : Result b → Option (Result q($a % $b)) + | .isNat inst nb pb => do + assumeInstancesCommute + if nb.natLit! == 0 then + have _ : $nb =Q nat_lit 0 := ⟨⟩ + return .isInt q(Int.instRingInt) na za q(isInt_emod_zero $pa $pb) + else + let ⟨r, p⟩ := core a na za pa b nb pb + return .isNat q(instAddMonoidWithOne) r p + | .isNegNat _ nb pb => do + assumeInstancesCommute + let ⟨r, p⟩ := core a na za pa q(-$b) nb q(isNat_neg_of_isNegNat $pb) + return .isNat q(instAddMonoidWithOne) r q(isInt_emod_neg $p) + | _ => none + + /-- Given a result for evaluating `a b` in `ℤ` where `b > 0`, evaluate `a % b`. 
-/ + core (a na : Q(ℤ)) (za : ℤ) (pa : Q(IsInt $a $na)) + (b : Q(ℤ)) (nb : Q(ℕ)) (pb : Q(IsNat $b $nb)) : + (r : Q(ℕ)) × Q(IsNat ($a % $b) $r) := + let b := nb.natLit! + let q := za / b + have nq := mkRawIntLit q + let r := za.natMod b + have nr : Q(ℕ) := mkRawNatLit r + let m := q * b + have nm := mkRawIntLit m + have pf₁ : Q($nq * $nb = $nm) := (q(Eq.refl $nm) :) + have pf₂ : Q($nr + $nm = $na) := (q(Eq.refl $na) :) + have pf₃ : Q(Nat.blt $nr $nb = true) := (q(Eq.refl true) :) + ⟨nr, q(isInt_emod $pa $pb $pf₁ $pf₂ $pf₃)⟩ + +theorem isInt_dvd_true : {a b : ℤ} → {a' b' c : ℤ} → + IsInt a a' → IsInt b b' → Int.mul a' c = b' → a ∣ b + | _, _, _, _, _, ⟨rfl⟩, ⟨rfl⟩, rfl => ⟨_, rfl⟩ + +theorem isInt_dvd_false : {a b : ℤ} → {a' b' : ℤ} → + IsInt a a' → IsInt b b' → Int.mod b' a' != 0 → ¬a ∣ b + | _, _, _, _, ⟨rfl⟩, ⟨rfl⟩, e => mt Int.mod_eq_zero_of_dvd (by simpa using e) + +/-- The `norm_num` extension which identifies expressions of the form `(a : ℤ) ∣ b`, +such that `norm_num` successfully recognises both `a` and `b`. -/ +@[norm_num (_ : ℤ) ∣ _] def evalIntDvd : NormNumExt where eval {u α} e := do + let .app (.app f (a : Q(ℤ))) (b : Q(ℤ)) ← whnfR e | failure + haveI' : u =QL 0 := ⟨⟩; haveI' : $α =Q Prop := ⟨⟩ + haveI' : $e =Q ($a ∣ $b) := ⟨⟩ + -- We assert that the default instance for `Dvd` is `Int.dvd` when the first parameter is `ℕ`. + guard <|← withNewMCtxDepth <| isDefEq f q(Dvd.dvd (α := ℤ)) + let rℤ : Q(Ring ℤ) := q(Int.instRingInt) + let ⟨za, na, pa⟩ ← (← derive a).toInt rℤ + let ⟨zb, nb, pb⟩ ← (← derive b).toInt rℤ + if zb % za == 0 then + let zc := zb / za + have c := mkRawIntLit zc + haveI' : Int.mul $na $c =Q $nb := ⟨⟩ + return .isTrue q(isInt_dvd_true $pa $pb (.refl $nb)) + else + have : Q(Int.mod $nb $na != 0) := (q(Eq.refl true) : Expr) + return .isFalse q(isInt_dvd_false $pa $pb $this) + +end Mathlib.Meta.NormNum diff --git a/Mathlib/Tactic/NormNum/Mod.lean b/Mathlib/Tactic/NormNum/Mod.lean deleted file mode 100644 index fb6c412483481..0000000000000 --- a/Mathlib/Tactic/NormNum/Mod.lean +++ /dev/null @@ -1,82 +0,0 @@ -/- -Copyright (c) 2023 Anne Baanen. All rights reserved. -Released under Apache 2.0 license as described in the file LICENSE. -Authors: Anne Baanen --/ -import Mathlib.Tactic.NormNum.Basic -import Mathlib.Tactic.NormNum.Ineq - -/-! -# `norm_num` extension for modulo operator - -This file adds support for the `· % ·` operator to the `norm_num` tactic. --/ - -set_option autoImplicit true - -namespace Mathlib -open Lean hiding Rat mkRat -open Meta - -namespace Meta.NormNum -open Qq - -lemma isInt_emod {a b q r m : ℤ} (hm : q * b = m) (h : r + m = a) (h₁ : 0 ≤ r) (h₂ : r < b) : - IsInt (a % b) r := - ⟨by rw [← h, ← hm, Int.add_mul_emod_self, Int.emod_eq_of_lt h₁ h₂, Int.cast_id]⟩ - -lemma isInt_emod_neg {a b c : ℤ} (h : IsInt (a % -b) c) : IsInt (a % b) c := - ⟨(Int.emod_neg _ _).symm.trans h.out⟩ - -lemma isInt_refl (a : ℤ) : IsInt a a := - ⟨rfl⟩ - -lemma isNat_neg_of_isNegNat {a : ℤ} {b : ℕ} (h : IsInt a (- b)) : - IsNat (-a) b := - ⟨by simp [h.out]⟩ - -/-- Given expressions `a b` in `ℤ`, evaluate `a % b`. - -`a` and `b` should be numeric expressions recognized by `norm_num`. --/ -partial def evalIntMod.go (a b : Q(ℤ)) : MetaM (NormNum.Result q($a % $b)) := do - let ra ← derive (u := Level.zero) a - let rb ← derive (u := Level.zero) b - let rec /-- Given a result for evaluating `a b` in `ℤ`, evaluate `a % b`. 
-/ - core : (b : Q(ℤ)) → Result b → MetaM (Result (u := Level.zero) (α := q(ℤ)) q($a % $b)) := - fun b rb => match rb with - | (.isNegNat inst nb pb) => do - have : $inst =Q Int.instRingInt := ⟨⟩ - let pf ← core q(- $b) - (.isNat q(AddGroupWithOne.toAddMonoidWithOne) nb q(isNat_neg_of_isNegNat $pb)) - return pf.eqTrans q((Int.emod_neg _ _).symm) - | (.isNat inst nb pb) => do - have : $inst =Q AddGroupWithOne.toAddMonoidWithOne := ⟨⟩ - let ⟨a, na, pa⟩ ← @Result.toInt _ _ _ q(OrderedRing.toRing) ra - let q := a / nb.natLit! - have nq := mkRawIntLit q - let r := a % nb.natLit! - have nr := mkRawIntLit r - let m := q * nb.natLit! - have nm := mkRawIntLit m - have pf₁ : Q($nq * $nb = $nm) := (q(Eq.refl $nm) :) - have pf₂ : Q($nr + $nm = $na) := (q(Eq.refl $na) :) - have pf₃ : Q(decide (0 ≤ $nr) = true) := (q(Eq.refl true) :) - have pf₄ : Q(decide ($nr < $nb) = true) := (q(Eq.refl true) :) - return (.isInt q(OrderedRing.toRing) nr r - q(isInt_emod (($pb).out.symm ▸ (id $pf₁)) (Eq.trans (id $pf₂) ($pa).out.symm) - (isInt_le_true (isInt_refl 0) (isInt_refl $nr) $pf₃) - (isInt_lt_true (α := ℤ) (isInt_refl $nr) (IsNat.to_isInt $pb) $pf₄))) - | _ => failure - core b rb - -/-- The `norm_num` extension which identifies expressions of the form `Int.emod a b`, -such that `norm_num` successfully recognises both `a` and `b`. -/ -@[norm_num (_ : ℤ) % _, Mod.mod (_ : ℤ) _, Int.emod _ _] partial def evalIntMod : - NormNumExt where eval {u α} e := do - let .app (.app f (a : Q(ℤ))) (b : Q(ℤ)) ← whnfR e | failure - -- We trust that the default instance for `HMod` is `Int.mod` when the first parameter is `ℤ`. - guard <|← withNewMCtxDepth <| isDefEq f q(HMod.hMod (α := ℤ)) - evalIntMod.go a b - -end Mathlib.Meta.NormNum diff --git a/Mathlib/Tactic/NormNum/NatFib.lean b/Mathlib/Tactic/NormNum/NatFib.lean index 55e79c6cd445f..b3ac88c7b46cb 100644 --- a/Mathlib/Tactic/NormNum/NatFib.lean +++ b/Mathlib/Tactic/NormNum/NatFib.lean @@ -3,7 +3,7 @@ Copyright (c) 2023 Kyle Miller. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Kyle Miller, Mario Carneiro -/ -import Mathlib.Data.Nat.Fib +import Mathlib.Data.Nat.Fib.Basic import Mathlib.Tactic.NormNum /-! # `norm_num` extension for `Nat.fib` diff --git a/Mathlib/Tactic/NormNum/Result.lean b/Mathlib/Tactic/NormNum/Result.lean index 7dec5a2d0218f..03036c0c5f485 100644 --- a/Mathlib/Tactic/NormNum/Result.lean +++ b/Mathlib/Tactic/NormNum/Result.lean @@ -456,7 +456,7 @@ def Result.eqTrans {α : Q(Type u)} {a b : Q($α)} (eq : Q($a = $b)) : Result b have b : Q(Prop) := b have eq : Q($a = $b) := eq have proof : Q(¬ $b) := proof - Result.isFalse (x := a) q($eq ▸ $proof) + Result.isFalse (x := a) q($eq ▸ $proof) | .isNat inst lit proof => Result.isNat inst lit q($eq ▸ $proof) | .isNegNat inst lit proof => Result.isNegNat inst lit q($eq ▸ $proof) | .isRat inst q n d proof => Result.isRat inst q n d q($eq ▸ $proof) diff --git a/Mathlib/Tactic/ProdAssoc.lean b/Mathlib/Tactic/ProdAssoc.lean new file mode 100644 index 0000000000000..ced3c7610defb --- /dev/null +++ b/Mathlib/Tactic/ProdAssoc.lean @@ -0,0 +1,151 @@ +/- +Copyright (c) 2023 Adam Topaz. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Adam Topaz +-/ +import Mathlib.Lean.Expr.Basic +import Mathlib.Logic.Equiv.Basic + +/-! +# Associativity of products + +This file constructs a term elaborator for "obvious" equivalences between iterated products. 
+For example, +```lean +(prod_assoc% : (α × β) × (γ × δ) ≃ α × (β × γ) × δ) +``` +gives the "obvious" equivalence between `(α × β) × (γ × δ)` and `α × (β × γ) × δ`. +-/ + +namespace Lean.Expr + +open Lean Meta + +/-- A helper type to keep track of universe levels and types in iterated products. -/ +inductive ProdTree where + | type (tp : Expr) (l : Level) + | prod (fst snd : ProdTree) (lfst lsnd : Level) +deriving Repr + +/-- The iterated product corresponding to a `ProdTree`. -/ +def ProdTree.getType : ProdTree → Expr + | type tp _ => tp + | prod fst snd u v => mkAppN (.const ``Prod [u,v]) #[fst.getType, snd.getType] + +/-- The number of types appearing in an iterated product encoded as a `ProdTree`. -/ +def ProdTree.size : ProdTree → Nat + | type _ _ => 1 + | prod fst snd _ _ => fst.size + snd.size + +/-- The components of an iterated product, presented as a `ProdTree`. -/ +def ProdTree.components : ProdTree → List Expr + | type tp _ => [tp] + | prod fst snd _ _ => fst.components ++ snd.components + +/-- Make a `ProdTree` out of an `Expr`. -/ +partial def mkProdTree (e : Expr) : MetaM ProdTree := + match e.consumeMData with + | .app (.app (.const ``Prod [u,v]) X) Y => do + return .prod (← X.mkProdTree) (← Y.mkProdTree) u v + | X => do + let some u := (← whnfD <| ← inferType X).type? | throwError "Not a type{indentExpr X}" + return .type X u + +/-- Given `P : ProdTree` representing an iterated product and `e : Expr` which +should correspond to a term of the iterated product, this will return +a list whose items correspond to the leaves of `P` (i.e. the types appearing in the product), +where each item is the appropriate composition of `Prod.fst` and `Prod.snd` applied to `e` +resulting in an element of the type corresponding to the leaf. + +For example, if `P` corresponds to `(X × Y) × Z` and `t : (X × Y) × Z`, then this +should return `[t.fst.fst, t.fst.snd, t.snd]`. +-/ +def ProdTree.unpack (t : Expr) : ProdTree → MetaM (List Expr) + | type _ _ => return [t] + | prod fst snd u v => do + let fst' ← fst.unpack <| mkAppN (.const ``Prod.fst [u,v]) #[fst.getType, snd.getType, t] + let snd' ← snd.unpack <| mkAppN (.const ``Prod.snd [u,v]) #[fst.getType, snd.getType, t] + return fst' ++ snd' + +/-- This function should act as the "reverse" of `ProdTree.unpack`, constructing +a term of the iterated product out of a list of terms of the types appearing in the product. -/ +def ProdTree.pack (ts : List Expr) : ProdTree → MetaM Expr + | type _ _ => do + match ts with + | [] => throwError "Can't pack the empty list." + | [a] => return a + | _ => throwError "Failed due to size mismatch." + | prod fst snd u v => do + let fstSize := fst.size + let sndSize := snd.size + unless ts.length == fstSize + sndSize do throwError "Failed due to size mismatch." + let tsfst := ts.toArray[:fstSize] |>.toArray.toList + let tssnd := ts.toArray[fstSize:] |>.toArray.toList + let mk : Expr := mkAppN (.const ``Prod.mk [u,v]) #[fst.getType, snd.getType] + return .app (.app mk (← fst.pack tsfst)) (← snd.pack tssnd) + +/-- Converts a term `e` in an iterated product `P1` into a term of an iterated product `P2`. +Here `e` is an `Expr` representing the term, and the iterated products are represented +by terms of `ProdTree`. -/ +def ProdTree.convertTo (P1 P2 : ProdTree) (e : Expr) : MetaM Expr := + return ← P2.pack <| ← P1.unpack e + +/-- Given two expressions corresponding to iterated products of the same types, associated in +possibly different ways, this constructs the "obvious" function from one to the other. 
-/ +def mkProdFun (a b : Expr) : MetaM Expr := do + let pa ← a.mkProdTree + let pb ← b.mkProdTree + unless pa.components.length == pb.components.length do + throwError "The number of components in{indentD a}\nand{indentD b}\nmust match." + for (x,y) in pa.components.zip pb.components do + unless ← isDefEq x y do + throwError "Component{indentD x}\nis not definitionally equal to component{indentD y}." + withLocalDeclD `t a fun fvar => do + mkLambdaFVars #[fvar] (← pa.convertTo pb fvar) + +/-- Construct the equivalence between iterated products of the same type, associated +in possibly different ways. -/ +def mkProdEquiv (a b : Expr) : MetaM Expr := do + let some u := (← whnfD <| ← inferType a).type? | throwError "Not a type{indentExpr a}" + let some v := (← whnfD <| ← inferType b).type? | throwError "Not a type{indentExpr b}" + return mkAppN (.const ``Equiv.mk [.succ u,.succ v]) + #[a, b, ← mkProdFun a b, ← mkProdFun b a, + .app (.const ``rfl [.succ u]) a, + .app (.const ``rfl [.succ v]) b] + +/-- IMPLEMENTATION: Syntax used in the implementation of `prod_assoc%`. +This elaborator postpones if there are metavariables in the expected type, +and to propagate the fact that this elaborator produces an `Equiv`, +the `prod_assoc%` macro sets things up with a type ascription. +This enables using `prod_assoc%` with, for example `Equiv.trans` dot notation. -/ +syntax (name := prodAssocStx) "prod_assoc_internal%" : term + +open Elab Term in +/-- Elaborator for `prod_assoc%`. -/ +@[term_elab prodAssocStx] +def elabProdAssoc : TermElab := fun stx expectedType? => do + match stx with + | `(prod_assoc_internal%) => do + let some expectedType ← tryPostponeIfHasMVars? expectedType? + | throwError "expected type must be known" + let .app (.app (.const ``Equiv _) a) b := expectedType + | throwError "Expected type{indentD expectedType}\nis not of the form `α ≃ β`." + mkProdEquiv a b + | _ => throwUnsupportedSyntax + +/-- +`prod_assoc%` elaborates to the "obvious" equivalence between iterated products of types, +regardless of how the products are parenthesized. +The `prod_assoc%` term uses the expected type when elaborating. +For example, `(prod_assoc% : (α × β) × (γ × δ) ≃ α × (β × γ) × δ)`. + +The elaborator can handle holes in the expected type, +so long as they eventually get filled by unification. +```lean +example : (α × β) × (γ × δ) ≃ α × (β × γ) × δ := + (prod_assoc% : _ ≃ α × β × γ × δ).trans prod_assoc% +``` +-/ +macro "prod_assoc%" : term => `((prod_assoc_internal% : _ ≃ _)) + +end Lean.Expr diff --git a/Mathlib/Tactic/ReduceModChar.lean b/Mathlib/Tactic/ReduceModChar.lean index 2c3dbb279a02f..1e6953414c3be 100644 --- a/Mathlib/Tactic/ReduceModChar.lean +++ b/Mathlib/Tactic/ReduceModChar.lean @@ -5,7 +5,7 @@ Authors: Anne Baanen -/ import Mathlib.Data.ZMod.Basic import Mathlib.RingTheory.Polynomial.Basic -import Mathlib.Tactic.NormNum.Mod +import Mathlib.Tactic.NormNum.DivMod import Mathlib.Tactic.ReduceModChar.Ext /-! 
@@ -52,20 +52,20 @@ lemma CharP.cast_int_eq_mod (R : Type _) [Ring R] (p : ℕ) [CharP R p] (k : ℤ (k : R) = ↑(k % p + p * (k / p)) := by rw [Int.emod_add_ediv] _ = ↑(k % p) := by simp [CharP.cast_eq_zero R] -lemma CharP.isInt_of_mod {α : Type _} [Ring α] (n : ℕ) (inst : CharP α n) {e : α} - (h₁ : IsInt e e') (h₂ : IsInt (e' % n) r) : - IsInt e r := - ⟨by rw [h₁.out, CharP.cast_int_eq_mod α n, h₂.out, Int.cast_id]⟩ +lemma CharP.isInt_of_mod {α : Type _} [Ring α] {n n' : ℕ} (inst : CharP α n) {e : α} + (he : IsInt e e') (hn : IsNat n n') (h₂ : IsInt (e' % n') r) : IsInt e r := + ⟨by rw [he.out, CharP.cast_int_eq_mod α n, show n = n' from hn.out, h₂.out, Int.cast_id]⟩ /-- Given an integral expression `e : t` such that `t` is a ring of characteristic `n`, reduce `e` modulo `n`. -/ partial def normIntNumeral {α : Q(Type u)} (n : Q(ℕ)) (e : Q($α)) (instRing : Q(Ring $α)) - (instCharP : Q(CharP $α $n)) : - MetaM (Mathlib.Meta.NormNum.Result e) := do - let instRingInt := q(Int.instRingInt) - let ⟨_, ne, pe⟩ ← Result.toInt instRing (← Mathlib.Meta.NormNum.derive e) - let ⟨r, nr, pr⟩ ← Result.toInt instRingInt (←evalIntMod.go ne q(($n : ℤ))) - return .isInt instRing nr r q(CharP.isInt_of_mod $n $instCharP $pe $pr) + (instCharP : Q(CharP $α $n)) : MetaM (Result e) := do + let ⟨ze, ne, pe⟩ ← Result.toInt instRing (← Mathlib.Meta.NormNum.derive e) + let ⟨n', pn⟩ ← deriveNat n q(instAddMonoidWithOneNat) + let rr ← evalIntMod.go _ _ ze q(IsInt.raw_refl $ne) _ <| + .isNat q(instAddMonoidWithOne) _ q(isNat_cast _ _ (IsNat.raw_refl $n')) + let ⟨zr, nr, pr⟩ ← rr.toInt q(Int.instRingInt) + return .isInt instRing nr zr q(CharP.isInt_of_mod $instCharP $pe $pn $pr) lemma CharP.neg_eq_sub_one_mul {α : Type _} [Ring α] (n : ℕ) (inst : CharP α n) (b : α) (a : ℕ) (a' : α) (p : IsNat (n - 1 : α) a) (pa : a = a') : @@ -88,7 +88,7 @@ partial def normNeg {α : Q(Type u)} (n : Q(ℕ)) (e : Q($α)) (_instRing : Q(Ri let r ← (derive (α := α) q($n - 1)) match r with | .isNat sα a p => do - have : AddGroupWithOne.toAddMonoidWithOne =Q $sα := ⟨⟩ + have : instAddMonoidWithOne =Q $sα := ⟨⟩ let ⟨a', pa'⟩ ← mkOfNat α sα a let pf : Q(-$b = $a' * $b) := q(CharP.neg_eq_sub_one_mul $n $instCharP $b $a $a' $p $pa') return { expr := q($a' * $b), proof? := pf } @@ -136,10 +136,10 @@ This should be fast, so this pattern-matches on the type, rather than searching `CharP` instance. -/ partial def typeToCharP (t : Q(Type u)) : TypeToCharPResult t := match Expr.getAppFnArgs t with -| (`ZMod, #[(n : Q(ℕ))]) => .intLike n +| (``ZMod, #[(n : Q(ℕ))]) => .intLike n (q((ZMod.commRing _).toRing) : Q(Ring (ZMod $n))) (q(ZMod.charP _) : Q(CharP (ZMod $n) $n)) -| (`Polynomial, #[(R : Q(Type u)), _]) => match typeToCharP R with +| (``Polynomial, #[(R : Q(Type u)), _]) => match typeToCharP R with | (.intLike n _ _) => .intLike n (q(Polynomial.ring) : Q(Ring (Polynomial $R))) (q(Polynomial.instCharP _) : Q(CharP (Polynomial $R) $n)) diff --git a/Mathlib/Tactic/SimpRw.lean b/Mathlib/Tactic/SimpRw.lean index 5c5c92d124d7e..e64106927c561 100644 --- a/Mathlib/Tactic/SimpRw.lean +++ b/Mathlib/Tactic/SimpRw.lean @@ -53,12 +53,20 @@ example {a : ℕ} by simp_rw [h1, h2] ``` -/ -elab s:"simp_rw " rws:rwRuleSeq g:(location)? : tactic => do - evalTactic (← `(tactic| simp%$s (config := { failIfUnchanged := false }) only $g ?)) +elab s:"simp_rw " cfg:(config)? rws:rwRuleSeq g:(location)? 
: tactic => do + let cfg' : TSyntax `Lean.Parser.Tactic.config ← do + match cfg with + | Option.none => + `(config| (config := ({ failIfUnchanged := false } : Lean.Meta.Simp.Config))) + | Option.some c => match c with + | `(config| (config := $cfg)) => + `(config| (config := ({ ($cfg : Lean.Meta.Simp.Config) with failIfUnchanged := false }))) + | _ => throwError "malformed cfg" + evalTactic (← `(tactic| simp%$s $cfg' only $g ?)) withSimpRWRulesSeq s rws fun symm term => do evalTactic (← match term with | `(term| $e:term) => if symm then - `(tactic| simp%$e only [←$e:term] $g ?) + `(tactic| simp%$e $[$cfg]? only [←$e:term] $g ?) else - `(tactic| simp%$e only [$e:term] $g ?)) + `(tactic| simp%$e $[$cfg]? only [$e:term] $g ?)) diff --git a/Mathlib/Topology/Algebra/InfiniteSum/Basic.lean b/Mathlib/Topology/Algebra/InfiniteSum/Basic.lean index c10aaa08e3332..dc430ad6c00d9 100644 --- a/Mathlib/Topology/Algebra/InfiniteSum/Basic.lean +++ b/Mathlib/Topology/Algebra/InfiniteSum/Basic.lean @@ -647,21 +647,14 @@ theorem tsum_range {g : γ → β} (f : β → α) (hg : Injective g) : simp_rw [← comp_apply (g := g), tsum_univ (f ∘ g)] #align tsum_range tsum_range -open Set in /-- If `f b = 0` for all `b ∈ t`, then the sum over `f a` with `a ∈ s` is the same as the sum over `f a` with `a ∈ s ∖ t`. -/ lemma tsum_setElem_eq_tsum_setElem_diff [T2Space α] {f : β → α} (s t : Set β) (hf₀ : ∀ b ∈ t, f b = 0) : - ∑' a : s, f a = ∑' a : (s \ t : Set β), f a := by - simp_rw [_root_.tsum_subtype] - refine tsum_congr fun b' ↦ ?_ - by_cases hs : b' ∈ s \ t - · rw [indicator_of_mem hs f, indicator_of_mem (mem_of_mem_diff hs) f] - · rw [indicator_of_not_mem hs f] - rw [mem_diff, not_and, not_not_mem] at hs - by_cases h₁ : b' ∈ s - · simpa [indicator_of_mem h₁] using hf₀ b' <| hs h₁ - · exact indicator_of_not_mem h₁ f + ∑' a : s, f a = ∑' a : (s \ t : Set β), f a := + tsum_eq_tsum_of_hasSum_iff_hasSum fun {a} ↦ Iff.symm <| + (Set.inclusion_injective <| s.diff_subset t).hasSum_iff + (f := fun b : s ↦ f b) fun b hb ↦ hf₀ b <| by simpa using hb /-- If `f b = 0`, then the sum over `f a` with `a ∈ s` is the same as the sum over `f a` for `a ∈ s ∖ {b}`. -/ diff --git a/Mathlib/Topology/Category/Profinite/Basic.lean b/Mathlib/Topology/Category/Profinite/Basic.lean index 54fe80adc4765..c45526e5e2799 100644 --- a/Mathlib/Topology/Category/Profinite/Basic.lean +++ b/Mathlib/Topology/Category/Profinite/Basic.lean @@ -50,7 +50,7 @@ structure Profinite where /-- The underlying compact Hausdorff space of a profinite space. -/ toCompHaus : CompHaus /-- A profinite space is totally disconnected. -/ - [IsTotallyDisconnected : TotallyDisconnectedSpace toCompHaus] + [isTotallyDisconnected : TotallyDisconnectedSpace toCompHaus] #align Profinite Profinite namespace Profinite @@ -88,7 +88,7 @@ lemma forget_ContinuousMap_mk {X Y : Profinite} (f : X → Y) (hf : Continuous f rfl instance {X : Profinite} : TotallyDisconnectedSpace X := - X.IsTotallyDisconnected + X.isTotallyDisconnected -- We check that we automatically infer that Profinite sets are compact and Hausdorff. example {X : Profinite} : CompactSpace X := @@ -151,7 +151,7 @@ show Faithful <| inducedFunctor _ from inferInstance -- Porting note: added, as it is not found otherwise. instance {X : Profinite} : TotallyDisconnectedSpace (profiniteToCompHaus.obj X) := - X.IsTotallyDisconnected + X.isTotallyDisconnected /-- The fully faithful embedding of `Profinite` in `TopCat`. This is definitionally the same as the obvious composite. 
-/ @@ -188,7 +188,7 @@ def CompHaus.toProfiniteObj (X : CompHaus.{u}) : Profinite.{u} where { toTop := TopCat.of (ConnectedComponents X) is_compact := Quotient.compactSpace is_hausdorff := ConnectedComponents.t2 } - IsTotallyDisconnected := ConnectedComponents.totallyDisconnectedSpace + isTotallyDisconnected := ConnectedComponents.totallyDisconnectedSpace #align CompHaus.to_Profinite_obj CompHaus.toProfiniteObj /-- (Implementation) The bijection of homsets to establish the reflective adjunction of Profinite @@ -251,7 +251,7 @@ namespace Profinite def limitCone {J : Type u} [SmallCategory J] (F : J ⥤ Profinite.{u}) : Limits.Cone F where pt := { toCompHaus := (CompHaus.limitCone.{u, u} (F ⋙ profiniteToCompHaus)).pt - IsTotallyDisconnected := by + isTotallyDisconnected := by change TotallyDisconnectedSpace ({ u : ∀ j : J, F.obj j | _ } : Type _) exact Subtype.totallyDisconnectedSpace } π := diff --git a/Mathlib/Topology/Category/Profinite/EffectiveEpi.lean b/Mathlib/Topology/Category/Profinite/EffectiveEpi.lean index f5dbb4bc4deba..edb0d76c83473 100644 --- a/Mathlib/Topology/Category/Profinite/EffectiveEpi.lean +++ b/Mathlib/Topology/Category/Profinite/EffectiveEpi.lean @@ -122,7 +122,7 @@ def ιIso' : (QB' π) ≅ B.toCompHaus := /-- Implementation: The quotient of `relation π`, considered as an object of `Profinite`. -/ def QB : Profinite where toCompHaus := QB' π - IsTotallyDisconnected := ⟨(CompHaus.homeoOfIso (ιIso' π surj)).embedding.isTotallyDisconnected + isTotallyDisconnected := ⟨(CompHaus.homeoOfIso (ιIso' π surj)).embedding.isTotallyDisconnected (isTotallyDisconnected_of_totallyDisconnectedSpace _)⟩ /-- Implementation: The function `ιFun`, considered as a morphism in `Profinite`. -/ diff --git a/Mathlib/Topology/Category/Stonean/Basic.lean b/Mathlib/Topology/Category/Stonean/Basic.lean index 773840ef269d8..40bdb9669278d 100644 --- a/Mathlib/Topology/Category/Stonean/Basic.lean +++ b/Mathlib/Topology/Category/Stonean/Basic.lean @@ -127,7 +127,7 @@ instance (X : Stonean.{u}) : ExtremallyDisconnected X := def toProfinite : Stonean.{u} ⥤ Profinite.{u} where obj X := { toCompHaus := X.compHaus, - IsTotallyDisconnected := show TotallyDisconnectedSpace X from inferInstance } + isTotallyDisconnected := show TotallyDisconnectedSpace X from inferInstance } map f := f /-- The functor from Stonean spaces to profinite spaces is full. -/ diff --git a/Mathlib/Topology/Category/TopCat/Limits/Products.lean b/Mathlib/Topology/Category/TopCat/Limits/Products.lean index 687b8898075e5..07f12e4d224d9 100644 --- a/Mathlib/Topology/Category/TopCat/Limits/Products.lean +++ b/Mathlib/Topology/Category/TopCat/Limits/Products.lean @@ -5,6 +5,7 @@ Authors: Patrick Massot, Scott Morrison, Mario Carneiro, Andrew Yang -/ import Mathlib.Topology.Category.TopCat.EpiMono import Mathlib.Topology.Category.TopCat.Limits.Basic +import Mathlib.CategoryTheory.Limits.Shapes.Products #align_import topology.category.Top.limits.products from "leanprover-community/mathlib"@"178a32653e369dce2da68dc6b2694e385d484ef1" diff --git a/Mathlib/Topology/Connected/PathConnected.lean b/Mathlib/Topology/Connected/PathConnected.lean index b26022b8a72dd..46afc4cd61404 100644 --- a/Mathlib/Topology/Connected/PathConnected.lean +++ b/Mathlib/Topology/Connected/PathConnected.lean @@ -1118,9 +1118,9 @@ theorem IsPathConnected.exists_path_through_family' {n : ℕ} joined by a continuous path. -/ class PathConnectedSpace (X : Type*) [TopologicalSpace X] : Prop where /-- A path-connected space must be nonempty. 
-/ - Nonempty : Nonempty X + nonempty : Nonempty X /-- Any two points in a path-connected space must be joined by a continuous path. -/ - Joined : ∀ x y : X, Joined x y + joined : ∀ x y : X, Joined x y #align path_connected_space PathConnectedSpace theorem pathConnectedSpace_iff_zerothHomotopy : @@ -1130,7 +1130,7 @@ theorem pathConnectedSpace_iff_zerothHomotopy : · intro h refine' ⟨(nonempty_quotient_iff _).mpr h.1, ⟨_⟩⟩ rintro ⟨x⟩ ⟨y⟩ - exact Quotient.sound (PathConnectedSpace.Joined x y) + exact Quotient.sound (PathConnectedSpace.joined x y) · unfold ZerothHomotopy rintro ⟨h, h'⟩ skip @@ -1143,7 +1143,7 @@ variable [PathConnectedSpace X] /-- Use path-connectedness to build a path between two points. -/ def somePath (x y : X) : Path x y := - Nonempty.some (Joined x y) + Nonempty.some (joined x y) #align path_connected_space.some_path PathConnectedSpace.somePath end PathConnectedSpace @@ -1165,11 +1165,11 @@ theorem isPathConnected_iff_pathConnectedSpace : IsPathConnected F ↔ PathConne theorem pathConnectedSpace_iff_univ : PathConnectedSpace X ↔ IsPathConnected (univ : Set X) := by constructor · intro h - haveI := @PathConnectedSpace.Nonempty X _ _ + haveI := @PathConnectedSpace.nonempty X _ _ inhabit X refine' ⟨default, mem_univ _, _⟩ intros y _hy - simpa using PathConnectedSpace.Joined default y + simpa using PathConnectedSpace.joined default y · intro h have h' := h.joinedIn cases' h with x h @@ -1196,8 +1196,8 @@ instance Quotient.instPathConnectedSpace {s : Setoid X} [PathConnectedSpace X] : /-- This is a special case of `NormedSpace.instPathConnectedSpace` (and `TopologicalAddGroup.pathConnectedSpace`). It exists only to simplify dependencies. -/ instance Real.instPathConnectedSpace : PathConnectedSpace ℝ where - Nonempty := inferInstance - Joined := fun x y ↦ ⟨⟨⟨fun (t : I) ↦ (1 - t) * x + t * y, by continuity⟩, by simp, by simp⟩⟩ + joined x y := ⟨⟨⟨fun (t : I) ↦ (1 - t) * x + t * y, by continuity⟩, by simp, by simp⟩⟩ + nonempty := inferInstance theorem pathConnectedSpace_iff_eq : PathConnectedSpace X ↔ ∃ x : X, pathComponent x = univ := by simp [pathConnectedSpace_iff_univ, isPathConnected_iff_eq] diff --git a/Mathlib/Topology/IndicatorConstPointwise.lean b/Mathlib/Topology/IndicatorConstPointwise.lean new file mode 100644 index 0000000000000..8f395f8a4aebd --- /dev/null +++ b/Mathlib/Topology/IndicatorConstPointwise.lean @@ -0,0 +1,123 @@ +/- +Copyright (c) 2023 Kalle Kytölä. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Kalle Kytölä +-/ +import Mathlib.Algebra.IndicatorFunction +import Mathlib.Topology.Separation + +/-! +# Pointwise convergence of indicator functions + +In this file, we prove the equivalence of three different ways to phrase that the indicator +functions of sets converge pointwise. + +## Main results + +For `A` a set, `(Asᵢ)` an indexed collection of sets, under mild conditions, the following are +equivalent: + + (a) the indicator functions of `Asᵢ` tend to the indicator function of `A` pointwise; + + (b) for every `x`, we eventually have that `x ∈ Asᵢ` holds iff `x ∈ A` holds; + + (c) `Tendsto As _ <| Filter.pi (pure <| · ∈ A)`. + +The results stating these in the case when the indicators take values in a Fréchet space are: + * `tendsto_indicator_const_iff_forall_eventually` is the equivalence (a) ↔ (b); + * `tendsto_indicator_const_iff_tendsto_pi_pure` is the equivalence (a) ↔ (c). 
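+
+As a concrete sanity check of formulation (b) (an illustrative sketch only, not part of the
+API of this file): taking `As i = Set.Iic i` and `A = Set.univ` over `ℕ` with the `atTop`
+filter, condition (b) just says that every `x` eventually satisfies `x ≤ i`.
+
+```lean
+open Filter in
+example (x : ℕ) : ∀ᶠ i in atTop, (x ∈ Set.Iic i ↔ x ∈ Set.univ) := by
+  filter_upwards [eventually_ge_atTop x] with i hi
+  simpa using hi
+```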
+ +-/ + + +open Filter Topology + +variable {α : Type*} {A : Set α} +variable {β : Type*} [Zero β] [TopologicalSpace β] +variable {ι : Type*} (L : Filter ι) {As : ι → Set α} + +lemma tendsto_ite {β : Type*} {p : ι → Prop} [DecidablePred p] {q : Prop} [Decidable q] + {a b : β} {F G : Filter β} + (haG : {a}ᶜ ∈ G) (hbF : {b}ᶜ ∈ F) (haF : principal {a} ≤ F) (hbG : principal {b} ≤ G) : + Tendsto (fun i ↦ if p i then a else b) L (if q then F else G) ↔ ∀ᶠ i in L, p i ↔ q := by + constructor <;> intro h + · by_cases hq : q + · simp only [hq, ite_true] at h + filter_upwards [mem_map.mp (h hbF)] with i hi + simp only [Set.preimage_compl, Set.mem_compl_iff, Set.mem_preimage, Set.mem_singleton_iff, + ite_eq_right_iff, not_forall, exists_prop] at hi + tauto + · simp only [hq, ite_false] at h + filter_upwards [mem_map.mp (h haG)] with i hi + simp only [Set.preimage_compl, Set.mem_compl_iff, Set.mem_preimage, Set.mem_singleton_iff, + ite_eq_left_iff, not_forall, exists_prop] at hi + tauto + · have obs : (fun _ ↦ if q then a else b) =ᶠ[L] (fun i ↦ if p i then a else b) := by + filter_upwards [h] with i hi + simp only [hi] + apply Tendsto.congr' obs + by_cases hq : q + · simp only [hq, iff_true, ite_true] + apply le_trans _ haF + simp only [principal_singleton, le_pure_iff, mem_map, Set.mem_singleton_iff, + Set.preimage_const_of_mem, univ_mem] + · simp only [hq, ite_false] + apply le_trans _ hbG + simp only [principal_singleton, le_pure_iff, mem_map, Set.mem_singleton_iff, + Set.preimage_const_of_mem, univ_mem] + +lemma tendsto_indicator_const_apply_iff_eventually' (b : β) + (nhd_b : {0}ᶜ ∈ 𝓝 b) (nhd_o : {b}ᶜ ∈ 𝓝 0) (x : α) : + Tendsto (fun i ↦ (As i).indicator (fun (_ : α) ↦ b) x) L (𝓝 (A.indicator (fun (_ : α) ↦ b) x)) + ↔ ∀ᶠ i in L, (x ∈ As i ↔ x ∈ A) := by + classical + have heart := @tendsto_ite ι L β (fun i ↦ x ∈ As i) _ (x ∈ A) _ b 0 (𝓝 b) (𝓝 (0 : β)) + nhd_o nhd_b ?_ ?_ + convert heart + · by_cases hxA : x ∈ A <;> simp [hxA] + · simp only [principal_singleton, le_def, mem_pure] + exact fun s s_nhd ↦ mem_of_mem_nhds s_nhd + · simp only [principal_singleton, le_def, mem_pure] + exact fun s s_nhd ↦ mem_of_mem_nhds s_nhd + +lemma tendsto_indicator_const_iff_forall_eventually' + (b : β) (nhd_b : {0}ᶜ ∈ 𝓝 b) (nhd_o : {b}ᶜ ∈ 𝓝 0) : + Tendsto (fun i ↦ (As i).indicator (fun (_ : α) ↦ b)) L (𝓝 (A.indicator (fun (_ : α) ↦ b))) + ↔ ∀ x, ∀ᶠ i in L, (x ∈ As i ↔ x ∈ A) := by + simp_rw [tendsto_pi_nhds] + apply forall_congr' + exact tendsto_indicator_const_apply_iff_eventually' L b nhd_b nhd_o + +/-- The indicator functions of `Asᵢ` evaluated at `x` tend to the indicator function of `A` +evaluated at `x` if and only if we eventually have the equivalence `x ∈ Asᵢ ↔ x ∈ A`. -/ +@[simp] lemma tendsto_indicator_const_apply_iff_eventually [T1Space β] (b : β) [NeZero b] + (x : α) : + Tendsto (fun i ↦ (As i).indicator (fun (_ : α) ↦ b) x) L (𝓝 (A.indicator (fun (_ : α) ↦ b) x)) + ↔ ∀ᶠ i in L, (x ∈ As i ↔ x ∈ A) := by + apply tendsto_indicator_const_apply_iff_eventually' _ b + · simp only [compl_singleton_mem_nhds_iff, ne_eq, NeZero.ne, not_false_eq_true] + · simp only [compl_singleton_mem_nhds_iff, ne_eq, (NeZero.ne b).symm, not_false_eq_true] + +/-- The indicator functions of `Asᵢ` tend to the indicator function of `A` pointwise if and only if +for every `x`, we eventually have the equivalence `x ∈ Asᵢ ↔ x ∈ A`. 
-/ +@[simp] lemma tendsto_indicator_const_iff_forall_eventually [T1Space β] (b : β) [NeZero b] : + Tendsto (fun i ↦ (As i).indicator (fun (_ : α) ↦ b)) L (𝓝 (A.indicator (fun (_ : α) ↦ b))) + ↔ ∀ x, ∀ᶠ i in L, (x ∈ As i ↔ x ∈ A) := by + apply tendsto_indicator_const_iff_forall_eventually' _ b + · simp only [compl_singleton_mem_nhds_iff, ne_eq, NeZero.ne, not_false_eq_true] + · simp only [compl_singleton_mem_nhds_iff, ne_eq, (NeZero.ne b).symm, not_false_eq_true] + +lemma tendsto_indicator_const_iff_tendsto_pi_pure' + (b : β) (nhd_b : {0}ᶜ ∈ 𝓝 b) (nhd_o : {b}ᶜ ∈ 𝓝 0) : + Tendsto (fun i ↦ (As i).indicator (fun (_ : α) ↦ b)) L (𝓝 (A.indicator (fun (_ : α) ↦ b))) + ↔ (Tendsto As L <| Filter.pi (pure <| · ∈ A)) := by + rw [tendsto_indicator_const_iff_forall_eventually' _ b nhd_b nhd_o, tendsto_pi] + simp_rw [tendsto_pure] + aesop + +lemma tendsto_indicator_const_iff_tendsto_pi_pure [T1Space β] (b : β) [NeZero b] : + Tendsto (fun i ↦ (As i).indicator (fun (_ : α) ↦ b)) L (𝓝 (A.indicator (fun (_ : α) ↦ b))) + ↔ (Tendsto As L <| Filter.pi (pure <| · ∈ A)) := by + rw [tendsto_indicator_const_iff_forall_eventually _ b, tendsto_pi] + simp_rw [tendsto_pure] + aesop diff --git a/Mathlib/Topology/Sheaves/Functors.lean b/Mathlib/Topology/Sheaves/Functors.lean index e645a20e36778..141ce2b5d81b7 100644 --- a/Mathlib/Topology/Sheaves/Functors.lean +++ b/Mathlib/Topology/Sheaves/Functors.lean @@ -4,7 +4,7 @@ Released under Apache 2.0 license as described in the file LICENSE. Authors: Junyan Xu, Andrew Yang -/ import Mathlib.Topology.Sheaves.SheafCondition.Sites -import Mathlib.CategoryTheory.Sites.Pushforward +import Mathlib.CategoryTheory.Sites.Pullback #align_import topology.sheaves.functors from "leanprover-community/mathlib"@"85d6221d32c37e68f05b2e42cde6cee658dae5e9" @@ -51,8 +51,7 @@ open Presheaf /-- The pushforward of a sheaf (by a continuous map) is a sheaf. -/ theorem pushforward_sheaf_of_sheaf {F : X.Presheaf C} (h : F.IsSheaf) : (f _* F).IsSheaf := - pullback_isSheaf_of_coverPreserving (compatiblePreserving_opens_map f) - (coverPreserving_opens_map f) ⟨F, h⟩ + (Opens.map f).op_comp_isSheaf _ _ ⟨_, h⟩ set_option linter.uppercaseLean3 false in #align Top.sheaf.pushforward_sheaf_of_sheaf TopCat.Sheaf.pushforward_sheaf_of_sheaf @@ -61,7 +60,7 @@ variable (C) /-- The pushforward functor. -/ def pushforward (f : X ⟶ Y) : X.Sheaf C ⥤ Y.Sheaf C := - Sites.pullback _ (compatiblePreserving_opens_map f) (coverPreserving_opens_map f) + (Opens.map f).sheafPushforwardContinuous _ _ _ set_option linter.uppercaseLean3 false in #align Top.sheaf.pushforward TopCat.Sheaf.pushforward @@ -88,10 +87,10 @@ variable [PreservesFilteredColimits (CategoryTheory.forget A)] variable [ReflectsIsomorphisms (CategoryTheory.forget A)] /-- -The pushforward functor. +The pullback functor. -/ def pullback (f : X ⟶ Y) : Y.Sheaf A ⥤ X.Sheaf A := -Sites.pushforward A _ _ (Opens.map f) + (Opens.map f).sheafPullback _ _ _ lemma pullback_eq (f : X ⟶ Y) : pullback A f = forget A Y ⋙ Presheaf.pullback A f ⋙ presheafToSheaf _ _ := rfl @@ -106,7 +105,7 @@ def pullbackIso (f : X ⟶ Y) : /-- The adjunction between pullback and pushforward for sheaves on topological spaces. 
-/ def pullbackPushforwardAdjunction (f : X ⟶ Y) : pullback A f ⊣ pushforward A f := -Sites.pullbackPushforwardAdjunction _ _ _ _ _ + (Opens.map f).sheafAdjunctionContinuous _ _ _ instance : IsLeftAdjoint (pullback A f) := ⟨_, pullbackPushforwardAdjunction A f⟩ instance : IsRightAdjoint (pushforward A f) := ⟨_, pullbackPushforwardAdjunction A f⟩ diff --git a/Mathlib/Topology/Sheaves/SheafCondition/Sites.lean b/Mathlib/Topology/Sheaves/SheafCondition/Sites.lean index 01b917a271eff..e25d265d83689 100644 --- a/Mathlib/Topology/Sheaves/SheafCondition/Sites.lean +++ b/Mathlib/Topology/Sheaves/SheafCondition/Sites.lean @@ -18,8 +18,8 @@ topology, in preparation of connecting the sheaf condition on sites to the vario on spaces. We also specialize results about sheaves on sites to sheaves on spaces; we show that the inclusion -functor from a topological basis to `TopologicalSpace.Opens` is `CoverDense`, that open maps -induce `CoverPreserving` functors, and that open embeddings induce `CompatiblePreserving` functors. +functor from a topological basis to `TopologicalSpace.Opens` is cover dense, that open maps +induce cover preserving functors, and that open embeddings induce continuous functors. -/ @@ -135,7 +135,7 @@ namespace TopCat.Opens variable {X : TopCat} {ι : Type*} theorem coverDense_iff_isBasis [Category ι] (B : ι ⥤ Opens X) : - CoverDense (Opens.grothendieckTopology X) B ↔ Opens.IsBasis (Set.range B.obj) := by + B.IsCoverDense (Opens.grothendieckTopology X) ↔ Opens.IsBasis (Set.range B.obj) := by rw [Opens.isBasis_iff_nbhd] constructor; intro hd U x hx; rcases hd.1 U x hx with ⟨V, f, ⟨i, f₁, f₂, _⟩, hV⟩ exact ⟨B.obj i, ⟨i, rfl⟩, f₁.le hV, f₂.le⟩ @@ -144,7 +144,7 @@ theorem coverDense_iff_isBasis [Category ι] (B : ι ⥤ Opens X) : #align Top.opens.cover_dense_iff_is_basis TopCat.Opens.coverDense_iff_isBasis theorem coverDense_inducedFunctor {B : ι → Opens X} (h : Opens.IsBasis (Set.range B)) : - CoverDense (Opens.grothendieckTopology X) (inducedFunctor B) := + (inducedFunctor B).IsCoverDense (Opens.grothendieckTopology X) := (coverDense_iff_isBasis _).2 h #align Top.opens.cover_dense_induced_functor TopCat.Opens.coverDense_inducedFunctor @@ -176,9 +176,18 @@ theorem IsOpenMap.coverPreserving (hf : IsOpenMap f) : exact ⟨_, hf.functor.map i, ⟨_, i, 𝟙 _, hV, rfl⟩, Set.mem_image_of_mem f hxV⟩ #align is_open_map.cover_preserving IsOpenMap.coverPreserving + +lemma OpenEmbedding.functor_isContinuous (h : OpenEmbedding f) : + h.isOpenMap.functor.IsContinuous (Opens.grothendieckTopology X) + (Opens.grothendieckTopology Y) := by + apply Functor.isContinuous_of_coverPreserving + · exact h.compatiblePreserving + · exact h.isOpenMap.coverPreserving + theorem TopCat.Presheaf.isSheaf_of_openEmbedding (h : OpenEmbedding f) (hF : F.IsSheaf) : - IsSheaf (h.isOpenMap.functor.op ⋙ F) := - pullback_isSheaf_of_coverPreserving h.compatiblePreserving h.isOpenMap.coverPreserving ⟨_, hF⟩ + IsSheaf (h.isOpenMap.functor.op ⋙ F) := by + have := h.functor_isContinuous + exact Functor.op_comp_isSheaf _ _ _ ⟨_, hF⟩ #align Top.presheaf.is_sheaf_of_open_embedding TopCat.Presheaf.isSheaf_of_openEmbedding variable (f) @@ -206,6 +215,12 @@ theorem coverPreserving_opens_map : CoverPreserving (Opens.grothendieckTopology obtain ⟨V, i, hi, hxV⟩ := hS (f x) hx exact ⟨_, (Opens.map f).map i, ⟨_, _, 𝟙 _, hi, Subsingleton.elim _ _⟩, hxV⟩ +instance : (Opens.map f).IsContinuous (Opens.grothendieckTopology Y) + (Opens.grothendieckTopology X) := by + apply Functor.isContinuous_of_coverPreserving + · exact compatiblePreserving_opens_map f 
+ · exact coverPreserving_opens_map f + end OpenEmbedding namespace TopCat.Sheaf @@ -233,8 +248,10 @@ def isTerminalOfEqEmpty (F : X.Sheaf C) {U : Opens X} (h : U = ⊥) : is a sheaf on `X`, then a homomorphism between a presheaf `F` on `X` and `F'` is equivalent to a homomorphism between their restrictions to the indexing type `ι` of `B`, with the induced category structure on `ι`. -/ -def restrictHomEquivHom : ((inducedFunctor B).op ⋙ F ⟶ (inducedFunctor B).op ⋙ F'.1) ≃ (F ⟶ F'.1) := - @CoverDense.restrictHomEquivHom _ _ _ _ _ _ _ _ (Opens.coverDense_inducedFunctor h) _ F F' +def restrictHomEquivHom : + ((inducedFunctor B).op ⋙ F ⟶ (inducedFunctor B).op ⋙ F'.1) ≃ (F ⟶ F'.1) := + @Functor.IsCoverDense.restrictHomEquivHom _ _ _ _ _ _ _ _ + (Opens.coverDense_inducedFunctor h) _ F F' #align Top.sheaf.restrict_hom_equiv_hom TopCat.Sheaf.restrictHomEquivHom @[simp] diff --git a/Mathlib/Topology/Sheaves/Stalks.lean b/Mathlib/Topology/Sheaves/Stalks.lean index 7a514af1e6de1..0eb22f953cd99 100644 --- a/Mathlib/Topology/Sheaves/Stalks.lean +++ b/Mathlib/Topology/Sheaves/Stalks.lean @@ -12,7 +12,7 @@ import Mathlib.CategoryTheory.Limits.Preserves.Filtered import Mathlib.CategoryTheory.Limits.Final import Mathlib.Tactic.CategoryTheory.Elementwise import Mathlib.Algebra.Category.Ring.Colimits -import Mathlib.CategoryTheory.Sites.Pushforward +import Mathlib.CategoryTheory.Sites.Pullback #align_import topology.sheaves.stalks from "leanprover-community/mathlib"@"5dc6092d09e5e489106865241986f7f2ad28d4c8" diff --git a/README.md b/README.md index c886bb2a7ed12..d4b82879b00cd 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ ![](https://github.com/leanprover-community/mathlib4/workflows/continuous%20integration/badge.svg?branch=master) ![GitHub CI](https://github.com/leanprover-community/mathlib4/workflows/continuous%20integration/badge.svg?branch=master) -[![Bors enabled](https://bors.tech/images/badge_small.svg)](https://app.bors.tech/repositories/37904) +[![Bors enabled](https://bors.tech/images/badge_small.svg)](https://mathlib-bors-ca18eefec4cb.herokuapp.com/repositories/16) [![project chat](https://img.shields.io/badge/zulip-join_chat-brightgreen.svg)](https://leanprover.zulipchat.com) This is a complete port of [mathlib](https://github.com/leanprover-community/mathlib) to [Lean 4](https://leanprover.github.io/). diff --git a/lake-manifest.json b/lake-manifest.json index b4dd94dd580ac..8e621831575e1 100644 --- a/lake-manifest.json +++ b/lake-manifest.json @@ -4,10 +4,10 @@ [{"url": "https://github.com/leanprover/std4", "type": "git", "subDir": null, - "rev": "e403f680f0beb8610c29e6f799132e8be880554e", + "rev": "9dc16dc350f87252cfaf873774867fbe07d46c0b", "name": "std", "manifestFile": "lake-manifest.json", - "inputRev": "main", + "inputRev": "nightly-testing", "inherited": false, "configFile": "lakefile.lean"}, {"url": "https://github.com/leanprover-community/quote4", @@ -22,7 +22,7 @@ {"url": "https://github.com/leanprover-community/aesop", "type": "git", "subDir": null, - "rev": "bf5ab42a58e71de7ebad399ce3f90d29aae7fca9", + "rev": "c7cff4551258d31c0d2d453b3f9cbca757d445f1", "name": "aesop", "manifestFile": "lake-manifest.json", "inputRev": "master", diff --git a/lakefile.lean b/lakefile.lean index fce6ed5dfc069..9c83f7364ac35 100644 --- a/lakefile.lean +++ b/lakefile.lean @@ -34,7 +34,7 @@ package mathlib where meta if get_config? 
doc = some "on" then -- do not download and build doc-gen4 by default require «doc-gen4» from git "https://github.com/leanprover/doc-gen4" @ "main" -require std from git "https://github.com/leanprover/std4" @ "main" +require std from git "https://github.com/leanprover/std4" @ "nightly-testing" require Qq from git "https://github.com/leanprover-community/quote4" @ "master" require aesop from git "https://github.com/leanprover-community/aesop" @ "master" require proofwidgets from git "https://github.com/leanprover-community/ProofWidgets4" @ "v0.0.23" @@ -53,7 +53,8 @@ lean_lib Archive lean_lib Counterexamples lean_lib ImportGraph /-- Additional documentation in the form of modules that only contain module docstrings. -/ -lean_lib docs +lean_lib docs where + roots := #[`docs] /-! ## Executables provided by Mathlib diff --git a/scripts/bench/temci-config.run.yml b/scripts/bench/temci-config.run.yml index c2f3df87041a6..d73e75ff5569c 100644 --- a/scripts/bench/temci-config.run.yml +++ b/scripts/bench/temci-config.run.yml @@ -23,10 +23,12 @@ run_config: runner: perf_stat perf_stat: - properties: ['wall-clock', 'instructions'] + properties: ['wall-clock', 'task-clock', 'instructions'] rusage_properties: ['maxrss'] cmd: | LEAN_PATH=$(lake print-paths --no-build Mathlib | jq -r '.oleanPath | join(":")') lean Mathlib.lean + discarded_runs: 1 + min_runs: 5 - attributes: description: size run_config: diff --git a/test/ProdAssoc.lean b/test/ProdAssoc.lean new file mode 100644 index 0000000000000..7802ba1458acd --- /dev/null +++ b/test/ProdAssoc.lean @@ -0,0 +1,27 @@ +import Mathlib.Tactic.ProdAssoc + +variable {α β γ δ : Type*} + +example : (α × β) × (γ × δ) ≃ α × (β × γ) × δ := by + have := (prod_assoc% : (α × β) × (γ × δ) ≃ α × (β × γ) × δ) + exact this + +example : (α × β) × (γ × δ) ≃ α × (β × γ) × δ := prod_assoc% + +example : (α × β) × (γ × δ) ≃ α × (β × γ) × δ := + (prod_assoc% : _ ≃ α × β × γ × δ).trans prod_assoc% + +example {α β γ δ : Type*} (x : (α × β) × (γ × δ)) : α × (β × γ) × δ := prod_assoc% x + +example : Nat ≃ Nat := prod_assoc% + +example (x : α) (y : β) (z : γ) (w : δ) : + prod_assoc% ((x,y),(z,w)) = (x,y,z,w) := + rfl + +example (x : α) (y : β) (z : γ) (w : δ) : + prod_assoc% ((x,y),(z,w)) = (x,(y,z),w) := + rfl + +example : (α × β) × (γ × δ) → α × (β × γ) × δ := + prod_assoc% diff --git a/test/SimpRw.lean b/test/SimpRw.lean index bbf3c7765a4b8..04f080fbb1ad6 100644 --- a/test/SimpRw.lean +++ b/test/SimpRw.lean @@ -5,6 +5,9 @@ Authors: Anne Baanen, Mario Carneiro -/ import Mathlib.Tactic.SimpRw +import Std.Tactic.GuardExpr + +private axiom test_sorry : ∀ {α}, α -- `simp_rw` can perform rewrites under binders: example : (λ (x y : Nat) => x + y) = (λ x y => y + x) := by simp_rw [Nat.add_comm] @@ -31,3 +34,11 @@ example {a : Nat} (h2 : ∀ a b : Nat, a ≤ b ↔ ∀ c, c < a → c < b) : (∀ b, a - 1 ≤ b) = ∀ b c : Nat, c < a → c < b + 1 := by simp_rw [h1, h2] + +-- `simp_rw` respects config options +example : 1 = 2 := by + let a := 2 + show 1 = a + simp_rw (config := {zeta := false}) [] + guard_target =ₛ 1 = a + exact test_sorry diff --git a/test/apply_fun.lean b/test/apply_fun.lean index 5b741700653d7..8c2b8c575ebd3 100644 --- a/test/apply_fun.lean +++ b/test/apply_fun.lean @@ -263,6 +263,7 @@ example : 1 = 1 := by def funFamily (_i : ℕ) : Bool → Bool := id -- `apply_fun` should not silence errors in `assumption` +set_option linter.unreachableTactic false in /-- error: maximum recursion depth has been reached (use `set_option maxRecDepth ` to increase limit) -/ diff --git a/test/convert2.lean 
b/test/convert2.lean index b70bada2d9358..3c069eb278e71 100644 --- a/test/convert2.lean +++ b/test/convert2.lean @@ -1,6 +1,8 @@ import Mathlib.Data.List.Defs import Mathlib.Data.Nat.Basic +set_option linter.unreachableTactic false + -- Prior to #7945 this failed with `(kernel) declaration has metavariables '_example'`. /-- error: maximum recursion depth has been reached (use `set_option maxRecDepth ` to increase limit) diff --git a/test/delabLinearIndependent.lean b/test/delabLinearIndependent.lean new file mode 100644 index 0000000000000..05ec457919f65 --- /dev/null +++ b/test/delabLinearIndependent.lean @@ -0,0 +1,21 @@ +import Mathlib.LinearAlgebra.LinearIndependent + +set_option pp.unicode.fun true + +variable {K V : Type*} [DivisionRing K] [AddCommGroup V] [Module K V] {s : Set V} {x : V} + +variable (h : LinearIndependent K (fun b => b : s → V)) in +/-- info: h : LinearIndependent K fun (b : ↑s) ↦ ↑b -/ +#guard_msgs in #check h + +variable (h : LinearIndependent K (Subtype.val : s → V)) in +/-- info: h : LinearIndependent (ι := { x // x ∈ s }) K Subtype.val -/ +#guard_msgs in #check h + +variable (h : LinearIndependent K (by exact Subtype.val : s → V)) in +/-- info: h : LinearIndependent (ι := ↑s) K Subtype.val -/ +#guard_msgs in #check h + +variable (h : LinearIndependent K (fun b => (fun b => b : s → V) b)) in +/-- info: h : LinearIndependent K fun (b : ↑s) ↦ (fun b ↦ ↑b) b -/ +#guard_msgs in #check h diff --git a/test/fail_if_no_progress.lean b/test/fail_if_no_progress.lean index f5081e6a190a2..3bebdc6d3e541 100644 --- a/test/fail_if_no_progress.lean +++ b/test/fail_if_no_progress.lean @@ -1,4 +1,10 @@ import Mathlib.Tactic.FailIfNoProgress +import Mathlib.Tactic.Basic +import Mathlib.Tactic.RunCmd +import Std.Tactic.GuardMsgs + +set_option linter.unusedVariables false +set_option pp.unicode.fun true section success @@ -7,19 +13,90 @@ example (h : 1 = 1) : True := by fail_if_no_progress simp at h trivial +example : let x := 1; x = x := by + intro x + fail_if_no_progress clear_value x + rfl + +-- New fvarids. This is not easily avoided without remapping fvarids manually. +example : let x := 1; x = x := by + intro x + fail_if_no_progress + revert x + intro x + rfl + +open Lean Elab Tactic in +example : let x := id 0; x = x := by + intro x + fail_if_no_progress + -- Reduce the value of `x` to `Nat.zero` + run_tac do + let g ← getMainGoal + let decl ← g.getDecl + let some d := decl.lctx.findFromUserName? `x | throwError "no x" + let lctx := decl.lctx.modifyLocalDecl d.fvarId fun d => + d.setValue (.const ``Nat.zero []) + let g' ← Meta.mkFreshExprMVarAt lctx decl.localInstances decl.type + g.assign g' + replaceMainGoal [g'.mvarId!] 
+ guard_hyp x : Nat :=ₛ Nat.zero + rfl + end success section failure +/-- +error: no progress made on +x : Bool +h : x = true +⊢ x = true +-/ +#guard_msgs in example (x : Bool) (h : x = true) : x = true := by - fail_if_success - fail_if_no_progress simp - exact h + fail_if_no_progress skip +/-- +error: no progress made on +x : Bool +h : x = true +⊢ x = true +-/ +#guard_msgs in +example (x : Bool) (h : x = true) : x = true := by + fail_if_no_progress simp (config := {failIfUnchanged := false}) +/-- +error: no progress made on +x : Bool +h : x = true +⊢ True +-/ +#guard_msgs in example (x : Bool) (h : x = true) : True := by - fail_if_success - fail_if_no_progress simp at h - trivial + fail_if_no_progress simp (config := {failIfUnchanged := false}) at h + +/-- +error: no progress made on +x : Nat := (fun x ↦ x) Nat.zero +⊢ x = x +-/ +#guard_msgs in +open Lean Elab Tactic in +example : let x := (fun x => x) Nat.zero; x = x := by + intro x + fail_if_no_progress + -- Reduce the value of `x` to `Nat.zero` + run_tac do + let g ← getMainGoal + let decl ← g.getDecl + let some d := decl.lctx.findFromUserName? `x | throwError "no x" + let lctx := decl.lctx.modifyLocalDecl d.fvarId fun d => + d.setValue (.const ``Nat.zero []) + let g' ← Meta.mkFreshExprMVarAt lctx decl.localInstances decl.type + g.assign g' + replaceMainGoal [g'.mvarId!] + guard_hyp x : Nat :=ₛ Nat.zero end failure diff --git a/test/linarith.lean b/test/linarith.lean index 55d44f5b844a9..96d994e6c21f2 100644 --- a/test/linarith.lean +++ b/test/linarith.lean @@ -487,6 +487,13 @@ example (a b : Nat) (h1 : a < b + 1) (h2 : ¬ a = b) : a < b := by end +-- Checks that splitNe handles metavariables and also that conjunction splitting occurs +-- before splitNe splitting +example (r : ℚ) (h' : 1 = r * 2) : 1 = 0 ∨ r = 1 / 2 := by + by_contra h'' + push_neg at h'' + linarith (config := {splitNe := true}) + example (x y : ℚ) (h₁ : 0 ≤ y) (h₂ : y ≤ x) : y * x ≤ x * x := by nlinarith example (x y : ℚ) (h₁ : 0 ≤ y) (h₂ : y ≤ x) : y * x ≤ x ^ 2 := by nlinarith @@ -544,3 +551,39 @@ example (k : ℤ) (h : k < 1) (h₁ : -1 < k) : k = 0 := by -- linarith preprocessor to fail. change _ at h₁ linarith + +/-- error: unknown identifier 'garbage' -/ +#guard_msgs in +example (q : Prop) (p : ∀ (x : ℤ), q → 1 = 2) : 1 = 2 := by + linarith [p _ garbage] + +/-- error: unknown identifier 'garbage' -/ +#guard_msgs in +example (q : Prop) (p : ∀ (x : ℤ), q → 1 = 2) : 1 = 2 := by + nlinarith [p _ garbage] + +-- Commented out for now since `#guard_msgs` prints the metavariable numbers, which are +-- subject to change. +-- /-- +-- error: don't know how to synthesize placeholder for argument 'x' +-- ... 
+-- -/ +-- #guard_msgs in +-- example (q : Prop) (p : ∀ (x : ℤ), 1 = 2) : 1 = 2 := by +-- linarith [p _] + +/-- +error: Argument passed to linarith has metavariables: + p ?a +-/ +#guard_msgs in +example (q : Prop) (p : ∀ (x : ℤ), 1 = 2) : 1 = 2 := by + linarith [p ?a] + +/-- +error: Argument passed to nlinarith has metavariables: + p ?a +-/ +#guard_msgs in +example (q : Prop) (p : ∀ (x : ℤ), 1 = 2) : 1 = 2 := by + nlinarith [p ?a] diff --git a/test/norm_cast.lean b/test/norm_cast.lean index 394633d3fde8d..13eed7f0eadb1 100644 --- a/test/norm_cast.lean +++ b/test/norm_cast.lean @@ -140,7 +140,7 @@ namespace ennreal end ennreal -lemma b (h g : true) : true ∧ true := by +lemma b (_h g : true) : true ∧ true := by constructor assumption_mod_cast assumption_mod_cast diff --git a/test/norm_num.lean b/test/norm_num.lean index fb7a97fbb5fdb..f937813327eff 100644 --- a/test/norm_num.lean +++ b/test/norm_num.lean @@ -335,11 +335,11 @@ example : (1:ℂ) / 3 ≠ 2 / 7 := by norm_num1 example : (1:ℝ) ≠ 2 := by norm_num1 --- example : (5 / 2:ℕ) = 2 := by norm_num1 --- example : (5 / -2:ℤ) < -1 := by norm_num1 --- example : (0 + 1) / 2 < 0 + 1 := by norm_num1 +example : (5 / 2:ℕ) = 2 := by norm_num1 +example : (5 / -2:ℤ) < -1 := by norm_num1 +example : (0 + 1) / 2 < 0 + 1 := by norm_num1 example : Nat.succ (Nat.succ (2 ^ 3)) = 10 := by norm_num1 --- example : 10 = (-1 : ℤ) % 11 := by norm_num1 -- [fixme] ⊢ False ??? +example : 10 = (-1 : ℤ) % 11 := by norm_num1 example : (12321 - 2 : ℤ) = 12319 := by norm_num1 example : (63:ℚ) ≥ 5 := by norm_num1 @@ -363,7 +363,7 @@ example (a : ℚ) (h : 3⁻¹ * a = a) : True := by guard_hyp h : 1 / 3 * a = a trivial --- example (h : (5 : ℤ) ∣ 2) : False := by norm_num1 at h +example (h : (5 : ℤ) ∣ 2) : False := by norm_num1 at h example (h : False) : False := by norm_num1 at h example : True := by norm_num1 -- example : True ∧ True := by norm_num1 @@ -524,11 +524,11 @@ end Transparency -- user command /-- info: True -/ -#guard_msgs in -#norm_num 1 = 1 +#guard_msgs in #norm_num 1 = 1 example : 1 = 1 := by norm_num1 --- #norm_num 2^4-1 ∣ 2^16-1 --- example : 2^4-1 ∣ 2^16-1 := by norm_num1 +/-- info: True -/ +#guard_msgs in #norm_num 2^4-1 ∣ 2^16-1 +example : 2^4-1 ∣ 2^16-1 := by norm_num1 -- #norm_num (3 : Real) ^ (-2 : ℤ) = 1/9 -- example : (3 : Real) ^ (-2 : ℤ) = 1/9 := by norm_num1 diff --git a/test/norm_num_ext.lean b/test/norm_num_ext.lean index bf31496c7dfe6..5221f544dad65 100644 --- a/test/norm_num_ext.lean +++ b/test/norm_num_ext.lean @@ -6,7 +6,7 @@ Authors: Mario Carneiro import Mathlib.Tactic.NormNum.BigOperators import Mathlib.Tactic.NormNum.GCD import Mathlib.Tactic.NormNum.IsCoprime -import Mathlib.Tactic.NormNum.Mod +import Mathlib.Tactic.NormNum.DivMod import Mathlib.Tactic.NormNum.NatFib import Mathlib.Tactic.NormNum.NatSqrt import Mathlib.Tactic.NormNum.Prime @@ -436,4 +436,14 @@ example : (-3 : ℤ) % 4 = 1 := by norm_num1 example : (3 : ℤ) % -4 = 3 := by norm_num1 example : 3 + (42 : ℤ) % 5 = 5 := by norm_num1 +example : 2 ∣ 4 := by norm_num1 +example : ¬ 2 ∣ 5 := by norm_num1 +example : 553105253 ∣ 553105253 * 776531401 := by norm_num1 +example : ¬ 553105253 ∣ 553105253 * 776531401 + 1 := by norm_num1 + +example : (2 : ℤ) ∣ 4 := by norm_num1 +example : ¬ (2 : ℤ) ∣ 5 := by norm_num1 +example : (553105253 : ℤ) ∣ 553105253 * 776531401 := by norm_num1 +example : ¬ (553105253 : ℤ) ∣ 553105253 * 776531401 + 1 := by norm_num1 + end mod