Initial commit

This commit is contained in:
2026-03-10 11:17:41 +09:00
commit 7d744693f6
53 changed files with 26009 additions and 0 deletions

13
.devcontainer/.zshrc Executable file
View File

@@ -0,0 +1,13 @@
# Load zsh's color helper so %F{...} color escapes work in the prompt below.
autoload -U colors && colors
# precmd runs before every prompt is drawn: rebuild a terminal-width rule each
# time so the line tracks window resizes ($COLUMNS is re-read on every prompt).
precmd() {
  drawline=""
  # Build a string of $COLUMNS spaces (zsh short-form for loop).
  for i in {1..$COLUMNS}; drawline=" $drawline"
  # %U...%u underlines the blank line, turning it into a horizontal rule.
  drawline="%U${drawline}%u"
  # Two-line prompt: grey full-width rule, then bold red "user:cwd>".
  # The second line of the string must stay unindented -- any leading
  # whitespace here would appear verbatim in the prompt.
  PS1="%F{252}${drawline}
%B%F{124}%n:%~>%b%f "
}
# Put the active opam switch (OCaml/Coq toolchain) on PATH for every shell.
eval $(opam env)
alias ls="ls --color"

58
.devcontainer/Dockerfile Normal file
View File

@@ -0,0 +1,58 @@
FROM ubuntu:20.04

## BEGIN: RUNS AS ROOT
# Create a non-root user matching the typical host UID/GID (1000:1000) so
# bind-mounted files keep sane ownership.
ARG USERNAME=cis5000
ARG USER_UID=1000
ARG USER_GID=$USER_UID

RUN apt-get update -y

RUN groupadd --gid $USER_GID $USERNAME \
    && useradd --uid $USER_UID --gid $USER_GID -m $USERNAME --shell /bin/zsh \
    #
    # [Optional] Add sudo support. Omit if you don't need to install software after connecting.
    && apt-get install -y sudo \
    && echo $USERNAME ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME \
    && chmod 0440 /etc/sudoers.d/$USERNAME

## Hack needs root permissions
# See hack.sh
COPY hack.sh /tmp/hack.sh
RUN chmod +x /tmp/hack.sh
RUN /tmp/hack.sh

# Build tools and libraries needed by opam/Coq, installed in a single layer
# (instead of one RUN -- and one image layer -- per package).
# DEBIAN_FRONTEND=noninteractive prevents pkg-config's tzdata dependency from
# blocking the build with an interactive prompt.
RUN DEBIAN_FRONTEND=noninteractive apt-get install -y \
        build-essential \
        linux-libc-dev \
        libgmp3-dev \
        m4 \
        opam \
        pkg-config \
        time \
        zip \
        zsh

## Set up user environment
# --chown so the user owns their own .zshrc (plain COPY would leave it
# root-owned).
COPY --chown=$USER_UID:$USER_GID .zshrc /home/$USERNAME/

## Run in usermode
# [Optional] Set the default user. Omit if you want to keep the default as root.
USER $USERNAME

# Configure opam/ocaml
RUN opam init -y --disable-sandboxing --compiler=5.3.0
RUN opam switch 5.3.0
RUN opam install -y num
RUN opam repo add -y coq-released https://coq.inria.fr/opam/released
RUN opam pin add -y coq 9.0.0
RUN opam install -y coq-simple-io
RUN opam install -y vscoq-language-server
RUN opam update -y
RUN opam upgrade -y
# NOTE: a trailing `RUN eval $(opam config env)` was removed -- each RUN runs
# in a fresh shell, so the exported variables never persisted into the image.
# The user's .zshrc runs `eval $(opam env)` at shell startup instead.

View File

@@ -0,0 +1,49 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
// README at: https://github.com/devcontainers/templates/tree/main/src/ubuntu
{
    "name": "Ubuntu",
    // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
    "build": {
        "dockerfile": "Dockerfile"
    },
    // Features to add to the dev container. More info: https://containers.dev/features.
    // "features": {},
    // Use 'forwardPorts' to make a list of ports inside the container available locally.
    // "forwardPorts": [],
    // Use 'postCreateCommand' to run commands after the container is created.
    // "postCreateCommand": "uname -a",
    // Configure tool-specific properties.
    "customizations": {
        "vscode": {
            "extensions": [
                "maximedenes.vscoq"
            ],
            "settings": {
                // Paths must match the opam switch created in the Dockerfile
                // (opam init --compiler=5.3.0); the previous value pointed at
                // a nonexistent 4.14.0 switch.
                "coqtop.binPath": "/home/cis5000/.opam/5.3.0/bin",
                // VsCoq 2 (maximedenes.vscoq) reads "vscoq.path", not the
                // legacy "coqtop.binPath"; point it at the language server
                // installed by `opam install vscoq-language-server`.
                // NOTE(review): verify this path inside the built image.
                "vscoq.path": "/home/cis5000/.opam/5.3.0/bin/vscoqtop",
                "files.exclude": {
                    "**/*.aux": true,
                    "**/*.glob": true,
                    "**/*.vo": true,
                    "**/*.vos": true,
                    "**/*.vok": true,
                    "**/*.html": true,
                    "**/.*.report": true,
                    "**/.*.cache": true
                },
                "coq.loadCoqProject": true,
                "coq.coqProjectRoot": ".",
                "[coq]": {
                    "editor.tabSize": 2,
                    "editor.insertSpaces": true
                }
            }
        }
    }
    // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
    // "remoteUser": "root"
}

17
.devcontainer/hack.sh Normal file
View File

@@ -0,0 +1,17 @@
#!/usr/bin/env bash
### HACK - workaround ubuntu libc6 version number bug see: https://forum.odroid.com/viewtopic.php?p=344373
# Replace /bin/uname with a wrapper that lies about the kernel release
# (libc6's preinst refuses to install under kernels it considers too old),
# delegating every other invocation to the real binary, kept as uname.orig.
mv /bin/uname /bin/uname.orig
# Plain `tee` (not `tee -a`): overwrite, so re-running this script cannot
# append a second copy of the wrapper and corrupt it.
tee /bin/uname <<EOF
#!/bin/bash
if [[ \$1 == "-r" ]]; then
  echo '4.9.250';
  exit
else
  # Forward ALL arguments; the original forwarded only \$1, silently
  # dropping any additional flags.
  uname.orig "\$@"
fi
EOF
chmod 755 /bin/uname
### END HACK

1
.envrc Normal file
View File

@@ -0,0 +1 @@
# direnv: enter the Nix-provided development shell when cd-ing into this
# directory (requires direnv + nix; presumably a shell.nix/flake exists at the
# repo root -- TODO confirm, it is outside this view).
use nix

12
.gitignore vendored Normal file
View File

@@ -0,0 +1,12 @@
# Rocq byproducts
*.aux
*.cache
*.glob
*.ml
*.mli
*.vo
*.vok
*.vos
.Makefile.coq.*
Makefile.coq
Makefile.coq.*

1828
AltAuto.v Normal file

File diff suppressed because it is too large Load Diff

274
AltAutoTest.v Normal file
View File

@@ -0,0 +1,274 @@
Set Warnings "-notation-overridden,-parsing".
From Stdlib Require Export String.
From LF Require Import AltAuto.
Parameter MISSING: Type.
Module Check.
Ltac check_type A B :=
match type of A with
| context[MISSING] => idtac "Missing:" A
| ?T => first [unify T B; idtac "Type: ok" | idtac "Type: wrong - should be (" B ")"]
end.
Ltac print_manual_grade A :=
match eval compute in A with
| Some (_ ?S ?C) =>
idtac "Score:" S;
match eval compute in C with
| ""%string => idtac "Comment: None"
| _ => idtac "Comment:" C
end
| None =>
idtac "Score: Ungraded";
idtac "Comment: None"
end.
End Check.
From LF Require Import AltAuto.
Import Check.
Goal True.
idtac "------------------- try_sequence --------------------".
idtac " ".
idtac "#> andb_eq_orb".
idtac "Possible points: 1".
check_type @andb_eq_orb (
(forall (b c : Basics.bool)
(_ : @eq Basics.bool (Basics.andb b c) (Basics.orb b c)),
@eq Basics.bool b c)).
idtac "Assumptions:".
Abort.
Print Assumptions andb_eq_orb.
Goal True.
idtac " ".
idtac "#> add_assoc".
idtac "Possible points: 1".
check_type @add_assoc (
(forall n m p : nat,
@eq nat (Nat.add n (Nat.add m p)) (Nat.add (Nat.add n m) p))).
idtac "Assumptions:".
Abort.
Print Assumptions add_assoc.
Goal True.
idtac " ".
idtac "#> nonzeros_app".
idtac "Possible points: 1".
check_type @nonzeros_app (
(forall lst1 lst2 : Poly.list nat,
@eq (Poly.list nat) (nonzeros (@Poly.app nat lst1 lst2))
(@Poly.app nat (nonzeros lst1) (nonzeros lst2)))).
idtac "Assumptions:".
Abort.
Print Assumptions nonzeros_app.
Goal True.
idtac " ".
idtac "------------------- notry_sequence --------------------".
idtac " ".
idtac "#> add_assoc'".
idtac "Possible points: 1".
check_type @add_assoc' (
(forall n m p : nat,
@eq nat (Nat.add n (Nat.add m p)) (Nat.add (Nat.add n m) p))).
idtac "Assumptions:".
Abort.
Print Assumptions add_assoc'.
Goal True.
idtac " ".
idtac "------------------- ev100 --------------------".
idtac " ".
idtac "#> ev100".
idtac "Possible points: 1".
check_type @ev100 ((IndProp.ev 100)).
idtac "Assumptions:".
Abort.
Print Assumptions ev100.
Goal True.
idtac " ".
idtac "------------------- re_opt --------------------".
idtac " ".
idtac "#> Manually graded: re_opt".
idtac "Possible points: 6".
print_manual_grade manual_grade_for_re_opt.
idtac " ".
idtac "------------------- automatic_solvers --------------------".
idtac " ".
idtac "#> plus_id_exercise_from_basics".
idtac "Possible points: 0.5".
check_type @plus_id_exercise_from_basics (
(forall (n m o : nat) (_ : @eq nat n m) (_ : @eq nat m o),
@eq nat (Nat.add n m) (Nat.add m o))).
idtac "Assumptions:".
Abort.
Print Assumptions plus_id_exercise_from_basics.
Goal True.
idtac " ".
idtac "#> add_assoc_from_induction".
idtac "Possible points: 0.5".
check_type @add_assoc_from_induction (
(forall n m p : nat,
@eq nat (Nat.add n (Nat.add m p)) (Nat.add (Nat.add n m) p))).
idtac "Assumptions:".
Abort.
Print Assumptions add_assoc_from_induction.
Goal True.
idtac " ".
idtac "#> S_injective_from_tactics".
idtac "Possible points: 0.5".
check_type @S_injective_from_tactics (
(forall (n m : nat) (_ : @eq nat (S n) (S m)), @eq nat n m)).
idtac "Assumptions:".
Abort.
Print Assumptions S_injective_from_tactics.
Goal True.
idtac " ".
idtac "#> or_distributes_over_and_from_logic".
idtac "Possible points: 0.5".
check_type @or_distributes_over_and_from_logic (
(forall P Q R : Prop, iff (or P (and Q R)) (and (or P Q) (or P R)))).
idtac "Assumptions:".
Abort.
Print Assumptions or_distributes_over_and_from_logic.
Goal True.
idtac " ".
idtac "------------------- re_opt_match_auto --------------------".
idtac " ".
idtac "#> Manually graded: re_opt_match''".
idtac "Possible points: 3".
print_manual_grade manual_grade_for_re_opt_match''.
idtac " ".
idtac "------------------- andb3_exchange --------------------".
idtac " ".
idtac "#> andb3_exchange".
idtac "Possible points: 1".
check_type @andb3_exchange (
(forall b c d : Basics.bool,
@eq Basics.bool (Basics.andb (Basics.andb b c) d)
(Basics.andb (Basics.andb b d) c))).
idtac "Assumptions:".
Abort.
Print Assumptions andb3_exchange.
Goal True.
idtac " ".
idtac "------------------- andb_true_elim2 --------------------".
idtac " ".
idtac "#> andb_true_elim2'".
idtac "Possible points: 1.5".
check_type @andb_true_elim2' (
(forall (b c : Basics.bool)
(_ : @eq Basics.bool (Basics.andb b c) Basics.true),
@eq Basics.bool c Basics.true)).
idtac "Assumptions:".
Abort.
Print Assumptions andb_true_elim2'.
Goal True.
idtac " ".
idtac "#> andb3_exchange'".
idtac "Possible points: 0.5".
check_type @andb3_exchange' (
(forall b c d : Basics.bool,
@eq Basics.bool (Basics.andb (Basics.andb b c) d)
(Basics.andb (Basics.andb b d) c))).
idtac "Assumptions:".
Abort.
Print Assumptions andb3_exchange'.
Goal True.
idtac " ".
idtac "------------------- nor_intuition --------------------".
idtac " ".
idtac "#> Manually graded: nor_intuition".
idtac "Advanced".
idtac "Possible points: 6".
print_manual_grade manual_grade_for_nor_intuition.
idtac " ".
idtac " ".
idtac "Max points - standard: 19".
idtac "Max points - advanced: 25".
idtac "".
idtac "Allowed Axioms:".
idtac "functional_extensionality".
idtac "FunctionalExtensionality.functional_extensionality_dep".
idtac "plus_le".
idtac "le_trans".
idtac "le_plus_l".
idtac "add_le_cases".
idtac "Sn_le_Sm__n_le_m".
idtac "O_le_n".
idtac "".
idtac "".
idtac "********** Summary **********".
idtac "".
idtac "Below is a summary of the automatically graded exercises that are incomplete.".
idtac "".
idtac "The output for each exercise can be any of the following:".
idtac " - 'Closed under the global context', if it is complete".
idtac " - 'MANUAL', if it is manually graded".
idtac " - A list of pending axioms, containing unproven assumptions. In this case".
idtac " the exercise is considered complete, if the axioms are all allowed.".
idtac "".
idtac "********** Standard **********".
idtac "---------- andb_eq_orb ---------".
Print Assumptions andb_eq_orb.
idtac "---------- add_assoc ---------".
Print Assumptions add_assoc.
idtac "---------- nonzeros_app ---------".
Print Assumptions nonzeros_app.
idtac "---------- add_assoc' ---------".
Print Assumptions add_assoc'.
idtac "---------- ev100 ---------".
Print Assumptions ev100.
idtac "---------- re_opt ---------".
idtac "MANUAL".
idtac "---------- plus_id_exercise_from_basics ---------".
Print Assumptions plus_id_exercise_from_basics.
idtac "---------- add_assoc_from_induction ---------".
Print Assumptions add_assoc_from_induction.
idtac "---------- S_injective_from_tactics ---------".
Print Assumptions S_injective_from_tactics.
idtac "---------- or_distributes_over_and_from_logic ---------".
Print Assumptions or_distributes_over_and_from_logic.
idtac "---------- re_opt_match'' ---------".
idtac "MANUAL".
idtac "---------- andb3_exchange ---------".
Print Assumptions andb3_exchange.
idtac "---------- andb_true_elim2' ---------".
Print Assumptions andb_true_elim2'.
idtac "---------- andb3_exchange' ---------".
Print Assumptions andb3_exchange'.
idtac "".
idtac "********** Advanced **********".
idtac "---------- nor_intuition ---------".
idtac "MANUAL".
Abort.
(* 2026-01-07 13:18 *)
(* 2026-01-07 13:18 *)

747
Auto.v Normal file
View File

@@ -0,0 +1,747 @@
(** * Auto: More Automation *)
Set Warnings "-notation-overridden,-notation-incompatible-prefix".
From Stdlib Require Import Lia.
From Stdlib Require Import Strings.String.
From LF Require Import Maps.
From LF Require Import Imp.
(** Up to now, we've used the manual part of Rocq's tactic
facilities. In this chapter, we'll learn more about some of
Rocq's powerful automation features: proof search via the [auto]
tactic, automated forward reasoning via the [Ltac] hypothesis
matching machinery, and deferred instantiation of existential
variables using [eapply] and [eauto]. Using these features
together with Ltac's scripting facilities will enable us to make
some of our proofs startlingly short! Used properly, they can
also make proofs more maintainable and robust to changes in
underlying definitions. A deeper treatment of [auto] and [eauto]
can be found in the [UseAuto] chapter in _Programming Language
Foundations_.
(There's one other major category of automation we haven't
discussed much yet, namely built-in decision procedures for
specific kinds of problems: [lia] is one example, but there are
others. This topic will be deferred for a while longer.)
Our motivating example will be the following proof, repeated with
just a few small changes from the [Imp] chapter. We will
simplify this proof in several stages. *)
Theorem ceval_deterministic: forall c st st1 st2,
st =[ c ]=> st1 ->
st =[ c ]=> st2 ->
st1 = st2.
Proof.
intros c st st1 st2 E1 E2;
generalize dependent st2;
induction E1; intros st2 E2; inversion E2; subst.
- (* E_Skip *) reflexivity.
- (* E_Asgn *) reflexivity.
- (* E_Seq *)
rewrite (IHE1_1 st'0 H1) in *.
apply IHE1_2. assumption.
(* E_IfTrue *)
- (* b evaluates to true *)
apply IHE1. assumption.
- (* b evaluates to false (contradiction) *)
rewrite H in H5. discriminate.
(* E_IfFalse *)
- (* b evaluates to true (contradiction) *)
rewrite H in H5. discriminate.
- (* b evaluates to false *)
apply IHE1. assumption.
(* E_WhileFalse *)
- (* b evaluates to false *)
reflexivity.
- (* b evaluates to true (contradiction) *)
rewrite H in H2. discriminate.
(* E_WhileTrue *)
- (* b evaluates to false (contradiction) *)
rewrite H in H4. discriminate.
- (* b evaluates to true *)
rewrite (IHE1_1 st'0 H3) in *.
apply IHE1_2. assumption. Qed.
(* ################################################################# *)
(** * The [auto] Tactic *)
(** Thus far, our proof scripts mostly apply relevant hypotheses or
lemmas by name, and only one at a time. *)
Example auto_example_1 : forall (P Q R: Prop),
(P -> Q) -> (Q -> R) -> P -> R.
Proof.
intros P Q R H1 H2 H3.
apply H2. apply H1. assumption.
Qed.
(** The [auto] tactic tries to free us from this drudgery by _searching_
for a sequence of applications that will prove the goal: *)
Example auto_example_1' : forall (P Q R: Prop),
(P -> Q) -> (Q -> R) -> P -> R.
Proof.
auto.
Qed.
(** The [auto] tactic solves goals that are solvable by any combination of
[intros] and [apply]. *)
(** Using [auto] is always "safe" in the sense that it will
never fail and will never change the proof state: either it
completely solves the current goal, or it does nothing. *)
(** Here is a larger example showing [auto]'s power: *)
Example auto_example_2 : forall P Q R S T U : Prop,
(P -> Q) ->
(P -> R) ->
(T -> R) ->
(S -> T -> U) ->
((P -> Q) -> (P -> S)) ->
T ->
P ->
U.
Proof. auto. Qed.
(** Proof search could, in principle, take an arbitrarily long time,
so there is a limit to how deep [auto] will search by default. *)
(** If [auto] is not solving our goal as expected we can use [debug auto]
to see a trace. *)
Example auto_example_3 : forall (P Q R S T U: Prop),
(P -> Q) ->
(Q -> R) ->
(R -> S) ->
(S -> T) ->
(T -> U) ->
P ->
U.
Proof.
(* When it cannot solve the goal, [auto] does nothing *)
auto.
(* Let's see where [auto] gets stuck using [debug auto] *)
debug auto.
(* Optional argument to [auto] says how deep to search
(default is 5) *)
auto 6.
Qed.
(** When searching for potential proofs of the current goal,
[auto] considers the hypotheses in the current context together
with a _hint database_ of other lemmas and constructors. Some
common lemmas about equality and logical operators are installed
in this hint database by default. *)
Example auto_example_4 : forall P Q R : Prop,
Q ->
(Q -> R) ->
P \/ (Q /\ R).
Proof. auto. Qed.
(** If we want to see which facts [auto] is using, we can use
[info_auto] instead. *)
Example auto_example_5: 2 = 2.
Proof.
info_auto.
Qed.
Example auto_example_5' : forall (P Q R S T U W: Prop),
(U -> T) ->
(W -> U) ->
(R -> S) ->
(S -> T) ->
(P -> R) ->
(U -> T) ->
P ->
T.
Proof.
intros.
info_auto.
Qed.
(** We can extend the hint database just for the purposes of one
application of [auto] by writing "[auto using ...]". *)
Lemma le_antisym : forall n m: nat, (n <= m /\ m <= n) -> n = m.
Proof. lia. Qed.
Example auto_example_6 : forall n m p q : nat,
(p = q -> (n <= m /\ m <= n)) ->
p = q ->
n = m.
Proof.
auto using le_antisym.
Qed.
(** Of course, in any given development there will probably be
some specific constructors and lemmas that are used very often in
proofs. We can add these to the global hint database by writing
Hint Resolve T : core.
at the top level, where [T] is a top-level theorem or a
constructor of an inductively defined proposition (i.e., anything
whose type is an implication). As a shorthand, we can write
Hint Constructors c : core.
to tell Rocq to do a [Hint Resolve] for _all_ of the constructors
from the inductive definition of [c].
It is also sometimes necessary to add
Hint Unfold d : core.
where [d] is a defined symbol, so that [auto] knows to unfold uses
of [d], thus enabling further possibilities for applying lemmas that
it knows about. *)
(** It is also possible to define specialized hint databases (besides
[core]) that can be activated only when needed; indeed, it is good
style to create your own hint databases instead of polluting
[core].
See the Rocq reference manual for details. *)
Hint Resolve le_antisym : core.
Example auto_example_6' : forall n m p q : nat,
(p = q -> (n <= m /\ m <= n)) ->
p = q ->
n = m.
Proof.
auto. (* picks up hint from database *)
Qed.
Definition is_fortytwo x := (x = 42).
Example auto_example_7: forall x,
(x <= 42 /\ 42 <= x) -> is_fortytwo x.
Proof.
auto. (* does nothing *)
Abort.
Hint Unfold is_fortytwo : core.
Example auto_example_7' : forall x,
(x <= 42 /\ 42 <= x) -> is_fortytwo x.
Proof.
auto. (* try also: info_auto. *)
Qed.
(** Note that the [Hint Unfold is_fortytwo] command above the
example is needed because, unlike the normal [apply] tactic, the
[simple apply] steps that are performed by [auto] do not do any
automatic unfolding. *)
(** Let's take a first pass over [ceval_deterministic], using [auto]
to simplify the proof script. *)
Theorem ceval_deterministic': forall c st st1 st2,
st =[ c ]=> st1 ->
st =[ c ]=> st2 ->
st1 = st2.
Proof.
intros c st st1 st2 E1 E2.
generalize dependent st2;
induction E1; intros st2 E2; inversion E2; subst;
auto. (* <---- here's one good place for auto *)
- (* E_Seq *)
rewrite (IHE1_1 st'0 H1) in *.
auto. (* <---- here's another *)
- (* E_IfTrue *)
rewrite H in H5. discriminate.
- (* E_IfFalse *)
rewrite H in H5. discriminate.
- (* E_WhileFalse *)
rewrite H in H2. discriminate.
- (* E_WhileTrue, with b false *)
rewrite H in H4. discriminate.
- (* E_WhileTrue, with b true *)
rewrite (IHE1_1 st'0 H3) in *.
auto. (* <---- and another *)
Qed.
(** When we are using a particular tactic many times in a proof, we
can use a variant of the [Proof] command to make that tactic into
a default within the proof. Saying [Proof with t] (where [t] is
an arbitrary tactic) allows us to use [t1...] as a shorthand for
[t1;t] within the proof. As an illustration, here is an alternate
version of the previous proof, using [Proof with auto]. *)
Theorem ceval_deterministic'_alt: forall c st st1 st2,
st =[ c ]=> st1 ->
st =[ c ]=> st2 ->
st1 = st2.
Proof with auto.
intros c st st1 st2 E1 E2;
generalize dependent st2;
induction E1;
intros st2 E2; inversion E2; subst...
- (* E_Seq *)
rewrite (IHE1_1 st'0 H1) in *...
- (* E_IfTrue *)
rewrite H in H5. discriminate.
- (* E_IfFalse *)
rewrite H in H5. discriminate.
- (* E_WhileFalse *)
rewrite H in H2. discriminate.
- (* E_WhileTrue, with b false *)
rewrite H in H4. discriminate.
- (* E_WhileTrue, with b true *)
rewrite (IHE1_1 st'0 H3) in *...
Qed.
(* ################################################################# *)
(** * Searching For Hypotheses *)
(** The proof has become simpler, but there is still an annoying
degree of repetition. Let's start by tackling the contradiction
cases. Each of them occurs in a situation where we have both
H1: beval st b = false
and
H2: beval st b = true
as hypotheses. The contradiction is evident, but demonstrating it
is a little complicated: we have to locate the two hypotheses [H1]
and [H2] and do a [rewrite] followed by a [discriminate]. We'd
like to automate this process.
(In fact, Rocq has a built-in tactic [congruence] that will do the
job in this case. We'll ignore this tactic for now, in order to
demonstrate how to build forward-search tactics by hand.)
As a first step, we can abstract out the piece of script in
question by writing a little function in Ltac. *)
Ltac rwd H1 H2 := rewrite H1 in H2; discriminate.
Theorem ceval_deterministic'': forall c st st1 st2,
st =[ c ]=> st1 ->
st =[ c ]=> st2 ->
st1 = st2.
Proof.
intros c st st1 st2 E1 E2.
generalize dependent st2;
induction E1; intros st2 E2; inversion E2; subst; auto.
- (* E_Seq *)
rewrite (IHE1_1 st'0 H1) in *.
auto.
- (* E_IfTrue *)
rwd H H5. (* <----- *)
- (* E_IfFalse *)
rwd H H5. (* <----- *)
- (* E_WhileFalse *)
rwd H H2. (* <----- *)
- (* E_WhileTrue - b false *)
rwd H H4. (* <----- *)
- (* EWhileTrue - b true *)
rewrite (IHE1_1 st'0 H3) in *.
auto. Qed.
(** That's a bit better, but we really want Rocq to discover the
relevant hypotheses for us. We can do this by using the [match
goal] facility of Ltac. *)
Ltac find_rwd :=
match goal with
H1: ?E = true, H2: ?E = false |- _
=>
rwd H1 H2
end.
(** This [match goal] looks for two distinct hypotheses that
have the form of equalities, with the same arbitrary expression
[E] on the left and with conflicting boolean values on the right.
If such hypotheses are found, it binds [H1] and [H2] to their
names and applies the [rwd] tactic to [H1] and [H2].
Adding this tactic to the ones that we invoke in each case of the
induction handles all of the contradictory cases. *)
Theorem ceval_deterministic''': forall c st st1 st2,
st =[ c ]=> st1 ->
st =[ c ]=> st2 ->
st1 = st2.
Proof.
intros c st st1 st2 E1 E2.
generalize dependent st2;
induction E1; intros st2 E2; inversion E2; subst;
try find_rwd; (* <------ *)
auto.
- (* E_Seq *)
rewrite (IHE1_1 st'0 H1) in *.
auto.
- (* E_WhileTrue - b true *)
rewrite (IHE1_1 st'0 H3) in *.
auto. Qed.
(** Let's see about the remaining cases. Each of them involves
rewriting a hypothesis after feeding it with the required
condition. We can automate the task of finding the relevant
hypotheses to rewrite with. *)
Ltac find_eqn :=
match goal with
H1: forall x, ?P x -> ?L = ?R,
H2: ?P ?X
|- _
=> rewrite (H1 X H2) in *
end.
(** The pattern [forall x, ?P x -> ?L = ?R] matches any hypothesis of
the form "for all [x], _some property of [x]_ implies _some
equality_." The property of [x] is bound to the pattern variable
[P], and the left- and right-hand sides of the equality are bound
to [L] and [R]. The name of this hypothesis is bound to [H1].
Then the pattern [?P ?X] matches any hypothesis that provides
evidence that [P] holds for some concrete [X]. If both patterns
succeed, we apply the [rewrite] tactic (instantiating the
quantified [x] with [X] and providing [H2] as the required
evidence for [P X]) in all hypotheses and the goal. *)
Theorem ceval_deterministic'''': forall c st st1 st2,
st =[ c ]=> st1 ->
st =[ c ]=> st2 ->
st1 = st2.
Proof.
intros c st st1 st2 E1 E2.
generalize dependent st2;
induction E1; intros st2 E2; inversion E2; subst;
try find_rwd;
try find_eqn; (* <------- *)
auto.
Qed.
(** The big payoff in this approach is that the new proof script is
more robust in the face of changes to our language. To test this,
let's try adding a [REPEAT] command to the language. *)
Module Repeat.
Inductive com : Type :=
| CSkip
| CAsgn (x : string) (a : aexp)
| CSeq (c1 c2 : com)
| CIf (b : bexp) (c1 c2 : com)
| CWhile (b : bexp) (c : com)
| CRepeat (c : com) (b : bexp).
(** [REPEAT] behaves like [while], except that the loop guard is
checked _after_ each execution of the body, with the loop
repeating as long as the guard stays _false_. Because of this,
the body will always execute at least once. *)
Notation "'repeat' x 'until' y 'end'" :=
(CRepeat x y)
(in custom com at level 0,
x at level 99, y at level 99).
Notation "'skip'" :=
CSkip (in custom com at level 0).
Notation "x := y" :=
(CAsgn x y)
(in custom com at level 0, x constr at level 0,
y at level 85, no associativity).
Notation "x ; y" :=
(CSeq x y)
(in custom com at level 90, right associativity).
Notation "'if' x 'then' y 'else' z 'end'" :=
(CIf x y z)
(in custom com at level 89, x at level 99,
y at level 99, z at level 99).
Notation "'while' x 'do' y 'end'" :=
(CWhile x y)
(in custom com at level 89, x at level 99, y at level 99).
Reserved Notation "st '=[' c ']=>' st'"
(at level 40, c custom com at level 99, st' constr at next level).
Inductive ceval : com -> state -> state -> Prop :=
| E_Skip : forall st,
st =[ skip ]=> st
| E_Asgn : forall st a1 n x,
aeval st a1 = n ->
st =[ x := a1 ]=> (x !-> n ; st)
| E_Seq : forall c1 c2 st st' st'',
st =[ c1 ]=> st' ->
st' =[ c2 ]=> st'' ->
st =[ c1 ; c2 ]=> st''
| E_IfTrue : forall st st' b c1 c2,
beval st b = true ->
st =[ c1 ]=> st' ->
st =[ if b then c1 else c2 end ]=> st'
| E_IfFalse : forall st st' b c1 c2,
beval st b = false ->
st =[ c2 ]=> st' ->
st =[ if b then c1 else c2 end ]=> st'
| E_WhileFalse : forall b st c,
beval st b = false ->
st =[ while b do c end ]=> st
| E_WhileTrue : forall st st' st'' b c,
beval st b = true ->
st =[ c ]=> st' ->
st' =[ while b do c end ]=> st'' ->
st =[ while b do c end ]=> st''
| E_RepeatEnd : forall st st' b c,
st =[ c ]=> st' ->
beval st' b = true ->
st =[ repeat c until b end ]=> st'
| E_RepeatLoop : forall st st' st'' b c,
st =[ c ]=> st' ->
beval st' b = false ->
st' =[ repeat c until b end ]=> st'' ->
st =[ repeat c until b end ]=> st''
where "st =[ c ]=> st'" := (ceval c st st').
(** Our first attempt at the determinacy proof does not quite succeed:
the [E_RepeatEnd] and [E_RepeatLoop] cases are not handled by our
previous automation. *)
Theorem ceval_deterministic: forall c st st1 st2,
st =[ c ]=> st1 ->
st =[ c ]=> st2 ->
st1 = st2.
Proof.
intros c st st1 st2 E1 E2.
generalize dependent st2;
induction E1;
intros st2 E2; inversion E2; subst; try find_rwd; try find_eqn; auto.
- (* E_RepeatEnd *)
+ (* b evaluates to false (contradiction) *)
find_rwd.
(* oops: why didn't [find_rwd] solve this for us already?
answer: we did things in the wrong order. *)
- (* E_RepeatLoop *)
+ (* b evaluates to true (contradiction) *)
find_rwd.
Qed.
(** Fortunately, to fix this, we just have to swap the invocations of
[find_eqn] and [find_rwd]. *)
Theorem ceval_deterministic': forall c st st1 st2,
st =[ c ]=> st1 ->
st =[ c ]=> st2 ->
st1 = st2.
Proof.
intros c st st1 st2 E1 E2.
generalize dependent st2;
induction E1;
intros st2 E2; inversion E2; subst; try find_eqn; try find_rwd; auto.
Qed.
End Repeat.
(** These examples just give a flavor of what "hyper-automation"
can achieve in Rocq. The details of [match goal] are a bit
tricky (and debugging scripts using it is, frankly, not very
pleasant). But it is well worth adding at least simple uses to
your proofs, both to avoid tedium and to "future proof" them. *)
(* ################################################################# *)
(** * The [eapply] and [eauto] tactics *)
(** To close the chapter, let's look at one more convenience
feature of Rocq: its ability to delay instantiation of
quantifiers. To motivate this feature, recall this example from
the [Imp] chapter: *)
Example ceval_example1:
empty_st =[
X := 2;
if (X <= 1)
then Y := 3
else Z := 4
end
]=> (Z !-> 4 ; X !-> 2).
Proof.
(* We supply the intermediate state [st']... *)
apply E_Seq with (X !-> 2).
- apply E_Asgn. reflexivity.
- apply E_IfFalse. reflexivity. apply E_Asgn. reflexivity.
Qed.
(** In the first step of the proof, we had to explicitly provide a
longish expression to help Rocq instantiate a "hidden" argument to
the [E_Seq] constructor. This was needed because the definition
of [E_Seq]...
E_Seq : forall c1 c2 st st' st'',
st =[ c1 ]=> st' ->
st' =[ c2 ]=> st'' ->
st =[ c1 ; c2 ]=> st''
is quantified over a variable, [st'], that does not appear in its
conclusion, so unifying its conclusion with the goal state doesn't
help Rocq find a suitable value for this variable. If we leave
out the [with], this step fails ("Error: Unable to find an
instance for the variable [st']").
What's silly about this error is that the appropriate value for
[st'] will actually become obvious in the very next step, where we
apply [E_Asgn]. If Rocq could just wait until we get to this step,
there would be no need for us to give the value explicitly. This
is exactly what the [eapply] tactic allows: *)
Example ceval'_example1:
empty_st =[
X := 2;
if (X <= 1)
then Y := 3
else Z := 4
end
]=> (Z !-> 4 ; X !-> 2).
Proof.
(* 1 *) eapply E_Seq.
- (* 2 *) apply E_Asgn.
(* 3 *) reflexivity.
- (* 4 *) apply E_IfFalse. reflexivity. apply E_Asgn. reflexivity.
Qed.
(** The [eapply H] tactic behaves just like [apply H] except
that, after it finishes unifying the goal state with the
conclusion of [H], it skips checking whether all the variables
that were introduced in the process have been given concrete
values during unification.
If you step through the proof above, you'll see that the goal
state at position [1] mentions the _existential variable_ [?st']
in both of the generated subgoals. The next step (which gets us
to position [2]) replaces [?st'] with a concrete value. This new
value contains a new existential variable [?n], which is
instantiated in its turn by the following [reflexivity] step,
position [3]. When we start working on the second subgoal
(position [4]), we observe that the occurrence of [?st'] in this
subgoal has been replaced by the value that it was given during
the first subgoal. *)
(** Several of the tactics that we've seen so far, including [exists],
[constructor], and [auto], have similar variants. The [eauto]
tactic works like [auto], except that it uses [eapply] instead of
[apply]. Tactic [info_eauto] shows us which tactics [eauto] uses
in its proof search.
Below is an example of [eauto]. Before using it, we need to give
some hints to [auto] about using the constructors of [ceval]
and the definitions of [state] and [total_map] as part of its
proof search. *)
Hint Constructors ceval : core.
Hint Transparent state total_map : core.
Example eauto_example : exists s',
(Y !-> 1 ; X !-> 2) =[
if (X <= Y)
then Z := Y - X
else Y := X + Z
end
]=> s'.
Proof. info_eauto. Qed.
(** The [eauto] tactic works just like [auto], except that it uses
[eapply] instead of [apply]; [info_eauto] shows us which facts
[eauto] uses. *)
(** Pro tip: One might think that, since [eapply] and [eauto]
are more powerful than [apply] and [auto], we should just use them
all the time. Unfortunately, they are also significantly slower
-- especially [eauto]. Rocq experts tend to use [apply] and [auto]
most of the time, only switching to the [e] variants when the
ordinary variants don't do the job. *)
(* ################################################################# *)
(** * Constraints on Existential Variables *)
(** In order for [Qed] to succeed, all existential variables need to
be determined by the end of the proof. Otherwise Rocq
will (rightly) refuse to accept the proof. Remember that the Rocq
tactics build proof objects, and proof objects containing
existential variables are not complete. *)
Lemma silly1 : forall (P : nat -> nat -> Prop) (Q : nat -> Prop),
(forall x y : nat, P x y) ->
(forall x y : nat, P x y -> Q x) ->
Q 42.
Proof.
intros P Q HP HQ. eapply HQ. apply HP. Unshelve. exact 0.
(** Rocq gives a warning after [apply HP]: "All the remaining goals
are on the shelf," means that we've finished all our top-level
proof obligations but along the way we've put some aside to be
done later, and we have not finished those. Trying to close the
proof with [Qed] would yield an error. (Try it!) *)
Abort.
(** An additional constraint is that existential variables cannot be
instantiated with terms containing ordinary variables that did not
exist at the time the existential variable was created. (The
reason for this technical restriction is that allowing such
instantiation would lead to inconsistency of Rocq's logic.) *)
Lemma silly2 :
forall (P : nat -> nat -> Prop) (Q : nat -> Prop),
(exists y, P 42 y) ->
(forall x y : nat, P x y -> Q x) ->
Q 42.
Proof.
intros P Q HP HQ. eapply HQ. destruct HP as [y HP'].
Fail apply HP'.
(** The error we get, with some details elided, is:
cannot instantiate "?y" because "y" is not in its scope
In this case there is an easy fix: doing [destruct HP] _before_
doing [eapply HQ]. *)
Abort.
Lemma silly2_fixed :
forall (P : nat -> nat -> Prop) (Q : nat -> Prop),
(exists y, P 42 y) ->
(forall x y : nat, P x y -> Q x) ->
Q 42.
Proof.
intros P Q HP HQ. destruct HP as [y HP'].
eapply HQ. apply HP'.
Qed.
(** The [apply HP'] in the last step unifies the existential variable
in the goal with the variable [y].
Note that the [assumption] tactic doesn't work in this case, since
it cannot handle existential variables. However, Rocq also
provides an [eassumption] tactic that solves the goal if one of
the premises matches the goal up to instantiations of existential
variables. We can use it instead of [apply HP'] if we like. *)
Lemma silly2_eassumption : forall (P : nat -> nat -> Prop) (Q : nat -> Prop),
(exists y, P 42 y) ->
(forall x y : nat, P x y -> Q x) ->
Q 42.
Proof.
intros P Q HP HQ. destruct HP as [y HP']. eapply HQ. eassumption.
Qed.
(** The [eauto] tactic will use [eapply] and [eassumption], streamlining
the proof even further. *)
Lemma silly2_eauto : forall (P : nat -> nat -> Prop) (Q : nat -> Prop),
(exists y, P 42 y) ->
(forall x y : nat, P x y -> Q x) ->
Q 42.
Proof.
intros P Q HP HQ. destruct HP as [y HP']. eauto.
Qed.
(* 2026-01-07 13:18 *)

68
AutoTest.v Normal file
View File

@@ -0,0 +1,68 @@
Set Warnings "-notation-overridden,-parsing".
From Stdlib Require Export String.
From LF Require Import Auto.
(* Placeholder type used by the autograder: terms for unfinished
   exercises mention [MISSING], which [check_type] below detects. *)
Parameter MISSING: Type.
(* Grading helpers shared by the *Test.v files (appears machine-generated
   -- edit the generator, not this file; TODO confirm). *)
Module Check.
(* [check_type A B]: print "Missing:" if [A]'s type mentions [MISSING];
   otherwise "Type: ok" if the type unifies with the expected type [B],
   or "Type: wrong" if it does not. *)
Ltac check_type A B :=
match type of A with
| context[MISSING] => idtac "Missing:" A
| ?T => first [unify T B; idtac "Type: ok" | idtac "Type: wrong - should be (" B ")"]
end.
(* [print_manual_grade A]: print the score and comment stored in the
   manual-grade option [A], or "Ungraded" when it is [None]. *)
Ltac print_manual_grade A :=
match eval compute in A with
| Some (_ ?S ?C) =>
idtac "Score:" S;
match eval compute in C with
| ""%string => idtac "Comment: None"
| _ => idtac "Comment:" C
end
| None =>
idtac "Score: Ungraded";
idtac "Comment: None"
end.
End Check.
From LF Require Import Auto.
Import Check.
(* Grading script for [Auto]: this chapter has no autograded exercises,
   so the throwaway goal below only prints the standard banner and
   summary headers via [idtac]. *)
Goal True.
idtac " ".
idtac "Max points - standard: 0".
idtac "Max points - advanced: 0".
idtac "".
idtac "Allowed Axioms:".
idtac "functional_extensionality".
idtac "FunctionalExtensionality.functional_extensionality_dep".
idtac "plus_le".
idtac "le_trans".
idtac "le_plus_l".
idtac "add_le_cases".
idtac "Sn_le_Sm__n_le_m".
idtac "O_le_n".
idtac "".
idtac "".
idtac "********** Summary **********".
idtac "".
idtac "Below is a summary of the automatically graded exercises that are incomplete.".
idtac "".
idtac "The output for each exercise can be any of the following:".
idtac " - 'Closed under the global context', if it is complete".
idtac " - 'MANUAL', if it is manually graded".
idtac " - A list of pending axioms, containing unproven assumptions. In this case".
idtac " the exercise is considered complete, if the axioms are all allowed.".
idtac "".
idtac "********** Standard **********".
idtac "".
idtac "********** Advanced **********".
Abort.
(* 2026-01-07 13:18 *)
(* 2026-01-07 13:18 *)

2052
Basics.v Normal file

File diff suppressed because it is too large Load Diff

554
BasicsTest.v Normal file
View File

@@ -0,0 +1,554 @@
Set Warnings "-notation-overridden,-parsing".
From Stdlib Require Export String.
From LF Require Import Basics.
(* Placeholder type used by the autograder: terms for unfinished
   exercises mention [MISSING], which [check_type] below detects. *)
Parameter MISSING: Type.
(* Grading helpers shared by the *Test.v files (appears machine-generated
   -- edit the generator, not this file; TODO confirm). *)
Module Check.
(* [check_type A B]: print "Missing:" if [A]'s type mentions [MISSING];
   otherwise "Type: ok" if the type unifies with the expected type [B],
   or "Type: wrong" if it does not. *)
Ltac check_type A B :=
match type of A with
| context[MISSING] => idtac "Missing:" A
| ?T => first [unify T B; idtac "Type: ok" | idtac "Type: wrong - should be (" B ")"]
end.
(* [print_manual_grade A]: print the score and comment stored in the
   manual-grade option [A], or "Ungraded" when it is [None]. *)
Ltac print_manual_grade A :=
match eval compute in A with
| Some (_ ?S ?C) =>
idtac "Score:" S;
match eval compute in C with
| ""%string => idtac "Comment: None"
| _ => idtac "Comment:" C
end
| None =>
idtac "Score: Ungraded";
idtac "Comment: None"
end.
End Check.
From LF Require Import Basics.
Import Check.
(* Grading script for [Basics] (appears machine-generated -- edit the
   generator, not this file; TODO confirm).  For each graded exercise it
   prints a banner, verifies the statement's type with [check_type]
   inside a throwaway goal, then lists the proof's assumptions with
   [Print Assumptions]; a closing section re-prints all assumptions as a
   summary. *)
Goal True.
idtac "-------------------  nandb  --------------------".
idtac " ".
idtac "#> test_nandb4".
idtac "Possible points: 1".
check_type @test_nandb4 ((@eq bool (nandb true true) false)).
idtac "Assumptions:".
Abort.
Print Assumptions test_nandb4.
Goal True.
idtac " ".
idtac "-------------------  andb3  --------------------".
idtac " ".
idtac "#> test_andb34".
idtac "Possible points: 1".
check_type @test_andb34 ((@eq bool (andb3 true true false) false)).
idtac "Assumptions:".
Abort.
Print Assumptions test_andb34.
Goal True.
idtac " ".
idtac "-------------------  factorial  --------------------".
idtac " ".
idtac "#> test_factorial2".
idtac "Possible points: 1".
check_type @test_factorial2 ((@eq nat (factorial 5) (Nat.mul 10 12))).
idtac "Assumptions:".
Abort.
Print Assumptions test_factorial2.
Goal True.
idtac " ".
idtac "-------------------  ltb  --------------------".
idtac " ".
idtac "#> test_ltb3".
idtac "Possible points: 1".
check_type @test_ltb3 ((@eq bool (ltb 4 2) false)).
idtac "Assumptions:".
Abort.
Print Assumptions test_ltb3.
Goal True.
idtac " ".
idtac "-------------------  plus_id_exercise  --------------------".
idtac " ".
idtac "#> plus_id_exercise".
idtac "Possible points: 1".
check_type @plus_id_exercise (
(forall (n m o : nat) (_ : @eq nat n m) (_ : @eq nat m o),
@eq nat (Nat.add n m) (Nat.add m o))).
idtac "Assumptions:".
Abort.
Print Assumptions plus_id_exercise.
Goal True.
idtac " ".
idtac "-------------------  mult_n_1  --------------------".
idtac " ".
idtac "#> mult_n_1".
idtac "Possible points: 1".
check_type @mult_n_1 ((forall p : nat, @eq nat (Nat.mul p 1) p)).
idtac "Assumptions:".
Abort.
Print Assumptions mult_n_1.
Goal True.
idtac " ".
idtac "-------------------  andb_true_elim2  --------------------".
idtac " ".
idtac "#> andb_true_elim2".
idtac "Possible points: 2".
check_type @andb_true_elim2 (
(forall (b c : bool) (_ : @eq bool (andb b c) true), @eq bool c true)).
idtac "Assumptions:".
Abort.
Print Assumptions andb_true_elim2.
Goal True.
idtac " ".
idtac "-------------------  zero_nbeq_plus_1  --------------------".
idtac " ".
idtac "#> zero_nbeq_plus_1".
idtac "Possible points: 1".
check_type @zero_nbeq_plus_1 ((forall n : nat, @eq bool (eqb 0 (Nat.add n 1)) false)).
idtac "Assumptions:".
Abort.
Print Assumptions zero_nbeq_plus_1.
Goal True.
idtac " ".
idtac "-------------------  identity_fn_applied_twice  --------------------".
idtac " ".
idtac "#> identity_fn_applied_twice".
idtac "Possible points: 1".
check_type @identity_fn_applied_twice (
(forall (f : forall _ : bool, bool) (_ : forall x : bool, @eq bool (f x) x)
(b : bool),
@eq bool (f (f b)) b)).
idtac "Assumptions:".
Abort.
Print Assumptions identity_fn_applied_twice.
Goal True.
idtac " ".
idtac "-------------------  negation_fn_applied_twice  --------------------".
idtac " ".
idtac "#> Manually graded: negation_fn_applied_twice".
idtac "Possible points: 1".
print_manual_grade manual_grade_for_negation_fn_applied_twice.
idtac " ".
idtac "-------------------  letter_comparison  --------------------".
idtac " ".
idtac "#> LateDays.letter_comparison_Eq".
idtac "Possible points: 1".
check_type @LateDays.letter_comparison_Eq (
(forall l : LateDays.letter,
@eq LateDays.comparison (LateDays.letter_comparison l l) LateDays.Eq)).
idtac "Assumptions:".
Abort.
Print Assumptions LateDays.letter_comparison_Eq.
Goal True.
idtac " ".
idtac "-------------------  grade_comparison  --------------------".
idtac " ".
idtac "#> LateDays.test_grade_comparison1".
idtac "Possible points: 0.5".
check_type @LateDays.test_grade_comparison1 (
(@eq LateDays.comparison
(LateDays.grade_comparison (LateDays.Grade LateDays.A LateDays.Minus)
(LateDays.Grade LateDays.B LateDays.Plus))
LateDays.Gt)).
idtac "Assumptions:".
Abort.
Print Assumptions LateDays.test_grade_comparison1.
Goal True.
idtac " ".
idtac "#> LateDays.test_grade_comparison2".
idtac "Possible points: 0.5".
check_type @LateDays.test_grade_comparison2 (
(@eq LateDays.comparison
(LateDays.grade_comparison (LateDays.Grade LateDays.A LateDays.Minus)
(LateDays.Grade LateDays.A LateDays.Plus))
LateDays.Lt)).
idtac "Assumptions:".
Abort.
Print Assumptions LateDays.test_grade_comparison2.
Goal True.
idtac " ".
idtac "#> LateDays.test_grade_comparison3".
idtac "Possible points: 0.5".
check_type @LateDays.test_grade_comparison3 (
(@eq LateDays.comparison
(LateDays.grade_comparison (LateDays.Grade LateDays.F LateDays.Plus)
(LateDays.Grade LateDays.F LateDays.Plus))
LateDays.Eq)).
idtac "Assumptions:".
Abort.
Print Assumptions LateDays.test_grade_comparison3.
Goal True.
idtac " ".
idtac "#> LateDays.test_grade_comparison4".
idtac "Possible points: 0.5".
check_type @LateDays.test_grade_comparison4 (
(@eq LateDays.comparison
(LateDays.grade_comparison (LateDays.Grade LateDays.B LateDays.Minus)
(LateDays.Grade LateDays.C LateDays.Plus))
LateDays.Gt)).
idtac "Assumptions:".
Abort.
Print Assumptions LateDays.test_grade_comparison4.
Goal True.
idtac " ".
idtac "-------------------  lower_letter_lowers  --------------------".
idtac " ".
idtac "#> LateDays.lower_letter_lowers".
idtac "Possible points: 2".
check_type @LateDays.lower_letter_lowers (
(forall (l : LateDays.letter)
(_ : @eq LateDays.comparison (LateDays.letter_comparison LateDays.F l)
LateDays.Lt),
@eq LateDays.comparison
(LateDays.letter_comparison (LateDays.lower_letter l) l) LateDays.Lt)).
idtac "Assumptions:".
Abort.
Print Assumptions LateDays.lower_letter_lowers.
Goal True.
idtac " ".
idtac "-------------------  lower_grade  --------------------".
idtac " ".
idtac "#> LateDays.lower_grade_A_Plus".
idtac "Possible points: 0.25".
check_type @LateDays.lower_grade_A_Plus (
(@eq LateDays.grade
(LateDays.lower_grade (LateDays.Grade LateDays.A LateDays.Plus))
(LateDays.Grade LateDays.A LateDays.Natural))).
idtac "Assumptions:".
Abort.
Print Assumptions LateDays.lower_grade_A_Plus.
Goal True.
idtac " ".
idtac "#> LateDays.lower_grade_A_Natural".
idtac "Possible points: 0.25".
check_type @LateDays.lower_grade_A_Natural (
(@eq LateDays.grade
(LateDays.lower_grade (LateDays.Grade LateDays.A LateDays.Natural))
(LateDays.Grade LateDays.A LateDays.Minus))).
idtac "Assumptions:".
Abort.
Print Assumptions LateDays.lower_grade_A_Natural.
Goal True.
idtac " ".
idtac "#> LateDays.lower_grade_A_Minus".
idtac "Possible points: 0.25".
check_type @LateDays.lower_grade_A_Minus (
(@eq LateDays.grade
(LateDays.lower_grade (LateDays.Grade LateDays.A LateDays.Minus))
(LateDays.Grade LateDays.B LateDays.Plus))).
idtac "Assumptions:".
Abort.
Print Assumptions LateDays.lower_grade_A_Minus.
Goal True.
idtac " ".
idtac "#> LateDays.lower_grade_B_Plus".
idtac "Possible points: 0.25".
check_type @LateDays.lower_grade_B_Plus (
(@eq LateDays.grade
(LateDays.lower_grade (LateDays.Grade LateDays.B LateDays.Plus))
(LateDays.Grade LateDays.B LateDays.Natural))).
idtac "Assumptions:".
Abort.
Print Assumptions LateDays.lower_grade_B_Plus.
Goal True.
idtac " ".
idtac "#> LateDays.lower_grade_F_Natural".
idtac "Possible points: 0.25".
check_type @LateDays.lower_grade_F_Natural (
(@eq LateDays.grade
(LateDays.lower_grade (LateDays.Grade LateDays.F LateDays.Natural))
(LateDays.Grade LateDays.F LateDays.Minus))).
idtac "Assumptions:".
Abort.
Print Assumptions LateDays.lower_grade_F_Natural.
Goal True.
idtac " ".
idtac "#> LateDays.lower_grade_twice".
idtac "Possible points: 0.25".
check_type @LateDays.lower_grade_twice (
(@eq LateDays.grade
(LateDays.lower_grade
(LateDays.lower_grade (LateDays.Grade LateDays.B LateDays.Minus)))
(LateDays.Grade LateDays.C LateDays.Natural))).
idtac "Assumptions:".
Abort.
Print Assumptions LateDays.lower_grade_twice.
Goal True.
idtac " ".
idtac "#> LateDays.lower_grade_thrice".
idtac "Possible points: 0.25".
check_type @LateDays.lower_grade_thrice (
(@eq LateDays.grade
(LateDays.lower_grade
(LateDays.lower_grade
(LateDays.lower_grade (LateDays.Grade LateDays.B LateDays.Minus))))
(LateDays.Grade LateDays.C LateDays.Minus))).
idtac "Assumptions:".
Abort.
Print Assumptions LateDays.lower_grade_thrice.
Goal True.
idtac " ".
idtac "#> LateDays.lower_grade_F_Minus".
idtac "Possible points: 0.25".
check_type @LateDays.lower_grade_F_Minus (
(@eq LateDays.grade
(LateDays.lower_grade (LateDays.Grade LateDays.F LateDays.Minus))
(LateDays.Grade LateDays.F LateDays.Minus))).
idtac "Assumptions:".
Abort.
Print Assumptions LateDays.lower_grade_F_Minus.
Goal True.
idtac " ".
idtac "-------------------  lower_grade_lowers  --------------------".
idtac " ".
idtac "#> LateDays.lower_grade_lowers".
idtac "Possible points: 3".
check_type @LateDays.lower_grade_lowers (
(forall (g : LateDays.grade)
(_ : @eq LateDays.comparison
(LateDays.grade_comparison
(LateDays.Grade LateDays.F LateDays.Minus) g)
LateDays.Lt),
@eq LateDays.comparison
(LateDays.grade_comparison (LateDays.lower_grade g) g) LateDays.Lt)).
idtac "Assumptions:".
Abort.
Print Assumptions LateDays.lower_grade_lowers.
Goal True.
idtac " ".
idtac "-------------------  no_penalty_for_mostly_on_time  --------------------".
idtac " ".
idtac "#> LateDays.no_penalty_for_mostly_on_time".
idtac "Possible points: 2".
check_type @LateDays.no_penalty_for_mostly_on_time (
(forall (late_days : nat) (g : LateDays.grade)
(_ : @eq bool (ltb late_days 9) true),
@eq LateDays.grade (LateDays.apply_late_policy late_days g) g)).
idtac "Assumptions:".
Abort.
Print Assumptions LateDays.no_penalty_for_mostly_on_time.
Goal True.
idtac " ".
idtac "-------------------  graded_lowered_once  --------------------".
idtac " ".
idtac "#> LateDays.grade_lowered_once".
idtac "Possible points: 2".
check_type @LateDays.grade_lowered_once (
(forall (late_days : nat) (g : LateDays.grade)
(_ : @eq bool (ltb late_days 9) false)
(_ : @eq bool (ltb late_days 17) true),
@eq LateDays.grade (LateDays.apply_late_policy late_days g)
(LateDays.lower_grade g))).
idtac "Assumptions:".
Abort.
Print Assumptions LateDays.grade_lowered_once.
Goal True.
idtac " ".
idtac "-------------------  binary  --------------------".
idtac " ".
idtac "#> test_bin_incr1".
idtac "Possible points: 0.5".
check_type @test_bin_incr1 ((@eq bin (incr (B1 Z)) (B0 (B1 Z)))).
idtac "Assumptions:".
Abort.
Print Assumptions test_bin_incr1.
Goal True.
idtac " ".
idtac "#> test_bin_incr2".
idtac "Possible points: 0.5".
check_type @test_bin_incr2 ((@eq bin (incr (B0 (B1 Z))) (B1 (B1 Z)))).
idtac "Assumptions:".
Abort.
Print Assumptions test_bin_incr2.
Goal True.
idtac " ".
idtac "#> test_bin_incr3".
idtac "Possible points: 0.5".
check_type @test_bin_incr3 ((@eq bin (incr (B1 (B1 Z))) (B0 (B0 (B1 Z))))).
idtac "Assumptions:".
Abort.
Print Assumptions test_bin_incr3.
Goal True.
idtac " ".
idtac "#> test_bin_incr4".
idtac "Possible points: 0.5".
check_type @test_bin_incr4 ((@eq nat (bin_to_nat (B0 (B1 Z))) 2)).
idtac "Assumptions:".
Abort.
Print Assumptions test_bin_incr4.
Goal True.
idtac " ".
idtac "#> test_bin_incr5".
idtac "Possible points: 0.5".
check_type @test_bin_incr5 (
(@eq nat (bin_to_nat (incr (B1 Z))) (Nat.add 1 (bin_to_nat (B1 Z))))).
idtac "Assumptions:".
Abort.
Print Assumptions test_bin_incr5.
Goal True.
idtac " ".
idtac "#> test_bin_incr6".
idtac "Possible points: 0.5".
check_type @test_bin_incr6 (
(@eq nat (bin_to_nat (incr (incr (B1 Z)))) (Nat.add 2 (bin_to_nat (B1 Z))))).
idtac "Assumptions:".
Abort.
Print Assumptions test_bin_incr6.
Goal True.
idtac " ".
idtac " ".
idtac "Max points - standard: 28".
idtac "Max points - advanced: 28".
idtac "".
idtac "Allowed Axioms:".
idtac "functional_extensionality".
idtac "FunctionalExtensionality.functional_extensionality_dep".
idtac "plus_le".
idtac "le_trans".
idtac "le_plus_l".
idtac "add_le_cases".
idtac "Sn_le_Sm__n_le_m".
idtac "O_le_n".
idtac "".
idtac "".
idtac "********** Summary **********".
idtac "".
idtac "Below is a summary of the automatically graded exercises that are incomplete.".
idtac "".
idtac "The output for each exercise can be any of the following:".
idtac " - 'Closed under the global context', if it is complete".
idtac " - 'MANUAL', if it is manually graded".
idtac " - A list of pending axioms, containing unproven assumptions. In this case".
idtac " the exercise is considered complete, if the axioms are all allowed.".
idtac "".
idtac "********** Standard **********".
idtac "---------- test_nandb4 ---------".
Print Assumptions test_nandb4.
idtac "---------- test_andb34 ---------".
Print Assumptions test_andb34.
idtac "---------- test_factorial2 ---------".
Print Assumptions test_factorial2.
idtac "---------- test_ltb3 ---------".
Print Assumptions test_ltb3.
idtac "---------- plus_id_exercise ---------".
Print Assumptions plus_id_exercise.
idtac "---------- mult_n_1 ---------".
Print Assumptions mult_n_1.
idtac "---------- andb_true_elim2 ---------".
Print Assumptions andb_true_elim2.
idtac "---------- zero_nbeq_plus_1 ---------".
Print Assumptions zero_nbeq_plus_1.
idtac "---------- identity_fn_applied_twice ---------".
Print Assumptions identity_fn_applied_twice.
idtac "---------- negation_fn_applied_twice ---------".
idtac "MANUAL".
idtac "---------- LateDays.letter_comparison_Eq ---------".
Print Assumptions LateDays.letter_comparison_Eq.
idtac "---------- LateDays.test_grade_comparison1 ---------".
Print Assumptions LateDays.test_grade_comparison1.
idtac "---------- LateDays.test_grade_comparison2 ---------".
Print Assumptions LateDays.test_grade_comparison2.
idtac "---------- LateDays.test_grade_comparison3 ---------".
Print Assumptions LateDays.test_grade_comparison3.
idtac "---------- LateDays.test_grade_comparison4 ---------".
Print Assumptions LateDays.test_grade_comparison4.
idtac "---------- LateDays.lower_letter_lowers ---------".
Print Assumptions LateDays.lower_letter_lowers.
idtac "---------- LateDays.lower_grade_A_Plus ---------".
Print Assumptions LateDays.lower_grade_A_Plus.
idtac "---------- LateDays.lower_grade_A_Natural ---------".
Print Assumptions LateDays.lower_grade_A_Natural.
idtac "---------- LateDays.lower_grade_A_Minus ---------".
Print Assumptions LateDays.lower_grade_A_Minus.
idtac "---------- LateDays.lower_grade_B_Plus ---------".
Print Assumptions LateDays.lower_grade_B_Plus.
idtac "---------- LateDays.lower_grade_F_Natural ---------".
Print Assumptions LateDays.lower_grade_F_Natural.
idtac "---------- LateDays.lower_grade_twice ---------".
Print Assumptions LateDays.lower_grade_twice.
idtac "---------- LateDays.lower_grade_thrice ---------".
Print Assumptions LateDays.lower_grade_thrice.
idtac "---------- LateDays.lower_grade_F_Minus ---------".
Print Assumptions LateDays.lower_grade_F_Minus.
idtac "---------- LateDays.lower_grade_lowers ---------".
Print Assumptions LateDays.lower_grade_lowers.
idtac "---------- LateDays.no_penalty_for_mostly_on_time ---------".
Print Assumptions LateDays.no_penalty_for_mostly_on_time.
idtac "---------- LateDays.grade_lowered_once ---------".
Print Assumptions LateDays.grade_lowered_once.
idtac "---------- test_bin_incr1 ---------".
Print Assumptions test_bin_incr1.
idtac "---------- test_bin_incr2 ---------".
Print Assumptions test_bin_incr2.
idtac "---------- test_bin_incr3 ---------".
Print Assumptions test_bin_incr3.
idtac "---------- test_bin_incr4 ---------".
Print Assumptions test_bin_incr4.
idtac "---------- test_bin_incr5 ---------".
Print Assumptions test_bin_incr5.
idtac "---------- test_bin_incr6 ---------".
Print Assumptions test_bin_incr6.
idtac "".
idtac "********** Advanced **********".
Abort.
(* 2026-01-07 13:18 *)
(* 2026-01-07 13:18 *)

35
Bib.v Normal file
View File

@@ -0,0 +1,35 @@
(** * Bib: Bibliography *)
(* ################################################################# *)
(** * Resources cited in this volume *)
(**
[Bertot 2004] Interactive Theorem Proving and Program Development:
Coq'Art: The Calculus of Inductive Constructions, by Yves Bertot and
Pierre Casteran. Springer-Verlag, 2004.
{https://tinyurl.com/z3o7nqu}
[Chlipala 2013] Certified Programming with Dependent Types, by
Adam Chlipala. MIT Press. 2013. {https://tinyurl.com/zqdnyg2}
[Lipovaca 2011] Learn You a Haskell for Great Good! A Beginner's
Guide, by Miran Lipovaca, No Starch Press, April 2011.
{http://learnyouahaskell.com}
[O'Sullivan 2008] Bryan O'Sullivan, John Goerzen, and Don Stewart:
Real world Haskell - code you can believe in. O'Reilly
2008. {http://book.realworldhaskell.org}
[Pugh 1991] Pugh, William. "The Omega test: a fast and practical
integer programming algorithm for dependence analysis." Proceedings
of the 1991 ACM/IEEE conference on Supercomputing. ACM, 1991.
{https://dl.acm.org/citation.cfm?id=125848}
[Wadler 2015] Philip Wadler. "Propositions as types."
Communications of the ACM 58, no. 12 (2015): 75-84.
{https://dl.acm.org/citation.cfm?id=2699407}
*)
(* 2026-01-06 11:47 *)

68
BibTest.v Normal file
View File

@@ -0,0 +1,68 @@
Set Warnings "-notation-overridden,-parsing".
From Stdlib Require Export String.
From LF Require Import Bib.
(* Placeholder type used by the autograder: terms for unfinished
   exercises mention [MISSING], which [check_type] below detects. *)
Parameter MISSING: Type.
(* Grading helpers shared by the *Test.v files (appears machine-generated
   -- edit the generator, not this file; TODO confirm). *)
Module Check.
(* [check_type A B]: print "Missing:" if [A]'s type mentions [MISSING];
   otherwise "Type: ok" if the type unifies with the expected type [B],
   or "Type: wrong" if it does not. *)
Ltac check_type A B :=
match type of A with
| context[MISSING] => idtac "Missing:" A
| ?T => first [unify T B; idtac "Type: ok" | idtac "Type: wrong - should be (" B ")"]
end.
(* [print_manual_grade A]: print the score and comment stored in the
   manual-grade option [A], or "Ungraded" when it is [None]. *)
Ltac print_manual_grade A :=
match eval compute in A with
| Some (_ ?S ?C) =>
idtac "Score:" S;
match eval compute in C with
| ""%string => idtac "Comment: None"
| _ => idtac "Comment:" C
end
| None =>
idtac "Score: Ungraded";
idtac "Comment: None"
end.
End Check.
From LF Require Import Bib.
Import Check.
(* Grading script for [Bib]: this chapter has no autograded exercises,
   so the throwaway goal below only prints the standard banner and
   summary headers via [idtac]. *)
Goal True.
idtac " ".
idtac "Max points - standard: 0".
idtac "Max points - advanced: 0".
idtac "".
idtac "Allowed Axioms:".
idtac "functional_extensionality".
idtac "FunctionalExtensionality.functional_extensionality_dep".
idtac "plus_le".
idtac "le_trans".
idtac "le_plus_l".
idtac "add_le_cases".
idtac "Sn_le_Sm__n_le_m".
idtac "O_le_n".
idtac "".
idtac "".
idtac "********** Summary **********".
idtac "".
idtac "Below is a summary of the automatically graded exercises that are incomplete.".
idtac "".
idtac "The output for each exercise can be any of the following:".
idtac " - 'Closed under the global context', if it is complete".
idtac " - 'MANUAL', if it is manually graded".
idtac " - A list of pending axioms, containing unproven assumptions. In this case".
idtac " the exercise is considered complete, if the axioms are all allowed.".
idtac "".
idtac "********** Standard **********".
idtac "".
idtac "********** Advanced **********".
Abort.
(* 2026-01-06 11:47 *)
(* 2026-01-06 11:47 *)

134
Extraction.v Normal file
View File

@@ -0,0 +1,134 @@
(** * Extraction: Extracting OCaml from Rocq *)
(* ################################################################# *)
(** * Basic Extraction *)
(** In its simplest form, extracting an efficient program from one
written in Rocq is completely straightforward.
First we say what language we want to extract into. Options are
OCaml (the most mature), Haskell (mostly works), and Scheme (a bit
out of date). *)
From Stdlib Require Extraction.
(* Write the generated [.ml] files into the current directory. *)
Set Extraction Output Directory ".".
Extraction Language OCaml.
(** Now we load up the Rocq environment with some definitions, either
directly or by importing them from other modules. *)
Set Warnings "-notation-overridden,-notation-incompatible-prefix".
From Stdlib Require Import Arith.
From Stdlib Require Import Init.Nat.
From Stdlib Require Import EqNat.
From LF Require Import ImpCEvalFun.
(** Finally, we tell Rocq the name of a definition to extract and the
name of a file to put the extracted code into. *)
Extraction "imp1.ml" ceval_step.
(** When Rocq processes this command, it generates a file [imp1.ml]
containing an extracted version of [ceval_step], together with
everything that it recursively depends on. Compile the present
[.v] file and have a look at [imp1.ml] now. *)
(* ################################################################# *)
(** * Controlling Extraction of Specific Types *)
(** We can tell Rocq to extract certain [Inductive] definitions to
specific OCaml types. For each one, we must say
- how the Rocq type itself should be represented in OCaml, and
- how each constructor should be translated. *)
Extract Inductive bool => "bool" [ "true" "false" ].
(** Also, for non-enumeration types (where the constructors take
arguments), we give an OCaml expression that can be used as a
"recursor" over elements of the type. (Think Church numerals.) *)
(* [nat] becomes OCaml [int]: [O] is [0], [S] is [+1], and the third
   argument is the recursor used to compile [match] on [nat]. *)
Extract Inductive nat => "int"
[ "0" "(fun x -> x + 1)" ]
"(fun zero succ n ->
if n=0 then zero () else succ (n-1))".
(** We can also extract defined constants to specific OCaml terms or
operators. *)
Extract Constant plus => "( + )".
Extract Constant mult => "( * )".
Extract Constant eqb => "( = )".
(** Important: It is entirely _your responsibility_ to make sure that
the translations you're providing make sense. For example, it might
be tempting to include this one
Extract Constant minus => "( - )".
but doing so could lead to serious confusion! (Why?)
*)
Extraction "imp2.ml" ceval_step.
(** Have a look at the file [imp2.ml]. Notice how the fundamental
definitions have changed from [imp1.ml]. *)
(* ################################################################# *)
(** * A Complete Example *)
(** To use our extracted evaluator to run Imp programs, all we need to
add is a tiny driver program that calls the evaluator and prints
out the result.
For simplicity, we'll print results by dumping out the first four
memory locations in the final state.
Also, to make it easier to type in examples, let's extract a
parser from the [ImpParser] Rocq module. To do this, we first need
to set up the right correspondence between Rocq strings and lists
of OCaml characters. *)
From Stdlib Require Import ExtrOcamlBasic.
From Stdlib Require Import ExtrOcamlString.
(** We also need one more variant of booleans. *)
Extract Inductive sumbool => "bool" ["true" "false"].
(** The extraction is the same as always. *)
From LF Require Import Imp.
From LF Require Import ImpParser.
From LF Require Import Maps.
(* Extract the initial state, the evaluator, and the parser entry point
   into a single file. *)
Extraction "imp.ml" empty_st ceval_step parse.
(** Now let's run our generated Imp evaluator. First, have a look at
[impdriver.ml]. (This was written by hand, not extracted.)
Next, compile the driver together with the extracted code and
execute it, as follows.
ocamlc -w -20 -w -26 -o impdriver imp.mli imp.ml impdriver.ml
./impdriver
(The [-w] flags to [ocamlc] are just there to suppress a few
spurious warnings.) *)
(* ################################################################# *)
(** * Discussion *)
(** Since we've proved that the [ceval_step] function behaves the same
as the [ceval] relation in an appropriate sense, the extracted
program can be viewed as a _certified_ Imp interpreter. Of
course, the parser we're using is not certified, since we didn't
prove anything about it! *)
(* ################################################################# *)
(** * Going Further *)
(** Further details about extraction can be found in the Extract
chapter in _Verified Functional Algorithms_ (_Software
Foundations_ volume 3). *)
(* 2026-01-07 13:18 *)

68
ExtractionTest.v Normal file
View File

@@ -0,0 +1,68 @@
Set Warnings "-notation-overridden,-parsing".
From Stdlib Require Export String.
From LF Require Import Extraction.
Parameter MISSING: Type.
Module Check.
Ltac check_type A B :=
match type of A with
| context[MISSING] => idtac "Missing:" A
| ?T => first [unify T B; idtac "Type: ok" | idtac "Type: wrong - should be (" B ")"]
end.
Ltac print_manual_grade A :=
match eval compute in A with
| Some (_ ?S ?C) =>
idtac "Score:" S;
match eval compute in C with
| ""%string => idtac "Comment: None"
| _ => idtac "Comment:" C
end
| None =>
idtac "Score: Ungraded";
idtac "Comment: None"
end.
End Check.
From LF Require Import Extraction.
Import Check.
(* Grading script for [Extraction]: this chapter has no autograded
   exercises, so the throwaway goal below only prints the standard
   banner and summary headers via [idtac]. *)
Goal True.
idtac " ".
idtac "Max points - standard: 0".
idtac "Max points - advanced: 0".
idtac "".
idtac "Allowed Axioms:".
idtac "functional_extensionality".
idtac "FunctionalExtensionality.functional_extensionality_dep".
idtac "plus_le".
idtac "le_trans".
idtac "le_plus_l".
idtac "add_le_cases".
idtac "Sn_le_Sm__n_le_m".
idtac "O_le_n".
idtac "".
idtac "".
idtac "********** Summary **********".
idtac "".
idtac "Below is a summary of the automatically graded exercises that are incomplete.".
idtac "".
idtac "The output for each exercise can be any of the following:".
idtac " - 'Closed under the global context', if it is complete".
idtac " - 'MANUAL', if it is manually graded".
idtac " - A list of pending axioms, containing unproven assumptions. In this case".
idtac " the exercise is considered complete, if the axioms are all allowed.".
idtac "".
idtac "********** Standard **********".
idtac "".
idtac "********** Advanced **********".
Abort.
(* 2026-01-07 13:18 *)
(* 2026-01-07 13:18 *)

2092
Imp.v Normal file

File diff suppressed because it is too large Load Diff

398
ImpCEvalFun.v Normal file
View File

@@ -0,0 +1,398 @@
(** * ImpCEvalFun: An Evaluation Function for Imp *)
(** We saw in the [Imp] chapter how a naive approach to defining a
function representing evaluation for Imp runs into difficulties.
There, we adopted the solution of changing from a functional to a
relational definition of evaluation. In this optional chapter, we
consider strategies for getting the functional approach to
work. *)
(* ################################################################# *)
(** * A Broken Evaluator *)
Set Warnings "-notation-overridden,-notation-incompatible-prefix".
From Stdlib Require Import Lia.
From Stdlib Require Import Arith.
From Stdlib Require Import PeanoNat.
Import Nat.
From Stdlib Require Import EqNat.
From LF Require Import Imp Maps.
Local Open Scope com_scope.
(** Here was our first try at an evaluation function for commands,
omitting [while]. *)
(* First attempt at a functional evaluator: structural recursion on the
   command [c].  Handles every construct except [while], for which it
   simply returns the input state unchanged (deliberately wrong -- see
   the discussion below). *)
Fixpoint ceval_step1 (st : state) (c : com) : state :=
match c with
| <{ skip }> =>
st
| <{ l := a1 }> =>
(l !-> aeval st a1 ; st)
| <{ c1 ; c2 }> =>
(* Thread the state from [c1] into [c2]. *)
let st' := ceval_step1 st c1 in
ceval_step1 st' c2
| <{ if b then c1 else c2 end }> =>
if (beval st b)
then ceval_step1 st c1
else ceval_step1 st c2
| <{ while b1 do c1 end }> =>
st (* bogus *)
end.
(** As we remarked in chapter [Imp], in a traditional functional
programming language like ML or Haskell we could write the while
case as follows:
| while b1 do c1 end =>
if (beval st b1) then
ceval_step1 st <{ c1; while b1 do c1 end }>
else st
Rocq doesn't accept such a definition ([Error: Cannot guess
decreasing argument of fix]) because the function we want to
define is not guaranteed to terminate. Indeed, the changed
[ceval_step1] function applied to the [loop] program from [Imp.v]
would never terminate. Since Rocq is not just a functional
programming language, but also a consistent logic, any potentially
non-terminating function needs to be rejected. Here is an
invalid(!) Rocq program showing what would go wrong if Rocq allowed
non-terminating recursive functions:
Fixpoint loop_false (n : nat) : False := loop_false n.
That is, propositions like [False] would become
provable (e.g., [loop_false 0] would be a proof of [False]), which
would be a disaster for Rocq's logical consistency.
Thus, because it doesn't terminate on all inputs, the full version
of [ceval_step1] cannot be written in Rocq -- at least not without
one additional trick... *)
(* ################################################################# *)
(** * A Step-Indexed Evaluator *)
(** The trick we need is to pass an _additional_ parameter to the
evaluation function that tells it how long to run. Informally, we
start the evaluator with a certain amount of "gas" in its tank,
and we allow it to run until either it terminates in the usual way
_or_ it runs out of gas, at which point we simply stop evaluating
and say that the final result is the empty memory. (We could also
say that the result is the current state at the point where the
evaluator runs out of gas -- it doesn't really matter because the
result is going to be wrong in either case!) *)
(* Step-indexed evaluator: [i] is the "gas".  Recursion is structural
   on [i], not on [c], which is what lets the [while] case recurse on
   the same command [c].  When the gas runs out we return [empty_st],
   which is indistinguishable from a normally computed result. *)
Fixpoint ceval_step2 (st : state) (c : com) (i : nat) : state :=
match i with
| O => empty_st
| S i' =>
match c with
| <{ skip }> =>
st
| <{ l := a1 }> =>
(l !-> aeval st a1 ; st)
| <{ c1 ; c2 }> =>
(* Note: the _same_ [i'] goes to both recursive calls, so [i] is
   not a count of evaluation steps. *)
let st' := ceval_step2 st c1 i' in
ceval_step2 st' c2 i'
| <{ if b then c1 else c2 end }> =>
if (beval st b)
then ceval_step2 st c1 i'
else ceval_step2 st c2 i'
| <{ while b1 do c1 end }> =>
if (beval st b1)
then let st' := ceval_step2 st c1 i' in
(* Recursing on the whole loop [c] is accepted because [i']
   decreases. *)
ceval_step2 st' c i'
else st
end
end.
(** _Note_: It is tempting to think that the index [i] here is
counting the "number of steps of evaluation." But if you look
closely you'll see that this is not the case: for example, in the
rule for sequencing, the same [i] is passed to both recursive
calls. Understanding the exact way that [i] is treated will be
important in the proof of [ceval__ceval_step], which is given as
an exercise below.
One thing that is not so nice about this evaluator is that we
can't tell, from its result, whether it stopped because the
program terminated normally or because it ran out of gas. Our
next version returns an [option state] instead of just a [state],
so that we can distinguish between normal and abnormal
termination. *)
(** Second cut: same gas discipline as [ceval_step2], but the result is
    an [option state] so that [None] unambiguously signals "ran out of
    gas" while [Some st'] signals normal termination in [st']. *)
Fixpoint ceval_step3 (st : state) (c : com) (i : nat)
                   : option state :=
  match i with
  | O => None  (* gas exhausted *)
  | S i' =>
    match c with
    | <{ skip }> =>
        Some st
    | <{ l := a1 }> =>
        Some (l !-> aeval st a1 ; st)
    | <{ c1 ; c2 }> =>
        (* explicit plumbing of the option through sequencing *)
        match (ceval_step3 st c1 i') with
        | Some st' => ceval_step3 st' c2 i'
        | None => None
        end
    | <{ if b then c1 else c2 end }> =>
        if (beval st b)
          then ceval_step3 st c1 i'
          else ceval_step3 st c2 i'
    | <{ while b1 do c1 end }> =>
        if (beval st b1)
        then match (ceval_step3 st c1 i') with
             | Some st' => ceval_step3 st' c i'
             | None => None
             end
        else Some st
    end
  end.
(** We can improve the readability of this version by introducing a
bit of auxiliary notation to hide the plumbing involved in
repeatedly matching against optional states. *)
Notation "'LETOPT' x <== e1 'IN' e2"
:= (match e1 with
| Some x => e2
| None => None
end)
(right associativity, at level 60).
(** The final evaluator: identical to [ceval_step3], but using the
    [LETOPT] notation to hide the option plumbing.  This is the
    definition the rest of the chapter reasons about. *)
Fixpoint ceval_step (st : state) (c : com) (i : nat)
                  : option state :=
  match i with
  | O => None  (* gas exhausted *)
  | S i' =>
    match c with
    | <{ skip }> =>
        Some st
    | <{ l := a1 }> =>
        Some (l !-> aeval st a1 ; st)
    | <{ c1 ; c2 }> =>
        LETOPT st' <== ceval_step st c1 i' IN
        ceval_step st' c2 i'
    | <{ if b then c1 else c2 end }> =>
        if (beval st b)
          then ceval_step st c1 i'
          else ceval_step st c2 i'
    | <{ while b1 do c1 end }> =>
        if (beval st b1)
        then LETOPT st' <== ceval_step st c1 i' IN
             (* recurse on the whole loop [c]: one gas unit per iteration *)
             ceval_step st' c i'
        else Some st
    end
  end.
(** Run [c] from [st] with a gas budget of 500 and project the final
    state onto the three variables of interest.  Yields [None] exactly
    when [ceval_step] runs out of gas. *)
Definition test_ceval (st : state) (c : com) :=
  option_map (fun st' => (st' X, st' Y, st' Z)) (ceval_step st c 500).
Example example_test_ceval :
test_ceval empty_st
<{ X := 2;
if (X <= 1)
then Y := 3
else Z := 4
end }>
= Some (2, 0, 4).
Proof. reflexivity. Qed.
(** **** Exercise: 1 star, standard, optional (pup_to_n)
Write an Imp program that sums the numbers from [1] to
[X] (inclusive -- i.e., [1 + 2 + ... + X]) in the variable [Y]. Make
sure your solution satisfies the test that follows. *)
(** Sum the numbers from [1] to [X] (inclusive) into [Y], counting [X]
    down to [0].  Invariant at the loop head: [Y] plus the sum
    [1 + ... + X] equals the sum for the original value of [X]. *)
Definition pup_to_n : com :=
  <{ Y := 0 ;
     while 1 <= X do
       Y := Y + X ;
       X := X - 1
     end }>.

Example pup_to_n_1 :
  test_ceval (X !-> 5) pup_to_n
  = Some (0, 15, 0).
(* The program terminates well within the 500-step gas budget, so the
   whole test computes by [reflexivity]. *)
Proof. reflexivity. Qed.
(** [] *)
(** **** Exercise: 2 stars, standard, optional (peven)
Write an [Imp] program that sets [Z] to [0] if [X] is even and
sets [Z] to [1] otherwise. Use [test_ceval] to test your
program. *)
(* FILL IN HERE
[] *)
(* ################################################################# *)
(** * Relational vs. Step-Indexed Evaluation *)
(** As for arithmetic and boolean expressions, we'd hope that
the two alternative definitions of evaluation would actually
amount to the same thing in the end. This section shows that this
is the case. *)
(** If the step-indexed evaluator succeeds for some amount of gas, the
    relational semantics agrees.  The proof is by induction on the gas
    [i], with [c], [st], [st'] generalized so the induction hypothesis
    applies to the subcommands at the smaller index. *)
Theorem ceval_step__ceval: forall c st st',
    (exists i, ceval_step st c i = Some st') ->
    st =[ c ]=> st'.
Proof.
  intros c st st' H.
  inversion H as [i E].
  clear H.
  (* Generalize everything but [i] before inducting on the gas. *)
  generalize dependent st'.
  generalize dependent st.
  generalize dependent c.
  induction i as [| i' ].
  - (* i = 0 -- contradictory: [ceval_step] returns [None] *)
    intros c st st' H. discriminate H.
  - (* i = S i' *)
    intros c st st' H.
    destruct c;
      simpl in H; inversion H; subst; clear H.
    + (* skip *) apply E_Skip.
    + (* := *) apply E_Asgn. reflexivity.
    + (* ; *)
      destruct (ceval_step st c1 i') eqn:Heqr1.
      * (* Evaluation of r1 terminates normally *)
        apply E_Seq with s.
        apply IHi'. rewrite Heqr1. reflexivity.
        apply IHi'. assumption.
      * (* Otherwise -- contradiction *)
        discriminate H1.
    + (* if *)
      destruct (beval st b) eqn:Heqr.
      * (* r = true *)
        apply E_IfTrue. rewrite Heqr. reflexivity.
        apply IHi'. assumption.
      * (* r = false *)
        apply E_IfFalse. rewrite Heqr. reflexivity.
        apply IHi'. assumption.
    + (* while *) destruct (beval st b) eqn :Heqr.
      * (* r = true *)
        destruct (ceval_step st c i') eqn:Heqr1.
        { (* r1 = Some s *)
          apply E_WhileTrue with s. rewrite Heqr.
          reflexivity.
          apply IHi'. rewrite Heqr1. reflexivity.
          apply IHi'. assumption. }
        { (* r1 = None *) discriminate H1. }
      * (* r = false *)
        injection H1 as H2. rewrite <- H2.
        apply E_WhileFalse. apply Heqr. Qed.
(** **** Exercise: 4 stars, advanced (ceval_step__ceval_inf)
Write an informal proof of [ceval_step__ceval], following the
usual template. (The template for case analysis on an inductively
defined value should look the same as for induction, except that
there is no induction hypothesis.) Make your proof communicate
the main ideas to a human reader; do not simply transcribe the
steps of the formal proof. *)
(* FILL IN HERE *)
(* Do not modify the following line: *)
Definition manual_grade_for_ceval_step__ceval_inf : option (nat*string) := None.
(** [] *)
(** Monotonicity in the gas: if [ceval_step] succeeds with [i1] units
    of gas, it succeeds with the same answer for any larger budget
    [i2].  Induction on [i1], with all other variables generalized. *)
Theorem ceval_step_more: forall i1 i2 st st' c,
  i1 <= i2 ->
  ceval_step st c i1 = Some st' ->
  ceval_step st c i2 = Some st'.
Proof.
  induction i1 as [|i1']; intros i2 st st' c Hle Hceval.
  - (* i1 = 0: [ceval_step] yields [None], contradiction *)
    simpl in Hceval. discriminate Hceval.
  - (* i1 = S i1' *)
    destruct i2 as [|i2']. inversion Hle.
    assert (Hle': i1' <= i2') by lia.
    destruct c.
    + (* skip *)
      simpl in Hceval. inversion Hceval.
      reflexivity.
    + (* := *)
      simpl in Hceval. inversion Hceval.
      reflexivity.
    + (* ; *)
      simpl in Hceval. simpl.
      destruct (ceval_step st c1 i1') eqn:Heqst1'o.
      * (* st1'o = Some: lift both sub-runs to the larger budget *)
        apply (IHi1' i2') in Heqst1'o; try assumption.
        rewrite Heqst1'o. simpl. simpl in Hceval.
        apply (IHi1' i2') in Hceval; try assumption.
      * (* st1'o = None *)
        discriminate Hceval.
    + (* if *)
      simpl in Hceval. simpl.
      destruct (beval st b); apply (IHi1' i2') in Hceval;
        assumption.
    + (* while *)
      simpl in Hceval. simpl.
      destruct (beval st b); try assumption.
      destruct (ceval_step st c i1') eqn: Heqst1'o.
      * (* st1'o = Some *)
        apply (IHi1' i2') in Heqst1'o; try assumption.
        rewrite -> Heqst1'o. simpl. simpl in Hceval.
        apply (IHi1' i2') in Hceval; try assumption.
      * (* i1'o = None *)
        simpl in Hceval. discriminate Hceval. Qed.
(** **** Exercise: 3 stars, standard, especially useful (ceval__ceval_step)
Finish the following proof. You'll need [ceval_step_more] in a
few places, as well as some basic facts about [<=] and [plus]. *)
(** Completeness of the step-indexed evaluator: every relational
    derivation is matched by [ceval_step] for a large enough gas
    budget.  Proof by induction on the derivation; in the two-premise
    cases we take one more than the sum of the two witnesses and use
    [ceval_step_more] to lift each sub-run to the common budget. *)
Theorem ceval__ceval_step: forall c st st',
      st =[ c ]=> st' ->
      exists i, ceval_step st c i = Some st'.
Proof.
  intros c st st' Hce.
  induction Hce.
  - (* E_Skip *)
    exists 1. reflexivity.
  - (* E_Asgn *)
    exists 1. simpl. rewrite H. reflexivity.
  - (* E_Seq *)
    destruct IHHce1 as [i1 IH1].
    destruct IHHce2 as [i2 IH2].
    (* Lift both sub-runs to the common budget [i1 + i2]. *)
    apply (ceval_step_more i1 (i1 + i2)) in IH1; try lia.
    apply (ceval_step_more i2 (i1 + i2)) in IH2; try lia.
    exists (S (i1 + i2)). simpl.
    rewrite IH1. simpl. assumption.
  - (* E_IfTrue *)
    destruct IHHce as [i IH].
    exists (S i). simpl. rewrite H. assumption.
  - (* E_IfFalse *)
    destruct IHHce as [i IH].
    exists (S i). simpl. rewrite H. assumption.
  - (* E_WhileFalse *)
    exists 1. simpl. rewrite H. reflexivity.
  - (* E_WhileTrue *)
    destruct IHHce1 as [i1 IH1].
    destruct IHHce2 as [i2 IH2].
    apply (ceval_step_more i1 (i1 + i2)) in IH1; try lia.
    apply (ceval_step_more i2 (i1 + i2)) in IH2; try lia.
    exists (S (i1 + i2)). simpl. rewrite H.
    rewrite IH1. simpl. assumption.
Qed.
(** [] *)
(** The two semantics coincide: combine the two implications proved
    above into a single equivalence. *)
Theorem ceval_and_ceval_step_coincide: forall c st st',
      st =[ c ]=> st'
  <-> exists i, ceval_step st c i = Some st'.
Proof.
  intros c st st'.
  split; [ apply ceval__ceval_step | apply ceval_step__ceval ].
Qed.
(* ################################################################# *)
(** * Determinism of Evaluation Again *)
(** Using the fact that the relational and step-indexed definition of
evaluation are the same, we can give a slicker proof that the
evaluation _relation_ is deterministic. *)
(** Determinism of the evaluation relation, proved via the functional
    evaluator: translate both derivations into [ceval_step] runs, lift
    them to the common gas budget [i1 + i2] with [ceval_step_more],
    and compare the (necessarily equal) results. *)
Theorem ceval_deterministic' : forall c st st1 st2,
  st =[ c ]=> st1 ->
  st =[ c ]=> st2 ->
  st1 = st2.
Proof.
  intros c st st1 st2 He1 He2.
  apply ceval__ceval_step in He1.
  apply ceval__ceval_step in He2.
  inversion He1 as [i1 E1].
  inversion He2 as [i2 E2].
  (* Same budget on both sides, so the two runs are literally equal. *)
  apply ceval_step_more with (i2 := i1 + i2) in E1.
  apply ceval_step_more with (i2 := i1 + i2) in E2.
  rewrite E1 in E2. inversion E2. reflexivity.
  lia. lia. Qed.
(* 2026-01-07 13:18 *)

97
ImpCEvalFunTest.v Normal file
View File

@@ -0,0 +1,97 @@
Set Warnings "-notation-overridden,-parsing".
From Stdlib Require Export String.
From LF Require Import ImpCEvalFun.
Parameter MISSING: Type.
Module Check.
Ltac check_type A B :=
match type of A with
| context[MISSING] => idtac "Missing:" A
| ?T => first [unify T B; idtac "Type: ok" | idtac "Type: wrong - should be (" B ")"]
end.
Ltac print_manual_grade A :=
match eval compute in A with
| Some (_ ?S ?C) =>
idtac "Score:" S;
match eval compute in C with
| ""%string => idtac "Comment: None"
| _ => idtac "Comment:" C
end
| None =>
idtac "Score: Ungraded";
idtac "Comment: None"
end.
End Check.
From LF Require Import ImpCEvalFun.
Import Check.
Goal True.
idtac "------------------- ceval_step__ceval_inf --------------------".
idtac " ".
idtac "#> Manually graded: ceval_step__ceval_inf".
idtac "Advanced".
idtac "Possible points: 6".
print_manual_grade manual_grade_for_ceval_step__ceval_inf.
idtac " ".
idtac "------------------- ceval__ceval_step --------------------".
idtac " ".
idtac "#> ceval__ceval_step".
idtac "Possible points: 3".
check_type @ceval__ceval_step (
(forall (c : Imp.com) (st st' : Imp.state) (_ : Imp.ceval c st st'),
@ex nat
(fun i : nat =>
@eq (option Imp.state) (ceval_step st c i) (@Some Imp.state st')))).
idtac "Assumptions:".
Abort.
Print Assumptions ceval__ceval_step.
Goal True.
idtac " ".
idtac " ".
idtac "Max points - standard: 3".
idtac "Max points - advanced: 9".
idtac "".
idtac "Allowed Axioms:".
idtac "functional_extensionality".
idtac "FunctionalExtensionality.functional_extensionality_dep".
idtac "plus_le".
idtac "le_trans".
idtac "le_plus_l".
idtac "add_le_cases".
idtac "Sn_le_Sm__n_le_m".
idtac "O_le_n".
idtac "".
idtac "".
idtac "********** Summary **********".
idtac "".
idtac "Below is a summary of the automatically graded exercises that are incomplete.".
idtac "".
idtac "The output for each exercise can be any of the following:".
idtac " - 'Closed under the global context', if it is complete".
idtac " - 'MANUAL', if it is manually graded".
idtac " - A list of pending axioms, containing unproven assumptions. In this case".
idtac " the exercise is considered complete, if the axioms are all allowed.".
idtac "".
idtac "********** Standard **********".
idtac "---------- ceval__ceval_step ---------".
Print Assumptions ceval__ceval_step.
idtac "".
idtac "********** Advanced **********".
idtac "---------- ceval_step__ceval_inf ---------".
idtac "MANUAL".
Abort.
(* 2026-01-07 13:18 *)
(* 2026-01-07 13:18 *)

466
ImpParser.v Normal file
View File

@@ -0,0 +1,466 @@
(** * ImpParser: Lexing and Parsing in Rocq *)
(** The development of the Imp language in [Imp.v] completely ignores
issues of concrete syntax -- how an ascii string that a programmer
might write gets translated into abstract syntax trees defined by
the datatypes [aexp], [bexp], and [com]. In this chapter, we
illustrate how the rest of the story can be filled in by building
a simple lexical analyzer and parser using Rocq's functional
programming facilities. *)
(** It is not important to understand all the details here (and
accordingly, the explanations are fairly terse and there are no
exercises). The main point is simply to demonstrate that it can
be done. You are invited to look through the code -- most of it
is not very complicated, though the parser relies on some
"monadic" programming idioms that may require a little work to
make out -- but most readers will probably want to just skim down
to the Examples section at the very end to get the punchline. *)
Set Warnings "-notation-overridden,-notation-incompatible-prefix".
From Stdlib Require Import Strings.String.
From Stdlib Require Import Strings.Ascii.
From Stdlib Require Import Arith.
From Stdlib Require Import Init.Nat.
From Stdlib Require Import EqNat.
From Stdlib Require Import List. Import ListNotations.
From LF Require Import Maps Imp.
Local Open Scope com_scope.
(* ################################################################# *)
(** * Internals *)
(* ================================================================= *)
(** ** Lexical Analysis *)
(** Character-class predicates over ASCII codes. *)

(** [isWhite c]: true iff [c] is a whitespace character. *)
Definition isWhite (c : ascii) : bool :=
  let n := nat_of_ascii c in
  orb (orb (n =? 32) (* space *)
           (n =? 9)) (* tab *)
      (orb (n =? 10) (* linefeed *)
           (n =? 13)). (* Carriage return. *)
(* Re-declare the [<=?] notation so it is available at the level and
   scope used below. *)
Notation "x '<=?' y" := (x <=? y)
  (at level 70, no associativity) : nat_scope.
(** [isLowerAlpha c]: true iff [c] is in ['a'..'z'] (codes 97-122). *)
Definition isLowerAlpha (c : ascii) : bool :=
  let n := nat_of_ascii c in
  andb (97 <=? n) (n <=? 122).
(** [isAlpha c]: true iff [c] is in ['A'..'Z'] (65-90) or ['a'..'z'] (97-122). *)
Definition isAlpha (c : ascii) : bool :=
  let n := nat_of_ascii c in
  orb (andb (65 <=? n) (n <=? 90))
      (andb (97 <=? n) (n <=? 122)).
(** [isDigit c]: true iff [c] is in ['0'..'9'] (codes 48-57). *)
Definition isDigit (c : ascii) : bool :=
  let n := nat_of_ascii c in
  andb (48 <=? n) (n <=? 57).
(** The four character classes the tokenizer distinguishes. *)
Inductive chartype := white | alpha | digit | other.

(** Classify a character; classes are tested in order, so whitespace
    wins over the (disjoint) other classes, and anything unmatched is
    [other]. *)
Definition classifyChar (c : ascii) : chartype :=
  if isWhite c then
    white
  else if isAlpha c then
    alpha
  else if isDigit c then
    digit
  else
    other.
(** Explode a [string] into its list of characters. *)
Fixpoint list_of_string (s : string) : list ascii :=
  match s with
  | EmptyString => []
  | String c s => c :: (list_of_string s)
  end.
(** Inverse of [list_of_string]: rebuild a string from characters. *)
Definition string_of_list (xs : list ascii) : string :=
  fold_right String EmptyString xs.
(* Tokens are just strings. *)
Definition token := string.
(** Core of the lexer.  [cls] is the class of the characters collected
    so far, [acc] holds the current (reversed) token, and [xs] is the
    remaining input.  [tk] flushes the accumulator (re-reversed) as a
    completed token whenever the character class changes. *)
Fixpoint tokenize_helper (cls : chartype) (acc xs : list ascii)
                       : list (list ascii) :=
  let tk := match acc with [] => [] | _::_ => [rev acc] end in
  match xs with
  | [] => tk
  | (x::xs') =>
    match cls, classifyChar x, x with
    (* Parentheses are always single-character tokens. *)
    | _, _, "(" =>
      tk ++ ["("]::(tokenize_helper other [] xs')
    | _, _, ")" =>
      tk ++ [")"]::(tokenize_helper other [] xs')
    (* Whitespace ends the current token and is itself discarded. *)
    | _, white, _ =>
      tk ++ (tokenize_helper white [] xs')
    (* Same class as the accumulator: keep extending the token. *)
    | alpha,alpha,x =>
      tokenize_helper alpha (x::acc) xs'
    | digit,digit,x =>
      tokenize_helper digit (x::acc) xs'
    | other,other,x =>
      tokenize_helper other (x::acc) xs'
    (* Class change: flush the old token, start a new one with [x]. *)
    | _,tp,x =>
      tk ++ (tokenize_helper tp [x] xs')
    end
  end %char.
(** Split a string into tokens. *)
Definition tokenize (s : string) : list string :=
  map string_of_list (tokenize_helper white [] (list_of_string s)).
Example tokenize_ex1 :
tokenize "abc12=3 223*(3+(a+c))" %string
= ["abc"; "12"; "="; "3"; "223";
"*"; "("; "3"; "+"; "(";
"a"; "+"; "c"; ")"; ")"]%string.
Proof. reflexivity. Qed.
(* ================================================================= *)
(** ** Parsing *)
(* ----------------------------------------------------------------- *)
(** *** Options With Errors *)
(** An [option] type with error messages: *)
(** Like [option], but the failure case carries an error message. *)
Inductive optionE (X:Type) : Type :=
  | SomeE (x : X)      (* success, carrying a value *)
  | NoneE (s : string). (* failure, carrying an error message *)
(* Make the type argument implicit for both constructors. *)
Arguments SomeE {X}.
Arguments NoneE {X}.
(** Some syntactic sugar to make writing nested match-expressions on
optionE more convenient. *)
Notation "' p <- e1 ;; e2"
:= (match e1 with
| SomeE p => e2
| NoneE err => NoneE err
end)
(right associativity, p pattern, at level 60, e1 at next level).
Notation "'TRY' e1 'OR' e2"
:= (
let result := e1 in
match result with
| SomeE _ => result
| NoneE _ => e2
end)
(right associativity,
at level 60, e1 at next level, e2 at next level).
(* ----------------------------------------------------------------- *)
(** *** Generic Combinators for Building Parsers *)
Open Scope string_scope.
(** A parser of [T]s consumes a prefix of the token list and, on
    success, returns the parsed value together with the unconsumed
    suffix. *)
Definition parser (T : Type) :=
  list token -> optionE (T * list token).
(** Worker for [many]: [acc] collects results (reversed); [steps]
    bounds recursion so the fixpoint is structurally decreasing.
    Stops successfully as soon as [p] fails. *)
Fixpoint many_helper {T} (p : parser T) acc steps xs :=
  match steps, p xs with
  | 0, _ =>
      NoneE "Too many recursive calls"
  | _, NoneE _ =>
      SomeE ((rev acc), xs)
  | S steps', SomeE (t, xs') =>
      many_helper p (t :: acc) steps' xs'
  end.
(** A (step-indexed) parser that expects zero or more [p]s: *)
Definition many {T} (p : parser T) (steps : nat) : parser (list T) :=
  many_helper p [] steps.
(** A parser that expects a given token, followed by [p]: *)
Definition firstExpect {T} (t : token) (p : parser T)
                     : parser T :=
  fun xs => match xs with
            | x::xs' =>
                if string_dec x t
                then p xs'
                else NoneE ("expected '" ++ t ++ "'.")
            | [] =>
                NoneE ("expected '" ++ t ++ "'.")
            end.
(** A parser that expects a particular token: *)
Definition expect (t : token) : parser unit :=
  firstExpect t (fun xs => SomeE (tt, xs)).
(* ----------------------------------------------------------------- *)
(** *** A Recursive-Descent Parser for Imp *)
(** Identifiers: *)
(** Parse one identifier token: accepted iff every character is a
    lowercase letter. *)
Definition parseIdentifier (xs : list token)
                         : optionE (string * list token) :=
  match xs with
  | [] => NoneE "Expected identifier"
  | x::xs' =>
      if forallb isLowerAlpha (list_of_string x) then
        SomeE (x, xs')
      else
        NoneE ("Illegal identifier:'" ++ x ++ "'")
  end.
(** Parse one numeric token: accepted iff every character is a digit;
    the value is accumulated left-to-right in base 10. *)
Definition parseNumber (xs : list token)
                     : optionE (nat * list token) :=
  match xs with
  | [] => NoneE "Expected number"
  | x::xs' =>
      if forallb isDigit (list_of_string x) then
        SomeE (fold_left
                 (fun n d =>
                    10 * n + (nat_of_ascii d -
                              nat_of_ascii "0"%char))
                 (list_of_string x)
                 0,
               xs')
      else
        NoneE "Expected number"
  end.
(** Parse arithmetic expressions *)
(** Recursive-descent grammar for arithmetic expressions, stratified
    by precedence: primary (identifier, literal, parenthesized sum)
    < product ([*]) < sum ([+]/[-]).  [steps] bounds the recursion
    depth so the mutual fixpoint is structurally decreasing. *)
Fixpoint parsePrimaryExp (steps:nat)
                         (xs : list token)
                       : optionE (aexp * list token) :=
  match steps with
  | 0 => NoneE "Too many recursive calls"
  | S steps' =>
      TRY ' (i, rest) <- parseIdentifier xs ;;
          SomeE (AId i, rest)
      OR
      TRY ' (n, rest) <- parseNumber xs ;;
          SomeE (ANum n, rest)
      OR
      ' (e, rest) <- firstExpect "(" (parseSumExp steps') xs ;;
      ' (u, rest') <- expect ")" rest ;;
      SomeE (e,rest')
  end
(* Products: a primary followed by zero or more "* primary";
   [fold_left AMult] makes [*] left-associative. *)
with parseProductExp (steps:nat)
                     (xs : list token) :=
  match steps with
  | 0 => NoneE "Too many recursive calls"
  | S steps' =>
    ' (e, rest) <- parsePrimaryExp steps' xs ;;
    ' (es, rest') <- many (firstExpect "*" (parsePrimaryExp steps'))
                          steps' rest ;;
    SomeE (fold_left AMult es e, rest')
  end
(* Sums: a product followed by zero or more "+/- product"; the boolean
   tags [true]/[false] record plus vs. minus for the final fold. *)
with parseSumExp (steps:nat) (xs : list token) :=
  match steps with
  | 0 => NoneE "Too many recursive calls"
  | S steps' =>
    ' (e, rest) <- parseProductExp steps' xs ;;
    ' (es, rest') <-
        many (fun xs =>
                TRY ' (e,rest') <-
                    firstExpect "+"
                                (parseProductExp steps') xs ;;
                    SomeE ( (true, e), rest')
                OR
                ' (e, rest') <-
                    firstExpect "-"
                                (parseProductExp steps') xs ;;
                SomeE ( (false, e), rest'))
             steps' rest ;;
    SomeE (fold_left (fun e0 term =>
                        match term with
                        | (true, e) => APlus e0 e
                        | (false, e) => AMinus e0 e
                        end)
                     es e,
           rest')
  end.
Definition parseAExp := parseSumExp.
(** Parsing boolean expressions: *)
(** Boolean expressions: atoms are [true], [false], negation,
    parenthesized conjunctions, and comparisons ([=], [<=]) between
    arithmetic expressions; conjunctions chain atoms with [&&]. *)
Fixpoint parseAtomicExp (steps:nat)
                        (xs : list token) :=
  match steps with
  | 0 => NoneE "Too many recursive calls"
  | S steps' =>
      TRY ' (u,rest) <- expect "true" xs ;;
          SomeE (BTrue,rest)
      OR
      TRY ' (u,rest) <- expect "false" xs ;;
          SomeE (BFalse,rest)
      OR
      TRY ' (e,rest) <- firstExpect "~"
                                    (parseAtomicExp steps')
                                    xs ;;
          SomeE (BNot e, rest)
      OR
      TRY ' (e,rest) <- firstExpect "("
                                    (parseConjunctionExp steps')
                                    xs ;;
          ' (u,rest') <- expect ")" rest ;;
          SomeE (e, rest')
      OR
      (* Otherwise it must be a comparison of arithmetic expressions. *)
      ' (e, rest) <- parseProductExp steps' xs ;;
      TRY ' (e', rest') <- firstExpect "="
                                       (parseAExp steps') rest ;;
          SomeE (BEq e e', rest')
      OR
      TRY ' (e', rest') <- firstExpect "<="
                                       (parseAExp steps') rest ;;
          SomeE (BLe e e', rest')
      OR
      NoneE "Expected '=' or '<=' after arithmetic expression"
  end
(* A conjunction is an atom followed by zero or more "&& atom";
   [fold_left BAnd] makes [&&] left-associative. *)
with parseConjunctionExp (steps:nat)
                         (xs : list token) :=
  match steps with
  | 0 => NoneE "Too many recursive calls"
  | S steps' =>
    ' (e, rest) <- parseAtomicExp steps' xs ;;
    ' (es, rest') <- many (firstExpect "&&"
                                       (parseAtomicExp steps'))
                          steps' rest ;;
    SomeE (fold_left BAnd es e, rest')
  end.
(* Top-level entry point for boolean expressions. *)
Definition parseBExp := parseConjunctionExp.
Check parseConjunctionExp.
Definition testParsing {X : Type}
(p : nat ->
list token ->
optionE (X * list token))
(s : string) :=
let t := tokenize s in
p 100 t.
(*
Eval compute in
testParsing parseProductExp "x.y.(x.x).x".
Eval compute in
testParsing parseConjunctionExp "~(x=x&&x*x<=(x*x)*x)&&x=x".
*)
(** Parsing commands: *)
(** Commands: a simple command is [skip], a conditional, a loop, or an
    assignment; sequencing is handled by [parseSequencedCommand],
    which chains simple commands with [;] (right-associatively). *)
Fixpoint parseSimpleCommand (steps:nat)
                            (xs : list token) :=
  match steps with
  | 0 => NoneE "Too many recursive calls"
  | S steps' =>
    TRY ' (u, rest) <- expect "skip" xs ;;
        SomeE (<{skip}>, rest)
    OR
    TRY ' (e,rest) <-
            firstExpect "if"
                        (parseBExp steps') xs ;;
        ' (c,rest') <-
            firstExpect "then"
                        (parseSequencedCommand steps') rest ;;
        ' (c',rest'') <-
            firstExpect "else"
                        (parseSequencedCommand steps') rest' ;;
        ' (tt,rest''') <-
            expect "end" rest'' ;;
        SomeE(<{if e then c else c' end}>, rest''')
    OR
    TRY ' (e,rest) <-
            firstExpect "while"
                        (parseBExp steps') xs ;;
        ' (c,rest') <-
            firstExpect "do"
                        (parseSequencedCommand steps') rest ;;
        ' (u,rest'') <-
            expect "end" rest' ;;
        SomeE(<{while e do c end}>, rest'')
    OR
    (* Anything else must be an assignment "ident := aexp". *)
    TRY ' (i, rest) <- parseIdentifier xs ;;
        ' (e, rest') <- firstExpect ":=" (parseAExp steps') rest ;;
        SomeE (<{i := e}>, rest')
    OR
    NoneE "Expecting a command"
end
with parseSequencedCommand (steps:nat)
                           (xs : list token) :=
  match steps with
  | 0 => NoneE "Too many recursive calls"
  | S steps' =>
    ' (c, rest) <- parseSimpleCommand steps' xs ;;
    (* An optional ";" continues the sequence; otherwise stop here. *)
    TRY ' (c', rest') <-
            firstExpect ";"
                        (parseSequencedCommand steps') rest ;;
        SomeE (<{c ; c'}>, rest')
    OR
    SomeE (c, rest)
  end.
(* Recursion-depth budget for the step-indexed parser. *)
Definition bignumber := 1000.
(** Top-level entry point: tokenize [str] and parse it as a command,
    failing if any tokens are left unconsumed. *)
Definition parse (str : string) : optionE com :=
  let tokens := tokenize str in
  match parseSequencedCommand bignumber tokens with
  | SomeE (c, []) => SomeE c
  | SomeE (_, t::_) => NoneE ("Trailing tokens remaining: " ++ t)
  | NoneE err => NoneE err
  end.
(* ################################################################# *)
(** * Examples *)
Example eg1 : parse "
if x = y + 1 + 2 - y * 6 + 3 then
x := x * 1;
y := 0
else
skip
end "
=
SomeE <{
if ("x" = ("y" + 1 + 2 - "y" * 6 + 3)) then
"x" := "x" * 1;
"y" := 0
else
skip
end }>.
Proof. cbv. reflexivity. Qed.
Example eg2 : parse "
skip;
z:=x*y*(x*x);
while x=x do
if (z <= z*z) && ~(x = 2) then
x := z;
y := z
else
skip
end;
skip
end;
x:=z "
=
SomeE <{
skip;
"z" := "x" * "y" * ("x" * "x");
while ("x" = "x") do
if ("z" <= "z" * "z") && ~("x" = 2) then
"x" := "z";
"y" := "z"
else
skip
end;
skip
end;
"x" := "z" }>.
Proof. cbv. reflexivity. Qed.
(* 2026-01-07 13:18 *)

68
ImpParserTest.v Normal file
View File

@@ -0,0 +1,68 @@
Set Warnings "-notation-overridden,-parsing".
From Stdlib Require Export String.
From LF Require Import ImpParser.
Parameter MISSING: Type.
Module Check.
Ltac check_type A B :=
match type of A with
| context[MISSING] => idtac "Missing:" A
| ?T => first [unify T B; idtac "Type: ok" | idtac "Type: wrong - should be (" B ")"]
end.
Ltac print_manual_grade A :=
match eval compute in A with
| Some (_ ?S ?C) =>
idtac "Score:" S;
match eval compute in C with
| ""%string => idtac "Comment: None"
| _ => idtac "Comment:" C
end
| None =>
idtac "Score: Ungraded";
idtac "Comment: None"
end.
End Check.
From LF Require Import ImpParser.
Import Check.
Goal True.
idtac " ".
idtac "Max points - standard: 0".
idtac "Max points - advanced: 0".
idtac "".
idtac "Allowed Axioms:".
idtac "functional_extensionality".
idtac "FunctionalExtensionality.functional_extensionality_dep".
idtac "plus_le".
idtac "le_trans".
idtac "le_plus_l".
idtac "add_le_cases".
idtac "Sn_le_Sm__n_le_m".
idtac "O_le_n".
idtac "".
idtac "".
idtac "********** Summary **********".
idtac "".
idtac "Below is a summary of the automatically graded exercises that are incomplete.".
idtac "".
idtac "The output for each exercise can be any of the following:".
idtac " - 'Closed under the global context', if it is complete".
idtac " - 'MANUAL', if it is manually graded".
idtac " - A list of pending axioms, containing unproven assumptions. In this case".
idtac " the exercise is considered complete, if the axioms are all allowed.".
idtac "".
idtac "********** Standard **********".
idtac "".
idtac "********** Advanced **********".
Abort.
(* 2026-01-07 13:18 *)
(* 2026-01-07 13:18 *)

292
ImpTest.v Normal file
View File

@@ -0,0 +1,292 @@
Set Warnings "-notation-overridden,-parsing".
From Stdlib Require Export String.
From LF Require Import Imp.
Parameter MISSING: Type.
Module Check.
Ltac check_type A B :=
match type of A with
| context[MISSING] => idtac "Missing:" A
| ?T => first [unify T B; idtac "Type: ok" | idtac "Type: wrong - should be (" B ")"]
end.
Ltac print_manual_grade A :=
match eval compute in A with
| Some (_ ?S ?C) =>
idtac "Score:" S;
match eval compute in C with
| ""%string => idtac "Comment: None"
| _ => idtac "Comment:" C
end
| None =>
idtac "Score: Ungraded";
idtac "Comment: None"
end.
End Check.
From LF Require Import Imp.
Import Check.
Goal True.
idtac "------------------- optimize_0plus_b_sound --------------------".
idtac " ".
idtac "#> AExp.optimize_0plus_b_test1".
idtac "Possible points: 0.5".
check_type @AExp.optimize_0plus_b_test1 (
(@eq AExp.bexp
(AExp.optimize_0plus_b
(AExp.BNot
(AExp.BGt (AExp.APlus (AExp.ANum 0) (AExp.ANum 4)) (AExp.ANum 8))))
(AExp.BNot (AExp.BGt (AExp.ANum 4) (AExp.ANum 8))))).
idtac "Assumptions:".
Abort.
Print Assumptions AExp.optimize_0plus_b_test1.
Goal True.
idtac " ".
idtac "#> AExp.optimize_0plus_b_test2".
idtac "Possible points: 0.5".
check_type @AExp.optimize_0plus_b_test2 (
(@eq AExp.bexp
(AExp.optimize_0plus_b
(AExp.BAnd
(AExp.BLe (AExp.APlus (AExp.ANum 0) (AExp.ANum 4)) (AExp.ANum 5))
AExp.BTrue))
(AExp.BAnd (AExp.BLe (AExp.ANum 4) (AExp.ANum 5)) AExp.BTrue))).
idtac "Assumptions:".
Abort.
Print Assumptions AExp.optimize_0plus_b_test2.
Goal True.
idtac " ".
idtac "#> AExp.optimize_0plus_b_sound".
idtac "Possible points: 2".
check_type @AExp.optimize_0plus_b_sound (
(forall b : AExp.bexp,
@eq bool (AExp.beval (AExp.optimize_0plus_b b)) (AExp.beval b))).
idtac "Assumptions:".
Abort.
Print Assumptions AExp.optimize_0plus_b_sound.
Goal True.
idtac " ".
idtac "------------------- bevalR --------------------".
idtac " ".
idtac "#> AExp.bevalR_iff_beval".
idtac "Possible points: 3".
check_type @AExp.bevalR_iff_beval (
(forall (b : AExp.bexp) (bv : bool),
iff (AExp.bevalR b bv) (@eq bool (AExp.beval b) bv))).
idtac "Assumptions:".
Abort.
Print Assumptions AExp.bevalR_iff_beval.
Goal True.
idtac " ".
idtac "------------------- ceval_example2 --------------------".
idtac " ".
idtac "#> ceval_example2".
idtac "Possible points: 2".
check_type @ceval_example2 (
(ceval (CSeq (CAsgn X (ANum 0)) (CSeq (CAsgn Y (ANum 1)) (CAsgn Z (ANum 2))))
empty_st
(@Maps.t_update nat
(@Maps.t_update nat (@Maps.t_update nat empty_st X 0) Y 1) Z 2))).
idtac "Assumptions:".
Abort.
Print Assumptions ceval_example2.
Goal True.
idtac " ".
idtac "------------------- loop_never_stops --------------------".
idtac " ".
idtac "#> loop_never_stops".
idtac "Possible points: 3".
check_type @loop_never_stops ((forall st st' : state, not (ceval loop st st'))).
idtac "Assumptions:".
Abort.
Print Assumptions loop_never_stops.
Goal True.
idtac " ".
idtac "------------------- no_whiles_eqv --------------------".
idtac " ".
idtac "#> no_whiles_eqv".
idtac "Possible points: 3".
check_type @no_whiles_eqv (
(forall c : com, iff (@eq bool (no_whiles c) true) (no_whilesR c))).
idtac "Assumptions:".
Abort.
Print Assumptions no_whiles_eqv.
Goal True.
idtac " ".
idtac "------------------- no_whiles_terminating --------------------".
idtac " ".
idtac "#> Manually graded: no_whiles_terminating".
idtac "Possible points: 6".
print_manual_grade manual_grade_for_no_whiles_terminating.
idtac " ".
idtac "------------------- stack_compiler --------------------".
idtac " ".
idtac "#> s_execute1".
idtac "Possible points: 1".
check_type @s_execute1 (
(@eq (list nat)
(s_execute empty_st (@nil nat)
(@cons sinstr (SPush 5)
(@cons sinstr (SPush 3)
(@cons sinstr (SPush 1) (@cons sinstr SMinus (@nil sinstr))))))
(@cons nat 2 (@cons nat 5 (@nil nat))))).
idtac "Assumptions:".
Abort.
Print Assumptions s_execute1.
Goal True.
idtac " ".
idtac "#> s_execute2".
idtac "Possible points: 0.5".
check_type @s_execute2 (
(@eq (list nat)
(s_execute (@Maps.t_update nat empty_st X 3)
(@cons nat 3 (@cons nat 4 (@nil nat)))
(@cons sinstr (SPush 4)
(@cons sinstr (SLoad X)
(@cons sinstr SMult (@cons sinstr SPlus (@nil sinstr))))))
(@cons nat 15 (@cons nat 4 (@nil nat))))).
idtac "Assumptions:".
Abort.
Print Assumptions s_execute2.
Goal True.
idtac " ".
idtac "#> s_compile1".
idtac "Possible points: 1.5".
check_type @s_compile1 (
(@eq (list sinstr) (s_compile (AMinus (AId X) (AMult (ANum 2) (AId Y))))
(@cons sinstr (SLoad X)
(@cons sinstr (SPush 2)
(@cons sinstr (SLoad Y)
(@cons sinstr SMult (@cons sinstr SMinus (@nil sinstr)))))))).
idtac "Assumptions:".
Abort.
Print Assumptions s_compile1.
Goal True.
idtac " ".
idtac "------------------- execute_app --------------------".
idtac " ".
idtac "#> execute_app".
idtac "Possible points: 3".
check_type @execute_app (
(forall (st : state) (p1 p2 : list sinstr) (stack : list nat),
@eq (list nat) (s_execute st stack (@app sinstr p1 p2))
(s_execute st (s_execute st stack p1) p2))).
idtac "Assumptions:".
Abort.
Print Assumptions execute_app.
Goal True.
idtac " ".
idtac "------------------- stack_compiler_correct --------------------".
idtac " ".
idtac "#> s_compile_correct_aux".
idtac "Possible points: 2.5".
check_type @s_compile_correct_aux (
(forall (st : state) (e : aexp) (stack : list nat),
@eq (list nat) (s_execute st stack (s_compile e))
(@cons nat (aeval st e) stack))).
idtac "Assumptions:".
Abort.
Print Assumptions s_compile_correct_aux.
Goal True.
idtac " ".
idtac "#> s_compile_correct".
idtac "Possible points: 0.5".
check_type @s_compile_correct (
(forall (st : state) (e : aexp),
@eq (list nat) (s_execute st (@nil nat) (s_compile e))
(@cons nat (aeval st e) (@nil nat)))).
idtac "Assumptions:".
Abort.
Print Assumptions s_compile_correct.
Goal True.
idtac " ".
idtac " ".
idtac "Max points - standard: 29".
idtac "Max points - advanced: 29".
idtac "".
idtac "Allowed Axioms:".
idtac "functional_extensionality".
idtac "FunctionalExtensionality.functional_extensionality_dep".
idtac "plus_le".
idtac "le_trans".
idtac "le_plus_l".
idtac "add_le_cases".
idtac "Sn_le_Sm__n_le_m".
idtac "O_le_n".
idtac "".
idtac "".
idtac "********** Summary **********".
idtac "".
idtac "Below is a summary of the automatically graded exercises that are incomplete.".
idtac "".
idtac "The output for each exercise can be any of the following:".
idtac " - 'Closed under the global context', if it is complete".
idtac " - 'MANUAL', if it is manually graded".
idtac " - A list of pending axioms, containing unproven assumptions. In this case".
idtac " the exercise is considered complete, if the axioms are all allowed.".
idtac "".
idtac "********** Standard **********".
idtac "---------- AExp.optimize_0plus_b_test1 ---------".
Print Assumptions AExp.optimize_0plus_b_test1.
idtac "---------- AExp.optimize_0plus_b_test2 ---------".
Print Assumptions AExp.optimize_0plus_b_test2.
idtac "---------- AExp.optimize_0plus_b_sound ---------".
Print Assumptions AExp.optimize_0plus_b_sound.
idtac "---------- AExp.bevalR_iff_beval ---------".
Print Assumptions AExp.bevalR_iff_beval.
idtac "---------- ceval_example2 ---------".
Print Assumptions ceval_example2.
idtac "---------- loop_never_stops ---------".
Print Assumptions loop_never_stops.
idtac "---------- no_whiles_eqv ---------".
Print Assumptions no_whiles_eqv.
idtac "---------- no_whiles_terminating ---------".
idtac "MANUAL".
idtac "---------- s_execute1 ---------".
Print Assumptions s_execute1.
idtac "---------- s_execute2 ---------".
Print Assumptions s_execute2.
idtac "---------- s_compile1 ---------".
Print Assumptions s_compile1.
idtac "---------- execute_app ---------".
Print Assumptions execute_app.
idtac "---------- s_compile_correct_aux ---------".
Print Assumptions s_compile_correct_aux.
idtac "---------- s_compile_correct ---------".
Print Assumptions s_compile_correct.
idtac "".
idtac "********** Advanced **********".
Abort.
(* 2026-01-07 13:18 *)
(* 2026-01-07 13:18 *)

966
IndPrinciples.v Normal file
View File

@@ -0,0 +1,966 @@
(** * IndPrinciples: Induction Principles *)
(** Every time we declare a new [Inductive] datatype, Rocq
automatically generates an _induction principle_ for this type.
This induction principle is a theorem like any other: If [t] is
defined inductively, the corresponding induction principle is
called [t_ind]. *)
Set Warnings "-notation-overridden".
From LF Require Export ProofObjects.
(* ################################################################# *)
(** * Basics *)
(** Here is the induction principle for natural numbers: *)
(* The automatically generated induction principle for [nat].  The
   type ascription makes this [Check] fail if the principle ever
   changes shape, so it doubles as documentation. *)
Check nat_ind :
  forall P : nat -> Prop,
    P 0 ->
    (forall n : nat, P n -> P (S n)) ->
    forall n : nat, P n.
(** In English: Suppose [P] is a property of natural numbers (that is,
[P n] is a [Prop] for every [n]). To show that [P n] holds of all
[n], it suffices to show:
- [P] holds of [0]
- for any [n], if [P] holds of [n], then [P] holds of [S n]. *)
(** The [induction] tactic is a straightforward wrapper that, at its
core, simply performs [apply t_ind]. To see this more clearly,
let's experiment with directly using [apply nat_ind], instead of
the [induction] tactic, to carry out some proofs. Here, for
example, is an alternate proof of a theorem that we saw in the
[Induction] chapter. *)
(* Same statement as [mul_0_r] in [Induction], but proved by applying
   [nat_ind] directly rather than via the [induction] tactic.  Note
   that [n] must stay quantified in the goal for [apply] to match. *)
Theorem mul_0_r' : forall n:nat,
  n * 0 = 0.
Proof.
  apply nat_ind.
  - (* n = O *) reflexivity.
  - (* n = S n' *) simpl. intros n' IHn'. rewrite -> IHn'.
    reflexivity. Qed.
(** This proof is basically the same as the earlier one, but a
few minor differences are worth noting.
First, in the induction step of the proof (the [S] case), we
have to do a little bookkeeping manually (the [intros]) that
[induction] does automatically.
Second, we do not introduce [n] into the context before applying
[nat_ind] -- the conclusion of [nat_ind] is a quantified formula,
and [apply] needs this conclusion to exactly match the shape of
the goal state, including the quantifier. By contrast, the
[induction] tactic works either with a variable in the context or
a quantified variable in the goal.
Third, we had to manually supply the name of the induction principle
with [apply], but [induction] figures that out itself.
These conveniences make [induction] nicer to use in practice than
applying induction principles like [nat_ind] directly. But it is
important to realize that, modulo these bits of bookkeeping,
applying [nat_ind] is what we are really doing. *)
(** **** Exercise: 2 stars, standard (plus_one_r')
Complete this proof without using the [induction] tactic. *)
Theorem plus_one_r' : forall n:nat,
  n + 1 = S n.
Proof.
  (* Apply the induction principle directly, as in [mul_0_r'] above;
     the goal must remain quantified over [n] for the conclusion of
     [nat_ind] to match. *)
  apply nat_ind.
  - (* n = O *) reflexivity.
  - (* n = S n' *) intros n' IHn'. simpl. rewrite -> IHn'. reflexivity.
Qed.
(** [] *)
(** Rocq generates induction principles for every datatype
defined with [Inductive], including those that aren't recursive.
Although of course we don't need the proof technique of induction
to prove properties of non-recursive datatypes, the idea of an
induction principle still makes sense for them: it gives a way to
prove that a property holds for all values of the type. *)
(** These generated principles follow a similar pattern. If we
define a type [t] with constructors [c1] ... [cn], Rocq generates a
theorem with this shape:
t_ind : forall P : t -> Prop,
... case for c1 ... ->
... case for c2 ... -> ...
... case for cn ... ->
forall n : t, P n
The specific shape of each case depends on the arguments to the
corresponding constructor. *)
(** Before trying to write down a general rule, let's look at
some more examples. First, an example where the constructors take
no arguments: *)
(* A non-recursive, two-constructor type: its "induction" principle
   is just case analysis, with one premise per constructor. *)
Inductive time : Type :=
  | day
  | night.

Check time_ind :
  forall P : time -> Prop,
    P day ->
    P night ->
    forall t : time, P t.
(** **** Exercise: 1 star, standard, optional (rgb)
Write out the induction principle that Rocq will generate for the
following datatype. Write down your answer on paper or type it
into a comment, and then compare it with what Rocq prints. *)
Inductive rgb : Type :=
  | red
  | green
  | blue.

(* Expected answer -- three constant constructors, so three premises:
     rgb_ind : forall P : rgb -> Prop,
       P red -> P green -> P blue ->
       forall r : rgb, P r *)
Check rgb_ind.
(** [] *)
(** Here's another example, this time with one of the constructors
taking some arguments. *)
(* [ncons]'s recursive argument [l] induces the induction hypothesis
   [P l] in the corresponding case of the generated principle. *)
Inductive natlist : Type :=
  | nnil
  | ncons (n : nat) (l : natlist).

Check natlist_ind :
  forall P : natlist -> Prop,
    P nnil ->
    (forall (n : nat) (l : natlist),
        P l -> P (ncons n l)) ->
    forall l : natlist, P l.
(** In general, the automatically generated induction principle for
inductive type [t] is formed as follows:
- Each constructor [c] generates one case of the principle.
- If [c] takes no arguments, that case is:
"P holds of c"
- If [c] takes arguments [x1:a1] ... [xn:an], that case is:
"For all x1:a1 ... xn:an,
if [P] holds of each of the arguments of type [t],
then [P] holds of [c x1 ... xn]"
But that oversimplifies a little. An assumption about [P]
holding of an argument [x] of type [t] actually occurs
immediately after the quantification of [x].
*)
(** For example, suppose we had written the definition of [natlist] a little
differently: *)
Inductive natlist' : Type :=
  | nnil'
  | nsnoc (l : natlist') (n : nat).

(** Now the induction principle case for [nsnoc] is a bit different
    than the earlier case for [ncons]: *)

(* The hypothesis [P l] appears immediately after [l]'s binder --
   before the binder for [n] -- because [l] is the first constructor
   argument. *)
Check natlist'_ind :
  forall P : natlist' -> Prop,
    P nnil' ->
    (forall l : natlist', P l -> forall n : nat, P (nsnoc l n)) ->
    forall n : natlist', P n.
(** **** Exercise: 2 stars, standard (booltree_ind)
Here is a type for trees that contain a boolean value at each leaf
and branch. *)
(* Leaves and branches both carry a [bool]; [bt_branch] additionally
   has two recursive subtree arguments [t1] and [t2]. *)
Inductive booltree : Type :=
  | bt_empty
  | bt_leaf (b : bool)
  | bt_branch (b : bool) (t1 t2 : booltree).
(* What is the induction principle for [booltree]? Of course you could
ask Rocq, but try not to do that. Instead, write it down yourself on
paper. Then look at the definition of [booltree_ind_type], below.
It has three missing pieces, which are provided by the definitions
in between here and there. Fill in those definitions based on what
you wrote on paper. *)
Definition booltree_property_type : Type := booltree -> Prop.

(* Case for [bt_empty]: no arguments, so just [P bt_empty]. *)
Definition base_case (P : booltree_property_type) : Prop :=
  P bt_empty.

(* Case for [bt_leaf]: quantify over the (non-recursive) boolean
   argument. *)
Definition leaf_case (P : booltree_property_type) : Prop :=
  forall b : bool, P (bt_leaf b).

(* Case for [bt_branch]: each recursive argument contributes an
   induction hypothesis immediately after its own quantification,
   mirroring the shape Rocq generates. *)
Definition branch_case (P : booltree_property_type) : Prop :=
  forall (b : bool) (t1 : booltree), P t1 ->
  forall t2 : booltree, P t2 ->
  P (bt_branch b t1 t2).

Definition booltree_ind_type :=
  forall (P : booltree_property_type),
    base_case P ->
    leaf_case P ->
    branch_case P ->
    forall (b : booltree), P b.

(** Now check the correctness of your answers by proving the following
    theorem. If you have them right, you can complete the proof with
    just one tactic: [exact booltree_ind]. That will work because the
    automatically generated induction principle [booltree_ind] has the
    same type as what you just defined. *)
Theorem booltree_ind_type_correct : booltree_ind_type.
Proof. exact booltree_ind. Qed.
(** [] *)
(** **** Exercise: 2 stars, standard (toy_ind)
Here is an induction principle for a toy type:
forall P : Toy -> Prop,
(forall b : bool, P (con1 b)) ->
(forall (n : nat) (t : Toy), P t -> P (con2 n t)) ->
forall t : Toy, P t
Give an [Inductive] definition of [Toy], such that the induction
principle Rocq generates is that given above: *)
(* [con1] takes a [bool]; [con2] takes a [nat] and a recursive [Toy]
   argument, which yields the [P t ->] hypothesis in the principle. *)
Inductive Toy : Type :=
  | con1 (b : bool)
  | con2 (n : nat) (t : Toy)
.

(** Show that your definition is correct by proving the following theorem.
    You should be able to instantiate [f] and [g] with your two constructors,
    then immediately finish the proof with [exact Toy_ind]. As in the previous
    exercise, that will work because the automatically generated induction
    principle [Toy_ind] will have the same type. *)
Theorem Toy_correct : exists f g,
  forall P : Toy -> Prop,
    (forall b : bool, P (f b)) ->
    (forall (n : nat) (t : Toy), P t -> P (g n t)) ->
    forall t : Toy, P t.
Proof.
  exists con1. exists con2.
  exact Toy_ind.
Qed.
(** [] *)
(* ################################################################# *)
(** * Polymorphism *)
(** What about polymorphic datatypes?
The inductive definition of polymorphic lists
Inductive list (X:Type) : Type :=
| nil : list X
| cons : X -> list X -> list X.
is very similar to that of [natlist]. The main difference is
that, here, the whole definition is _parameterized_ on a set [X]:
that is, we are defining a _family_ of inductive types [list X],
one for each [X]. (Note that, wherever [list] appears in the body
of the declaration, it is always applied to the parameter [X].)
*)
(** The induction principle is likewise parameterized on [X]:
list_ind :
forall (X : Type) (P : list X -> Prop),
P [] ->
(forall (x : X) (l : list X), P l -> P (x :: l)) ->
forall l : list X, P l
Note that the _whole_ induction principle is parameterized on
[X]. That is, [list_ind] can be thought of as a polymorphic
function that, when applied to a type [X], gives us back an
induction principle specialized to the type [list X]. *)
(** **** Exercise: 1 star, standard, optional (tree)
Write out the induction principle that Rocq will generate for
the following datatype. Compare your answer with what Rocq
prints. *)
Inductive tree (X:Type) : Type :=
  | leaf (x : X)
  | node (t1 t2 : tree X).

(* Expected answer -- the whole principle is parameterized on [X], and
   [node]'s two recursive arguments each get an IH:
     tree_ind : forall (X : Type) (P : tree X -> Prop),
       (forall x : X, P (leaf X x)) ->
       (forall t1 : tree X, P t1 ->
        forall t2 : tree X, P t2 -> P (node X t1 t2)) ->
       forall t : tree X, P t *)
Check tree_ind.
(** [] *)
(** **** Exercise: 1 star, standard, optional (mytype)
Find an inductive definition that gives rise to the
following induction principle:
mytype_ind :
forall (X : Type) (P : mytype X -> Prop),
(forall x : X, P (constr1 X x)) ->
(forall n : nat, P (constr2 X n)) ->
(forall m : mytype X, P m ->
forall n : nat, P (constr3 X m n)) ->
forall m : mytype X, P m
*)
(** [] *)
(** **** Exercise: 1 star, standard, optional (foo)
Find an inductive definition that gives rise to the
following induction principle:
foo_ind :
forall (X Y : Type) (P : foo X Y -> Prop),
(forall x : X, P (bar X Y x)) ->
(forall y : Y, P (baz X Y y)) ->
(forall f1 : nat -> foo X Y,
(forall n : nat, P (f1 n)) -> P (quux X Y f1)) ->
forall f2 : foo X Y, P f2
*)
(** [] *)
(** **** Exercise: 1 star, standard, optional (foo')
Consider the following inductive definition: *)
(* [C1]'s [list X] argument is not of type [foo' X], so it yields no
   induction hypothesis; only the recursive argument [f] does. *)
Inductive foo' (X:Type) : Type :=
  | C1 (l : list X) (f : foo' X)
  | C2.
(** What induction principle will Rocq generate for [foo']?  (Fill
    in the blanks, then check your answer with Rocq.)
     foo'_ind :
        forall (X : Type) (P : foo' X -> Prop),
              (forall (l : list X) (f : foo' X),
                    P f ->
                    P (C1 X l f) ) ->
             P (C2 X) ->
             forall f : foo' X, P f
*)
(** [] *)
(* ################################################################# *)
(** * Induction Hypotheses *)
(** Where does the phrase "induction hypothesis" fit into this story?
The induction principle for numbers
forall P : nat -> Prop,
P 0 ->
(forall n : nat, P n -> P (S n)) ->
forall n : nat, P n
is a generic statement that holds for all propositions
[P] (or rather, strictly speaking, for all families of
propositions [P] indexed by a number [n]). Each time we
use this principle, we are choosing [P] to be a particular
expression of type [nat -> Prop].
We can make proofs by induction more explicit by giving
this expression a name. For example, instead of stating
the theorem [mul_0_r] as "[forall n, n * 0 = 0]," we can
write it as "[forall n, P_m0r n]", where [P_m0r] is defined
as... *)
Definition P_m0r (n:nat) : Prop :=
  n * 0 = 0.

(** ... or equivalently: *)

Definition P_m0r' : nat -> Prop :=
  fun n => n * 0 = 0.

(** Now it is easier to see where [P_m0r] appears in the proof. *)

(* Here [apply nat_ind] instantiates the principle's [P] with the
   named proposition [P_m0r]. *)
Theorem mul_0_r'' : forall n:nat,
  P_m0r n.
Proof.
  apply nat_ind.
  - (* n = O *) reflexivity.
  - (* n = S n' *)
    (* Note the proof state at this point! *)
    intros n IHn.
    unfold P_m0r in IHn. unfold P_m0r. simpl. apply IHn. Qed.
(** This extra naming step isn't something that we do in
normal proofs, but it is useful to do it explicitly for an example
or two, because it allows us to see exactly what the induction
hypothesis is. If we prove [forall n, P_m0r n] by induction on
[n] (using either [induction] or [apply nat_ind]), we see that the
first subgoal requires us to prove [P_m0r 0] ("[P] holds for
zero"), while the second subgoal requires us to prove [forall n',
P_m0r n' -> P_m0r (S n')] (that is "[P] holds of [S n'] if it
holds of [n']" or, more elegantly, "[P] is preserved by [S]").
The _induction hypothesis_ is the premise of this latter
implication -- the assumption that [P] holds of [n'], which we are
allowed to use in proving that [P] holds for [S n']. *)
(* ################################################################# *)
(** * More on the [induction] Tactic *)
(** The [induction] tactic actually does even more low-level
bookkeeping for us than we discussed above.
Recall the informal statement of the induction principle for
natural numbers:
- If [P n] is some proposition involving a natural number n, and
we want to show that P holds for _all_ numbers n, we can
reason like this:
- show that [P O] holds
- show that, if [P n'] holds, then so does [P (S n')]
- conclude that [P n] holds for all n.
So, when we begin a proof with [intros n] and then [induction n],
we are first telling Rocq to consider a _particular_ [n] (by
introducing it into the context) and then telling it to prove
something about _all_ numbers (by using induction).
*)
(** What Rocq actually does in this situation, internally, is it
"re-generalizes" the variable we perform induction on. For
example, in our original proof that [plus] is associative... *)
Theorem add_assoc' : forall n m p : nat,
  n + (m + p) = (n + m) + p.
Proof.
  (* ...we first introduce all 3 variables into the context,
     which amounts to saying "Consider an arbitrary [n], [m], and
     [p]..." *)
  intros n m p.
  (* ...We now use the [induction] tactic to prove [P n] (that
     is, [n + (m + p) = (n + m) + p]) for _all_ [n],
     and hence also for the particular [n] that is in the context
     at the moment. *)
  induction n as [| n'].
  - (* n = O *) reflexivity.
  - (* n = S n' *)
    (* [IHn'] is the same statement for [n']. *)
    simpl. rewrite -> IHn'. reflexivity. Qed.
(** It also works to apply [induction] to a variable that is
quantified in the goal. *)
(* [induction] on the goal's outermost quantified variable; [m] stays
   quantified, so [IHn'] is itself universally quantified over [m]
   and each case introduces its own [m]. *)
Theorem add_comm' : forall n m : nat,
  n + m = m + n.
Proof.
  induction n as [| n'].
  - (* n = O *) intros m. rewrite -> add_0_r. reflexivity.
  - (* n = S n' *) intros m. simpl. rewrite -> IHn'.
    rewrite <- plus_n_Sm. reflexivity. Qed.
(** Note that [induction n] leaves [m] still bound in the goal --
i.e., what we are proving inductively is a statement beginning
with [forall m].
If we do [induction] on a variable that is quantified in the goal
_after_ some other quantifiers, the [induction] tactic will
automatically introduce the variables bound by these quantifiers
into the context. *)
(* Because [m] is quantified after [n] in the goal, [induction m]
   automatically introduces [n] into the context first. *)
Theorem add_comm'' : forall n m : nat,
  n + m = m + n.
Proof.
  (* Let's do induction on [m] this time, instead of [n]... *)
  induction m as [| m']. (* [n] is already introduced into the context *)
  - (* m = O *) simpl. rewrite -> add_0_r. reflexivity.
  - (* m = S m' *) simpl. rewrite <- IHm'.
    rewrite <- plus_n_Sm. reflexivity. Qed.
(** **** Exercise: 1 star, standard, optional (plus_explicit_prop)
Rewrite both [add_assoc'] and [add_comm'] and their proofs in
the same style as [mul_0_r''] above -- that is, for each theorem,
give an explicit [Definition] of the proposition being proved by
induction, and state the theorem and proof in terms of this
defined proposition. *)
(* FILL IN HERE
[] *)
(* ################################################################# *)
(** * Induction Principles for Propositions *)
(** Inductive definitions of propositions also cause Rocq to generate
induction principles. For example, recall our proposition [ev]
from [IndProp]: *)
Print ev.
(* ===>
  Inductive ev : nat -> Prop :=
  | ev_0 : ev 0
  | ev_SS : forall n : nat, ev n -> ev (S (S n))
*)

(* Note the extra premise [ev n] in the [ev_SS] case, and the extra
   premise [ev n] on the conclusion: the principle only concludes
   [P n] for numbers that are actually even. *)
Check ev_ind :
  forall P : nat -> Prop,
    P 0 ->
    (forall n : nat, ev n -> P n -> P (S (S n))) ->
    forall n : nat, ev n -> P n.
(** In English, [ev_ind] says: Suppose [P] is a property of natural
numbers. To show that [P n] holds whenever [n] is even, it suffices
to show:
- [P] holds for [0],
- for any [n], if [n] is even and [P] holds for [n], then [P]
holds for [S (S n)]. *)
(** As expected, we can apply [ev_ind] directly instead of using
[induction]. For example, we can use it to show that [ev'] (the
slightly awkward alternate definition of evenness that we saw in
an exercise in the [IndProp] chapter) is equivalent to the
cleaner inductive definition [ev]: *)
Inductive ev' : nat -> Prop :=
  | ev'_0 : ev' 0
  | ev'_2 : ev' 2
  | ev'_sum n m (Hn : ev' n) (Hm : ev' m) : ev' (n + m).

(* Proved by applying [ev_ind] directly, with [P := ev']; the goal
   [forall n, ev n -> ev' n] matches [ev_ind]'s conclusion exactly. *)
Theorem ev_ev' : forall n, ev n -> ev' n.
Proof.
  apply ev_ind.
  - (* ev_0 *)
    apply ev'_0.
  - (* ev_SS *)
    intros m Hm IH.
    (* [S (S m)] is [2 + m], so [ev'_sum] applies. *)
    apply (ev'_sum 2 m).
    + apply ev'_2.
    + apply IH.
Qed.
(** The precise form of an [Inductive] definition can affect the
induction principle Rocq generates. *)
Inductive le1 : nat -> nat -> Prop :=
  | le1_n : forall n, le1 n n
  | le1_S : forall n m, (le1 n m) -> (le1 n (S m)).

Notation "m <=1 n" := (le1 m n) (at level 70).

(** This definition can be streamlined a little by observing that the
    left-hand argument [n] is the same everywhere in the definition,
    so we can actually make it a "general parameter" to the whole
    definition, rather than an argument to each constructor. *)

Inductive le2 (n:nat) : nat -> Prop :=
  | le2_n : le2 n n
  | le2_S m (H : le2 n m) : le2 n (S m).

Notation "m <=2 n" := (le2 m n) (at level 70).

(** The second one is better, even though it looks less symmetric.
    Why? Because it gives us a simpler induction principle. *)

(* For [le1], the property [P] must be binary and the left argument
   is re-quantified in every case. *)
Check le1_ind :
  forall P : nat -> nat -> Prop,
    (forall n : nat, P n n) ->
    (forall n m : nat, n <=1 m -> P n m -> P n (S m)) ->
    forall n n0 : nat, n <=1 n0 -> P n n0.

(* For [le2], the parameter [n] is fixed once up front, so [P] is
   unary. *)
Check le2_ind :
  forall (n : nat) (P : nat -> Prop),
    P n ->
    (forall m : nat, n <=2 m -> P m -> P (S m)) ->
    forall n0 : nat, n <=2 n0 -> P n0.
(* ################################################################# *)
(** * Another Form of Induction Principles on Propositions (Optional) *)
(** The induction principle that Rocq generated for [ev] was parameterized
on a natural number [n]. It could have additionally been parameterized
on the evidence that [n] was even, which would have led to this
induction principle:
forall P : (forall n : nat, ev'' n -> Prop),
P O ev_0 ->
(forall (m : nat) (E : ev'' m),
P m E -> P (S (S m)) (ev_SS m E)) ->
forall (n : nat) (E : ev'' n), P n E
*)
(** ... because:
- Since [ev] is indexed by a number [n] (every [ev] object [E] is
a piece of evidence that some particular number [n] is even),
the proposition [P] is parameterized by both [n] and [E] --
that is, the induction principle can be used to prove
assertions involving both an even number and the evidence that
it is even.
- Since there are two ways of giving evidence of evenness ([even]
has two constructors), applying the induction principle
generates two subgoals:
- We must prove that [P] holds for [O] and [ev_0].
- We must prove that, whenever [m] is an even number and [E]
is an evidence of its evenness, if [P] holds of [m] and
[E], then it also holds of [S (S m)] and [ev_SS m E].
- If these subgoals can be proved, then the induction principle
tells us that [P] is true for _all_ even numbers [n] and
evidence [E] of their evenness.
This is more flexibility than we normally need or want: it is
giving us a way to prove logical assertions where the assertion
involves properties of some piece of _evidence_ of evenness, while
all we really care about is proving properties of _numbers_ that
are even -- we are interested in assertions about numbers, not
about evidence. It would therefore be more convenient to have an
induction principle for proving propositions [P] that are
parameterized just by [n] and whose conclusion establishes [P] for
all even numbers [n]:
forall P : nat -> Prop,
... ->
forall n : nat,
even n -> P n
That is why Rocq actually generates the induction principle
[ev_ind] that we saw before. *)
(* ################################################################# *)
(** * Formal vs. Informal Proofs by Induction *)
(** Question: What is the relation between a formal proof of a
proposition [P] and an informal proof of the same proposition [P]?
Answer: The latter should _teach_ the reader everything they would
need to understand to be able to produce the former.
Question: How much detail does that require?
Unfortunately, there is no single right answer; rather, there is a
range of choices.
At one end of the spectrum, we can essentially give the reader the
whole formal proof (i.e., the "informal" proof will amount to just
transcribing the formal one into words). This may give the reader
the ability to reproduce the formal one for themselves, but it
probably doesn't _teach_ them anything much.
At the other end of the spectrum, we can say "The theorem is true
and you can figure out why for yourself if you think about it hard
enough." This is also not a good teaching strategy, because often
writing the proof requires one or more significant insights into
the thing we're proving, and most readers will give up before they
rediscover all the same insights as we did.
In the middle is the golden mean -- a proof that includes all of
the essential insights (saving the reader the hard work that we
went through to find the proof in the first place) plus high-level
suggestions for the more routine parts to save the reader from
spending too much time reconstructing these (e.g., what the IH says
and what must be shown in each case of an inductive proof), but not
so much detail that the main ideas are obscured.
Since we've spent much of this chapter looking "under the hood" at
formal proofs by induction, now is a good moment to talk a little
about _informal_ proofs by induction.
In the real world of mathematical communication, written proofs
range from extremely longwinded and pedantic to extremely brief and
telegraphic. Although the ideal is somewhere in between, while one
is getting used to the style it is better to start out at the
pedantic end. Also, during the learning phase, it is probably
helpful to have a clear standard to compare against. With this in
mind, we offer two templates -- one for proofs by induction over
_data_ (i.e., where the thing we're doing induction on lives in
[Type]) and one for proofs by induction over _evidence_ (i.e.,
where the inductively defined thing lives in [Prop]). *)
(* ================================================================= *)
(** ** Induction Over an Inductively Defined Set *)
(** _Template_:
- _Theorem_: <Universally quantified proposition of the form
"For all [n:S], [P(n)]," where [S] is some inductively defined
set.>
_Proof_: By induction on [n].
<one case for each constructor [c] of [S]...>
- Suppose [n = c a1 ... ak], where <...and here we state
the IH for each of the [a]'s that has type [S], if any>.
We must show <...and here we restate [P(c a1 ... ak)]>.
<go on and prove [P(n)] to finish the case...>
- <other cases similarly...> []
_Example_:
- _Theorem_: For all sets [X], lists [l : list X], and numbers
[n], if [length l = n] then [index (S n) l = None].
_Proof_: By induction on [l].
- Suppose [l = []]. We must show, for all numbers [n],
that, if [length [] = n], then [index (S n) [] =
None].
This follows immediately from the definition of [index].
- Suppose [l = x :: l'] for some [x] and [l'], where
[length l' = n'] implies [index (S n') l' = None], for
any number [n']. We must show, for all [n], that, if
[length (x::l') = n] then [index (S n) (x::l') =
None].
Let [n] be a number with [length l = n]. Since
length l = length (x::l') = S (length l'),
it suffices to show that
index (S (length l')) l' = None.
But this follows directly from the induction hypothesis,
picking [n'] to be [length l']. [] *)
(* ================================================================= *)
(** ** Induction Over an Inductively Defined Proposition *)
(** Since inductively defined proof objects are often called
"derivation trees," this form of proof is also known as _induction
on derivations_.
_Template_:
- _Theorem_: <Proposition of the form "[Q -> P]," where [Q] is
some inductively defined proposition (more generally,
"For all [x] [y] [z], [Q x y z -> P x y z]")>
_Proof_: By induction on a derivation of [Q]. <Or, more
generally, "Suppose we are given [x], [y], and [z]. We
show that [Q x y z] implies [P x y z], by induction on a
derivation of [Q x y z]"...>
<one case for each constructor [c] of [Q]...>
- Suppose the final rule used to show [Q] is [c]. Then
<...and here we state the types of all of the [a]'s
together with any equalities that follow from the
definition of the constructor and the IH for each of
the [a]'s that has type [Q], if there are any>. We must
show <...and here we restate [P]>.
<go on and prove [P] to finish the case...>
- <other cases similarly...> []
_Example_
- _Theorem_: The [<=] relation is transitive -- i.e., for all
numbers [n], [m], and [o], if [n <= m] and [m <= o], then
[n <= o].
_Proof_: By induction on a derivation of [m <= o].
- Suppose the final rule used to show [m <= o] is
[le_n]. Then [m = o] and we must show that [n <= m],
which is immediate by hypothesis.
- Suppose the final rule used to show [m <= o] is
[le_S]. Then [o = S o'] for some [o'] with [m <= o'].
We must show that [n <= S o'].
By induction hypothesis, [n <= o'].
But then, by [le_S], [n <= S o']. [] *)
(* ################################################################# *)
(** * Explicit Proof Objects for Induction (Optional) *)
(** Although tactic-based proofs are normally much easier to
work with, the ability to write a proof term directly is sometimes
very handy, particularly when we want Rocq to do something slightly
non-standard. *)
(** Recall again the induction principle on naturals that Rocq generates for
us automatically from the Inductive declaration for [nat]. *)
Check nat_ind :
  forall P : nat -> Prop,
    P 0 ->
    (forall n : nat, P n -> P (S n)) ->
    forall n : nat, P n.

(** There's nothing magic about this induction lemma: it's just
    another Rocq lemma that requires a proof. Rocq generates the proof
    automatically too... *)

Print nat_ind.

(** We can rewrite that more tidily as follows: *)

(* Structural recursion on [n]: each recursive call peels off one [S],
   so [build_proof P evPO evPS n] constructs evidence of [P n] from
   the base and step evidence. *)
Fixpoint build_proof
         (P : nat -> Prop)
         (evPO : P 0)
         (evPS : forall n : nat, P n -> P (S n))
         (n : nat) : P n :=
  match n with
  | 0 => evPO
  | S k => evPS k (build_proof P evPO evPS k)
  end.

Definition nat_ind_tidy := build_proof.
(** We can read [build_proof] as follows: Suppose we have
evidence [evPO] that [P] holds on 0, and evidence [evPS] that [forall
n:nat, P n -> P (S n)]. Then we can prove that [P] holds of an
arbitrary nat [n] using recursive function [build_proof], which
pattern matches on [n]:
- If [n] is 0, [build_proof] returns [evPO] to show that [P n]
holds.
- If [n] is [S k], [build_proof] applies itself recursively on
[k] to obtain evidence that [P k] holds; then it applies
[evPS] on that evidence to show that [P (S n)] holds. *)
(** Recursive function [build_proof] thus pattern matches against
[n], recursing all the way down to 0, and building up a proof
as it returns. *)
(** The actual [nat_ind] that Rocq generates uses a recursive
function [F] defined with [fix] instead of [Fixpoint]. *)
(** We can adapt this approach to proving [nat_ind] to help prove
_non-standard_ induction principles too. As a motivating example,
suppose that we want to prove the following lemma, directly
relating the [ev] predicate we defined in [IndProp]
to the [even] function defined in [Basics]. *)
(* First attempt: ordinary induction on [n].  This proof is
   deliberately [Abort]ed -- in the [S (S n)] case the induction
   hypothesis talks about [S n], which is useless here (see the
   discussion immediately below). *)
Lemma even_ev : forall n: nat, even n = true -> ev n.
Proof.
  induction n; intros.
  - apply ev_0.
  - destruct n.
    + simpl in H. inversion H.
    + simpl in H.
      apply ev_SS.
Abort.
(** Attempts to prove this by standard induction on [n] fail in the case for
[S (S n)], because the induction hypothesis only tells us something about
[S n], which is useless. There are various ways to hack around this problem;
for example, we _can_ use ordinary induction on [n] to prove this (try it!):
[Lemma even_ev' : forall n : nat,
(even n = true -> ev n) /\ (even (S n) = true -> ev (S n))].
But we can make a much better proof by defining and proving a
non-standard induction principle that goes "by twos":
*)
(* A non-standard induction principle that steps "by twos", written as
   an explicit proof term.  The [fix] recurses on [n'], the
   predecessor's predecessor, so the recursive call is on a structural
   subterm and the guardedness check accepts it. *)
Definition nat_ind2 :
  forall (P : nat -> Prop),
    P 0 ->
    P 1 ->
    (forall n : nat, P n -> P (S(S n))) ->
    forall n : nat , P n :=
  fun P => fun P0 => fun P1 => fun PSS =>
    fix f (n:nat) := match n with
                       0 => P0
                     | 1 => P1
                     | S (S n') => PSS n' (f n')
                     end.
(** Once you get the hang of it, it is entirely straightforward to
give an explicit proof term for induction principles like this.
Proving this as a lemma using tactics is much less intuitive.
The [induction ... using] tactic variant gives a convenient way to
utilize a non-standard induction principle like this. *)
(* With [nat_ind2], the hypothesis [IHn'] in the last case is about
   [n'], exactly two below [S (S n')] -- precisely what [ev_SS]
   needs. *)
Lemma even_ev : forall n, even n = true -> ev n.
Proof.
  intros.
  induction n as [ | |n'] using nat_ind2.
  - apply ev_0.
  - simpl in H.
    inversion H.
  - simpl in H.
    apply ev_SS.
    apply IHn'.
    apply H.
Qed.
(** **** Exercise: 4 stars, standard, optional (t_tree)
What if we wanted to define binary trees as follows, using a
constructor that bundles the children and value at a node into a
tuple? *)
(* Tuple notation, so a node's children and value can be bundled into
   a single constructor argument. *)
Notation "( x , y , .. , z )" := (pair .. (pair x y) .. z) : core_scope.

Inductive t_tree (X : Type) : Type :=
| t_leaf
| t_branch : (t_tree X * X * t_tree X) -> t_tree X.

Arguments t_leaf {X}.
Arguments t_branch {X}.

(** Unfortunately, the automatically-generated induction principle is
    not as strong as we need. It doesn't introduce induction hypotheses
    for the subtrees. *)

Check t_tree_ind.

(** That will get us in trouble if we want to prove something by
    induction, such as that [reflect] is an involution. *)

(* Swap the two subtrees at every node, recursively. *)
Fixpoint reflect {X : Type} (t : t_tree X) : t_tree X :=
  match t with
  | t_leaf => t_leaf
  | t_branch (l, v, r) => t_branch (reflect r, v, reflect l)
  end.

(* Deliberately [Abort]ed: the generated [t_tree_ind] gives no IH for
   [l] or [r], so the branch case gets stuck (see below). *)
Theorem reflect_involution : forall (X : Type) (t : t_tree X),
  reflect (reflect t) = t.
Proof.
  intros X t. induction t.
  - reflexivity.
  - destruct p as [[l v] r]. simpl. Abort.
(** We get stuck, because we have no inductive hypothesis for [l] or
[r]. So, we need to define our own custom induction principle, and
use it to complete the proof.
First, define the type of the induction principle that you want to
use. There are many possible answers. Recall that you can use
[match] as part of the definition. *)
(* The principle we want: one IH per subtree in the branch case. *)
Definition better_t_tree_ind_type : Prop :=
  forall (X : Type) (P : t_tree X -> Prop),
    P t_leaf ->
    (forall (l : t_tree X) (v : X) (r : t_tree X),
        P l -> P r -> P (t_branch (l, v, r))) ->
    forall t : t_tree X, P t.

(** Second, define the induction principle by giving a term of that
    type. Use the examples about [nat], above, as models. *)

(* An explicit [fix], as for [nat_ind2]: the recursive calls [f l] and
   [f r] are on subterms of [t] (reached through the tuple [p]), so
   the guardedness check accepts them.  The [as ... return] clauses
   make the dependent match's result type explicit. *)
Definition better_t_tree_ind : better_t_tree_ind_type :=
  fun X P Hleaf Hbranch =>
    fix f (t : t_tree X) : P t :=
      match t as t' return P t' with
      | t_leaf => Hleaf
      | t_branch p =>
          match p as p' return P (t_branch p') with
          | (l, v, r) => Hbranch l v r (f l) (f r)
          end
      end.

(** Finally, prove the theorem. If [induction...using] gives you an
    error about "Cannot recognize an induction scheme", don't worry
    about it. The [induction] tactic is picky about the shape of the
    theorem you pass to it, but it doesn't give you much information
    to debug what is wrong about that shape. You can use [apply]
    instead, as we saw at the beginning of this file. *)
Theorem reflect_involution : forall (X : Type) (t : t_tree X),
  reflect (reflect t) = t.
Proof.
  intros X.
  apply (better_t_tree_ind X (fun t : t_tree X => reflect (reflect t) = t)).
  - (* t_leaf *) reflexivity.
  - (* t_branch *)
    intros l v r IHl IHr. simpl.
    rewrite IHl. rewrite IHr. reflexivity.
Qed.
(** [] *)
(* 2026-01-07 13:18 *)

118
IndPrinciplesTest.v Normal file
View File

@@ -0,0 +1,118 @@
Set Warnings "-notation-overridden,-parsing".
From Stdlib Require Export String.
From LF Require Import IndPrinciples.
(* Sentinel type: it appears in a submission's type when a definition
   was left as a placeholder, and [check_type] looks for it. *)
Parameter MISSING: Type.

Module Check.

(* Report whether [A]'s type unifies with the expected statement [B]:
   prints "Missing:" if [A] mentions [MISSING], "Type: ok" on a match,
   and the expected statement otherwise. *)
Ltac check_type A B :=
    match type of A with
    | context[MISSING] => idtac "Missing:" A
    | ?T => first [unify T B; idtac "Type: ok" | idtac "Type: wrong - should be (" B ")"]
    end.

(* Print the score and comment held in a manual-grade option value;
   [None] means the exercise has not been graded. *)
Ltac print_manual_grade A :=
    match eval compute in A with
    | Some (_ ?S ?C) =>
        idtac "Score:" S;
        match eval compute in C with
        | ""%string => idtac "Comment: None"
        | _ => idtac "Comment:" C
        end
    | None =>
        idtac "Score: Ungraded";
        idtac "Comment: None"
    end.

End Check.
From LF Require Import IndPrinciples.
Import Check.
(* Grading driver: for each graded exercise, print its name and point
   value, check the submitted statement's type with [check_type], and
   list the axioms the proof still depends on via [Print Assumptions]
   (which must run outside a proof, hence the [Abort]s). *)
Goal True.
idtac "-------------------  plus_one_r'  --------------------".
idtac " ".
idtac "#> plus_one_r'".
idtac "Possible points: 2".
check_type @plus_one_r' ((forall n : nat, @eq nat (Nat.add n 1) (S n))).
idtac "Assumptions:".
Abort.
Print Assumptions plus_one_r'.
Goal True.
idtac " ".
idtac "-------------------  booltree_ind  --------------------".
idtac " ".
idtac "#> booltree_ind_type_correct".
idtac "Possible points: 2".
check_type @booltree_ind_type_correct (booltree_ind_type).
idtac "Assumptions:".
Abort.
Print Assumptions booltree_ind_type_correct.
Goal True.
idtac " ".
idtac "-------------------  toy_ind  --------------------".
idtac " ".
idtac "#> Toy_correct".
idtac "Possible points: 2".
check_type @Toy_correct (
(@ex (forall _ : bool, Toy)
   (fun f : forall _ : bool, Toy =>
    @ex (forall (_ : nat) (_ : Toy), Toy)
      (fun g : forall (_ : nat) (_ : Toy), Toy =>
       forall (P : forall _ : Toy, Prop) (_ : forall b : bool, P (f b))
         (_ : forall (n : nat) (t : Toy) (_ : P t), P (g n t))
         (t : Toy),
       P t)))).
idtac "Assumptions:".
Abort.
Print Assumptions Toy_correct.
(* Summary section: totals, the axioms a submission is allowed to
   rely on, and a per-exercise assumption dump. *)
Goal True.
idtac " ".
idtac " ".
idtac "Max points - standard: 6".
idtac "Max points - advanced: 6".
idtac "".
idtac "Allowed Axioms:".
idtac "functional_extensionality".
idtac "FunctionalExtensionality.functional_extensionality_dep".
idtac "plus_le".
idtac "le_trans".
idtac "le_plus_l".
idtac "add_le_cases".
idtac "Sn_le_Sm__n_le_m".
idtac "O_le_n".
idtac "".
idtac "".
idtac "********** Summary **********".
idtac "".
idtac "Below is a summary of the automatically graded exercises that are incomplete.".
idtac "".
idtac "The output for each exercise can be any of the following:".
idtac "  - 'Closed under the global context', if it is complete".
idtac "  - 'MANUAL', if it is manually graded".
idtac "  - A list of pending axioms, containing unproven assumptions. In this case".
idtac "    the exercise is considered complete, if the axioms are all allowed.".
idtac "".
idtac "********** Standard **********".
idtac "---------- plus_one_r' ---------".
Print Assumptions plus_one_r'.
idtac "---------- booltree_ind_type_correct ---------".
Print Assumptions booltree_ind_type_correct.
idtac "---------- Toy_correct ---------".
Print Assumptions Toy_correct.
idtac "".
idtac "********** Advanced **********".
Abort.
(* 2026-01-07 13:18 *)
(* 2026-01-07 13:18 *)

2962
IndProp.v Normal file

File diff suppressed because it is too large Load Diff

632
IndPropTest.v Normal file
View File

@@ -0,0 +1,632 @@
Set Warnings "-notation-overridden,-parsing".
From Stdlib Require Export String.
From LF Require Import IndProp.
Parameter MISSING: Type.
Module Check.

(* [check_type A B] checks that term [A] has the expected type [B]:
   - if [A]'s type mentions the [MISSING] placeholder, the exercise was
     not attempted, and "Missing:" is reported;
   - otherwise the inferred type [?T] is unified against [B], printing
     "Type: ok" on success and the expected type on failure. *)
Ltac check_type A B :=
match type of A with
| context[MISSING] => idtac "Missing:" A
| ?T => first [unify T B; idtac "Type: ok" | idtac "Type: wrong - should be (" B ")"]
end.

(* [print_manual_grade A] prints the score/comment recorded for a
   manually graded exercise.  [A] is an option: [Some (_ ?S ?C)]
   carries a score [S] and comment string [C]; [None] means ungraded. *)
Ltac print_manual_grade A :=
match eval compute in A with
| Some (_ ?S ?C) =>
idtac "Score:" S;
match eval compute in C with
| ""%string => idtac "Comment: None"
| _ => idtac "Comment:" C
end
| None =>
idtac "Score: Ungraded";
idtac "Comment: None"
end.
End Check.
From LF Require Import IndProp.
Import Check.
Goal True.
idtac "------------------- ev_double --------------------".
idtac " ".
idtac "#> ev_double".
idtac "Possible points: 1".
check_type @ev_double ((forall n : nat, ev (double n))).
idtac "Assumptions:".
Abort.
Print Assumptions ev_double.
Goal True.
idtac " ".
idtac "------------------- Perm3 --------------------".
idtac " ".
idtac "#> Perm3_ex1".
idtac "Possible points: 0.5".
check_type @Perm3_ex1 (
(@Perm3 nat (@cons nat 1 (@cons nat 2 (@cons nat 3 (@nil nat))))
(@cons nat 2 (@cons nat 3 (@cons nat 1 (@nil nat)))))).
idtac "Assumptions:".
Abort.
Print Assumptions Perm3_ex1.
Goal True.
idtac " ".
idtac "#> Perm3_refl".
idtac "Possible points: 0.5".
check_type @Perm3_refl (
(forall (X : Type) (a b c : X),
@Perm3 X (@cons X a (@cons X b (@cons X c (@nil X))))
(@cons X a (@cons X b (@cons X c (@nil X)))))).
idtac "Assumptions:".
Abort.
Print Assumptions Perm3_refl.
Goal True.
idtac " ".
idtac "------------------- le_inversion --------------------".
idtac " ".
idtac "#> le_inversion".
idtac "Possible points: 1".
check_type @le_inversion (
(forall (n m : nat) (_ : le n m),
or (@eq nat n m)
(@ex nat (fun m' : nat => and (@eq nat m (S m')) (le n m'))))).
idtac "Assumptions:".
Abort.
Print Assumptions le_inversion.
Goal True.
idtac " ".
idtac "------------------- inversion_practice --------------------".
idtac " ".
idtac "#> SSSSev__even".
idtac "Possible points: 1".
check_type @SSSSev__even ((forall (n : nat) (_ : ev (S (S (S (S n))))), ev n)).
idtac "Assumptions:".
Abort.
Print Assumptions SSSSev__even.
Goal True.
idtac " ".
idtac "------------------- ev5_nonsense --------------------".
idtac " ".
idtac "#> ev5_nonsense".
idtac "Possible points: 1".
check_type @ev5_nonsense ((forall _ : ev 5, @eq nat (Nat.add 2 2) 9)).
idtac "Assumptions:".
Abort.
Print Assumptions ev5_nonsense.
Goal True.
idtac " ".
idtac "------------------- ev_sum --------------------".
idtac " ".
idtac "#> ev_sum".
idtac "Possible points: 2".
check_type @ev_sum ((forall (n m : nat) (_ : ev n) (_ : ev m), ev (Nat.add n m))).
idtac "Assumptions:".
Abort.
Print Assumptions ev_sum.
Goal True.
idtac " ".
idtac "------------------- ev_ev__ev --------------------".
idtac " ".
idtac "#> ev_ev__ev".
idtac "Advanced".
idtac "Possible points: 3".
check_type @ev_ev__ev ((forall (n m : nat) (_ : ev (Nat.add n m)) (_ : ev n), ev m)).
idtac "Assumptions:".
Abort.
Print Assumptions ev_ev__ev.
Goal True.
idtac " ".
idtac "------------------- Perm3_In --------------------".
idtac " ".
idtac "#> Perm3_In".
idtac "Possible points: 2".
check_type @Perm3_In (
(forall (X : Type) (x : X) (l1 l2 : list X) (_ : @Perm3 X l1 l2)
(_ : @In X x l1),
@In X x l2)).
idtac "Assumptions:".
Abort.
Print Assumptions Perm3_In.
Goal True.
idtac " ".
idtac "------------------- le_facts --------------------".
idtac " ".
idtac "#> le_trans".
idtac "Possible points: 0.5".
check_type @le_trans ((forall (m n o : nat) (_ : le m n) (_ : le n o), le m o)).
idtac "Assumptions:".
Abort.
Print Assumptions le_trans.
Goal True.
idtac " ".
idtac "#> O_le_n".
idtac "Possible points: 0.5".
check_type @O_le_n ((forall n : nat, le 0 n)).
idtac "Assumptions:".
Abort.
Print Assumptions O_le_n.
Goal True.
idtac " ".
idtac "#> n_le_m__Sn_le_Sm".
idtac "Possible points: 0.5".
check_type @n_le_m__Sn_le_Sm ((forall (n m : nat) (_ : le n m), le (S n) (S m))).
idtac "Assumptions:".
Abort.
Print Assumptions n_le_m__Sn_le_Sm.
Goal True.
idtac " ".
idtac "#> Sn_le_Sm__n_le_m".
idtac "Possible points: 1".
check_type @Sn_le_Sm__n_le_m ((forall (n m : nat) (_ : le (S n) (S m)), le n m)).
idtac "Assumptions:".
Abort.
Print Assumptions Sn_le_Sm__n_le_m.
Goal True.
idtac " ".
idtac "#> le_plus_l".
idtac "Possible points: 0.5".
check_type @le_plus_l ((forall a b : nat, le a (Nat.add a b))).
idtac "Assumptions:".
Abort.
Print Assumptions le_plus_l.
Goal True.
idtac " ".
idtac "------------------- plus_le_facts1 --------------------".
idtac " ".
idtac "#> plus_le".
idtac "Possible points: 1".
check_type @plus_le (
(forall (n1 n2 m : nat) (_ : le (Nat.add n1 n2) m), and (le n1 m) (le n2 m))).
idtac "Assumptions:".
Abort.
Print Assumptions plus_le.
Goal True.
idtac " ".
idtac "#> plus_le_cases".
idtac "Possible points: 1".
check_type @plus_le_cases (
(forall (n m p q : nat) (_ : le (Nat.add n m) (Nat.add p q)),
or (le n p) (le m q))).
idtac "Assumptions:".
Abort.
Print Assumptions plus_le_cases.
Goal True.
idtac " ".
idtac "------------------- plus_le_facts2 --------------------".
idtac " ".
idtac "#> plus_le_compat_l".
idtac "Possible points: 0.5".
check_type @plus_le_compat_l (
(forall (n m p : nat) (_ : le n m), le (Nat.add p n) (Nat.add p m))).
idtac "Assumptions:".
Abort.
Print Assumptions plus_le_compat_l.
Goal True.
idtac " ".
idtac "#> plus_le_compat_r".
idtac "Possible points: 0.5".
check_type @plus_le_compat_r (
(forall (n m p : nat) (_ : le n m), le (Nat.add n p) (Nat.add m p))).
idtac "Assumptions:".
Abort.
Print Assumptions plus_le_compat_r.
Goal True.
idtac " ".
idtac "#> le_plus_trans".
idtac "Possible points: 1".
check_type @le_plus_trans ((forall (n m p : nat) (_ : le n m), le n (Nat.add m p))).
idtac "Assumptions:".
Abort.
Print Assumptions le_plus_trans.
Goal True.
idtac " ".
idtac "------------------- R_provability --------------------".
idtac " ".
idtac "#> Manually graded: R.R_provability".
idtac "Possible points: 3".
print_manual_grade R.manual_grade_for_R_provability.
idtac " ".
idtac "------------------- subsequence --------------------".
idtac " ".
idtac "#> subseq_refl".
idtac "Advanced".
idtac "Possible points: 1".
check_type @subseq_refl ((forall l : list nat, subseq l l)).
idtac "Assumptions:".
Abort.
Print Assumptions subseq_refl.
Goal True.
idtac " ".
idtac "#> subseq_app".
idtac "Advanced".
idtac "Possible points: 2".
check_type @subseq_app (
(forall (l1 l2 l3 : list nat) (_ : subseq l1 l2), subseq l1 (@app nat l2 l3))).
idtac "Assumptions:".
Abort.
Print Assumptions subseq_app.
Goal True.
idtac " ".
idtac "#> subseq_trans".
idtac "Advanced".
idtac "Possible points: 3".
check_type @subseq_trans (
(forall (l1 l2 l3 : list nat) (_ : subseq l1 l2) (_ : subseq l2 l3),
subseq l1 l3)).
idtac "Assumptions:".
Abort.
Print Assumptions subseq_trans.
Goal True.
idtac " ".
idtac "------------------- exp_match_ex1 --------------------".
idtac " ".
idtac "#> EmptySet_is_empty".
idtac "Possible points: 0.5".
check_type @EmptySet_is_empty (
(forall (T : Type) (s : list T), not (@exp_match T s (@EmptySet T)))).
idtac "Assumptions:".
Abort.
Print Assumptions EmptySet_is_empty.
Goal True.
idtac " ".
idtac "#> MUnion'".
idtac "Possible points: 0.5".
check_type @MUnion' (
(forall (T : Type) (s : list T) (re1 re2 : reg_exp T)
(_ : or (@exp_match T s re1) (@exp_match T s re2)),
@exp_match T s (@Union T re1 re2))).
idtac "Assumptions:".
Abort.
Print Assumptions MUnion'.
Goal True.
idtac " ".
idtac "#> MStar'".
idtac "Possible points: 2".
check_type @MStar' (
(forall (T : Type) (ss : list (list T)) (re : reg_exp T)
(_ : forall (s : list T) (_ : @In (list T) s ss), @exp_match T s re),
@exp_match T (@fold (list T) (list T) (@app T) ss (@nil T)) (@Star T re))).
idtac "Assumptions:".
Abort.
Print Assumptions MStar'.
Goal True.
idtac " ".
idtac "------------------- re_not_empty --------------------".
idtac " ".
idtac "#> re_not_empty".
idtac "Possible points: 3".
check_type @re_not_empty ((forall (T : Type) (_ : reg_exp T), bool)).
idtac "Assumptions:".
Abort.
Print Assumptions re_not_empty.
Goal True.
idtac " ".
idtac "#> re_not_empty_correct".
idtac "Possible points: 3".
check_type @re_not_empty_correct (
(forall (T : Type) (re : reg_exp T),
iff (@ex (list T) (fun s : list T => @exp_match T s re))
(@eq bool (@re_not_empty T re) true))).
idtac "Assumptions:".
Abort.
Print Assumptions re_not_empty_correct.
Goal True.
idtac " ".
idtac "------------------- weak_pumping_char --------------------".
idtac " ".
idtac "#> Pumping.weak_pumping_char".
idtac "Possible points: 2".
check_type @Pumping.weak_pumping_char (
(forall (T : Type) (x : T)
(_ : le (@Pumping.pumping_constant T (@Char T x))
(@length T (@cons T x (@nil T)))),
@ex (list T)
(fun s1 : list T =>
@ex (list T)
(fun s2 : list T =>
@ex (list T)
(fun s3 : list T =>
and (@eq (list T) (@cons T x (@nil T)) (@app T s1 (@app T s2 s3)))
(and (not (@eq (list T) s2 (@nil T)))
(forall m : nat,
@exp_match T (@app T s1 (@app T (@Pumping.napp T m s2) s3))
(@Char T x)))))))).
idtac "Assumptions:".
Abort.
Print Assumptions Pumping.weak_pumping_char.
Goal True.
idtac " ".
idtac "------------------- weak_pumping_app --------------------".
idtac " ".
idtac "#> Pumping.weak_pumping_app".
idtac "Possible points: 3".
check_type @Pumping.weak_pumping_app (
(forall (T : Type) (s1 s2 : list T) (re1 re2 : reg_exp T)
(_ : @exp_match T s1 re1) (_ : @exp_match T s2 re2)
(_ : forall _ : le (@Pumping.pumping_constant T re1) (@length T s1),
@ex (list T)
(fun s3 : list T =>
@ex (list T)
(fun s4 : list T =>
@ex (list T)
(fun s5 : list T =>
and (@eq (list T) s1 (@app T s3 (@app T s4 s5)))
(and (not (@eq (list T) s4 (@nil T)))
(forall m : nat,
@exp_match T
(@app T s3 (@app T (@Pumping.napp T m s4) s5)) re1))))))
(_ : forall _ : le (@Pumping.pumping_constant T re2) (@length T s2),
@ex (list T)
(fun s3 : list T =>
@ex (list T)
(fun s4 : list T =>
@ex (list T)
(fun s5 : list T =>
and (@eq (list T) s2 (@app T s3 (@app T s4 s5)))
(and (not (@eq (list T) s4 (@nil T)))
(forall m : nat,
@exp_match T
(@app T s3 (@app T (@Pumping.napp T m s4) s5)) re2))))))
(_ : le (@Pumping.pumping_constant T (@App T re1 re2))
(@length T (@app T s1 s2))),
@ex (list T)
(fun s0 : list T =>
@ex (list T)
(fun s3 : list T =>
@ex (list T)
(fun s4 : list T =>
and (@eq (list T) (@app T s1 s2) (@app T s0 (@app T s3 s4)))
(and (not (@eq (list T) s3 (@nil T)))
(forall m : nat,
@exp_match T (@app T s0 (@app T (@Pumping.napp T m s3) s4))
(@App T re1 re2)))))))).
idtac "Assumptions:".
Abort.
Print Assumptions Pumping.weak_pumping_app.
Goal True.
idtac " ".
idtac "------------------- weak_pumping_union_l --------------------".
idtac " ".
idtac "#> Pumping.weak_pumping_union_l".
idtac "Possible points: 3".
check_type @Pumping.weak_pumping_union_l (
(forall (T : Type) (s1 : list T) (re1 re2 : reg_exp T)
(_ : @exp_match T s1 re1)
(_ : forall _ : le (@Pumping.pumping_constant T re1) (@length T s1),
@ex (list T)
(fun s2 : list T =>
@ex (list T)
(fun s3 : list T =>
@ex (list T)
(fun s4 : list T =>
and (@eq (list T) s1 (@app T s2 (@app T s3 s4)))
(and (not (@eq (list T) s3 (@nil T)))
(forall m : nat,
@exp_match T
(@app T s2 (@app T (@Pumping.napp T m s3) s4)) re1))))))
(_ : le (@Pumping.pumping_constant T (@Union T re1 re2)) (@length T s1)),
@ex (list T)
(fun s0 : list T =>
@ex (list T)
(fun s2 : list T =>
@ex (list T)
(fun s3 : list T =>
and (@eq (list T) s1 (@app T s0 (@app T s2 s3)))
(and (not (@eq (list T) s2 (@nil T)))
(forall m : nat,
@exp_match T (@app T s0 (@app T (@Pumping.napp T m s2) s3))
(@Union T re1 re2)))))))).
idtac "Assumptions:".
Abort.
Print Assumptions Pumping.weak_pumping_union_l.
Goal True.
idtac " ".
idtac "------------------- reflect_iff --------------------".
idtac " ".
idtac "#> reflect_iff".
idtac "Possible points: 2".
check_type @reflect_iff (
(forall (P : Prop) (b : bool) (_ : reflect P b), iff P (@eq bool b true))).
idtac "Assumptions:".
Abort.
Print Assumptions reflect_iff.
Goal True.
idtac " ".
idtac "------------------- eqbP_practice --------------------".
idtac " ".
idtac "#> eqbP_practice".
idtac "Possible points: 3".
check_type @eqbP_practice (
(forall (n : nat) (l : list nat) (_ : @eq nat (count n l) 0),
not (@In nat n l))).
idtac "Assumptions:".
Abort.
Print Assumptions eqbP_practice.
Goal True.
idtac " ".
idtac "------------------- nostutter_defn --------------------".
idtac " ".
idtac "#> Manually graded: nostutter".
idtac "Possible points: 3".
print_manual_grade manual_grade_for_nostutter.
idtac " ".
idtac "------------------- filter_challenge --------------------".
idtac " ".
idtac "#> merge_filter".
idtac "Advanced".
idtac "Possible points: 6".
check_type @merge_filter (
(forall (X : Set) (test : forall _ : X, bool) (l l1 l2 : list X)
(_ : @merge X l1 l2 l)
(_ : @All X (fun n : X => @eq bool (test n) true) l1)
(_ : @All X (fun n : X => @eq bool (test n) false) l2),
@eq (list X) (@filter X test l) l1)).
idtac "Assumptions:".
Abort.
Print Assumptions merge_filter.
Goal True.
idtac " ".
idtac " ".
idtac "Max points - standard: 44".
idtac "Max points - advanced: 59".
idtac "".
idtac "Allowed Axioms:".
idtac "functional_extensionality".
idtac "FunctionalExtensionality.functional_extensionality_dep".
idtac "plus_le".
idtac "le_trans".
idtac "le_plus_l".
idtac "add_le_cases".
idtac "Sn_le_Sm__n_le_m".
idtac "O_le_n".
idtac "".
idtac "".
idtac "********** Summary **********".
idtac "".
idtac "Below is a summary of the automatically graded exercises that are incomplete.".
idtac "".
idtac "The output for each exercise can be any of the following:".
idtac " - 'Closed under the global context', if it is complete".
idtac " - 'MANUAL', if it is manually graded".
idtac " - A list of pending axioms, containing unproven assumptions. In this case".
idtac " the exercise is considered complete, if the axioms are all allowed.".
idtac "".
idtac "********** Standard **********".
idtac "---------- ev_double ---------".
Print Assumptions ev_double.
idtac "---------- Perm3_ex1 ---------".
Print Assumptions Perm3_ex1.
idtac "---------- Perm3_refl ---------".
Print Assumptions Perm3_refl.
idtac "---------- le_inversion ---------".
Print Assumptions le_inversion.
idtac "---------- SSSSev__even ---------".
Print Assumptions SSSSev__even.
idtac "---------- ev5_nonsense ---------".
Print Assumptions ev5_nonsense.
idtac "---------- ev_sum ---------".
Print Assumptions ev_sum.
idtac "---------- Perm3_In ---------".
Print Assumptions Perm3_In.
idtac "---------- le_trans ---------".
Print Assumptions le_trans.
idtac "---------- O_le_n ---------".
Print Assumptions O_le_n.
idtac "---------- n_le_m__Sn_le_Sm ---------".
Print Assumptions n_le_m__Sn_le_Sm.
idtac "---------- Sn_le_Sm__n_le_m ---------".
Print Assumptions Sn_le_Sm__n_le_m.
idtac "---------- le_plus_l ---------".
Print Assumptions le_plus_l.
idtac "---------- plus_le ---------".
Print Assumptions plus_le.
idtac "---------- plus_le_cases ---------".
Print Assumptions plus_le_cases.
idtac "---------- plus_le_compat_l ---------".
Print Assumptions plus_le_compat_l.
idtac "---------- plus_le_compat_r ---------".
Print Assumptions plus_le_compat_r.
idtac "---------- le_plus_trans ---------".
Print Assumptions le_plus_trans.
idtac "---------- R_provability ---------".
idtac "MANUAL".
idtac "---------- EmptySet_is_empty ---------".
Print Assumptions EmptySet_is_empty.
idtac "---------- MUnion' ---------".
Print Assumptions MUnion'.
idtac "---------- MStar' ---------".
Print Assumptions MStar'.
idtac "---------- re_not_empty ---------".
Print Assumptions re_not_empty.
idtac "---------- re_not_empty_correct ---------".
Print Assumptions re_not_empty_correct.
idtac "---------- Pumping.weak_pumping_char ---------".
Print Assumptions Pumping.weak_pumping_char.
idtac "---------- Pumping.weak_pumping_app ---------".
Print Assumptions Pumping.weak_pumping_app.
idtac "---------- Pumping.weak_pumping_union_l ---------".
Print Assumptions Pumping.weak_pumping_union_l.
idtac "---------- reflect_iff ---------".
Print Assumptions reflect_iff.
idtac "---------- eqbP_practice ---------".
Print Assumptions eqbP_practice.
idtac "---------- nostutter ---------".
idtac "MANUAL".
idtac "".
idtac "********** Advanced **********".
idtac "---------- ev_ev__ev ---------".
Print Assumptions ev_ev__ev.
idtac "---------- subseq_refl ---------".
Print Assumptions subseq_refl.
idtac "---------- subseq_app ---------".
Print Assumptions subseq_app.
idtac "---------- subseq_trans ---------".
Print Assumptions subseq_trans.
idtac "---------- merge_filter ---------".
Print Assumptions merge_filter.
Abort.
(* 2026-01-07 13:18 *)
(* 2026-01-07 13:18 *)

764
Induction.v Normal file
View File

@@ -0,0 +1,764 @@
(** * Induction: Proof by Induction *)
(* ################################################################# *)
(** * Separate Compilation *)
(** Before getting started on this chapter, we need to import
all of our definitions from the previous chapter: *)
From LF Require Export Basics.
(** For this [Require] command to work, Rocq needs to be able to
find a compiled version of the previous chapter ([Basics.v]).
This compiled version, called [Basics.vo], is analogous to the
[.class] files compiled from [.java] source files and the [.o]
files compiled from [.c] files.
To compile [Basics.v] and obtain [Basics.vo], first make sure that
the files [Basics.v], [Induction.v], and [_CoqProject] are in
the current directory.
The [_CoqProject] file should contain just the following line:
-Q . LF
This maps the current directory ("[.]", which contains [Basics.v],
[Induction.v], etc.) to the prefix (or "logical directory")
"[LF]". Proof General, CoqIDE, and VSCoq read [_CoqProject]
automatically, to find out where to look for the file
[Basics.vo] corresponding to the library [LF.Basics].
Once the files are in place, there are various ways to build
[Basics.vo] from an IDE, or you can build it from the command
line. From an IDE...
- In Proof General: The compilation can be made to happen
automatically when you submit the [Require] line above to PG, by
setting the emacs variable [coq-compile-before-require] to [t].
This can also be found in the menu: "Coq" > "Auto Compilation" >
"Compile Before Require".
- In CoqIDE: One thing you can do on all platforms is open
[Basics.v]; then, in the "Compile" menu, click on "Compile Buffer".
- For VSCode users, open the terminal pane at the bottom and then
follow the command line instructions below. (If you downloaded
the project setup .tgz file, just doing `make` should build all
the code.)
To compile [Basics.v] from the command line...
- First, generate a [Makefile] using the [rocq makefile] utility,
which comes installed with Rocq. (If you obtained the whole volume as
a single archive, a [Makefile] should already exist and you can
skip this step.)
rocq makefile -f _CoqProject *.v -o Makefile
You should rerun that command whenever you add or remove
Rocq files in this directory.
- Now you can compile [Basics.v] by running [make] with the
corresponding [.vo] file as a target:
make Basics.vo
All files in the directory can be compiled by giving no
arguments:
make
- Under the hood, [make] uses the Rocq compiler, [rocq compile]. You can
also run [rocq compile] directly:
rocq compile -Q . LF Basics.v
- Since [make] also calculates dependencies between source files
to compile them in the right order, [make] should generally be
preferred over running [rocq compile] explicitly. But as a last (but
not terrible) resort, you can simply compile each file manually
as you go. For example, before starting work on the present
chapter, you would need to run the following command:
rocq compile -Q . LF Basics.v
Then, once you've finished this chapter, you'd do
rocq compile -Q . LF Induction.v
to get ready to work on the next one. If you ever remove the
.vo files, you'd need to give both commands again (in that
order).
Troubleshooting:
- For many of the alternatives above you need to make sure that
the [rocq] executable is in your [PATH].
- If you get complaints about missing identifiers, it may be
because the "load path" for Rocq is not set up correctly. The
[Print LoadPath.] command may be helpful in sorting out such
issues.
- When trying to compile a later chapter, if you see a message like
Compiled library Induction makes inconsistent assumptions over
library Basics
a common reason is that the library [Basics] was modified and
recompiled without also recompiling [Induction] which depends
on it. Recompile [Induction], or everything if too many files
are affected (for instance by running [make] and if even this
doesn't work then [make clean; make]).
- If you get complaints about missing identifiers later in this
file it may be because the "load path" for Rocq is not set up
correctly. The [Print LoadPath.] command may be helpful in
sorting out such issues.
In particular, if you see a message like
Compiled library Foo makes inconsistent assumptions over
library Bar
check whether you have multiple installations of Rocq on your
machine. It may be that commands (like [rocq compile]) that you execute
in a terminal window are getting a different version of Rocq than
commands executed by Proof General or CoqIDE.
- One more tip for CoqIDE users: If you see messages like [Error:
Unable to locate library Basics], a likely reason is
inconsistencies between compiling things _within CoqIDE_ vs _using
[rocq] from the command line_. This typically happens when there
are two incompatible versions of Rocq installed on your
system (one associated with CoqIDE, and one associated with [rocq]
from the terminal). The workaround for this situation is
compiling using CoqIDE only (i.e. choosing "make" from the menu),
and avoiding using [rocq] directly at all. *)
(* ################################################################# *)
(** * Proof by Induction *)
(** We can prove that [0] is a neutral element for [+] on the _left_
using just [reflexivity]. But the proof that it is also a neutral
element on the _right_ ... *)
Theorem add_0_r_firsttry : forall n:nat,
  n + 0 = n.
(** ... can't be done in the same simple way. Just applying
    [reflexivity] doesn't work, since the [n] in [n + 0] is an arbitrary
    unknown number, so the [match] in the definition of [+] can't be
    simplified. *)
Proof.
  intros n.
  simpl. (* Does nothing! *)
Abort. (* Abandoned on purpose: [+] recurses on its first argument,
          which is an opaque variable here, so [simpl] is stuck. *)
(** And reasoning by cases using [destruct n] doesn't get us much
further: the branch of the case analysis where we assume [n = 0]
goes through just fine, but in the branch where [n = S n'] for
some [n'] we get stuck in exactly the same way. *)
Theorem add_0_r_secondtry : forall n:nat,
  n + 0 = n.
Proof.
  intros n. destruct n as [| n'] eqn:E.
  - (* n = 0 *)
    reflexivity. (* so far so good... *)
  - (* n = S n' *)
    simpl. (* ...but here we are stuck again *)
Abort. (* Abandoned on purpose: [destruct] gives no hypothesis about
          [n'], so the successor case is just as stuck as before. *)
(** We could use [destruct n'] to get a bit further, but,
since [n] can be arbitrarily large, we'll never get all the way
there if we just go on like this. *)
(** To prove interesting facts about numbers, lists, and other
inductively defined sets, we often need a more powerful reasoning
principle: _induction_.
Recall (from a discrete math course, probably) the _principle of
induction over natural numbers_: If [P(n)] is some proposition
involving a natural number [n] and we want to show that [P] holds for
all numbers [n], we can reason like this:
- show that [P(O)] holds;
- show that, for any [n'], if [P(n')] holds, then so does
[P(S n')];
- conclude that [P(n)] holds for all [n].
In Rocq, the steps are the same, except we typically encounter them
in reverse order: we begin with the goal of proving [P(n)] for all
[n] and apply the [induction] tactic to break it down into two
separate subgoals: one where we must show [P(O)] and another where
we must show [P(n') -> P(S n')]. Here's how this works for the
theorem at hand... *)
Theorem add_0_r : forall n:nat, n + 0 = n.
Proof.
  intros n. induction n as [| n' IHn'].
  - (* n = 0 *) reflexivity.
  - (* n = S n', with IHn' : n' + 0 = n' *)
    simpl. rewrite -> IHn'. reflexivity. Qed.
(** Like [destruct], the [induction] tactic takes an [as...]
clause that specifies the names of the variables to be introduced
in the subgoals. Since there are two subgoals, the [as...] clause
has two parts, separated by a vertical bar, [|]. (Strictly
speaking, we can omit the [as...] clause and Rocq will choose names
for us. In practice, this is a bad practice, as Rocq's automatic
choices tend to be confusing.)
In the first subgoal, [n] is replaced by [0]. No new variables
are introduced (so the first part of the [as...] is empty), and
the goal becomes [0 = 0 + 0], which follows easily by simplification.
In the second subgoal, [n] is replaced by [S n'], and the
assumption [n' + 0 = n'] is added to the context with the name
[IHn'] (i.e., the Induction Hypothesis for [n']). These two names
are specified in the second part of the [as...] clause. The goal
in this case becomes [S n' = (S n') + 0], which simplifies to
[S n' = S (n' + 0)], which in turn follows from [IHn']. *)
Theorem minus_n_n : forall n,
  minus n n = 0.
Proof.
  (* WORKED IN CLASS *)
  intros n. induction n as [| n' IHn'].
  - (* n = 0 *)
    simpl. reflexivity.
  - (* n = S n', with IHn' : n' - n' = 0 *)
    simpl. rewrite -> IHn'. reflexivity. Qed.
(** (The use of the [intros] tactic in these proofs is actually
redundant. When applied to a goal that contains quantified
variables, the [induction] tactic will automatically move them
into the context as needed.) *)
(** **** Exercise: 2 stars, standard, especially useful (basic_induction)
Prove the following using induction. You might need previously
proven results. *)
Theorem mul_0_r : forall n:nat,
  n * 0 = 0.
Proof.
  (* Induction on [n]: [0 * 0] reduces to [0]; in the successor case
     [S n' * 0] simplifies to [n' * 0], which is [0] by the IH. *)
  intros n. induction n as [| n' IHn'].
  - (* n = 0 *) reflexivity.
  - (* n = S n' *) simpl. rewrite -> IHn'. reflexivity.
Qed.
Theorem plus_n_Sm : forall n m : nat,
  S (n + m) = n + (S m).
Proof.
  (* Induction on [n], the argument that [+] recurses on. *)
  intros n m. induction n as [| n' IHn'].
  - (* n = 0 *) reflexivity.
  - (* n = S n' *) simpl. rewrite -> IHn'. reflexivity.
Qed.
Theorem add_comm : forall n m : nat,
  n + m = m + n.
Proof.
  (* Induction on [n]; [add_0_r] closes the base case, and
     [plus_n_Sm] pushes the successor through the right summand.
     (Both are usable even while [plus_n_Sm] remains Admitted.) *)
  intros n m. induction n as [| n' IHn'].
  - (* n = 0 *) simpl. rewrite -> add_0_r. reflexivity.
  - (* n = S n' *) simpl. rewrite -> IHn'. rewrite -> plus_n_Sm. reflexivity.
Qed.
Theorem add_assoc : forall n m p : nat,
  n + (m + p) = (n + m) + p.
Proof.
  (* Induction on [n]; both sides shed one [S] per inductive step. *)
  intros n m p. induction n as [| n' IHn'].
  - (* n = 0 *) reflexivity.
  - (* n = S n' *) simpl. rewrite -> IHn'. reflexivity.
Qed.
(** [] *)
(** **** Exercise: 2 stars, standard (double_plus)
Consider the following function, which doubles its argument: *)
(* [double n] computes [2 * n], adding two successors per recursive step. *)
Fixpoint double (n:nat) :=
  match n with
  | O => O
  | S n' => S (S (double n'))
  end.
(** Use induction to prove this simple fact about [double]: *)
Lemma double_plus : forall n, double n = n + n .
Proof.
  (* Induction on [n].  After [simpl] and the IH, the goal is
     [S (S (n' + n')) = S (n' + S n')]; rewriting right-to-left with
     [plus_n_Sm] turns [n' + S n'] into [S (n' + n')]. *)
  intros n. induction n as [| n' IHn'].
  - (* n = 0 *) reflexivity.
  - (* n = S n' *) simpl. rewrite -> IHn'. rewrite <- plus_n_Sm. reflexivity.
Qed.
(** [] *)
(** **** Exercise: 2 stars, standard (eqb_refl)
The following theorem relates the computational equality [=?] on
[nat] with the definitional equality [=] on [bool]. *)
Theorem eqb_refl : forall n : nat,
  (n =? n) = true.
Proof.
  (* Induction on [n]; [S n' =? S n'] reduces to [n' =? n']. *)
  intros n. induction n as [| n' IHn'].
  - (* n = 0 *) reflexivity.
  - (* n = S n' *) simpl. rewrite -> IHn'. reflexivity.
Qed.
(** [] *)
(** **** Exercise: 2 stars, standard, optional (even_S)
One inconvenient aspect of our definition of [even n] is the
recursive call on [n - 2]. This makes proofs about [even n]
harder when done by induction on [n], since we may need an
induction hypothesis about [n - 2]. The following lemma gives an
alternative characterization of [even (S n)] that works better
with induction: *)
Theorem even_S : forall n : nat,
  even (S n) = negb (even n).
Proof.
  (* Induction on [n].  Rewriting the right-hand side with the IH and
     simplifying leaves [even n' = negb (negb (even n'))]; case
     analysis on the boolean [even n'] closes both branches by
     computation (no need for [negb_involutive]). *)
  intros n. induction n as [| n' IHn'].
  - (* n = 0 *) reflexivity.
  - (* n = S n' *) rewrite -> IHn'. simpl. destruct (even n'); reflexivity.
Qed.
(** [] *)
(* ################################################################# *)
(** * Proofs Within Proofs *)
(** In Rocq, as in informal mathematics, large proofs are often
broken into a sequence of theorems, with later proofs referring to
earlier theorems. But sometimes a proof will involve some
miscellaneous fact that is too trivial and of too little general
interest to bother giving it its own top-level name. In such
cases, it is convenient to be able to simply use the required fact
"in place" and then prove it as a separate step. The [replace]
tactic allows us to do this. *)
Theorem mult_0_plus' : forall n m : nat,
  (n + 0 + 0) * m = n * m.
Proof.
  intros n m.
  replace (n + 0 + 0) with n.
  - (* main goal, with [n + 0 + 0] already replaced by [n] *)
    reflexivity.
  - (* side goal: the equality between [n] and [n + 0 + 0] *)
    rewrite add_comm. simpl. rewrite add_comm. reflexivity.
Qed.
(** The tactic [replace e1 with e2] tactic introduces two subgoals.
The first subgoal is the same as the one at the point where we
invoke [replace], except that [e1] is replaced by [e2]. The
second subgoal is the equality [e1 = e2] itself. *)
(** As another example, suppose we want to prove that [(n + m)
+ (p + q) = (m + n) + (p + q)]. The only difference between the
two sides of the [=] is that the arguments [m] and [n] to the
first inner [+] are swapped, so it seems we should be able to use
the commutativity of addition ([add_comm]) to rewrite one into the
other. However, the [rewrite] tactic is not very smart about
_where_ it applies the rewrite. There are three uses of [+] here,
and it turns out that doing [rewrite -> add_comm] will affect only
the _outer_ one... *)
Theorem plus_rearrange_firsttry : forall n m p q : nat,
  (n + m) + (p + q) = (m + n) + (p + q).
Proof.
  intros n m p q.
  (* We just need to swap (n + m) for (m + n)... seems
     like add_comm should do the trick! *)
  rewrite add_comm.
  (* Doesn't work... Rocq rewrites the wrong plus! :-( *)
Abort. (* Abandoned on purpose: the rewrite matched the outermost [+],
          not the inner [n + m]; see [plus_rearrange] below. *)
(** To use [add_comm] at the point where we need it, we can rewrite
[n + m] to [m + n] using [replace] and then prove [n + m = m + n]
using [add_comm]. *)
(* The successful version: [replace] pins down exactly which subterm
   to swap, then [add_comm] discharges the side equality. *)
Theorem plus_rearrange : forall n m p q : nat,
(n + m) + (p + q) = (m + n) + (p + q).
Proof.
intros n m p q.
(* Rewrite only the first summand, leaving (p + q) untouched. *)
replace (n + m) with (m + n).
- reflexivity.
- rewrite add_comm. reflexivity.
Qed.
(* ################################################################# *)
(** * Formal vs. Informal Proof *)
(** "Informal proofs are algorithms; formal proofs are code." *)
(** What constitutes a successful proof of a mathematical claim?
The question has challenged philosophers for millennia, but a
rough and ready answer could be this: A proof of a mathematical
proposition [P] is a written (or spoken) text that instills in the
reader or hearer the certainty that [P] is true -- an unassailable
argument for the truth of [P]. That is, a proof is an act of
communication.
Acts of communication may involve different sorts of readers. On
one hand, the "reader" can be a program like Rocq, in which case
the "belief" that is instilled is that [P] can be mechanically
derived from a certain set of formal logical rules, and the proof
is a recipe that guides the program in checking this fact. Such
recipes are _formal_ proofs.
Alternatively, the reader can be a human being, in which case the
proof will probably be written in English or some other natural
language and will thus necessarily be _informal_. Here, the
criteria for success are less clearly specified. A "valid" proof
is one that makes the reader believe [P]. But the same proof may
be read by many different readers, some of whom may be convinced
by a particular way of phrasing the argument, while others may not
be. Some readers may be particularly pedantic, inexperienced, or
just plain thick-headed; the only way to convince them will be to
make the argument in painstaking detail. Other readers, more
familiar in the area, may find all this detail so overwhelming
that they lose the overall thread; all they want is to be told the
main ideas, since it is easier for them to fill in the details for
themselves than to wade through a written presentation of them.
Ultimately, there is no universal standard, because there is no
single way of writing an informal proof that will convince every
conceivable reader.
In practice, however, mathematicians have developed a rich set of
conventions and idioms for writing about complex mathematical
objects that -- at least within a certain community -- make
communication fairly reliable. The conventions of this stylized
form of communication give a reasonably clear standard for judging
proofs good or bad.
Because we are using Rocq in this course, we will be working
heavily with formal proofs. But this doesn't mean we can
completely forget about informal ones! Formal proofs are useful
in many ways, but they are _not_ very efficient ways of
communicating ideas between human beings. *)
(** For example, here is a proof that addition is associative: *)
(* NOTE: intentionally written as a single unstructured tactic
   sequence; the readable, bulleted version is [add_assoc''] below. *)
Theorem add_assoc' : forall n m p : nat,
n + (m + p) = (n + m) + p.
Proof. intros n m p. induction n as [| n' IHn']. reflexivity.
simpl. rewrite IHn'. reflexivity. Qed.
(** Rocq is perfectly happy with this. For a human, however, it
is difficult to make much sense of it. We can use comments and
bullets to show the structure a little more clearly... *)
(* Same proof as [add_assoc'], with bullets marking the two cases
   generated by [induction n]. *)
Theorem add_assoc'' : forall n m p : nat,
n + (m + p) = (n + m) + p.
Proof.
intros n m p. induction n as [| n' IHn'].
- (* n = 0 *)
reflexivity.
- (* n = S n' *)
simpl. rewrite IHn'. reflexivity. Qed.
(** ... and if you're used to Rocq you might be able to step
through the tactics one after the other in your mind and imagine
the state of the context and goal stack at each point, but if the
proof were even a little bit more complicated this would be next
to impossible.
A (pedantic) mathematician might write the proof something like
this: *)
(** - _Theorem_: For any [n], [m] and [p],
n + (m + p) = (n + m) + p.
_Proof_: By induction on [n].
- First, suppose [n = 0]. We must show that
0 + (m + p) = (0 + m) + p.
This follows directly from the definition of [+].
- Next, suppose [n = S n'], where
n' + (m + p) = (n' + m) + p.
We must now show that
(S n') + (m + p) = ((S n') + m) + p.
By the definition of [+], this follows from
S (n' + (m + p)) = S ((n' + m) + p),
which is immediate from the induction hypothesis. _Qed_. *)
(** The overall form of the proof is basically similar, and of
course this is no accident: Rocq has been designed so that its
[induction] tactic generates the same sub-goals, in the same
order, as the bullet points that a mathematician would usually
write. But there are significant differences of detail: the
formal proof is much more explicit in some ways (e.g., the use of
[reflexivity]) but much less explicit in others (in particular,
the "proof state" at any given point in the Rocq proof is
completely implicit, whereas the informal proof reminds the reader
several times where things stand). *)
(** **** Exercise: 2 stars, advanced, optional (add_comm_informal)
Translate your solution for [add_comm] into an informal proof:
Theorem: Addition is commutative.
Proof: (* FILL IN HERE *)
*)
(* Do not modify the following line: *)
Definition manual_grade_for_add_comm_informal : option (nat*string) := None.
(** [] *)
(** **** Exercise: 2 stars, standard, optional (eqb_refl_informal)
Write an informal proof of the following theorem, using the
informal proof of [add_assoc] as a model. Don't just
paraphrase the Rocq tactics into English!
Theorem: [(n =? n) = true] for any [n].
Proof: (* FILL IN HERE *)
*)
(* Do not modify the following line: *)
Definition manual_grade_for_eqb_refl_informal : option (nat*string) := None.
(** [] *)
(* ################################################################# *)
(** * More Exercises *)
(** **** Exercise: 3 stars, standard, especially useful (mul_comm)
Use [replace] to help prove [add_shuffle3]. You don't need to
use induction yet. *)
(* Swap the two leading summands. Proof: reassociate both sides with
   [add_assoc], then commute the leading pair via [replace], as the
   exercise suggests -- no induction needed. *)
Theorem add_shuffle3 : forall n m p : nat,
  n + (m + p) = m + (n + p).
Proof.
  intros n m p.
  rewrite add_assoc.
  rewrite add_assoc.
  (* Goal is now ((n + m) + p = (m + n) + p); swap the first pair. *)
  replace (n + m) with (m + n).
  - reflexivity.
  - rewrite add_comm. reflexivity.
Qed.
(** Now prove commutativity of multiplication. You will probably want
to look for (or define and prove) a "helper" theorem to be used in
the proof of this one. Hint: what is [n * (1 + k)]? *)
(* Helper: multiplying by a successor adds one more copy of [n].
   This is the [n * (1 + k)] fact suggested by the hint above.
   (Named [mul_n_Sm] to avoid shadowing the stdlib's [mult_n_Sm].) *)
Lemma mul_n_Sm : forall n m : nat,
  n * S m = n + n * m.
Proof.
  intros n m. induction n as [| n' IHn'].
  - (* n = 0 *) reflexivity.
  - (* n = S n' *) simpl. rewrite IHn'. rewrite add_shuffle3. reflexivity.
Qed.

(* Commutativity of multiplication, by induction on the second factor. *)
Theorem mul_comm : forall m n : nat,
  m * n = n * m.
Proof.
  intros m n. induction n as [| n' IHn'].
  - (* n = 0 *) simpl. rewrite mul_0_r. reflexivity.
  - (* n = S n' *) simpl. rewrite mul_n_Sm. rewrite IHn'. reflexivity.
Qed.
(** [] *)
(** **** Exercise: 3 stars, standard, optional (more_exercises)
Take a piece of paper. For each of the following theorems, first
_think_ about whether (a) it can be proved using only
simplification and rewriting, (b) it also requires case
analysis ([destruct]), or (c) it also requires induction. Write
down your prediction. Then fill in the proof. (There is no need
to turn in your piece of paper; this is just to encourage you to
reflect before you hack!) *)
(* Requires induction: [n <=? n] only reduces once the head
   constructor of [n] is known. *)
Theorem leb_refl : forall n:nat,
  (n <=? n) = true.
Proof.
  intros n. induction n as [| n' IHn'].
  - reflexivity.
  - simpl. rewrite IHn'. reflexivity.
Qed.
(* Pure computation: [0 =? S n] reduces directly to [false]. *)
Theorem zero_neqb_S : forall n:nat,
  0 =? (S n) = false.
Proof.
  intros n. reflexivity.
Qed.
(* [andb] matches on its first argument, so case analysis on [b] is
   enough; each case then computes. *)
Theorem andb_false_r : forall b : bool,
  andb b false = false.
Proof.
  intros b. destruct b.
  - reflexivity.
  - reflexivity.
Qed.
(* Pure computation: [S n =? 0] reduces directly to [false]. *)
Theorem S_neqb_0 : forall n:nat,
  (S n) =? 0 = false.
Proof.
  intros n. reflexivity.
Qed.
(* [1 * n] computes to [n + 0]; finish with commutativity, after
   which [0 + n] reduces to [n]. *)
Theorem mult_1_l : forall n:nat, 1 * n = n.
Proof.
  intros n. simpl. rewrite add_comm. reflexivity.
Qed.
(* A propositional tautology over booleans: exhaustive case analysis
   on the four combinations of [b] and [c], each closed by
   computation. *)
Theorem all3_spec : forall b c : bool,
  orb
    (andb b c)
    (orb (negb b)
         (negb c))
  = true.
Proof.
  intros b c. destruct b.
  - destruct c.
    + reflexivity.
    + reflexivity.
  - destruct c.
    + reflexivity.
    + reflexivity.
Qed.
(* Right distributivity, by induction on [n]; the successor case
   reassociates the three summands with [add_assoc]. *)
Theorem mult_plus_distr_r : forall n m p : nat,
  (n + m) * p = (n * p) + (m * p).
Proof.
  intros n m p. induction n as [| n' IHn'].
  - (* n = 0 *) reflexivity.
  - (* n = S n' *) simpl. rewrite IHn'. rewrite add_assoc. reflexivity.
Qed.
(* Associativity of multiplication, by induction on [n]; the
   successor case distributes with [mult_plus_distr_r]. *)
Theorem mult_assoc : forall n m p : nat,
  n * (m * p) = (n * m) * p.
Proof.
  intros n m p. induction n as [| n' IHn'].
  - (* n = 0 *) reflexivity.
  - (* n = S n' *) simpl. rewrite IHn'. rewrite mult_plus_distr_r. reflexivity.
Qed.
(** [] *)
(* ################################################################# *)
(** * Nat to Bin and Back to Nat *)
(** Recall the [bin] type we defined in [Basics]: *)
(* Binary numerals, least-significant bit first: [Z] is the empty
   numeral, and [B0]/[B1] append a low-order bit.  (Presumably
   [B0 b] denotes 2*b and [B1 b] denotes 2*b+1 -- confirm against
   your [bin_to_nat] from [Basics].) *)
Inductive bin : Type :=
| Z
| B0 (n : bin)
| B1 (n : bin)
.
(** Before you start working on the next exercise, replace the stub
definitions of [incr] and [bin_to_nat], below, with your solution
from [Basics]. That will make it possible for this file to
be graded on its own. *)
(* Increment a binary numeral (least-significant bit first):
   a low [B0] bit flips to [B1]; a low [B1] bit flips to [B0] and
   the carry propagates into the higher bits. *)
Fixpoint incr (m:bin) : bin :=
  match m with
  | Z => B1 Z
  | B0 n => B1 n
  | B1 n => B0 (incr n)
  end.
(* Interpret a binary numeral as a unary [nat]: [B0] doubles the
   value of the higher bits, [B1] doubles and adds one.  Written
   with [double] so that later proofs can reuse [double_incr]. *)
Fixpoint bin_to_nat (m:bin) : nat :=
  match m with
  | Z => O
  | B0 n => double (bin_to_nat n)
  | B1 n => S (double (bin_to_nat n))
  end.
(** In [Basics], we did some unit testing of [bin_to_nat], but we
didn't prove its correctness. Now we'll do so. *)
(** **** Exercise: 3 stars, standard, especially useful (binary_commute)
Prove that the following diagram commutes:
incr
bin ----------------------> bin
| |
bin_to_nat | | bin_to_nat
| |
v v
nat ----------------------> nat
S
That is, incrementing a binary number and then converting it to
a (unary) natural number yields the same result as first converting
it to a natural number and then incrementing.
If you want to change your previous definitions of [incr] or [bin_to_nat]
to make the property easier to prove, feel free to do so! *)
(* Exercise stub: expected shape is induction on [b]; it can only be
   completed once [incr] and [bin_to_nat] above have real
   (non-Admitted) definitions, since the proof must compute with them. *)
Theorem bin_to_nat_pres_incr : forall b : bin,
bin_to_nat (incr b) = 1 + bin_to_nat b.
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
(** **** Exercise: 3 stars, standard (nat_bin_nat) *)
(** Write a function to convert natural numbers to binary numbers. *)
(* Convert unary to binary by repeated increment.  This keeps the
   recursion aligned with [bin_to_nat_pres_incr], which is exactly
   the lemma the hint below says to use for [nat_bin_nat]. *)
Fixpoint nat_to_bin (n:nat) : bin :=
  match n with
  | O => Z
  | S n' => incr (nat_to_bin n')
  end.
(** Prove that, if we start with any [nat], convert it to [bin], and
convert it back, we get the same [nat] which we started with.
Hint: This proof should go through smoothly using the previous
exercise about [incr] as a lemma. If not, revisit your definitions
of the functions involved and consider whether they are more
complicated than necessary: the shape of a proof by induction will
match the recursive structure of the program being verified, so
make the recursions as simple as possible. *)
(* Exercise stub: induction on [n] using [bin_to_nat_pres_incr] in
   the successor case; requires the definitions above to be filled in. *)
Theorem nat_bin_nat : forall n, bin_to_nat (nat_to_bin n) = n.
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
(* ################################################################# *)
(** * Bin to Nat and Back to Bin (Advanced) *)
(** The opposite direction -- starting with a [bin], converting to [nat],
then converting back to [bin] -- turns out to be problematic. That
is, the following theorem does not hold. *)
(* Stated and immediately abandoned: the text below explains why the
   round trip does not return the original [bin]. *)
Theorem bin_nat_bin_fails : forall b, nat_to_bin (bin_to_nat b) = b.
Abort.
(** Let's explore why that theorem fails, and how to prove a modified
version of it. We'll start with some lemmas that might seem
unrelated, but will turn out to be relevant. *)
(** **** Exercise: 2 stars, advanced (double_bin) *)
(** Prove this lemma about [double], which we defined earlier in the
chapter. *)
(* Immediate by computation: [double (S n)] unfolds to
   [S (S (double n))] by the definition of [double]. *)
Lemma double_incr : forall n : nat, double (S n) = S (S (double n)).
Proof.
  intros n. simpl. reflexivity.
Qed.
(** Now define a similar doubling function for [bin]. *)
(* Double a binary numeral by appending a low-order 0 bit.  Mapping
   [Z] to [Z] (rather than [B0 Z]) avoids creating a leading zero
   bit, which is what makes [double_bin_zero] below hold. *)
Definition double_bin (b:bin) : bin :=
  match b with
  | Z => Z
  | _ => B0 b
  end.
(** Check that your function correctly doubles zero. *)
(* Exercise stub: once [double_bin] above is defined so that it maps
   [Z] to [Z], this should follow by [reflexivity]. *)
Example double_bin_zero : double_bin Z = Z.
(* FILL IN HERE *) Admitted.
(** Prove this lemma, which corresponds to [double_incr]. *)
(* Exercise stub: the [bin] analogue of [double_incr]; requires a
   real definition of [double_bin] above before it can be proved. *)
Lemma double_incr_bin : forall b,
double_bin (incr b) = incr (incr (double_bin b)).
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
(** Let's return to our desired theorem: *)
(* Restated and abandoned again, as a reminder of the goal before
   introducing [normalize] below. *)
Theorem bin_nat_bin_fails : forall b, nat_to_bin (bin_to_nat b) = b.
Abort.
(** The theorem fails because there are some [bin] such that we won't
necessarily get back to the _original_ [bin], but instead to an
"equivalent" [bin]. (We deliberately leave that notion undefined
here for you to think about.)
Explain in a comment, below, why this failure occurs. Your
explanation will not be graded, but it's important that you get it
clear in your mind before going on to the next part. If you're
stuck on this, think about alternative implementations of
[double_bin] that might have failed to satisfy [double_bin_zero]
yet otherwise seem correct. *)
(* FILL IN HERE *)
(** To solve that problem, we can introduce a _normalization_ function
that selects the simplest [bin] out of all the equivalent
[bin]. Then we can prove that the conversion from [bin] to [nat] and
back again produces that normalized, simplest [bin]. *)
(** **** Exercise: 4 stars, advanced (bin_nat_bin) *)
(** Define [normalize]. You will need to keep its definition as simple
as possible for later proofs to go smoothly. Do not use
[bin_to_nat] or [nat_to_bin], but do use [double_bin].
Hint: Structure the recursion such that it _always_ reaches the
end of the [bin] and processes each bit _only_ once. Do not
try to "look ahead" at future bits. *)
(* Normalize by rebuilding the numeral bottom-up with [double_bin]
   and [incr].  Because [double_bin] maps [Z] to [Z], a [B0] sitting
   above an all-zero tail is erased, so leading zero bits disappear
   in a single pass over the bits. *)
Fixpoint normalize (b:bin) : bin :=
  match b with
  | Z => Z
  | B0 b' => double_bin (normalize b')
  | B1 b' => incr (double_bin (normalize b'))
  end.
(** It would be wise to do some [Example] proofs to check that your definition of
[normalize] works the way you intend before you proceed. They won't be graded,
but fill them in below. *)
(* FILL IN HERE *)
(** Finally, prove the main theorem. The inductive cases could be a
bit tricky.
Hint: Start by trying to prove the main statement, see where you
get stuck, and see if you can find a lemma -- perhaps requiring
its own inductive proof -- that will allow the main proof to make
progress. We have one lemma for the [B0] case (which also makes
use of [double_incr_bin]) and another for the [B1] case. *)
(* Exercise stub: induction on [b], with one helper lemma per bit
   constructor as the hint above describes; requires the definitions
   and lemmas above to be completed first. *)
Theorem bin_nat_bin : forall b, nat_to_bin (bin_to_nat b) = normalize b.
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
(* 2026-01-07 13:17 *)

257
InductionTest.v Normal file
View File

@@ -0,0 +1,257 @@
Set Warnings "-notation-overridden,-parsing".
From Stdlib Require Export String.
From LF Require Import Induction.
(* Auto-generated grader scaffolding.  [MISSING] is a sentinel type:
   stub solutions mention it, letting the checker detect unfinished
   definitions purely by inspecting types. *)
Parameter MISSING: Type.
Module Check.
(* Report whether [A] has the expected type [B]; prints "Missing:"
   when [A]'s type still mentions the [MISSING] sentinel. *)
Ltac check_type A B :=
match type of A with
| context[MISSING] => idtac "Missing:" A
| ?T => first [unify T B; idtac "Type: ok" | idtac "Type: wrong - should be (" B ")"]
end.
(* Print the score and comment stored in a manually graded exercise
   (an [option] holding a score and a string comment). *)
Ltac print_manual_grade A :=
match eval compute in A with
| Some (_ ?S ?C) =>
idtac "Score:" S;
match eval compute in C with
| ""%string => idtac "Comment: None"
| _ => idtac "Comment:" C
end
| None =>
idtac "Score: Ungraded";
idtac "Comment: None"
end.
End Check.
From LF Require Import Induction.
Import Check.
(* Auto-generated grading script.  Each segment below follows the
   same pattern: open a dummy goal, print a banner and point value,
   check the exercise's statement with [check_type], abort, then
   list its remaining assumptions with [Print Assumptions]. *)
Goal True.
idtac "------------------- basic_induction --------------------".
idtac " ".
idtac "#> mul_0_r".
idtac "Possible points: 0.5".
check_type @mul_0_r ((forall n : nat, @eq nat (Nat.mul n 0) 0)).
idtac "Assumptions:".
Abort.
Print Assumptions mul_0_r.
Goal True.
idtac " ".
idtac "#> plus_n_Sm".
idtac "Possible points: 0.5".
check_type @plus_n_Sm ((forall n m : nat, @eq nat (S (Nat.add n m)) (Nat.add n (S m)))).
idtac "Assumptions:".
Abort.
Print Assumptions plus_n_Sm.
Goal True.
idtac " ".
idtac "#> add_comm".
idtac "Possible points: 0.5".
check_type @add_comm ((forall n m : nat, @eq nat (Nat.add n m) (Nat.add m n))).
idtac "Assumptions:".
Abort.
Print Assumptions add_comm.
Goal True.
idtac " ".
idtac "#> add_assoc".
idtac "Possible points: 0.5".
check_type @add_assoc (
(forall n m p : nat,
@eq nat (Nat.add n (Nat.add m p)) (Nat.add (Nat.add n m) p))).
idtac "Assumptions:".
Abort.
Print Assumptions add_assoc.
Goal True.
idtac " ".
idtac "------------------- double_plus --------------------".
idtac " ".
idtac "#> double_plus".
idtac "Possible points: 2".
check_type @double_plus ((forall n : nat, @eq nat (double n) (Nat.add n n))).
idtac "Assumptions:".
Abort.
Print Assumptions double_plus.
Goal True.
idtac " ".
idtac "------------------- eqb_refl --------------------".
idtac " ".
idtac "#> eqb_refl".
idtac "Possible points: 2".
check_type @eqb_refl ((forall n : nat, @eq bool (eqb n n) true)).
idtac "Assumptions:".
Abort.
Print Assumptions eqb_refl.
Goal True.
idtac " ".
idtac "------------------- mul_comm --------------------".
idtac " ".
idtac "#> add_shuffle3".
idtac "Possible points: 1".
check_type @add_shuffle3 (
(forall n m p : nat,
@eq nat (Nat.add n (Nat.add m p)) (Nat.add m (Nat.add n p)))).
idtac "Assumptions:".
Abort.
Print Assumptions add_shuffle3.
Goal True.
idtac " ".
idtac "#> mul_comm".
idtac "Possible points: 2".
check_type @mul_comm ((forall m n : nat, @eq nat (Nat.mul m n) (Nat.mul n m))).
idtac "Assumptions:".
Abort.
Print Assumptions mul_comm.
Goal True.
idtac " ".
idtac "------------------- binary_commute --------------------".
idtac " ".
idtac "#> bin_to_nat_pres_incr".
idtac "Possible points: 3".
check_type @bin_to_nat_pres_incr (
(forall b : bin, @eq nat (bin_to_nat (incr b)) (Nat.add 1 (bin_to_nat b)))).
idtac "Assumptions:".
Abort.
Print Assumptions bin_to_nat_pres_incr.
Goal True.
idtac " ".
idtac "------------------- nat_bin_nat --------------------".
idtac " ".
idtac "#> nat_bin_nat".
idtac "Possible points: 3".
check_type @nat_bin_nat ((forall n : nat, @eq nat (bin_to_nat (nat_to_bin n)) n)).
idtac "Assumptions:".
Abort.
Print Assumptions nat_bin_nat.
Goal True.
idtac " ".
idtac "------------------- double_bin --------------------".
idtac " ".
idtac "#> double_incr".
idtac "Advanced".
idtac "Possible points: 0.5".
check_type @double_incr ((forall n : nat, @eq nat (double (S n)) (S (S (double n))))).
idtac "Assumptions:".
Abort.
Print Assumptions double_incr.
Goal True.
idtac " ".
idtac "#> double_bin_zero".
idtac "Advanced".
idtac "Possible points: 0.5".
check_type @double_bin_zero ((@eq bin (double_bin Z) Z)).
idtac "Assumptions:".
Abort.
Print Assumptions double_bin_zero.
Goal True.
idtac " ".
idtac "#> double_incr_bin".
idtac "Advanced".
idtac "Possible points: 1".
check_type @double_incr_bin (
(forall b : bin, @eq bin (double_bin (incr b)) (incr (incr (double_bin b))))).
idtac "Assumptions:".
Abort.
Print Assumptions double_incr_bin.
Goal True.
idtac " ".
idtac "------------------- bin_nat_bin --------------------".
idtac " ".
idtac "#> bin_nat_bin".
idtac "Advanced".
idtac "Possible points: 6".
check_type @bin_nat_bin (
(forall b : bin, @eq bin (nat_to_bin (bin_to_nat b)) (normalize b))).
idtac "Assumptions:".
Abort.
Print Assumptions bin_nat_bin.
Goal True.
idtac " ".
idtac " ".
idtac "Max points - standard: 15".
idtac "Max points - advanced: 23".
idtac "".
idtac "Allowed Axioms:".
idtac "functional_extensionality".
idtac "FunctionalExtensionality.functional_extensionality_dep".
idtac "plus_le".
idtac "le_trans".
idtac "le_plus_l".
idtac "add_le_cases".
idtac "Sn_le_Sm__n_le_m".
idtac "O_le_n".
idtac "".
idtac "".
idtac "********** Summary **********".
idtac "".
idtac "Below is a summary of the automatically graded exercises that are incomplete.".
idtac "".
idtac "The output for each exercise can be any of the following:".
idtac " - 'Closed under the global context', if it is complete".
idtac " - 'MANUAL', if it is manually graded".
idtac " - A list of pending axioms, containing unproven assumptions. In this case".
idtac " the exercise is considered complete, if the axioms are all allowed.".
idtac "".
idtac "********** Standard **********".
idtac "---------- mul_0_r ---------".
Print Assumptions mul_0_r.
idtac "---------- plus_n_Sm ---------".
Print Assumptions plus_n_Sm.
idtac "---------- add_comm ---------".
Print Assumptions add_comm.
idtac "---------- add_assoc ---------".
Print Assumptions add_assoc.
idtac "---------- double_plus ---------".
Print Assumptions double_plus.
idtac "---------- eqb_refl ---------".
Print Assumptions eqb_refl.
idtac "---------- add_shuffle3 ---------".
Print Assumptions add_shuffle3.
idtac "---------- mul_comm ---------".
Print Assumptions mul_comm.
idtac "---------- bin_to_nat_pres_incr ---------".
Print Assumptions bin_to_nat_pres_incr.
idtac "---------- nat_bin_nat ---------".
Print Assumptions nat_bin_nat.
idtac "".
idtac "********** Advanced **********".
idtac "---------- double_incr ---------".
Print Assumptions double_incr.
idtac "---------- double_bin_zero ---------".
Print Assumptions double_bin_zero.
idtac "---------- double_incr_bin ---------".
Print Assumptions double_incr_bin.
idtac "---------- bin_nat_bin ---------".
Print Assumptions bin_nat_bin.
Abort.
(* 2026-01-07 13:18 *)
(* 2026-01-07 13:18 *)

19
LICENSE Normal file
View File

@@ -0,0 +1,19 @@
Copyright (c) 2026
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

1207
Lists.v Normal file

File diff suppressed because it is too large Load Diff

544
ListsTest.v Normal file
View File

@@ -0,0 +1,544 @@
Set Warnings "-notation-overridden,-parsing".
From Stdlib Require Export String.
From LF Require Import Lists.
Parameter MISSING: Type.
Module Check.
Ltac check_type A B :=
match type of A with
| context[MISSING] => idtac "Missing:" A
| ?T => first [unify T B; idtac "Type: ok" | idtac "Type: wrong - should be (" B ")"]
end.
Ltac print_manual_grade A :=
match eval compute in A with
| Some (_ ?S ?C) =>
idtac "Score:" S;
match eval compute in C with
| ""%string => idtac "Comment: None"
| _ => idtac "Comment:" C
end
| None =>
idtac "Score: Ungraded";
idtac "Comment: None"
end.
End Check.
From LF Require Import Lists.
Import Check.
Goal True.
idtac "------------------- snd_fst_is_swap --------------------".
idtac " ".
idtac "#> NatList.snd_fst_is_swap".
idtac "Possible points: 1".
check_type @NatList.snd_fst_is_swap (
(forall p : NatList.natprod,
@eq NatList.natprod (NatList.pair (NatList.snd p) (NatList.fst p))
(NatList.swap_pair p))).
idtac "Assumptions:".
Abort.
Print Assumptions NatList.snd_fst_is_swap.
Goal True.
idtac " ".
idtac "------------------- list_funs --------------------".
idtac " ".
idtac "#> NatList.test_nonzeros".
idtac "Possible points: 0.5".
check_type @NatList.test_nonzeros (
(@eq NatList.natlist
(NatList.nonzeros
(NatList.cons 0
(NatList.cons 1
(NatList.cons 0
(NatList.cons 2
(NatList.cons 3
(NatList.cons 0 (NatList.cons 0 NatList.nil))))))))
(NatList.cons 1 (NatList.cons 2 (NatList.cons 3 NatList.nil))))).
idtac "Assumptions:".
Abort.
Print Assumptions NatList.test_nonzeros.
Goal True.
idtac " ".
idtac "#> NatList.test_oddmembers".
idtac "Possible points: 0.5".
check_type @NatList.test_oddmembers (
(@eq NatList.natlist
(NatList.oddmembers
(NatList.cons 0
(NatList.cons 1
(NatList.cons 0
(NatList.cons 2
(NatList.cons 3
(NatList.cons 0 (NatList.cons 0 NatList.nil))))))))
(NatList.cons 1 (NatList.cons 3 NatList.nil)))).
idtac "Assumptions:".
Abort.
Print Assumptions NatList.test_oddmembers.
Goal True.
idtac " ".
idtac "#> NatList.test_countoddmembers2".
idtac "Possible points: 0.5".
check_type @NatList.test_countoddmembers2 (
(@eq nat
(NatList.countoddmembers
(NatList.cons 0 (NatList.cons 2 (NatList.cons 4 NatList.nil))))
0)).
idtac "Assumptions:".
Abort.
Print Assumptions NatList.test_countoddmembers2.
Goal True.
idtac " ".
idtac "#> NatList.test_countoddmembers3".
idtac "Possible points: 0.5".
check_type @NatList.test_countoddmembers3 (
(@eq nat (NatList.countoddmembers NatList.nil) 0)).
idtac "Assumptions:".
Abort.
Print Assumptions NatList.test_countoddmembers3.
Goal True.
idtac " ".
idtac "------------------- alternate --------------------".
idtac " ".
idtac "#> NatList.test_alternate1".
idtac "Advanced".
idtac "Possible points: 1".
check_type @NatList.test_alternate1 (
(@eq NatList.natlist
(NatList.alternate
(NatList.cons 1 (NatList.cons 2 (NatList.cons 3 NatList.nil)))
(NatList.cons 4 (NatList.cons 5 (NatList.cons 6 NatList.nil))))
(NatList.cons 1
(NatList.cons 4
(NatList.cons 2
(NatList.cons 5 (NatList.cons 3 (NatList.cons 6 NatList.nil)))))))).
idtac "Assumptions:".
Abort.
Print Assumptions NatList.test_alternate1.
Goal True.
idtac " ".
idtac "#> NatList.test_alternate2".
idtac "Advanced".
idtac "Possible points: 1".
check_type @NatList.test_alternate2 (
(@eq NatList.natlist
(NatList.alternate (NatList.cons 1 NatList.nil)
(NatList.cons 4 (NatList.cons 5 (NatList.cons 6 NatList.nil))))
(NatList.cons 1
(NatList.cons 4 (NatList.cons 5 (NatList.cons 6 NatList.nil)))))).
idtac "Assumptions:".
Abort.
Print Assumptions NatList.test_alternate2.
Goal True.
idtac " ".
idtac "#> NatList.test_alternate4".
idtac "Advanced".
idtac "Possible points: 1".
check_type @NatList.test_alternate4 (
(@eq NatList.natlist
(NatList.alternate NatList.nil
(NatList.cons 20 (NatList.cons 30 NatList.nil)))
(NatList.cons 20 (NatList.cons 30 NatList.nil)))).
idtac "Assumptions:".
Abort.
Print Assumptions NatList.test_alternate4.
Goal True.
idtac " ".
idtac "------------------- bag_functions --------------------".
idtac " ".
idtac "#> NatList.test_count2".
idtac "Possible points: 0.5".
check_type @NatList.test_count2 (
(@eq nat
(NatList.count 6
(NatList.cons 1
(NatList.cons 2
(NatList.cons 3
(NatList.cons 1 (NatList.cons 4 (NatList.cons 1 NatList.nil)))))))
0)).
idtac "Assumptions:".
Abort.
Print Assumptions NatList.test_count2.
Goal True.
idtac " ".
idtac "#> NatList.test_sum1".
idtac "Possible points: 0.5".
check_type @NatList.test_sum1 (
(@eq nat
(NatList.count 1
(NatList.sum
(NatList.cons 1 (NatList.cons 2 (NatList.cons 3 NatList.nil)))
(NatList.cons 1 (NatList.cons 4 (NatList.cons 1 NatList.nil)))))
3)).
idtac "Assumptions:".
Abort.
Print Assumptions NatList.test_sum1.
Goal True.
idtac " ".
idtac "#> NatList.test_add1".
idtac "Possible points: 0.5".
check_type @NatList.test_add1 (
(@eq nat
(NatList.count 1
(NatList.add 1
(NatList.cons 1 (NatList.cons 4 (NatList.cons 1 NatList.nil)))))
3)).
idtac "Assumptions:".
Abort.
Print Assumptions NatList.test_add1.
Goal True.
idtac " ".
idtac "#> NatList.test_add2".
idtac "Possible points: 0.5".
check_type @NatList.test_add2 (
(@eq nat
(NatList.count 5
(NatList.add 1
(NatList.cons 1 (NatList.cons 4 (NatList.cons 1 NatList.nil)))))
0)).
idtac "Assumptions:".
Abort.
Print Assumptions NatList.test_add2.
Goal True.
idtac " ".
idtac "#> NatList.test_member1".
idtac "Possible points: 0.5".
check_type @NatList.test_member1 (
(@eq bool
(NatList.member 1
(NatList.cons 1 (NatList.cons 4 (NatList.cons 1 NatList.nil))))
true)).
idtac "Assumptions:".
Abort.
Print Assumptions NatList.test_member1.
Goal True.
idtac " ".
idtac "#> NatList.test_member2".
idtac "Possible points: 0.5".
check_type @NatList.test_member2 (
(@eq bool
(NatList.member 2
(NatList.cons 1 (NatList.cons 4 (NatList.cons 1 NatList.nil))))
false)).
idtac "Assumptions:".
Abort.
Print Assumptions NatList.test_member2.
Goal True.
idtac " ".
idtac "------------------- list_exercises --------------------".
idtac " ".
idtac "#> NatList.app_nil_r".
idtac "Possible points: 0.5".
check_type @NatList.app_nil_r (
(forall l : NatList.natlist,
@eq NatList.natlist (NatList.app l NatList.nil) l)).
idtac "Assumptions:".
Abort.
Print Assumptions NatList.app_nil_r.
Goal True.
idtac " ".
idtac "#> NatList.rev_app_distr".
idtac "Possible points: 0.5".
check_type @NatList.rev_app_distr (
(forall l1 l2 : NatList.natlist,
@eq NatList.natlist (NatList.rev (NatList.app l1 l2))
(NatList.app (NatList.rev l2) (NatList.rev l1)))).
idtac "Assumptions:".
Abort.
Print Assumptions NatList.rev_app_distr.
Goal True.
idtac " ".
idtac "#> NatList.rev_involutive".
idtac "Possible points: 0.5".
check_type @NatList.rev_involutive (
(forall l : NatList.natlist,
@eq NatList.natlist (NatList.rev (NatList.rev l)) l)).
idtac "Assumptions:".
Abort.
Print Assumptions NatList.rev_involutive.
Goal True.
idtac " ".
idtac "#> NatList.app_assoc4".
idtac "Possible points: 0.5".
check_type @NatList.app_assoc4 (
(forall l1 l2 l3 l4 : NatList.natlist,
@eq NatList.natlist (NatList.app l1 (NatList.app l2 (NatList.app l3 l4)))
(NatList.app (NatList.app (NatList.app l1 l2) l3) l4))).
idtac "Assumptions:".
Abort.
Print Assumptions NatList.app_assoc4.
Goal True.
idtac " ".
idtac "#> NatList.nonzeros_app".
idtac "Possible points: 1".
check_type @NatList.nonzeros_app (
(forall l1 l2 : NatList.natlist,
@eq NatList.natlist (NatList.nonzeros (NatList.app l1 l2))
(NatList.app (NatList.nonzeros l1) (NatList.nonzeros l2)))).
idtac "Assumptions:".
Abort.
Print Assumptions NatList.nonzeros_app.
Goal True.
idtac " ".
idtac "------------------- eqblist --------------------".
idtac " ".
idtac "#> NatList.eqblist_refl".
idtac "Possible points: 2".
check_type @NatList.eqblist_refl (
(forall l : NatList.natlist, @eq bool true (NatList.eqblist l l))).
idtac "Assumptions:".
Abort.
Print Assumptions NatList.eqblist_refl.
Goal True.
idtac " ".
idtac "------------------- count_member_nonzero --------------------".
idtac " ".
idtac "#> NatList.count_member_nonzero".
idtac "Possible points: 1".
check_type @NatList.count_member_nonzero (
(forall s : NatList.bag,
@eq bool (leb 1 (NatList.count 1 (NatList.cons 1 s))) true)).
idtac "Assumptions:".
Abort.
Print Assumptions NatList.count_member_nonzero.
Goal True.
idtac " ".
idtac "------------------- remove_does_not_increase_count --------------------".
idtac " ".
idtac "#> NatList.remove_does_not_increase_count".
idtac "Advanced".
idtac "Possible points: 3".
check_type @NatList.remove_does_not_increase_count (
(forall s : NatList.bag,
@eq bool
(leb (NatList.count 0 (NatList.remove_one 0 s)) (NatList.count 0 s)) true)).
idtac "Assumptions:".
Abort.
Print Assumptions NatList.remove_does_not_increase_count.
Goal True.
idtac " ".
idtac "------------------- involution_injective --------------------".
idtac " ".
idtac "#> NatList.involution_injective".
idtac "Advanced".
idtac "Possible points: 3".
check_type @NatList.involution_injective (
(forall (f : forall _ : nat, nat) (_ : forall n : nat, @eq nat n (f (f n)))
(n1 n2 : nat) (_ : @eq nat (f n1) (f n2)),
@eq nat n1 n2)).
idtac "Assumptions:".
Abort.
Print Assumptions NatList.involution_injective.
Goal True.
idtac " ".
idtac "------------------- rev_injective --------------------".
idtac " ".
idtac "#> NatList.rev_injective".
idtac "Advanced".
idtac "Possible points: 2".
check_type @NatList.rev_injective (
(forall (l1 l2 : NatList.natlist)
(_ : @eq NatList.natlist (NatList.rev l1) (NatList.rev l2)),
@eq NatList.natlist l1 l2)).
idtac "Assumptions:".
Abort.
Print Assumptions NatList.rev_injective.
Goal True.
idtac " ".
idtac "------------------- hd_error --------------------".
idtac " ".
idtac "#> NatList.test_hd_error1".
idtac "Possible points: 1".
check_type @NatList.test_hd_error1 (
(@eq NatList.natoption (NatList.hd_error NatList.nil) NatList.None)).
idtac "Assumptions:".
Abort.
Print Assumptions NatList.test_hd_error1.
Goal True.
idtac " ".
idtac "#> NatList.test_hd_error2".
idtac "Possible points: 1".
check_type @NatList.test_hd_error2 (
(@eq NatList.natoption (NatList.hd_error (NatList.cons 1 NatList.nil))
(NatList.Some 1))).
idtac "Assumptions:".
Abort.
Print Assumptions NatList.test_hd_error2.
Goal True.
idtac " ".
idtac "------------------- eqb_id_refl --------------------".
idtac " ".
idtac "#> eqb_id_refl".
idtac "Possible points: 1".
check_type @eqb_id_refl ((forall x : id, @eq bool (eqb_id x x) true)).
idtac "Assumptions:".
Abort.
Print Assumptions eqb_id_refl.
Goal True.
idtac " ".
idtac "------------------- update_eq --------------------".
idtac " ".
idtac "#> PartialMap.update_eq".
idtac "Possible points: 1".
check_type @PartialMap.update_eq (
(forall (d : PartialMap.partial_map) (x : id) (v : nat),
@eq NatList.natoption (PartialMap.find x (PartialMap.update d x v))
(NatList.Some v))).
idtac "Assumptions:".
Abort.
Print Assumptions PartialMap.update_eq.
Goal True.
idtac " ".
idtac "------------------- update_neq --------------------".
idtac " ".
idtac "#> PartialMap.update_neq".
idtac "Possible points: 1".
check_type @PartialMap.update_neq (
(forall (d : PartialMap.partial_map) (x y : id) (o : nat)
(_ : @eq bool (eqb_id x y) false),
@eq NatList.natoption (PartialMap.find x (PartialMap.update d y o))
(PartialMap.find x d))).
idtac "Assumptions:".
Abort.
Print Assumptions PartialMap.update_neq.
Goal True.
idtac " ".
idtac " ".
idtac "Max points - standard: 17".
idtac "Max points - advanced: 28".
idtac "".
idtac "Allowed Axioms:".
idtac "functional_extensionality".
idtac "FunctionalExtensionality.functional_extensionality_dep".
idtac "plus_le".
idtac "le_trans".
idtac "le_plus_l".
idtac "add_le_cases".
idtac "Sn_le_Sm__n_le_m".
idtac "O_le_n".
idtac "".
idtac "".
idtac "********** Summary **********".
idtac "".
idtac "Below is a summary of the automatically graded exercises that are incomplete.".
idtac "".
idtac "The output for each exercise can be any of the following:".
idtac " - 'Closed under the global context', if it is complete".
idtac " - 'MANUAL', if it is manually graded".
idtac " - A list of pending axioms, containing unproven assumptions. In this case".
idtac " the exercise is considered complete, if the axioms are all allowed.".
idtac "".
idtac "********** Standard **********".
idtac "---------- NatList.snd_fst_is_swap ---------".
Print Assumptions NatList.snd_fst_is_swap.
idtac "---------- NatList.test_nonzeros ---------".
Print Assumptions NatList.test_nonzeros.
idtac "---------- NatList.test_oddmembers ---------".
Print Assumptions NatList.test_oddmembers.
idtac "---------- NatList.test_countoddmembers2 ---------".
Print Assumptions NatList.test_countoddmembers2.
idtac "---------- NatList.test_countoddmembers3 ---------".
Print Assumptions NatList.test_countoddmembers3.
idtac "---------- NatList.test_count2 ---------".
Print Assumptions NatList.test_count2.
idtac "---------- NatList.test_sum1 ---------".
Print Assumptions NatList.test_sum1.
idtac "---------- NatList.test_add1 ---------".
Print Assumptions NatList.test_add1.
idtac "---------- NatList.test_add2 ---------".
Print Assumptions NatList.test_add2.
idtac "---------- NatList.test_member1 ---------".
Print Assumptions NatList.test_member1.
idtac "---------- NatList.test_member2 ---------".
Print Assumptions NatList.test_member2.
idtac "---------- NatList.app_nil_r ---------".
Print Assumptions NatList.app_nil_r.
idtac "---------- NatList.rev_app_distr ---------".
Print Assumptions NatList.rev_app_distr.
idtac "---------- NatList.rev_involutive ---------".
Print Assumptions NatList.rev_involutive.
idtac "---------- NatList.app_assoc4 ---------".
Print Assumptions NatList.app_assoc4.
idtac "---------- NatList.nonzeros_app ---------".
Print Assumptions NatList.nonzeros_app.
idtac "---------- NatList.eqblist_refl ---------".
Print Assumptions NatList.eqblist_refl.
idtac "---------- NatList.count_member_nonzero ---------".
Print Assumptions NatList.count_member_nonzero.
idtac "---------- NatList.test_hd_error1 ---------".
Print Assumptions NatList.test_hd_error1.
idtac "---------- NatList.test_hd_error2 ---------".
Print Assumptions NatList.test_hd_error2.
idtac "---------- eqb_id_refl ---------".
Print Assumptions eqb_id_refl.
idtac "---------- PartialMap.update_eq ---------".
Print Assumptions PartialMap.update_eq.
idtac "---------- PartialMap.update_neq ---------".
Print Assumptions PartialMap.update_neq.
idtac "".
idtac "********** Advanced **********".
idtac "---------- NatList.test_alternate1 ---------".
Print Assumptions NatList.test_alternate1.
idtac "---------- NatList.test_alternate2 ---------".
Print Assumptions NatList.test_alternate2.
idtac "---------- NatList.test_alternate4 ---------".
Print Assumptions NatList.test_alternate4.
idtac "---------- NatList.remove_does_not_increase_count ---------".
Print Assumptions NatList.remove_does_not_increase_count.
idtac "---------- NatList.involution_injective ---------".
Print Assumptions NatList.involution_injective.
idtac "---------- NatList.rev_injective ---------".
Print Assumptions NatList.rev_injective.
Abort.
(* 2026-01-07 13:18 *)
(* 2026-01-07 13:18 *)

1805
Logic.v Normal file

File diff suppressed because it is too large Load Diff

423
LogicTest.v Normal file
View File

@@ -0,0 +1,423 @@
Set Warnings "-notation-overridden,-parsing".
From Stdlib Require Export String.
From LF Require Import Logic.
Parameter MISSING: Type.
(* Helper tactics shared by the generated grading scripts. *)
Module Check.
(* [check_type A B]: report whether the (possibly MISSING) term [A]
   has exactly the expected type [B], printing "Type: ok" or an error. *)
Ltac check_type A B :=
match type of A with
| context[MISSING] => idtac "Missing:" A
| ?T => first [unify T B; idtac "Type: ok" | idtac "Type: wrong - should be (" B ")"]
end.
(* [print_manual_grade A]: print the score and comment recorded for a
   manually graded exercise, or "Ungraded" when none is recorded. *)
Ltac print_manual_grade A :=
match eval compute in A with
| Some (_ ?S ?C) =>
idtac "Score:" S;
match eval compute in C with
| ""%string => idtac "Comment: None"
| _ => idtac "Comment:" C
end
| None =>
idtac "Score: Ungraded";
idtac "Comment: None"
end.
End Check.
From LF Require Import Logic.
Import Check.
Goal True.
idtac "------------------- plus_is_O --------------------".
idtac " ".
idtac "#> plus_is_O".
idtac "Possible points: 2".
check_type @plus_is_O (
(forall (n m : nat) (_ : @eq nat (Nat.add n m) 0),
and (@eq nat n 0) (@eq nat m 0))).
idtac "Assumptions:".
Abort.
Print Assumptions plus_is_O.
Goal True.
idtac " ".
idtac "------------------- and_assoc --------------------".
idtac " ".
idtac "#> and_assoc".
idtac "Possible points: 1".
check_type @and_assoc ((forall (P Q R : Prop) (_ : and P (and Q R)), and (and P Q) R)).
idtac "Assumptions:".
Abort.
Print Assumptions and_assoc.
Goal True.
idtac " ".
idtac "------------------- mult_is_O --------------------".
idtac " ".
idtac "#> mult_is_O".
idtac "Possible points: 2".
check_type @mult_is_O (
(forall (n m : nat) (_ : @eq nat (Nat.mul n m) 0),
or (@eq nat n 0) (@eq nat m 0))).
idtac "Assumptions:".
Abort.
Print Assumptions mult_is_O.
Goal True.
idtac " ".
idtac "------------------- or_commut --------------------".
idtac " ".
idtac "#> or_commut".
idtac "Possible points: 1".
check_type @or_commut ((forall (P Q : Prop) (_ : or P Q), or Q P)).
idtac "Assumptions:".
Abort.
Print Assumptions or_commut.
Goal True.
idtac " ".
idtac "------------------- contrapositive --------------------".
idtac " ".
idtac "#> contrapositive".
idtac "Possible points: 1".
check_type @contrapositive (
(forall (P Q : Prop) (_ : forall _ : P, Q) (_ : not Q), not P)).
idtac "Assumptions:".
Abort.
Print Assumptions contrapositive.
Goal True.
idtac " ".
idtac "------------------- not_both_true_and_false --------------------".
idtac " ".
idtac "#> not_both_true_and_false".
idtac "Possible points: 1".
check_type @not_both_true_and_false ((forall P : Prop, not (and P (not P)))).
idtac "Assumptions:".
Abort.
Print Assumptions not_both_true_and_false.
Goal True.
idtac " ".
idtac "------------------- not_PNP_informal --------------------".
idtac " ".
idtac "#> Manually graded: not_PNP_informal".
idtac "Advanced".
idtac "Possible points: 1".
print_manual_grade manual_grade_for_not_PNP_informal.
idtac " ".
idtac "------------------- de_morgan_not_or --------------------".
idtac " ".
idtac "#> de_morgan_not_or".
idtac "Possible points: 2".
check_type @de_morgan_not_or (
(forall (P Q : Prop) (_ : not (or P Q)), and (not P) (not Q))).
idtac "Assumptions:".
Abort.
Print Assumptions de_morgan_not_or.
Goal True.
idtac " ".
idtac "------------------- or_distributes_over_and --------------------".
idtac " ".
idtac "#> or_distributes_over_and".
idtac "Possible points: 3".
check_type @or_distributes_over_and (
(forall P Q R : Prop, iff (or P (and Q R)) (and (or P Q) (or P R)))).
idtac "Assumptions:".
Abort.
Print Assumptions or_distributes_over_and.
Goal True.
idtac " ".
idtac "------------------- dist_not_exists --------------------".
idtac " ".
idtac "#> dist_not_exists".
idtac "Possible points: 1".
check_type @dist_not_exists (
(forall (X : Type) (P : forall _ : X, Prop) (_ : forall x : X, P x),
not (@ex X (fun x : X => not (P x))))).
idtac "Assumptions:".
Abort.
Print Assumptions dist_not_exists.
Goal True.
idtac " ".
idtac "------------------- dist_exists_or --------------------".
idtac " ".
idtac "#> dist_exists_or".
idtac "Possible points: 2".
check_type @dist_exists_or (
(forall (X : Type) (P Q : forall _ : X, Prop),
iff (@ex X (fun x : X => or (P x) (Q x)))
(or (@ex X (fun x : X => P x)) (@ex X (fun x : X => Q x))))).
idtac "Assumptions:".
Abort.
Print Assumptions dist_exists_or.
Goal True.
idtac " ".
idtac "------------------- In_map_iff --------------------".
idtac " ".
idtac "#> In_map_iff".
idtac "Possible points: 2".
check_type @In_map_iff (
(forall (A B : Type) (f : forall _ : A, B) (l : list A) (y : B),
iff (@In B y (@map A B f l))
(@ex A (fun x : A => and (@eq B (f x) y) (@In A x l))))).
idtac "Assumptions:".
Abort.
Print Assumptions In_map_iff.
Goal True.
idtac " ".
idtac "------------------- In_app_iff --------------------".
idtac " ".
idtac "#> In_app_iff".
idtac "Possible points: 2".
check_type @In_app_iff (
(forall (A : Type) (l l' : list A) (a : A),
iff (@In A a (@app A l l')) (or (@In A a l) (@In A a l')))).
idtac "Assumptions:".
Abort.
Print Assumptions In_app_iff.
Goal True.
idtac " ".
idtac "------------------- All --------------------".
idtac " ".
idtac "#> All_In".
idtac "Possible points: 3".
check_type @All_In (
(forall (T : Type) (P : forall _ : T, Prop) (l : list T),
iff (forall (x : T) (_ : @In T x l), P x) (@All T P l))).
idtac "Assumptions:".
Abort.
Print Assumptions All_In.
Goal True.
idtac " ".
idtac "------------------- even_double_conv --------------------".
idtac " ".
idtac "#> even_double_conv".
idtac "Possible points: 3".
check_type @even_double_conv (
(forall n : nat,
@ex nat
(fun k : nat => @eq nat n (if even n then double k else S (double k))))).
idtac "Assumptions:".
Abort.
Print Assumptions even_double_conv.
Goal True.
idtac " ".
idtac "------------------- logical_connectives --------------------".
idtac " ".
idtac "#> andb_true_iff".
idtac "Possible points: 1".
check_type @andb_true_iff (
(forall b1 b2 : bool,
iff (@eq bool (andb b1 b2) true) (and (@eq bool b1 true) (@eq bool b2 true)))).
idtac "Assumptions:".
Abort.
Print Assumptions andb_true_iff.
Goal True.
idtac " ".
idtac "#> orb_true_iff".
idtac "Possible points: 1".
check_type @orb_true_iff (
(forall b1 b2 : bool,
iff (@eq bool (orb b1 b2) true) (or (@eq bool b1 true) (@eq bool b2 true)))).
idtac "Assumptions:".
Abort.
Print Assumptions orb_true_iff.
Goal True.
idtac " ".
idtac "------------------- eqb_neq --------------------".
idtac " ".
idtac "#> eqb_neq".
idtac "Possible points: 1".
check_type @eqb_neq (
(forall x y : nat, iff (@eq bool (eqb x y) false) (not (@eq nat x y)))).
idtac "Assumptions:".
Abort.
Print Assumptions eqb_neq.
Goal True.
idtac " ".
idtac "------------------- eqb_list --------------------".
idtac " ".
idtac "#> eqb_list_true_iff".
idtac "Possible points: 3".
check_type @eqb_list_true_iff (
(forall (A : Type) (eqb : forall (_ : A) (_ : A), bool)
(_ : forall a1 a2 : A, iff (@eq bool (eqb a1 a2) true) (@eq A a1 a2))
(l1 l2 : list A),
iff (@eq bool (@eqb_list A eqb l1 l2) true) (@eq (list A) l1 l2))).
idtac "Assumptions:".
Abort.
Print Assumptions eqb_list_true_iff.
Goal True.
idtac " ".
idtac "------------------- All_forallb --------------------".
idtac " ".
idtac "#> forallb_true_iff".
idtac "Possible points: 2".
check_type @forallb_true_iff (
(forall (X : Type) (test : forall _ : X, bool) (l : list X),
iff (@eq bool (@forallb X test l) true)
(@All X (fun x : X => @eq bool (test x) true) l))).
idtac "Assumptions:".
Abort.
Print Assumptions forallb_true_iff.
Goal True.
idtac " ".
idtac "------------------- tr_rev_correct --------------------".
idtac " ".
idtac "#> tr_rev_correct".
idtac "Possible points: 6".
check_type @tr_rev_correct (
(forall X : Type, @eq (forall _ : list X, list X) (@tr_rev X) (@rev X))).
idtac "Assumptions:".
Abort.
Print Assumptions tr_rev_correct.
Goal True.
idtac " ".
idtac "------------------- excluded_middle_irrefutable --------------------".
idtac " ".
idtac "#> excluded_middle_irrefutable".
idtac "Possible points: 3".
check_type @excluded_middle_irrefutable ((forall P : Prop, not (not (or P (not P))))).
idtac "Assumptions:".
Abort.
Print Assumptions excluded_middle_irrefutable.
Goal True.
idtac " ".
idtac "------------------- not_exists_dist --------------------".
idtac " ".
idtac "#> not_exists_dist".
idtac "Advanced".
idtac "Possible points: 3".
check_type @not_exists_dist (
(forall (_ : excluded_middle) (X : Type) (P : forall _ : X, Prop)
(_ : not (@ex X (fun x : X => not (P x)))) (x : X),
P x)).
idtac "Assumptions:".
Abort.
Print Assumptions not_exists_dist.
Goal True.
idtac " ".
idtac " ".
idtac "Max points - standard: 43".
idtac "Max points - advanced: 47".
idtac "".
idtac "Allowed Axioms:".
idtac "functional_extensionality".
idtac "FunctionalExtensionality.functional_extensionality_dep".
idtac "plus_le".
idtac "le_trans".
idtac "le_plus_l".
idtac "add_le_cases".
idtac "Sn_le_Sm__n_le_m".
idtac "O_le_n".
idtac "".
idtac "".
idtac "********** Summary **********".
idtac "".
idtac "Below is a summary of the automatically graded exercises that are incomplete.".
idtac "".
idtac "The output for each exercise can be any of the following:".
idtac " - 'Closed under the global context', if it is complete".
idtac " - 'MANUAL', if it is manually graded".
idtac " - A list of pending axioms, containing unproven assumptions. In this case".
idtac " the exercise is considered complete, if the axioms are all allowed.".
idtac "".
idtac "********** Standard **********".
idtac "---------- plus_is_O ---------".
Print Assumptions plus_is_O.
idtac "---------- and_assoc ---------".
Print Assumptions and_assoc.
idtac "---------- mult_is_O ---------".
Print Assumptions mult_is_O.
idtac "---------- or_commut ---------".
Print Assumptions or_commut.
idtac "---------- contrapositive ---------".
Print Assumptions contrapositive.
idtac "---------- not_both_true_and_false ---------".
Print Assumptions not_both_true_and_false.
idtac "---------- de_morgan_not_or ---------".
Print Assumptions de_morgan_not_or.
idtac "---------- or_distributes_over_and ---------".
Print Assumptions or_distributes_over_and.
idtac "---------- dist_not_exists ---------".
Print Assumptions dist_not_exists.
idtac "---------- dist_exists_or ---------".
Print Assumptions dist_exists_or.
idtac "---------- In_map_iff ---------".
Print Assumptions In_map_iff.
idtac "---------- In_app_iff ---------".
Print Assumptions In_app_iff.
idtac "---------- All_In ---------".
Print Assumptions All_In.
idtac "---------- even_double_conv ---------".
Print Assumptions even_double_conv.
idtac "---------- andb_true_iff ---------".
Print Assumptions andb_true_iff.
idtac "---------- orb_true_iff ---------".
Print Assumptions orb_true_iff.
idtac "---------- eqb_neq ---------".
Print Assumptions eqb_neq.
idtac "---------- eqb_list_true_iff ---------".
Print Assumptions eqb_list_true_iff.
idtac "---------- forallb_true_iff ---------".
Print Assumptions forallb_true_iff.
idtac "---------- tr_rev_correct ---------".
Print Assumptions tr_rev_correct.
idtac "---------- excluded_middle_irrefutable ---------".
Print Assumptions excluded_middle_irrefutable.
idtac "".
idtac "********** Advanced **********".
idtac "---------- not_PNP_informal ---------".
idtac "MANUAL".
idtac "---------- not_exists_dist ---------".
Print Assumptions not_exists_dist.
Abort.
(* 2026-01-07 13:18 *)
(* 2026-01-07 13:18 *)

17
Makefile Normal file
View File

@@ -0,0 +1,17 @@
# Build configuration for the Software Foundations (LF) Coq sources.
# NOTE(review): make requires each recipe line below its target to begin
# with a real TAB character; the indentation was lost in this view, so
# confirm the tabs survive in the checked-in file.
COQMFFLAGS := -Q . LF
# All .v files handed to `rocq makefile`: the chapters followed by their
# generated *Test.v grading scripts.
ALLVFILES := Preface.v Basics.v Induction.v Lists.v Poly.v Tactics.v Logic.v IndProp.v Maps.v ProofObjects.v IndPrinciples.v Rel.v Imp.v ImpParser.v ImpCEvalFun.v Extraction.v Auto.v AltAuto.v Postscript.v Bib.v PrefaceTest.v BasicsTest.v InductionTest.v ListsTest.v PolyTest.v TacticsTest.v LogicTest.v IndPropTest.v MapsTest.v ProofObjectsTest.v IndPrinciplesTest.v RelTest.v ImpTest.v ImpParserTest.v ImpCEvalFunTest.v ExtractionTest.v AutoTest.v AltAutoTest.v PostscriptTest.v BibTest.v
# Default target: delegate the real build to the generated Makefile.coq.
build: Makefile.coq
$(MAKE) -f Makefile.coq
# Double-colon rule so other makefiles may extend `clean`.
clean::
if [ -e Makefile.coq ]; then $(MAKE) -f Makefile.coq cleanall; fi
$(RM) $(wildcard Makefile.coq Makefile.coq.conf) imp.ml imp.mli imp1.ml imp1.mli imp2.ml imp2.mli
# Regenerate the Coq makefile from the file list above.
Makefile.coq:
rocq makefile $(COQMFFLAGS) -o Makefile.coq $(ALLVFILES)
# Pull in the generated rules when present; the leading '-' ignores a
# missing file on the first run.
-include Makefile.coq
.PHONY: build clean

380
Maps.v Normal file
View File

@@ -0,0 +1,380 @@
(** * Maps: Total and Partial Maps *)
(** _Maps_ (or _dictionaries_) are ubiquitous data structures both in
ordinary programming and in the theory of programming languages;
we're going to need them in many places in the coming chapters.
They also make a nice case study using ideas we've seen in
previous chapters, including building data structures out of
higher-order functions (from [Basics] and [Poly]) and the use of
reflection to streamline proofs (from [IndProp]).
We'll define two flavors of maps: _total_ maps, which include a
"default" element to be returned when a key being looked up
doesn't exist, and _partial_ maps, which instead return an
[option] to indicate success or failure. Partial maps are defined
in terms of total maps, using [None] as the default element. *)
(* ################################################################# *)
(** * The Standard Library *)
(** One small digression before we begin...
Unlike the chapters we have seen so far, this one does not
[Require Import] the chapter before it (or, transitively, all the
earlier chapters). Instead, in this chapter and from now on,
we're going to import the definitions and theorems we need
directly from Rocq's standard library. You should not notice much
difference, though, because we've been careful to name our own
definitions and theorems the same as their counterparts in the
standard library, wherever they overlap. *)
From Stdlib Require Import Arith.
From Stdlib Require Import Bool.
From Stdlib Require Export Strings.String.
From Stdlib Require Import FunctionalExtensionality.
From Stdlib Require Import List.
Import ListNotations.
(** Documentation for the standard library can be found at
https://rocq-prover.org/doc/V9.0.0/stdlib/index.html.
The [Search] command is a good way to look for theorems involving
objects of specific types. See [Lists] for a reminder of how
to use it. *)
(** If you want to find out how or where a notation is defined, the
[Locate] command is useful. For example, where is the natural
addition operation defined in the standard library? *)
Locate "+".
(** (There are several uses of the [+] notation, but only one for
naturals.) *)
Print Init.Nat.add.
(** We'll see some more uses of [Locate] in the [Imp] chapter. *)
(* ################################################################# *)
(** * Identifiers *)
(** To define maps, we first need a type for the keys that we will use
to index into our maps. In [Lists.v] we introduced a fresh type
[id] for a similar purpose; here and for the rest of _Software
Foundations_ we will use the [string] type from Rocq's standard
library. *)
(** To compare strings, we use the function [eqb_refl] from the [String]
module in the standard library. *)
Check String.eqb_refl :
forall x : string, (x =? x)%string = true.
(** We will often use a few basic properties of string equality... *)
Check String.eqb_eq :
forall n m : string, (n =? m)%string = true <-> n = m.
Check String.eqb_neq :
forall n m : string, (n =? m)%string = false <-> n <> m.
Check String.eqb_spec :
forall x y : string, reflect (x = y) (String.eqb x y).
(* ################################################################# *)
(** * Total Maps *)
(** Our main job in this chapter will be to build a definition of
partial maps that is similar in behavior to the one we saw in the
[Lists] chapter, plus accompanying lemmas about its behavior.
This time around, though, we're going to use _functions_, rather
than lists of key-value pairs, to build maps. The advantage of
this representation is that it offers a more "extensional" view of
maps: two maps that respond to queries in the same way will be
represented as exactly the same function, rather than just as
"equivalent" list structures. This simplifies proofs that use
maps. *)
(** We build up to partial maps in two steps. First, we define a type
of _total maps_ that return a default value when we look up a key
that is not present in the map. *)
(* A total map over [A] is nothing but a lookup function on [string] keys. *)
Definition total_map (A : Type) : Type := string -> A.
(** Intuitively, a total map over an element type [A] is just a
function that can be used to look up [string]s, yielding [A]s. *)
(** The function [t_empty] yields an empty total map, given a default
element; this map always returns the default element when applied
to any string. *)
(* The empty total map with default [v]: every key is sent to [v]. *)
Definition t_empty {A : Type} (v : A) : total_map A :=
  fun _ => v.
(** More interesting is the map-updating function, which (as always)
takes a map [m], a key [x], and a value [v] and returns a new map
that takes [x] to [v] and takes every other key to whatever [m]
does. The novelty here is that we achieve this effect by wrapping
a new function around the old one. *)
(* [t_update m x v]: the map that answers [v] at key [x] and defers to
   [m] on every other key, built by wrapping a new function around [m]. *)
Definition t_update {A : Type} (m : total_map A)
                    (x : string) (v : A) : total_map A :=
  fun k => if String.eqb x k then v else m k.
(** This definition is a nice example of higher-order programming:
[t_update] takes a _function_ [m] and yields a new function
[fun x' => ...] that behaves like the desired map. *)
(** For example, we can build a map taking [string]s to [bool]s, where
["foo"] and ["bar"] are mapped to [true] and every other key is
mapped to [false], like this: *)
(* A sample map: ["foo"] and ["bar"] are [true]; every other key defaults
   to [false]. *)
Definition examplemap : total_map bool :=
  t_update (t_update (t_empty false) "foo" true) "bar" true.
(** Next, let's introduce some notations to facilitate working with
maps. *)
(** First, we use the following notation to represent an empty total
map with a default value. *)
(* Notation: [__ !-> v] denotes the empty total map with default [v]. *)
Notation "'__' '!->' v" := (t_empty v)
(at level 100, right associativity).
(* An empty boolean map written with the new notation.  (The original
   line read [( false)], which defines a bare [bool] — the [__ !->]
   application had been lost; compare [examplemap'] below.) *)
Example example_empty := (__ !-> false).
(** We next introduce a symbolic notation for extending an existing
map with a new binding. *)
(* Notation: [x !-> v ; m] overrides key [x] with value [v] in map [m]. *)
Notation "x '!->' v ';' m" := (t_update m x v)
(at level 100, v constr at level 100, right associativity).
(** The [examplemap] above can now be defined as follows: *)
(* Same map as [examplemap], in notation form; the notation is
   right-associative, so the rightmost (empty) map is the innermost. *)
Definition examplemap' :=
( "bar" !-> true;
"foo" !-> true;
__ !-> false
).
(** This completes the definition of total maps. Note that we
don't need to define a [find] operation on this representation of
maps because it is just function application! *)
(* Lookup on a total map is just function application, so each of these
   sanity checks is provable by pure computation. *)
Example update_example1 : examplemap' "baz" = false.
Proof. reflexivity. Qed.
Example update_example2 : examplemap' "foo" = true.
Proof. reflexivity. Qed.
Example update_example3 : examplemap' "quux" = false.
Proof. reflexivity. Qed.
Example update_example4 : examplemap' "bar" = true.
Proof. reflexivity. Qed.
(** When we use maps in later chapters, we'll need several fundamental
facts about how they behave. *)
(** Even if you don't work the following exercises, make sure
you thoroughly understand the statements of the lemmas! *)
(** (Some of the proofs require the functional extensionality axiom,
which was discussed in the [Logic] chapter.) *)
(** **** Exercise: 1 star, standard, optional (t_apply_empty)
First, the empty map returns its default element for all keys: *)
Lemma t_apply_empty : forall (A : Type) (x : string) (v : A),
    (__ !-> v) x = v.
Proof.
  (* [t_empty v] is [fun _ => v], so application computes directly. *)
  intros A x v. reflexivity.
Qed.
(** [] *)
(** **** Exercise: 2 stars, standard, optional (t_update_eq)
Next, if we update a map [m] at a key [x] with a new value [v]
and then look up [x] in the map resulting from the [update], we
get back [v]: *)
Lemma t_update_eq : forall (A : Type) (m : total_map A) x v,
    (x !-> v ; m) x = v.
Proof.
  (* [t_update] compares keys with [String.eqb]; on the same key the
     test is [true] by [String.eqb_refl]. *)
  intros A m x v. unfold t_update.
  rewrite String.eqb_refl. reflexivity.
Qed.
(** [] *)
(** **** Exercise: 2 stars, standard, optional (t_update_neq)
On the other hand, if we update a map [m] at a key [x1] and then
look up a _different_ key [x2] in the resulting map, we get the
same result that [m] would have given: *)
Theorem t_update_neq : forall (A : Type) (m : total_map A) x1 x2 v,
    x1 <> x2 ->
    (x1 !-> v ; m) x2 = m x2.
Proof.
  intros A m x1 x2 v H.
  unfold t_update.
  (* Turn the disequality into the boolean fact the [if] scrutinizes. *)
  rewrite <- String.eqb_neq in H. rewrite H. reflexivity.
Qed.
(** [] *)
(** **** Exercise: 2 stars, standard, optional (t_update_shadow)
If we update a map [m] at a key [x] with a value [v1] and then
update again with the same key [x] and another value [v2], the
resulting map behaves the same (gives the same result when applied
to any key) as the simpler map obtained by performing just
the second [update] on [m]: *)
Lemma t_update_shadow : forall (A : Type) (m : total_map A) x v1 v2,
    (x !-> v2 ; x !-> v1 ; m) = (x !-> v2 ; m).
Proof.
  intros A m x v1 v2.
  (* Equality of functions: use functional extensionality, then both
     sides reduce to the same [if] on a single key comparison. *)
  apply functional_extensionality. intros y.
  unfold t_update.
  destruct (String.eqb x y); reflexivity.
Qed.
(** [] *)
(** **** Exercise: 2 stars, standard (t_update_same)
Given [string]s [x1] and [x2], we can use the tactic
[destruct (eqb_spec x1 x2)] to simultaneously perform case
analysis on the result of [String.eqb x1 x2] and generate
hypotheses about the equality (in the sense of [=]) of [x1] and
[x2]. With the example in chapter [IndProp] as a template,
use [String.eqb_spec] to prove the following theorem, which states
that if we update a map to assign key [x] the same value as it
already has in [m], then the result is equal to [m]: *)
Theorem t_update_same : forall (A : Type) (m : total_map A) x,
    (x !-> m x ; m) = m.
Proof.
  intros A m x.
  apply functional_extensionality. intros y.
  unfold t_update.
  (* Case on whether the queried key [y] equals the updated key [x],
     using the reflection lemma suggested in the exercise statement. *)
  destruct (String.eqb_spec x y) as [Heq | Hneq].
  - (* x = y: both sides compute the value stored at that key. *)
    rewrite Heq. reflexivity.
  - (* x <> y: the update is transparent. *)
    reflexivity.
Qed.
(** [] *)
(** **** Exercise: 3 stars, standard, especially useful (t_update_permute)
Similarly, use [String.eqb_spec] to prove one final property of
the [update] function: If we update a map [m] at two distinct
keys, it doesn't matter in which order we do the updates. *)
Theorem t_update_permute : forall (A : Type) (m : total_map A)
                                  v1 v2 x1 x2,
    x2 <> x1 ->
    (x1 !-> v1 ; x2 !-> v2 ; m)
    =
    (x2 !-> v2 ; x1 !-> v1 ; m).
Proof.
  intros A m v1 v2 x1 x2 Hneq.
  apply functional_extensionality. intros y.
  unfold t_update.
  (* Case on which of the two keys (if either) [y] matches.  The only
     non-trivial case — matching both — contradicts [Hneq], which
     [congruence] discharges. *)
  destruct (String.eqb_spec x1 y) as [H1 | H1];
    destruct (String.eqb_spec x2 y) as [H2 | H2];
    try reflexivity.
  congruence.
Qed.
(** [] *)
(* ################################################################# *)
(** * Partial maps *)
(** Lastly, we define _partial maps_ on top of total maps. A partial
map with elements of type [A] is simply a total map with elements
of type [option A] and default element [None]. *)
(* A partial map over [A] is a total map into [option A]; keys that are
   "absent" are exactly those mapped to the default [None]. *)
Definition partial_map (A : Type) := total_map (option A).
Definition empty {A : Type} : partial_map A :=
t_empty None.
(* [update m x v]: bind key [x] to [v] (i.e., to [Some v]) in [m].
   Written directly in terms of [t_update]; definitionally identical to
   the notation form [(x !-> Some v ; m)]. *)
Definition update {A : Type} (m : partial_map A)
                  (x : string) (v : A) : partial_map A :=
  t_update m x (Some v).
(** We introduce a similar notation for partial maps: *)
(* Notation: [x |-> v ; m] binds key [x] to [v] in partial map [m]. *)
Notation "x '|->' v ';' m" := (update m x v)
(at level 0, x constr, v at level 200, right associativity).
(** We can also hide the last case when it is empty. *)
(* Notation: [x |-> v] is the one-entry partial map over [empty]. *)
Notation "x '|->' v" := (update empty x v)
(at level 0, x constr, v at level 200).
Definition examplepmap :=
("Church" |-> true ; "Turing" |-> false).
(** We now straightforwardly lift all of the basic lemmas about total
maps to partial maps. *)
Lemma apply_empty : forall (A : Type) (x : string),
@empty A x = None.
Proof.
  (* [empty] is [t_empty None], so this is exactly [t_apply_empty]. *)
  intros A x. unfold empty. apply t_apply_empty.
Qed.
Lemma update_eq : forall (A : Type) (m : partial_map A) x v,
(x |-> v ; m) x = Some v.
Proof.
  (* Immediate from the total-map law [t_update_eq]. *)
  intros A m x v. unfold update. apply t_update_eq.
Qed.
(** The [update_eq] lemma is used very often in proofs. Adding it to
Rocq's global "hint database" allows proof-automation tactics such
as [auto] to find it. *)
#[global] Hint Resolve update_eq : core.
Theorem update_neq : forall (A : Type) (m : partial_map A) x1 x2 v,
x2 <> x1 ->
(x2 |-> v ; m) x1 = m x1.
Proof.
  (* Lift [t_update_neq] through the definition of [update]. *)
  intros A m x1 x2 v Hneq. unfold update.
  apply t_update_neq. exact Hneq.
Qed.
Lemma update_shadow : forall (A : Type) (m : partial_map A) x v1 v2,
(x |-> v2 ; x |-> v1 ; m) = (x |-> v2 ; m).
Proof.
  (* Both updates hit the same key, so [t_update_shadow] applies. *)
  intros A m x v1 v2. unfold update. apply t_update_shadow.
Qed.
Theorem update_same : forall (A : Type) (m : partial_map A) x v,
m x = Some v ->
(x |-> v ; m) = m.
Proof.
  (* Rewrite [Some v] back to [m x], then reuse [t_update_same]. *)
  intros A m x v Hlookup. unfold update.
  rewrite <- Hlookup. apply t_update_same.
Qed.
Theorem update_permute : forall (A : Type) (m : partial_map A)
x1 x2 v1 v2,
x2 <> x1 ->
(x1 |-> v1 ; x2 |-> v2 ; m) = (x2 |-> v2 ; x1 |-> v1 ; m).
Proof.
  (* Updates at distinct keys commute, by [t_update_permute]. *)
  intros A m x1 x2 v1 v2 Hneq. unfold update.
  apply t_update_permute. exact Hneq.
Qed.
(** One last thing: For partial maps, it's convenient to introduce a
notion of map inclusion, stating that all the entries in one map
are also present in another: *)
(* [includedin m m']: every binding present in [m] also appears, with
   the same value, in [m']. *)
Definition includedin {A : Type} (m m' : partial_map A) : Prop :=
  forall (k : string) (v : A), m k = Some v -> m' k = Some v.
(** We can then show that map update preserves map inclusion, that is: *)
Lemma includedin_update : forall (A : Type) (m m' : partial_map A)
(x : string) (vx : A),
includedin m m' ->
includedin (x |-> vx ; m) (x |-> vx ; m').
Proof.
  unfold includedin.
  intros A m m' x vx Hincl y vy Hlook.
  (* Does the queried key [y] coincide with the updated key [x]? *)
  destruct (String.eqb_spec x y) as [Heq | Hneq].
  - (* Same key: both updated maps answer [Some vx]. *)
    subst y. rewrite update_eq in Hlook. rewrite update_eq. exact Hlook.
  - (* Different key: both updates are transparent; use the inclusion. *)
    rewrite update_neq in Hlook by exact Hneq.
    rewrite update_neq by exact Hneq.
    apply Hincl. exact Hlook.
Qed.
(** This property is quite useful for reasoning about languages with
variable binding -- e.g., the Simply Typed Lambda Calculus, which
we will see in _Programming Language Foundations_, where maps are
used to keep track of which program variables are defined in a
given scope. *)
(* 2026-01-07 13:18 *)

102
MapsTest.v Normal file
View File

@@ -0,0 +1,102 @@
Set Warnings "-notation-overridden,-parsing".
From Stdlib Require Export String.
From LF Require Import Maps.
Parameter MISSING: Type.
(* Helper tactics shared by the generated grading scripts. *)
Module Check.
(* [check_type A B]: report whether the (possibly MISSING) term [A]
   has exactly the expected type [B], printing "Type: ok" or an error. *)
Ltac check_type A B :=
match type of A with
| context[MISSING] => idtac "Missing:" A
| ?T => first [unify T B; idtac "Type: ok" | idtac "Type: wrong - should be (" B ")"]
end.
(* [print_manual_grade A]: print the score and comment recorded for a
   manually graded exercise, or "Ungraded" when none is recorded. *)
Ltac print_manual_grade A :=
match eval compute in A with
| Some (_ ?S ?C) =>
idtac "Score:" S;
match eval compute in C with
| ""%string => idtac "Comment: None"
| _ => idtac "Comment:" C
end
| None =>
idtac "Score: Ungraded";
idtac "Comment: None"
end.
End Check.
From LF Require Import Maps.
Import Check.
(* Grading driver for [Maps] — appears to be auto-generated (it shares
   its boilerplate with the other *Test.v files); avoid editing by hand.
   It type-checks each graded theorem and prints its pending axioms. *)
Goal True.
idtac "------------------- t_update_same --------------------".
idtac " ".
idtac "#> t_update_same".
idtac "Possible points: 2".
check_type @t_update_same (
(forall (A : Type) (m : total_map A) (x : string),
@eq (forall _ : string, A) (@t_update A m x (m x)) m)).
idtac "Assumptions:".
Abort.
Print Assumptions t_update_same.
Goal True.
idtac " ".
idtac "------------------- t_update_permute --------------------".
idtac " ".
idtac "#> t_update_permute".
idtac "Possible points: 3".
check_type @t_update_permute (
(forall (A : Type) (m : total_map A) (v1 v2 : A) (x1 x2 : string)
(_ : not (@eq string x2 x1)),
@eq (forall _ : string, A) (@t_update A (@t_update A m x2 v2) x1 v1)
(@t_update A (@t_update A m x1 v1) x2 v2))).
idtac "Assumptions:".
Abort.
Print Assumptions t_update_permute.
Goal True.
idtac " ".
idtac " ".
idtac "Max points - standard: 5".
idtac "Max points - advanced: 5".
idtac "".
idtac "Allowed Axioms:".
idtac "functional_extensionality".
idtac "FunctionalExtensionality.functional_extensionality_dep".
idtac "plus_le".
idtac "le_trans".
idtac "le_plus_l".
idtac "add_le_cases".
idtac "Sn_le_Sm__n_le_m".
idtac "O_le_n".
idtac "".
idtac "".
idtac "********** Summary **********".
idtac "".
idtac "Below is a summary of the automatically graded exercises that are incomplete.".
idtac "".
idtac "The output for each exercise can be any of the following:".
idtac " - 'Closed under the global context', if it is complete".
idtac " - 'MANUAL', if it is manually graded".
idtac " - A list of pending axioms, containing unproven assumptions. In this case".
idtac " the exercise is considered complete, if the axioms are all allowed.".
idtac "".
idtac "********** Standard **********".
idtac "---------- t_update_same ---------".
Print Assumptions t_update_same.
idtac "---------- t_update_permute ---------".
Print Assumptions t_update_permute.
idtac "".
idtac "********** Advanced **********".
Abort.
(* 2026-01-07 13:18 *)
(* 2026-01-07 13:18 *)

1246
Poly.v Normal file

File diff suppressed because it is too large Load Diff

552
PolyTest.v Normal file
View File

@@ -0,0 +1,552 @@
Set Warnings "-notation-overridden,-parsing".
From Stdlib Require Export String.
From LF Require Import Poly.
Parameter MISSING: Type.
(* Helper tactics shared by the auto-grader. *)
Module Check.
(* [check_type A B]: report whether [A]'s type matches the expected
   type [B].  Prints "Missing:" when the type still mentions the
   [MISSING] placeholder (the student has not filled in the
   definition), "Type: ok" when the type unifies with [B], and a
   "Type: wrong" message otherwise.  Always succeeds, so grading can
   continue past a failure. *)
Ltac check_type A B :=
match type of A with
| context[MISSING] => idtac "Missing:" A
| ?T => first [unify T B; idtac "Type: ok" | idtac "Type: wrong - should be (" B ")"]
end.
(* [print_manual_grade A]: print the score and comment recorded for a
   manually graded exercise.  [A] is expected to reduce to
   [Some (_ score comment)] once graded, or [None] while ungraded;
   an empty comment string is reported as "Comment: None". *)
Ltac print_manual_grade A :=
match eval compute in A with
| Some (_ ?S ?C) =>
idtac "Score:" S;
match eval compute in C with
| ""%string => idtac "Comment: None"
| _ => idtac "Comment:" C
end
| None =>
idtac "Score: Ungraded";
idtac "Comment: None"
end.
End Check.
From LF Require Import Poly.
Import Check.
(* Auto-generated grading script for Poly.v.
   Each exercise is checked inside a throwaway [Goal True] (always
   [Abort]ed) so that [idtac]/[check_type] output can be emitted;
   [Print Assumptions], being a top-level command, runs between goals
   and lists any axioms the student's proof depends on. *)
Goal True.
idtac "------------------- poly_exercises --------------------".
idtac " ".
idtac "#> app_nil_r".
idtac "Possible points: 0.5".
check_type @app_nil_r (
(forall (X : Type) (l : list X), @eq (list X) (@app X l (@nil X)) l)).
idtac "Assumptions:".
Abort.
Print Assumptions app_nil_r.
Goal True.
idtac " ".
idtac "#> app_assoc".
idtac "Possible points: 1".
check_type @app_assoc (
(forall (A : Type) (l m n : list A),
@eq (list A) (@app A l (@app A m n)) (@app A (@app A l m) n))).
idtac "Assumptions:".
Abort.
Print Assumptions app_assoc.
Goal True.
idtac " ".
idtac "#> app_length".
idtac "Possible points: 0.5".
check_type @app_length (
(forall (X : Type) (l1 l2 : list X),
@eq nat (@length X (@app X l1 l2)) (Nat.add (@length X l1) (@length X l2)))).
idtac "Assumptions:".
Abort.
Print Assumptions app_length.
Goal True.
idtac " ".
idtac "------------------- more_poly_exercises --------------------".
idtac " ".
idtac "#> rev_app_distr".
idtac "Possible points: 1".
check_type @rev_app_distr (
(forall (X : Type) (l1 l2 : list X),
@eq (list X) (@rev X (@app X l1 l2)) (@app X (@rev X l2) (@rev X l1)))).
idtac "Assumptions:".
Abort.
Print Assumptions rev_app_distr.
Goal True.
idtac " ".
idtac "#> rev_involutive".
idtac "Possible points: 1".
check_type @rev_involutive (
(forall (X : Type) (l : list X), @eq (list X) (@rev X (@rev X l)) l)).
idtac "Assumptions:".
Abort.
Print Assumptions rev_involutive.
Goal True.
idtac " ".
idtac "------------------- split --------------------".
idtac " ".
idtac "#> split".
idtac "Possible points: 1".
check_type @split ((forall (X Y : Type) (_ : list (prod X Y)), prod (list X) (list Y))).
idtac "Assumptions:".
Abort.
Print Assumptions split.
Goal True.
idtac " ".
idtac "#> test_split".
idtac "Possible points: 1".
check_type @test_split (
(@eq (prod (list nat) (list bool))
(@split nat bool
(@cons (prod nat bool) (@pair nat bool 1 false)
(@cons (prod nat bool) (@pair nat bool 2 false)
(@nil (prod nat bool)))))
(@pair (list nat) (list bool) (@cons nat 1 (@cons nat 2 (@nil nat)))
(@cons bool false (@cons bool false (@nil bool)))))).
idtac "Assumptions:".
Abort.
Print Assumptions test_split.
Goal True.
idtac " ".
idtac "------------------- filter_even_gt7 --------------------".
idtac " ".
idtac "#> test_filter_even_gt7_1".
idtac "Possible points: 1".
check_type @test_filter_even_gt7_1 (
(@eq (list nat)
(filter_even_gt7
(@cons nat 1
(@cons nat 2
(@cons nat 6
(@cons nat 9
(@cons nat 10
(@cons nat 3 (@cons nat 12 (@cons nat 8 (@nil nat))))))))))
(@cons nat 10 (@cons nat 12 (@cons nat 8 (@nil nat)))))).
idtac "Assumptions:".
Abort.
Print Assumptions test_filter_even_gt7_1.
Goal True.
idtac " ".
idtac "#> test_filter_even_gt7_2".
idtac "Possible points: 1".
check_type @test_filter_even_gt7_2 (
(@eq (list nat)
(filter_even_gt7
(@cons nat 5
(@cons nat 2 (@cons nat 6 (@cons nat 19 (@cons nat 129 (@nil nat)))))))
(@nil nat))).
idtac "Assumptions:".
Abort.
Print Assumptions test_filter_even_gt7_2.
Goal True.
idtac " ".
idtac "------------------- partition --------------------".
idtac " ".
idtac "#> partition".
idtac "Possible points: 1".
check_type @partition (
(forall (X : Type) (_ : forall _ : X, bool) (_ : list X),
prod (list X) (list X))).
idtac "Assumptions:".
Abort.
Print Assumptions partition.
Goal True.
idtac " ".
idtac "#> test_partition1".
idtac "Possible points: 1".
check_type @test_partition1 (
(@eq (prod (list nat) (list nat))
(@partition nat odd
(@cons nat 1
(@cons nat 2 (@cons nat 3 (@cons nat 4 (@cons nat 5 (@nil nat)))))))
(@pair (list nat) (list nat)
(@cons nat 1 (@cons nat 3 (@cons nat 5 (@nil nat))))
(@cons nat 2 (@cons nat 4 (@nil nat)))))).
idtac "Assumptions:".
Abort.
Print Assumptions test_partition1.
Goal True.
idtac " ".
idtac "#> test_partition2".
idtac "Possible points: 1".
check_type @test_partition2 (
(@eq (prod (list nat) (list nat))
(@partition nat (fun _ : nat => false)
(@cons nat 5 (@cons nat 9 (@cons nat 0 (@nil nat)))))
(@pair (list nat) (list nat) (@nil nat)
(@cons nat 5 (@cons nat 9 (@cons nat 0 (@nil nat))))))).
idtac "Assumptions:".
Abort.
Print Assumptions test_partition2.
Goal True.
idtac " ".
idtac "------------------- map_rev --------------------".
idtac " ".
idtac "#> map_rev".
idtac "Possible points: 3".
check_type @map_rev (
(forall (X Y : Type) (f : forall _ : X, Y) (l : list X),
@eq (list Y) (@map X Y f (@rev X l)) (@rev Y (@map X Y f l)))).
idtac "Assumptions:".
Abort.
Print Assumptions map_rev.
Goal True.
idtac " ".
idtac "------------------- flat_map --------------------".
idtac " ".
idtac "#> flat_map".
idtac "Possible points: 1".
check_type @flat_map (
(forall (X Y : Type) (_ : forall _ : X, list Y) (_ : list X), list Y)).
idtac "Assumptions:".
Abort.
Print Assumptions flat_map.
Goal True.
idtac " ".
idtac "#> test_flat_map1".
idtac "Possible points: 1".
check_type @test_flat_map1 (
(@eq (list nat)
(@flat_map nat nat
(fun n : nat => @cons nat n (@cons nat n (@cons nat n (@nil nat))))
(@cons nat 1 (@cons nat 5 (@cons nat 4 (@nil nat)))))
(@cons nat 1
(@cons nat 1
(@cons nat 1
(@cons nat 5
(@cons nat 5
(@cons nat 5
(@cons nat 4 (@cons nat 4 (@cons nat 4 (@nil nat)))))))))))).
idtac "Assumptions:".
Abort.
Print Assumptions test_flat_map1.
Goal True.
idtac " ".
idtac "------------------- fold_length --------------------".
idtac " ".
idtac "#> Exercises.fold_length_correct".
idtac "Possible points: 2".
check_type @Exercises.fold_length_correct (
(forall (X : Type) (l : list X),
@eq nat (@Exercises.fold_length X l) (@length X l))).
idtac "Assumptions:".
Abort.
Print Assumptions Exercises.fold_length_correct.
(* fold_map is graded by hand; its score lives in a manual-grade
   option printed by [print_manual_grade]. *)
Goal True.
idtac " ".
idtac "------------------- fold_map --------------------".
idtac " ".
idtac "#> Manually graded: Exercises.fold_map".
idtac "Possible points: 3".
print_manual_grade Exercises.manual_grade_for_fold_map.
idtac " ".
idtac "------------------- currying --------------------".
idtac " ".
idtac "#> Exercises.uncurry_curry".
idtac "Advanced".
idtac "Possible points: 1".
check_type @Exercises.uncurry_curry (
(forall (X Y Z : Type) (f : forall (_ : X) (_ : Y), Z) (x : X) (y : Y),
@eq Z (@Exercises.prod_curry X Y Z (@Exercises.prod_uncurry X Y Z f) x y)
(f x y))).
idtac "Assumptions:".
Abort.
Print Assumptions Exercises.uncurry_curry.
Goal True.
idtac " ".
idtac "#> Exercises.curry_uncurry".
idtac "Advanced".
idtac "Possible points: 1".
check_type @Exercises.curry_uncurry (
(forall (X Y Z : Type) (f : forall _ : prod X Y, Z) (p : prod X Y),
@eq Z (@Exercises.prod_uncurry X Y Z (@Exercises.prod_curry X Y Z f) p)
(f p))).
idtac "Assumptions:".
Abort.
Print Assumptions Exercises.curry_uncurry.
Goal True.
idtac " ".
idtac "------------------- church_scc --------------------".
idtac " ".
idtac "#> Exercises.Church.scc_2".
idtac "Advanced".
idtac "Possible points: 1".
check_type @Exercises.Church.scc_2 (
(@eq Exercises.Church.cnat (Exercises.Church.scc Exercises.Church.one)
Exercises.Church.two)).
idtac "Assumptions:".
Abort.
Print Assumptions Exercises.Church.scc_2.
Goal True.
idtac " ".
idtac "#> Exercises.Church.scc_3".
idtac "Advanced".
idtac "Possible points: 1".
check_type @Exercises.Church.scc_3 (
(@eq Exercises.Church.cnat (Exercises.Church.scc Exercises.Church.two)
Exercises.Church.three)).
idtac "Assumptions:".
Abort.
Print Assumptions Exercises.Church.scc_3.
Goal True.
idtac " ".
idtac "------------------- church_plus --------------------".
idtac " ".
idtac "#> Exercises.Church.plus_1".
idtac "Advanced".
idtac "Possible points: 1".
check_type @Exercises.Church.plus_1 (
(@eq Exercises.Church.cnat
(Exercises.Church.plus Exercises.Church.zero Exercises.Church.one)
Exercises.Church.one)).
idtac "Assumptions:".
Abort.
Print Assumptions Exercises.Church.plus_1.
Goal True.
idtac " ".
idtac "#> Exercises.Church.plus_2".
idtac "Advanced".
idtac "Possible points: 1".
check_type @Exercises.Church.plus_2 (
(@eq Exercises.Church.cnat
(Exercises.Church.plus Exercises.Church.two Exercises.Church.three)
(Exercises.Church.plus Exercises.Church.three Exercises.Church.two))).
idtac "Assumptions:".
Abort.
Print Assumptions Exercises.Church.plus_2.
Goal True.
idtac " ".
idtac "#> Exercises.Church.plus_3".
idtac "Advanced".
idtac "Possible points: 1".
check_type @Exercises.Church.plus_3 (
(@eq Exercises.Church.cnat
(Exercises.Church.plus
(Exercises.Church.plus Exercises.Church.two Exercises.Church.two)
Exercises.Church.three)
(Exercises.Church.plus Exercises.Church.one
(Exercises.Church.plus Exercises.Church.three Exercises.Church.three)))).
idtac "Assumptions:".
Abort.
Print Assumptions Exercises.Church.plus_3.
Goal True.
idtac " ".
idtac "------------------- church_mult --------------------".
idtac " ".
idtac "#> Exercises.Church.mult_1".
idtac "Advanced".
idtac "Possible points: 1".
check_type @Exercises.Church.mult_1 (
(@eq Exercises.Church.cnat
(Exercises.Church.mult Exercises.Church.one Exercises.Church.one)
Exercises.Church.one)).
idtac "Assumptions:".
Abort.
Print Assumptions Exercises.Church.mult_1.
Goal True.
idtac " ".
idtac "#> Exercises.Church.mult_2".
idtac "Advanced".
idtac "Possible points: 1".
check_type @Exercises.Church.mult_2 (
(@eq Exercises.Church.cnat
(Exercises.Church.mult Exercises.Church.zero
(Exercises.Church.plus Exercises.Church.three Exercises.Church.three))
Exercises.Church.zero)).
idtac "Assumptions:".
Abort.
Print Assumptions Exercises.Church.mult_2.
Goal True.
idtac " ".
idtac "#> Exercises.Church.mult_3".
idtac "Advanced".
idtac "Possible points: 1".
check_type @Exercises.Church.mult_3 (
(@eq Exercises.Church.cnat
(Exercises.Church.mult Exercises.Church.two Exercises.Church.three)
(Exercises.Church.plus Exercises.Church.three Exercises.Church.three))).
idtac "Assumptions:".
Abort.
Print Assumptions Exercises.Church.mult_3.
Goal True.
idtac " ".
idtac "------------------- church_exp --------------------".
idtac " ".
idtac "#> Exercises.Church.exp_1".
idtac "Advanced".
idtac "Possible points: 1".
check_type @Exercises.Church.exp_1 (
(@eq Exercises.Church.cnat
(Exercises.Church.exp Exercises.Church.two Exercises.Church.two)
(Exercises.Church.plus Exercises.Church.two Exercises.Church.two))).
idtac "Assumptions:".
Abort.
Print Assumptions Exercises.Church.exp_1.
Goal True.
idtac " ".
idtac "#> Exercises.Church.exp_2".
idtac "Advanced".
idtac "Possible points: 1".
check_type @Exercises.Church.exp_2 (
(@eq Exercises.Church.cnat
(Exercises.Church.exp Exercises.Church.three Exercises.Church.zero)
Exercises.Church.one)).
idtac "Assumptions:".
Abort.
Print Assumptions Exercises.Church.exp_2.
Goal True.
idtac " ".
idtac "#> Exercises.Church.exp_3".
idtac "Advanced".
idtac "Possible points: 1".
check_type @Exercises.Church.exp_3 (
(@eq Exercises.Church.cnat
(Exercises.Church.exp Exercises.Church.three Exercises.Church.two)
(Exercises.Church.plus
(Exercises.Church.mult Exercises.Church.two
(Exercises.Church.mult Exercises.Church.two Exercises.Church.two))
Exercises.Church.one))).
idtac "Assumptions:".
Abort.
Print Assumptions Exercises.Church.exp_3.
(* Summary section: point totals, allowed axioms, and one
   [Print Assumptions] per graded exercise. *)
Goal True.
idtac " ".
idtac " ".
idtac "Max points - standard: 21".
idtac "Max points - advanced: 34".
idtac "".
idtac "Allowed Axioms:".
idtac "functional_extensionality".
idtac "FunctionalExtensionality.functional_extensionality_dep".
idtac "plus_le".
idtac "le_trans".
idtac "le_plus_l".
idtac "add_le_cases".
idtac "Sn_le_Sm__n_le_m".
idtac "O_le_n".
idtac "".
idtac "".
idtac "********** Summary **********".
idtac "".
idtac "Below is a summary of the automatically graded exercises that are incomplete.".
idtac "".
idtac "The output for each exercise can be any of the following:".
idtac " - 'Closed under the global context', if it is complete".
idtac " - 'MANUAL', if it is manually graded".
idtac " - A list of pending axioms, containing unproven assumptions. In this case".
idtac " the exercise is considered complete, if the axioms are all allowed.".
idtac "".
idtac "********** Standard **********".
idtac "---------- app_nil_r ---------".
Print Assumptions app_nil_r.
idtac "---------- app_assoc ---------".
Print Assumptions app_assoc.
idtac "---------- app_length ---------".
Print Assumptions app_length.
idtac "---------- rev_app_distr ---------".
Print Assumptions rev_app_distr.
idtac "---------- rev_involutive ---------".
Print Assumptions rev_involutive.
idtac "---------- split ---------".
Print Assumptions split.
idtac "---------- test_split ---------".
Print Assumptions test_split.
idtac "---------- test_filter_even_gt7_1 ---------".
Print Assumptions test_filter_even_gt7_1.
idtac "---------- test_filter_even_gt7_2 ---------".
Print Assumptions test_filter_even_gt7_2.
idtac "---------- partition ---------".
Print Assumptions partition.
idtac "---------- test_partition1 ---------".
Print Assumptions test_partition1.
idtac "---------- test_partition2 ---------".
Print Assumptions test_partition2.
idtac "---------- map_rev ---------".
Print Assumptions map_rev.
idtac "---------- flat_map ---------".
Print Assumptions flat_map.
idtac "---------- test_flat_map1 ---------".
Print Assumptions test_flat_map1.
idtac "---------- Exercises.fold_length_correct ---------".
Print Assumptions Exercises.fold_length_correct.
idtac "---------- fold_map ---------".
idtac "MANUAL".
idtac "".
idtac "********** Advanced **********".
idtac "---------- Exercises.uncurry_curry ---------".
Print Assumptions Exercises.uncurry_curry.
idtac "---------- Exercises.curry_uncurry ---------".
Print Assumptions Exercises.curry_uncurry.
idtac "---------- Exercises.Church.scc_2 ---------".
Print Assumptions Exercises.Church.scc_2.
idtac "---------- Exercises.Church.scc_3 ---------".
Print Assumptions Exercises.Church.scc_3.
idtac "---------- Exercises.Church.plus_1 ---------".
Print Assumptions Exercises.Church.plus_1.
idtac "---------- Exercises.Church.plus_2 ---------".
Print Assumptions Exercises.Church.plus_2.
idtac "---------- Exercises.Church.plus_3 ---------".
Print Assumptions Exercises.Church.plus_3.
idtac "---------- Exercises.Church.mult_1 ---------".
Print Assumptions Exercises.Church.mult_1.
idtac "---------- Exercises.Church.mult_2 ---------".
Print Assumptions Exercises.Church.mult_2.
idtac "---------- Exercises.Church.mult_3 ---------".
Print Assumptions Exercises.Church.mult_3.
idtac "---------- Exercises.Church.exp_1 ---------".
Print Assumptions Exercises.Church.exp_1.
idtac "---------- Exercises.Church.exp_2 ---------".
Print Assumptions Exercises.Church.exp_2.
idtac "---------- Exercises.Church.exp_3 ---------".
Print Assumptions Exercises.Church.exp_3.
Abort.
(* 2026-01-07 13:18 *)
(* 2026-01-07 13:18 *)

85
Postscript.v Normal file
View File

@@ -0,0 +1,85 @@
(** * Postscript *)
(** Congratulations: We've made it to the end of _Logical
Foundations_! *)
(* ################################################################# *)
(** * Looking Back *)
(** We've covered quite a bit of ground so far. Here's a quick review...
- _Functional programming_:
- "declarative" programming style (recursion over immutable
data structures, rather than looping over mutable arrays
or pointer structures)
- higher-order functions
- polymorphism *)
(**
- _Logic_, the mathematical basis for software engineering:
logic calculus
-------------------- ~ ----------------------------
software engineering mechanical/civil engineering
- inductively defined sets and relations
- inductive proofs
- proof objects *)
(**
- _Rocq_, an industrial-strength proof assistant
- functional core language
- core tactics
- automation
*)
(* ################################################################# *)
(** * Looking Forward *)
(** If what you've seen so far has whetted your interest, you have
several choices for further reading in later volumes of the
_Software Foundations_ series. Some of these are intended to be
accessible to readers immediately after finishing _Logical
Foundations_; others require a few chapters from Volume 2,
_Programming Language Foundations_. The Preface chapter in each
volume gives details about prerequisites. *)
(* ################################################################# *)
(** * Resources *)
(** Here are some other good places to learn more...
- This book includes some optional chapters covering topics
that you may find useful. Take a look at the table of contents and the chapter dependency diagram to find
them.
- For questions about Rocq, the [#coq] area of Stack
Overflow ({https://stackoverflow.com/questions/tagged/coq})
is an excellent community resource.
- Here are some great books on functional programming
- Learn You a Haskell for Great Good, by Miran Lipovaca
[Lipovaca 2011] (in Bib.v).
- Real World Haskell, by Bryan O'Sullivan, John Goerzen,
and Don Stewart [O'Sullivan 2008] (in Bib.v)
- ...and many other excellent books on Haskell, OCaml,
Scheme, Racket, Scala, F sharp, etc., etc.
- And some further resources for Rocq:
- Certified Programming with Dependent Types, by Adam
Chlipala [Chlipala 2013] (in Bib.v).
- Interactive Theorem Proving and Program Development:
Coq'Art: The Calculus of Inductive Constructions, by Yves
Bertot and Pierre Casteran [Bertot 2004] (in Bib.v).
- If you're interested in real-world applications of formal
verification to critical software, see the Postscript chapter
of _Programming Language Foundations_.
- For applications of Rocq in building verified systems, the
lectures and course materials for the 2017 DeepSpec Summer
School are a great resource.
{https://deepspec.org/event/dsss17/index.html}
*)
(* 2026-01-07 13:18 *)

68
PostscriptTest.v Normal file
View File

@@ -0,0 +1,68 @@
Set Warnings "-notation-overridden,-parsing".
From Stdlib Require Export String.
From LF Require Import Postscript.
Parameter MISSING: Type.
(* Helper tactics shared by the auto-grader. *)
Module Check.
(* [check_type A B]: report whether [A]'s type matches the expected
   type [B].  Prints "Missing:" when the type still mentions the
   [MISSING] placeholder (the student has not filled in the
   definition), "Type: ok" when the type unifies with [B], and a
   "Type: wrong" message otherwise.  Always succeeds, so grading can
   continue past a failure. *)
Ltac check_type A B :=
match type of A with
| context[MISSING] => idtac "Missing:" A
| ?T => first [unify T B; idtac "Type: ok" | idtac "Type: wrong - should be (" B ")"]
end.
(* [print_manual_grade A]: print the score and comment recorded for a
   manually graded exercise.  [A] is expected to reduce to
   [Some (_ score comment)] once graded, or [None] while ungraded;
   an empty comment string is reported as "Comment: None". *)
Ltac print_manual_grade A :=
match eval compute in A with
| Some (_ ?S ?C) =>
idtac "Score:" S;
match eval compute in C with
| ""%string => idtac "Comment: None"
| _ => idtac "Comment:" C
end
| None =>
idtac "Score: Ungraded";
idtac "Comment: None"
end.
End Check.
From LF Require Import Postscript.
Import Check.
(* Grading script for Postscript.v.  The chapter has no graded
   exercises, so this block only prints the (zero-point) summary
   skeleton; the [Goal True] is a throwaway goal used as a context
   for [idtac] output and is immediately [Abort]ed. *)
Goal True.
idtac " ".
idtac "Max points - standard: 0".
idtac "Max points - advanced: 0".
idtac "".
idtac "Allowed Axioms:".
idtac "functional_extensionality".
idtac "FunctionalExtensionality.functional_extensionality_dep".
idtac "plus_le".
idtac "le_trans".
idtac "le_plus_l".
idtac "add_le_cases".
idtac "Sn_le_Sm__n_le_m".
idtac "O_le_n".
idtac "".
idtac "".
idtac "********** Summary **********".
idtac "".
idtac "Below is a summary of the automatically graded exercises that are incomplete.".
idtac "".
idtac "The output for each exercise can be any of the following:".
idtac " - 'Closed under the global context', if it is complete".
idtac " - 'MANUAL', if it is manually graded".
idtac " - A list of pending axioms, containing unproven assumptions. In this case".
idtac " the exercise is considered complete, if the axioms are all allowed.".
idtac "".
idtac "********** Standard **********".
idtac "".
idtac "********** Advanced **********".
Abort.
(* 2026-01-07 13:18 *)
(* 2026-01-07 13:18 *)

549
Preface.v Normal file
View File

@@ -0,0 +1,549 @@
(** * Preface *)
(* ################################################################# *)
(** * Welcome *)
(** This is the entry point to a series of electronic textbooks on
various aspects of _Software Foundations_, the mathematical
underpinnings of reliable software. Topics in the series include
basic concepts of logic, computer-assisted theorem proving, the
Rocq prover, functional programming, operational semantics, logics
and techniques for reasoning about programs, static type systems,
property-based random testing, and verification of practical C
code. The exposition is intended for a broad range of readers,
from advanced undergraduates to PhD students and researchers. No
specific background in logic or programming languages is assumed,
though a degree of mathematical maturity will be helpful.
The principal novelty of the series is that it is one hundred
percent formalized and machine-checked: each text is literally a
script for Rocq. The books are intended to be read alongside (or
inside) an interactive session with Rocq. All the details in the
text are fully formalized in Rocq, and most of the exercises are
designed to be worked using Rocq.
The files in each book are organized into a sequence of core
chapters, covering about one semester's worth of material and
organized into a coherent linear narrative, plus a number of
"offshoot" chapters covering additional topics. All the core
chapters are suitable for both upper-level undergraduate and
graduate students.
This book, _Logical Foundations_, lays groundwork for the others,
introducing the reader to the basic ideas of functional
programming, constructive logic, and the Rocq prover. *)
(* ################################################################# *)
(** * Overview *)
(** Building reliable software is hard -- really hard. The scale and
complexity of modern systems, the number of people involved, and
the range of demands placed on them make it challenging to build
software that is even more-or-less correct, much less 100%%
correct. At the same time, the increasing degree to which
information processing is woven into every aspect of society
greatly amplifies the cost of bugs and insecurities.
Computer scientists and software engineers have responded to these
challenges by developing a host of techniques for improving
software reliability, ranging from recommendations about managing
software project teams (e.g., extreme programming) to design
philosophies for libraries (e.g., model-view-controller,
publish-subscribe, etc.) and programming languages (e.g.,
object-oriented programming, functional programming, ...)
to mathematical techniques for
specifying and reasoning about properties of software and tools
for helping validate these properties. The _Software Foundations_
series is focused on this last set of tools.
This volume weaves together three conceptual threads:
(A) basic tools from _logic_ for making and justifying precise
claims about programs;
(B) the use of _proof assistants_ (or _provers_) to construct
rigorous logical arguments;
(C) _functional programming_, both as a method of programming that
simplifies reasoning about programs and as a bridge between
programming and logic. *)
(* ================================================================= *)
(** ** Logic *)
(** Logic is the field of study whose subject matter is _proofs_ --
unassailable arguments for the truth of particular propositions.
Volumes have been written about the central role of logic in
computer science. Manna and Waldinger called it "the calculus of
computer science," while Halpern et al.'s paper _On the Unusual
Effectiveness of Logic in Computer Science_ catalogs scores of
ways in which logic offers critical tools and insights. Indeed,
they observe that, "As a matter of fact, logic has turned out to
be significantly more effective in computer science than it has
been in mathematics. This is quite remarkable, especially since
much of the impetus for the development of logic during the past
one hundred years came from mathematics."
In particular, the fundamental tools of _inductive proof_ are
ubiquitous in all of computer science. You have surely seen them
before, perhaps in a course on discrete math or analysis of
algorithms, but in this course we will examine them more deeply
than you have probably done so far. *)
(* ================================================================= *)
(** ** Proof Assistants *)
(** The flow of ideas between logic and computer science has not been
unidirectional: CS has also made important contributions to logic.
One of these has been the development of software tools for
helping construct proofs of logical propositions. These tools
fall into two broad categories:
- _Automated theorem provers_ provide "push-button" operation:
you give them a proposition and they return either _true_ or
_false_ (or, sometimes, _don't know: ran out of time_).
Although their reasoning capabilities are still limited,
they have matured tremendously in recent decades and
are used now in a multitude of settings. Examples of such
tools include SAT solvers, SMT solvers, and model checkers.
- _Proof assistants_ are hybrid tools that automate the more
routine aspects of building proofs while depending on human
guidance for more difficult aspects. Widely used proof
assistants include Isabelle, Agda, Twelf, ACL2, PVS, F*,
HOL4, Lean, and Rocq, among many others.
This course is based around Rocq, a proof assistant that has been
under development since 1983 and has attracted a large community
of users in both research and industry. Rocq provides a rich
environment for interactive development of machine-checked formal
reasoning. The kernel of the Rocq system is a simple
proof-checker, which guarantees that only correct deduction steps
are ever performed. On top of this kernel, the Rocq environment
provides high-level facilities for proof development, including a
large library of common definitions and lemmas, powerful tactics
for constructing complex proofs semi-automatically, and a
special-purpose programming language for defining new
proof-automation tactics for specific situations.
Rocq has been a critical enabler for a huge variety of work across
computer science and mathematics:
- As a _platform for modeling programming languages_, it has
become a standard tool for researchers who need to describe and
reason about complex language definitions. It has been used,
for example, to check the security of the JavaCard platform,
obtaining the highest level of common criteria certification,
and for formal specifications of the x86 and LLVM instruction
sets and programming languages such as C.
- As an _environment for developing formally certified software
and hardware_, Rocq has been used, for example, to build
CompCert, a fully-verified optimizing compiler for C, and
CertiKOS, a fully verified hypervisor, for proving the
correctness of subtle algorithms involving floating point
numbers, and as the basis for CertiCrypt, FCF, and SSProve,
which are frameworks for proving cryptographic algorithms secure.
It is also being used to build verified implementations of the
open-source RISC-V processor architecture.
- As a _realistic environment for functional programming with
dependent types_, it has inspired numerous innovations. For
example, Hoare Type Theory embeds reasoning about
"pre-conditions" and "post-conditions" (an extension of the
_Hoare Logic_ we will see later in this course) in Rocq.
- As a _proof assistant for higher-order logic_, it has been used
to validate a number of important results in mathematics. For
example, its ability to include complex computations inside
proofs made it possible to develop the first formally verified
proof of the 4-color theorem. This proof had previously been
controversial among mathematicians because it required checking
a large number of configurations using a program. In the Rocq
formalization, everything is checked, including the correctness
of the computational part. More recently, an even more massive
effort led to a Rocq formalization of the Feit-Thompson Theorem,
the first major step in the classification of finite simple
groups. *)
(* ================================================================= *)
(** ** Functional Programming *)
(** The term _functional programming_ refers both to a collection of
programming idioms that can be used in almost any programming
language and to a family of programming languages designed to
emphasize these idioms, including Haskell, OCaml, Standard ML,
F##, Scala, Scheme, Racket, Common Lisp, Clojure, Erlang, F*,
and Rocq.
Functional programming has been developed over many decades --
indeed, its roots go back to Church's lambda-calculus, which was
invented in the 1930s, well _before_ the first electronic
computers! But since the early '90s it has enjoyed a surge of
interest among industrial engineers and language designers,
playing a key role in high-value systems at companies like Jane
Street Capital, Microsoft, Facebook, Twitter, and Ericsson.
The most basic tenet of functional programming is that, as much as
possible, computation should be _pure_, in the sense that the only
effect of execution should be to produce a result: it should be
free from _side effects_ such as I/O, assignments to mutable
variables, redirecting pointers, etc. For example, whereas an
_imperative_ sorting function might take a list of numbers and
rearrange its pointers to put the list in order, a pure sorting
function would take the original list and return a _new_ list
containing the same numbers in sorted order.
A significant benefit of this style of programming is that it
makes programs easier to understand and reason about. If every
operation on a data structure yields a new data structure, leaving
the old one intact, then there is no need to worry about how that
structure is being shared and whether a change by one part of the
program might break an invariant relied on by another part of the
program. These considerations are particularly critical in
concurrent systems, where every piece of mutable state that is
shared between threads is a potential source of pernicious bugs.
Indeed, a large part of the recent interest in functional
programming in industry is due to its simpler behavior in the
presence of concurrency.
Another reason for the current excitement about functional
programming is related to the first: functional programs are often
much easier to parallelize and physically distribute than their
imperative counterparts. If running a computation has no effect
other than producing a result, then it does not matter _where_ it
is run. Similarly, if a data structure is never modified
destructively, then it can be copied freely, across cores or
across the network. Indeed, the "Map-Reduce" idiom, which lies at
the heart of massively distributed query processors like Hadoop
    and is used by Google to index the entire web, is a classic example
of functional programming.
For purposes of this course, functional programming has yet
another significant attraction: it serves as a bridge between
logic and computer science. Indeed, Rocq itself can be viewed as a
combination of a small but extremely expressive functional
programming language plus a set of tools for stating and proving
logical assertions. Moreover, when we come to look more closely,
we find that these two sides of Rocq are actually aspects of the
very same underlying machinery -- i.e., _proofs are programs_. *)
(* ================================================================= *)
(** ** Rocq vs. Coq *)
(** Until 2025, the Rocq prover was known as Coq. According to the
official webpage, "The name 'Coq' referenced the Calculus of
Constructions (CoC), the foundational system it is based on, as
well as one of its creators, Thierry Coquand. Additionally, it
paid homage to the French national symbol, the rooster. The new
name, 'the Rocq Prover', honors Inria Rocquencourt, the original
site where the prover was developed. It also alludes to the
mythological bird Roc (or Rokh), symbolizing strength and not so
disconnected to a rooster. Furthermore, the name conveys a sense
of solidity, and its unintended connection to music adds a
pleasant resonance."
The current release of Software Foundations is still in a
transitional state, and you will see references to both Coq and
Rocq. *)
(* ================================================================= *)
(** ** Further Reading *)
(** This text is intended to be self-contained, but readers looking
for a deeper treatment of particular topics will find some
suggestions for further reading in the [Postscript] chapter.
Bibliographic information for all cited works can be found in the
file [Bib].*)
(* ################################################################# *)
(** * Practicalities *)
(* ================================================================= *)
(** ** System Requirements *)
(** Rocq runs on Windows, Linux, and macOS. The files in this book
have been tested with Rocq 9.0.0. *)
(* ----------------------------------------------------------------- *)
(** *** Recommended Installation Method: VSCode + Docker *)
(** The Visual Studio Code IDE can cooperate with the Docker
virtualization platform to compile Rocq scripts without the need
for any separate Rocq installation. This method is recommended for
most Software Foundations readers.
- Install Docker from {https://www.docker.com/get-started/} or
make sure your existing installation is up to date.
- Make sure Docker is running.
- Install VSCode from {https://code.visualstudio.com} and start it
running.
- Install VSCode's Dev Containers Extension from
{https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers}
(Note that this extension only works with the official version
of VSCode, not with some VSCode forks like VsCodium.)
- Set up a directory for this SF volume by downloading the
provided [.tgz] file. Besides the [.v] file for each chapter,
this directory will contain a [.devcontainer] subdirectory with
instructions for VSCode about where to find an appropriate
Docker image and a [_CoqProject] file, whose presence triggers
the VSCoq extension.
- In VSCode, use [File > Open Folder] to open the new directory.
VSCode should ask you whether you want to run the project in the
associated Docker container. (If it does not ask you, you can
open the command palette by pressing F1 and run the command “Dev
Containers: Reopen in Container”.)
This step may take some time.
- Check that VSCoq is working by double-clicking the file
[Basics.v] from the list on the left (you should see a blinking
cursor in the window that opens; if not you can click in that
window to select it), and pressing [alt+downarrow] (on MacOS,
[control+option+downarrow]) a few times. You should see the
cursor move through the file and the region above the cursor get
highlighted.
- If VSCoq does not work and you receive an error indicating that
[vscoqtop] was not found, open a new terminal in the container
(you can do this by opening the command palette and running the
command “Terminal: Create New Terminal”) and run the command
[which vscoqtop]. This should print the path to the VSCoq
installation inside the container. Copy this path
and paste it into the “VSCoq: Path” textbox in the
VSCoq extension settings (accessible via the gear icon on
the VSCoq extension page in VSCode), then reload your window.
- To see what other key bindings are available, press F1 and then
type [Coq:], or visit the VSCoq web pages:
{https://github.com/rocq-prover/vsrocq}. *)
(* ================================================================= *)
(** ** Alternative Installation Methods *)
(** If you prefer, there are several other ways to use Rocq. You will need:
- A current installation of Rocq, available from the Rocq home
page ({https://rocq-prover.org/install}). The "Rocq Platform"
offers the easiest installation experience for most people,
especially on Windows.
- An IDE for interacting with Rocq. There are several choices:
- _VsCoq_ is an extension for Visual Studio Code that offers a
simple interface via a familiar IDE. This option is the
recommended default.
VsCoq can be used as an ordinary IDE or it can be combined
with Docker (see below) for a lightweight installation
experience.
- _Proof General_ is an Emacs-based IDE. It tends to be
preferred by users who are already comfortable with Emacs.
It requires a separate installation (google "Proof General",
but generally all you need to do is [M-x package-list-packages],
then select the [proof-general] package from the list and
hit the [i] key for install, then hit the [x] key for execute).
There are only a few commands you need to know to use Proof General
effectively. They are:
- [C-c C-n]: send the next command to Rocq.
- [C-c C-u]: undo (retract) the most recently executed command.
- [C-c C-RET]: submit everything up to the current cursor location to
Rocq for processing.
- [C-c C-.]: move the cursor to the end of the last command which has
been processed by Rocq.
- [C-c .]: toggle "electric terminator mode". When this mode is
turned on, simply typing a period will send the current command to
Rocq (normally you have to type a period and then type [C-c C-n]).
Adventurous users of Rocq within Emacs may want to check out
extensions such as [company-coq] and [control-lock].
- _RocqIDE_ is a simpler stand-alone IDE. It is distributed with
the Rocq Platform, so it should be available once you have Rocq
installed. It can also be compiled from scratch, but on some
platforms this may involve installing additional packages for GUI
libraries and such.
Users who like RocqIDE should consider running it with the
"asynchronous" and "error resilience" modes disabled:
coqide -async-proofs off \
-async-proofs-command-error-resilience off Foo.v &
*)
(* ================================================================= *)
(** ** Exercises *)
(** Each chapter includes numerous exercises. Each is marked with a
"star rating," which can be interpreted as follows:
- One star: easy exercises that underscore points in the text
and that, for most readers, should take only a minute or two.
Get in the habit of working these as you reach them.
- Two stars: straightforward exercises (five or ten minutes).
- Three stars: exercises requiring a bit of thought (ten
minutes to half an hour).
- Four and five stars: more difficult exercises (half an hour
and up).
Those using SF in a classroom setting should note that the autograder
assigns extra points to harder exercises:
1 star = 1 point
2 stars = 2 points
3 stars = 3 points
4 stars = 6 points
5 stars = 10 points
Some exercises are marked "advanced," and some are marked
"optional." Doing just the non-optional, non-advanced exercises
should provide good coverage of the core material. Optional
exercises provide a bit of extra practice with key concepts and
introduce secondary themes that may be of interest to some
readers. Advanced exercises are for readers who want an extra
challenge and a deeper cut at the material.
_Please do not post solutions to the exercises in a public place_.
Software Foundations is widely used both for self-study and for
university courses. Having solutions easily available makes it
much less useful for courses, which typically have graded homework
assignments. We especially request that readers not post
solutions to the exercises anyplace where they can be found by
search engines. *)
(* ================================================================= *)
(** ** Downloading the Rocq Files *)
(** A tar file containing the full sources for the "release version"
of this book (as a collection of Rocq scripts and HTML files) is
available at {https://softwarefoundations.cis.upenn.edu}.
If you are using the book as part of a class, your professor may
give you access to a locally modified version of the files; you
should use that one instead of the public release version, so that
you get any local updates during the semester. *)
(* ================================================================= *)
(** ** Chapter Dependencies *)
(** A diagram of the dependencies between chapters and some suggested
paths through the material can be found in the file [deps.html]. *)
(* ================================================================= *)
(** ** Recommended Citation Format *)
(** If you want to refer to this volume in your own writing, please
do so as follows:
@book {Pierce:SF1,
author = {Benjamin C. Pierce and
Arthur Azevedo de Amorim and
Chris Casinghino and
Marco Gaboardi and
Michael Greenberg and
Cătălin Hriţcu and
Vilhelm Sjöberg and
Brent Yorgey},
editor = {Benjamin C. Pierce},
title = "Logical Foundations",
series = "Software Foundations",
volume = "1",
year = "2026",
publisher = "Electronic textbook",
note = {Version 7.0, \URL{http://softwarefoundations.cis.upenn.edu}}
}
*)
(* ################################################################# *)
(** * Resources *)
(* ================================================================= *)
(** ** Sample Exams *)
(** A large compendium of exams from many offerings of
CIS5000 ("Software Foundations") at the University of Pennsylvania
can be found at
{https://www.seas.upenn.edu/~cis5000/current/exams/index.html}.
There has been some drift of notations over the years, but most of
the problems are still relevant to the current text. *)
(* ================================================================= *)
(** ** Lecture Videos *)
(** Lectures for two intensive summer courses based on _Logical
Foundations_ (part of the DeepSpec summer school series) can be
found at {https://deepspec.org/event/dsss17} and
{https://deepspec.org/event/dsss18/}. The video quality in the
2017 lectures is poor at the beginning but gets better in the
later lectures. *)
(* ################################################################# *)
(** * Note for Instructors and Contributors *)
(** If you plan to use these materials in your own teaching, or if you
are using software foundations for self study and are finding
things you'd like to help add or improve, your contributions are
welcome! You are warmly invited to join the private SF git repo.
In order to keep the legalities simple and to have a single point
of responsibility in case the need should ever arise to adjust the
license terms, sublicense, etc., we ask all contributors (i.e.,
everyone with access to the developers' repository) to assign
copyright in their contributions to the appropriate "author of
record," as follows:
- I hereby assign copyright in my past and future contributions
to the Software Foundations project to the Author of Record of
each volume or component, to be licensed under the same terms
as the rest of Software Foundations. I understand that, at
present, the Authors of Record are as follows: For Volumes 1
and 2, known until 2016 as "Software Foundations" and from
2016 as (respectively) "Logical Foundations" and "Programming
Foundations," and for Volume 4, "QuickChick: Property-Based
Testing in Rocq," the Author of Record is Benjamin C. Pierce.
For Volume 3, "Verified Functional Algorithms," and volume 5,
"Verifiable C," the Author of Record is Andrew W. Appel. For
Volume 6, "Separation Logic Foundations," the author of record
is Arthur Chargueraud. For components outside of designated
volumes (e.g., typesetting and grading tools and other
software infrastructure), the Author of Record is Benjamin C.
Pierce.
To get started, please send an email to Benjamin Pierce,
describing yourself and how you plan to use the materials and
including (A) the above copyright transfer text and (B) your
github username.
We'll set you up with access to the git repository and developers'
mailing lists. In the repository you'll find the files
[INSTRUCTORS] and [CONTRIBUTING] with further instructions. *)
(* ################################################################# *)
(** * Translations *)
(** Thanks to the efforts of a team of volunteer translators,
_Software Foundations_ can be enjoyed in Japanese at
{http://proofcafe.org/sf}. A Chinese translation is also underway;
you can preview it at {https://coq-zh.github.io/SF-zh/}. *)
(* ################################################################# *)
(** * Thanks *)
(** Development of the _Software Foundations_ series has been
supported, in part, by the National Science Foundation under the
NSF Expeditions grant 1521523, _The Science of Deep
Specification_. *)
(* 2026-01-07 13:17 *)

68
PrefaceTest.v Normal file
View File

@@ -0,0 +1,68 @@
(* Autograder preamble: silence warnings, load string notations and the
   chapter under test. *)
Set Warnings "-notation-overridden,-parsing".
From Stdlib Require Export String.
From LF Require Import Preface.
(* Sentinel type: graded definitions left unfinished mention [MISSING],
   which [check_type] below detects. *)
Parameter MISSING: Type.
(* Helper tactics used by the autograder to report on each exercise. *)
Module Check.
(* Report on the type of term [A] against expected type [B]:
   - if [A]'s type mentions the sentinel [MISSING], the definition is
     unfinished and we print "Missing:";
   - otherwise try to unify its type with [B] and print ok/wrong. *)
Ltac check_type A B :=
match type of A with
| context[MISSING] => idtac "Missing:" A
| ?T => first [unify T B; idtac "Type: ok" | idtac "Type: wrong - should be (" B ")"]
end.
(* Print the score and comment for a manually graded exercise [A].
   [A] is expected to compute to an option whose [Some] payload carries
   a score [S] and a string comment [C]; [None] means ungraded. *)
Ltac print_manual_grade A :=
match eval compute in A with
| Some (_ ?S ?C) =>
idtac "Score:" S;
(* An empty comment string is reported as "None". *)
match eval compute in C with
| ""%string => idtac "Comment: None"
| _ => idtac "Comment:" C
end
| None =>
idtac "Score: Ungraded";
idtac "Comment: None"
end.
End Check.
From LF Require Import Preface.
Import Check.
(* Dummy goal: the [idtac]s below merely print the grading header and
   summary for this chapter (which has no graded exercises, hence 0 max
   points and empty Standard/Advanced sections); the trivial goal is
   then discarded with [Abort]. *)
Goal True.
idtac " ".
idtac "Max points - standard: 0".
idtac "Max points - advanced: 0".
idtac "".
idtac "Allowed Axioms:".
idtac "functional_extensionality".
idtac "FunctionalExtensionality.functional_extensionality_dep".
idtac "plus_le".
idtac "le_trans".
idtac "le_plus_l".
idtac "add_le_cases".
idtac "Sn_le_Sm__n_le_m".
idtac "O_le_n".
idtac "".
idtac "".
idtac "********** Summary **********".
idtac "".
idtac "Below is a summary of the automatically graded exercises that are incomplete.".
idtac "".
idtac "The output for each exercise can be any of the following:".
idtac "  - 'Closed under the global context', if it is complete".
idtac "  - 'MANUAL', if it is manually graded".
idtac "  - A list of pending axioms, containing unproven assumptions. In this case".
idtac "    the exercise is considered complete, if the axioms are all allowed.".
idtac "".
idtac "********** Standard **********".
idtac "".
idtac "********** Advanced **********".
Abort.
(* 2026-01-07 13:18 *)
(* 2026-01-07 13:18 *)

946
ProofObjects.v Normal file
View File

@@ -0,0 +1,946 @@
(** * ProofObjects: The Curry-Howard Correspondence *)
Set Warnings "-notation-overridden,-notation-incompatible-prefix".
From LF Require Export IndProp.
(** "Algorithms are the computational content of proofs."
(Robert Harper) *)
(** We have seen that Rocq has mechanisms both for _programming_,
using inductive data types like [nat] or [list] and functions over
these types, and for _proving_ properties of these programs, using
inductive propositions (like [ev]), implication, universal
quantification, and the like. So far, we have mostly treated
these mechanisms as if they were quite separate, and for many
purposes this is a good way to think. But we have also seen hints
that Rocq's programming and proving facilities are closely related.
For example, the keyword [Inductive] is used to declare both data
types and propositions, and [->] is used both to describe the type
of functions on data and logical implication. This is not just a
syntactic accident! In fact, programs and proofs in Rocq are
almost the same thing. In this chapter we will study this connection
in more detail.
We have already seen the fundamental idea: provability in Rocq is
always witnessed by _evidence_. When we construct the proof of a
basic proposition, we are actually building a tree of evidence,
which can be thought of as a concrete data structure.
If the proposition is an implication like [A -> B], then its proof
is an evidence _transformer_: a recipe for converting evidence for
A into evidence for B. So at a fundamental level, proofs are
simply programs that manipulate evidence. *)
(** Question: If evidence is data, what are propositions themselves?
Answer: They are types! *)
(** Look again at the formal definition of the [ev] property. *)
(* Evidence that a number is even: [ev_0] witnesses that 0 is even, and
   [ev_SS] turns evidence for [n] into evidence for [S (S n)]. *)
Inductive ev : nat -> Prop :=
| ev_0 : ev 0                                  (* 0 is even *)
| ev_SS (n : nat) (H : ev n) : ev (S (S n)).   (* if n is even, so is n+2 *)
(** We can pronounce the ":" here as either "has type" or "is a proof
of." For example, the second line in the definition of [ev]
declares that [ev_0 : ev 0]. Instead of "[ev_0] has type [ev 0],"
we can say that "[ev_0] is a proof of [ev 0]." *)
(** This pun between types and propositions -- between [:] as "has type"
and [:] as "is a proof of" or "is evidence for" -- is called the
_Curry-Howard correspondence_. It proposes a deep connection
between the world of logic and the world of computation:
propositions ~ types
proofs ~ programs
See [Wadler 2015] (in Bib.v) for a brief history and modern exposition. *)
(** Many useful insights follow from this connection. To begin with,
it gives us a natural interpretation of the type of the [ev_SS]
constructor: *)
(* [Check e : T] asserts that [e] has type [T]; here it displays the
   type of the [ev_SS] constructor, read as an evidence transformer. *)
Check ev_SS
: forall n,
ev n ->
ev (S (S n)).
(** This can be read "[ev_SS] is a constructor that takes two
arguments -- a number [n] and evidence for the proposition [ev
n] -- and yields evidence for the proposition [ev (S (S n))]." *)
(** Now let's look again at an earlier proof involving [ev]. *)
(* Tactic proof: each [apply ev_SS] peels two successors off the goal,
   and [ev_0] closes it.  The proof object this script builds is
   inspected with [Print ev_4] just below. *)
Theorem ev_4 : ev 4.
Proof.
apply ev_SS. apply ev_SS. apply ev_0. Qed.
(** Just as with ordinary data values and functions, we can use the
[Print] command to see the _proof object_ that results from this
proof script. *)
Print ev_4.
(* ===> ev_4 = ev_SS 2 (ev_SS 0 ev_0)
: ev 4 *)
(** Indeed, we can also write down this proof object directly,
with no need for a proof script at all: *)
(* The same evidence, written directly as a term and checked against
   the proposition [ev 4]. *)
Check (ev_SS 2 (ev_SS 0 ev_0))
: ev 4.
(** The expression [ev_SS 2 (ev_SS 0 ev_0)] instantiates the
parameterized constructor [ev_SS] with the specific arguments [2]
and [0] plus the corresponding proof objects for its premises [ev
2] and [ev 0]. Alternatively, we can think of [ev_SS] as a
primitive "evidence constructor" that, when applied to a
particular number, wants to be further applied to evidence that
this number is even; its type,
forall n, ev n -> ev (S (S n)),
expresses this functionality, in the same way that the polymorphic
type [forall X, list X] expresses the fact that the constructor
[nil] can be thought of as a function from types to empty lists
with elements of that type. *)
(** We saw in the [Logic] chapter that we can use function
application syntax to instantiate universally quantified variables
in lemmas, as well as to supply evidence for assumptions that
these lemmas impose. For instance: *)
Theorem ev_4': ev 4.
Proof.
  (* Supply the complete evidence term in a single step: [exact] checks
     that [ev_SS 2 (ev_SS 0 ev_0)] has precisely the goal's type, so the
     stored proof object is identical to the one built by [ev_4]. *)
  exact (ev_SS 2 (ev_SS 0 ev_0)).
Qed.
(* ################################################################# *)
(** * Proof Scripts *)
(** The _proof objects_ we've been discussing lie at the core of how
Rocq operates. When Rocq is following a proof script, what is
happening internally is that it is gradually constructing a proof
object -- a term whose type is the proposition being proved. The
tactics between [Proof] and [Qed] tell it how to build up a term
of the required type. To see this process in action, let's use
the [Show Proof] command to display the current state of the proof
tree at various points in the following tactic proof. *)
Theorem ev_4'' : ev 4.
Proof.
(* Before any tactic runs, the whole proof is a single hole. *)
Show Proof.
apply ev_SS.
(* Each [apply ev_SS] fills part of the term, leaving a smaller hole. *)
Show Proof.
apply ev_SS.
Show Proof.
apply ev_0.
(* No holes remain: the evidence term is complete. *)
Show Proof.
Qed.
(** At any given moment, Rocq has constructed a term with a
"hole" (indicated by [?Goal] here, and so on), and it knows what
type of evidence is needed to fill this hole.
Each hole corresponds to a subgoal, and the proof is
finished when there are no more subgoals. At this point, the
evidence we've built is stored in the global context under the name
given in the [Theorem] command. *)
(** Tactic proofs are convenient, but they are not essential in Rocq:
in principle, we can always just construct the required evidence
by hand. Then we can use [Definition] (rather than [Theorem]) to
introduce a global name for this evidence. *)
(* The same evidence, introduced with [Definition] rather than a proof
   script: the term is written out explicitly. *)
Definition ev_4''' : ev 4 :=
ev_SS 2 (ev_SS 0 ev_0).
(** All these different ways of building the proof lead to exactly the
same evidence being saved in the global environment. *)
Print ev_4.
(* ===> ev_4 = ev_SS 2 (ev_SS 0 ev_0) : ev 4 *)
Print ev_4'.
(* ===> ev_4' = ev_SS 2 (ev_SS 0 ev_0) : ev 4 *)
Print ev_4''.
(* ===> ev_4'' = ev_SS 2 (ev_SS 0 ev_0) : ev 4 *)
Print ev_4'''.
(* ===> ev_4''' = ev_SS 2 (ev_SS 0 ev_0) : ev 4 *)
(** **** Exercise: 2 stars, standard (eight_is_even)
Give a tactic proof and a proof object showing that [ev 8]. *)
(* Completed: was left [Admitted].  Each [apply ev_SS] reduces the goal
   by two successors; four applications take [ev 8] down to [ev 0]. *)
Theorem ev_8 : ev 8.
Proof.
  apply ev_SS. apply ev_SS. apply ev_SS. apply ev_SS. apply ev_0.
Qed.
(* Completed: was left [Admitted].  The same proof as [ev_8], written
   directly as an evidence term: nested [ev_SS] applications down to
   [ev_0]. *)
Definition ev_8' : ev 8
  := ev_SS 6 (ev_SS 4 (ev_SS 2 (ev_SS 0 ev_0))).
(** [] *)
(* ################################################################# *)
(** * Quantifiers, Implications, Functions *)
(** In Rocq's computational universe (where data structures and
programs live), there are two sorts of values that have arrows in
their types: _constructors_ introduced by [Inductive]ly defined
data types, and _functions_.
Similarly, in Rocq's logical universe (where we carry out proofs),
there are two ways of giving evidence for an implication:
constructors introduced by [Inductive]ly defined propositions,
and... functions! *)
(** For example, consider this statement: *)
(* Evidence for an implication under a quantifier: introduce [n] and the
   hypothesis [H : ev n], reduce [4 + n] to [S (S (S (S n)))] with
   [simpl], then peel two successors at a time and finish with [H]. *)
Theorem ev_plus4 : forall n, ev n -> ev (4 + n).
Proof.
intros n H. simpl.
apply ev_SS.
apply ev_SS.
apply H.
Qed.
(** What is the proof object corresponding to [ev_plus4]? *)
(** We're looking for an expression whose _type_ is [forall n, ev n ->
ev (4 + n)] -- that is, a _function_ that takes two arguments (one
number and a piece of evidence) and returns a piece of evidence!
Here it is: *)
(* The proof object for [ev_plus4], written as an explicit curried
   function: one [fun] per argument (the number [n], then the evidence
   [H]), returning the nested [ev_SS] evidence. *)
Definition ev_plus4' : forall n, ev n -> ev (4 + n) :=
fun (n : nat) => fun (H : ev n) =>
ev_SS (S (S n)) (ev_SS n H).
(** Recall that [fun n => blah] means "the function that, given [n],
yields [blah]," and that Rocq treats [4 + n] and [S (S (S (S n)))]
as synonyms. Another equivalent way to write this definition is: *)
(* Equivalent definition using named-argument syntax instead of
   explicit [fun]s; the [Check] confirms it has the same type. *)
Definition ev_plus4'' (n : nat) (H : ev n)
: ev (4 + n) :=
ev_SS (S (S n)) (ev_SS n H).
Check ev_plus4'' : forall n : nat, ev n -> ev (4 + n).
(** When we view the proposition being proved by [ev_plus4] as a
function type, one interesting point becomes apparent: The second
argument's type, [ev n], mentions the _value_ of the first
argument, [n].
While such _dependent types_ are not found in most mainstream
programming languages, they can be quite useful in programming
too, as the flurry of activity in the functional programming
community over the past couple of decades demonstrates. *)
(** Notice that both implication ([->]) and quantification ([forall])
correspond to functions on evidence. In fact, they are really the
same thing: [->] is just a shorthand for a degenerate use of
[forall] where there is no dependency, i.e., no need to give a
name to the type on the left-hand side of the arrow:
forall (x:nat), nat
= forall (_:nat), nat
= nat -> nat
*)
(** For example, consider this proposition: *)
(* A proposition written with an explicitly named evidence argument [E]
   (which the body never uses). *)
Definition ev_plus2 : Prop :=
forall n, forall (E : ev n), ev (n + 2).
(** A proof term inhabiting this proposition would be a function
with two arguments: a number [n] and some evidence [E] that [n] is
even. But the name [E] for this evidence is not used in the rest
of the statement of [ev_plus2], so it's a bit silly to bother
making up a name for it. We could write it like this instead,
using the dummy identifier [_] in place of a real name: *)
Definition ev_plus2' : Prop :=
forall n, forall (_ : ev n), ev (n + 2).
(** Or, equivalently, we can write it in a more familiar way: *)
Definition ev_plus2'' : Prop :=
forall n, ev n -> ev (n + 2).
(** In general, "[P -> Q]" is just syntactic sugar for
"[forall (_:P), Q]". *)
(* ################################################################# *)
(** * Programming with Tactics *)
(** If we can build proofs by giving explicit terms rather than
executing tactic scripts, you may wonder whether we can build
_programs_ using tactics rather than by writing down explicit
terms.
Naturally, the answer is yes! *)
Definition add2 : nat -> nat.
intros n.
Show Proof.
apply S.
Show Proof.
apply S.
Show Proof.
apply n. Defined.
Print add2.
(* ==>
add2 = fun n : nat => S (S n)
: nat -> nat
*)
Compute add2 2.
(* ==> 4 : nat *)
(** Notice that we terminated the [Definition] with a [.] rather than
with [:=] followed by a term. This tells Rocq to enter _proof
scripting mode_ to build an object of type [nat -> nat]. Also, we
terminate the proof with [Defined] rather than [Qed]; this makes
the definition _transparent_ so that it can be used in computation
like a normally-defined function. ([Qed]-defined objects are
opaque during computation.)
This feature is mainly useful for writing functions with dependent
types, which we won't explore much further in this book. But it
does illustrate the uniformity and orthogonality of the basic
ideas in Rocq. *)
(* ################################################################# *)
(** * Logical Connectives as Inductive Types *)
(** Inductive definitions are powerful enough to express most of the
logical connectives we have seen so far. Indeed, only universal
quantification (with implication as a special case) is built into
Rocq; all the others are defined inductively.
Let's see how. *)
Module Props.
(* ================================================================= *)
(** ** Conjunction *)
(** To prove that [P /\ Q] holds, we must present evidence for both
[P] and [Q]. Thus, it makes sense to define a proof object for
[P /\ Q] to consist of a pair of two proofs: one for [P] and
another one for [Q]. This leads to the following definition. *)
Module And.
Inductive and (P Q : Prop) : Prop :=
| conj : P -> Q -> and P Q.
Arguments conj [P] [Q].
Notation "P /\ Q" := (and P Q) : type_scope.
(** Notice the similarity with the definition of the [prod] type,
given in chapter [Poly]; the only difference is that [prod] takes
[Type] arguments, whereas [and] takes [Prop] arguments. *)
Print prod.
(* ===>
Inductive prod (X Y : Type) : Type :=
| pair : X -> Y -> X * Y. *)
(** This similarity should clarify why [destruct] and [intros]
patterns can be used on a conjunctive hypothesis. Case analysis
allows us to consider all possible ways in which [P /\ Q] was
proved -- here just one (the [conj] constructor). *)
Theorem proj1' : forall P Q,
P /\ Q -> P.
Proof.
intros P Q HPQ. destruct HPQ as [HP HQ]. apply HP.
Show Proof.
Qed.
(** Similarly, the [split] tactic actually works for any inductively
defined proposition with exactly one constructor. In particular,
it works for [and]: *)
Lemma and_comm : forall P Q : Prop, P /\ Q <-> Q /\ P.
Proof.
intros P Q. split.
- intros [HP HQ]. split.
+ apply HQ.
+ apply HP.
- intros [HQ HP]. split.
+ apply HP.
+ apply HQ.
Qed.
End And.
(** This shows why the inductive definition of [and] can be
manipulated by tactics as we've been doing. We can also use it to
build proofs directly, using pattern-matching. For instance: *)
Definition proj1'' P Q (HPQ : P /\ Q) : P :=
match HPQ with
| conj HP HQ => HP
end.
Definition and_comm'_aux P Q (H : P /\ Q) : Q /\ P :=
match H with
| conj HP HQ => conj HQ HP
end.
Definition and_comm' P Q : P /\ Q <-> Q /\ P :=
conj (and_comm'_aux P Q) (and_comm'_aux Q P).
(** **** Exercise: 2 stars, standard (conj_fact)
Construct a proof object for the following proposition. *)
Definition conj_fact : forall P Q R, P /\ Q -> Q /\ R -> P /\ R :=
  (* Project [P] out of the first conjunction and [R] out of the second,
     then pair them back up with [conj]. *)
  fun P Q R HPQ HQR =>
    match HPQ with
    | conj HP _ =>
        match HQR with
        | conj _ HR => conj HP HR
        end
    end.
(** [] *)
(* ================================================================= *)
(** ** Disjunction *)
(** The inductive definition of disjunction uses two constructors, one
for each side of the disjunction: *)
Module Or.
Inductive or (P Q : Prop) : Prop :=
| or_introl : P -> or P Q
| or_intror : Q -> or P Q.
Arguments or_introl [P] [Q].
Arguments or_intror [P] [Q].
Notation "P \/ Q" := (or P Q) : type_scope.
(** This declaration explains the behavior of the [destruct] tactic on
a disjunctive hypothesis, since the generated subgoals match the
shape of the [or_introl] and [or_intror] constructors. *)
(** Once again, we can also directly write proof objects for theorems
involving [or], without resorting to tactics. *)
Definition inj_l : forall (P Q : Prop), P -> P \/ Q :=
fun P Q HP => or_introl HP.
Theorem inj_l' : forall (P Q : Prop), P -> P \/ Q.
Proof.
intros P Q HP. left. apply HP.
Show Proof.
Qed.
Definition or_elim : forall (P Q R : Prop), (P \/ Q) -> (P -> R) -> (Q -> R) -> R :=
fun P Q R HPQ HPR HQR =>
match HPQ with
| or_introl HP => HPR HP
| or_intror HQ => HQR HQ
end.
Theorem or_elim' : forall (P Q R : Prop), (P \/ Q) -> (P -> R) -> (Q -> R) -> R.
Proof.
intros P Q R HPQ HPR HQR.
destruct HPQ as [HP | HQ].
- apply HPR. apply HP.
- apply HQR. apply HQ.
Qed.
End Or.
(** **** Exercise: 2 stars, standard (or_commut')
Construct a proof object for the following proposition. *)
Definition or_commut' : forall P Q, P \/ Q -> Q \/ P :=
  (* Case on the disjunction and re-inject the evidence on the
     opposite side. *)
  fun P Q H =>
    match H with
    | or_introl HP => or_intror HP
    | or_intror HQ => or_introl HQ
    end.
(** [] *)
(* ================================================================= *)
(** ** Existential Quantification *)
(** To give evidence for an existential quantifier, we package a
witness [x] together with a proof that [x] satisfies the property
[P]: *)
Module Ex.
Inductive ex {A : Type} (P : A -> Prop) : Prop :=
| ex_intro : forall x : A, P x -> ex P.
Notation "'exists' x , p" :=
(ex (fun x => p))
(at level 200, right associativity) : type_scope.
End Ex.
(** This probably needs a little unpacking. The core definition is
for a type former [ex] that can be used to build propositions of
the form [ex P], where [P] itself is a _function_ from witness
values in the type [A] to propositions. The [ex_intro]
constructor then offers a way of constructing evidence for [ex P],
given a witness [x] and a proof of [P x].
The notation in the standard library is a slight extension of
the above, enabling syntactic forms such as [exists x y, P x y]. *)
(** The more familiar form [exists x, ev x] desugars to an expression
involving [ex]: *)
Check ex (fun n => ev n) : Prop.
(** Here's how to define an explicit proof object involving [ex]: *)
Definition some_nat_is_even : exists n, ev n :=
ex_intro ev 4 (ev_SS 2 (ev_SS 0 ev_0)).
(** **** Exercise: 2 stars, standard (ex_ev_Sn)
Construct a proof object for the following proposition. *)
Definition ex_ev_Sn : ex (fun n => ev (S n)) :=
  (* Witness [1]: [S 1] computes to [2], which is even by
     [ev_SS 0 ev_0]. *)
  ex_intro (fun n => ev (S n)) 1 (ev_SS 0 ev_0).
(** [] *)
(** To destruct existentials in a proof term we simply use match: *)
Definition dist_exists_or_term (X:Type) (P Q : X -> Prop) :
(exists x, P x \/ Q x) -> (exists x, P x) \/ (exists x, Q x) :=
fun H => match H with
| ex_intro _ x Hx =>
match Hx with
| or_introl HPx => or_introl (ex_intro _ x HPx)
| or_intror HQx => or_intror (ex_intro _ x HQx)
end
end.
(** **** Exercise: 2 stars, standard (ex_match)
Construct a proof object for the following proposition: *)
Definition ex_match : forall (A : Type) (P Q : A -> Prop),
    (forall x, P x -> Q x) ->
    (exists x, P x) -> (exists x, Q x) :=
  (* Unpack the witness [x], transport its proof along the
     pointwise implication, and repack with the same witness. *)
  fun A P Q Hmp H =>
    match H with
    | ex_intro _ x HPx => ex_intro _ x (Hmp x HPx)
    end.
(** [] *)
(* ================================================================= *)
(** ** [True] and [False] *)
(** The inductive definition of the [True] proposition is simple: *)
Inductive True : Prop :=
| I : True.
(** It has one constructor (so every proof of [True] is the same, so
being given a proof of [True] is not informative.) *)
(** **** Exercise: 1 star, standard (p_implies_true)
Construct a proof object for the following proposition. *)
Definition p_implies_true : forall P, P -> True :=
  (* [True] is always inhabited by its sole constructor [I];
     the proof of [P] is simply discarded. *)
  fun (P : Type) (_ : P) => I.
(** [] *)
(** [False] is equally simple -- indeed, so simple it may look
syntactically wrong at first glance! *)
Inductive False : Prop := .
(** That is, [False] is an inductive type with _no_ constructors --
i.e., no way to build evidence for it. For example, there is
no way to complete the following definition such that it
succeeds. *)
Fail
Definition contra : False :=
42.
(** But it is possible to destruct [False] by pattern matching. There can
be no patterns that match it, since it has no constructors. So
the pattern match also is so simple it may look syntactically
wrong at first glance. *)
Definition false_implies_zero_eq_one : False -> 0 = 1 :=
fun contra => match contra with end.
(** Since there are no branches to evaluate, the [match] expression
can be considered to have any type we want, including [0 = 1].
Fortunately, it's impossible to ever cause the [match] to be
evaluated, because we can never construct a value of type [False]
to pass to the function. *)
(** **** Exercise: 1 star, standard (ex_falso_quodlibet')
Construct a proof object for the following proposition. *)
Definition ex_falso_quodlibet' : forall P, False -> P :=
  (* [False] has no constructors, so the empty [match] can be
     given any result type, including [P]. *)
  fun (P : Type) (contra : False) => match contra with end.
(** [] *)
End Props.
(* ################################################################# *)
(** * Equality *)
(** Even Rocq's equality relation is not built in. We can define
it ourselves: *)
Module EqualityPlayground.
Inductive eq {X:Type} : X -> X -> Prop :=
| eq_refl : forall x, eq x x.
Notation "x == y" := (eq x y)
(at level 70, no associativity)
: type_scope.
(** The way to think about this definition (which is just a slight
variant of the standard library's) is that, given a set [X], it
defines a _family_ of propositions "[x] is equal to [y]," indexed
by pairs of values ([x] and [y]) from [X]. There is just one way
of constructing evidence for members of this family: applying the
constructor [eq_refl] to a type [X] and a single value [x : X],
which yields evidence that [x] is equal to [x].
Other types of the form [eq x y] where [x] and [y] are not the
same are thus uninhabited. *)
(** We can use [eq_refl] to construct evidence that, for example, [2 =
2]. Can we also use it to construct evidence that [1 + 1 = 2]?
Yes, we can. Indeed, it is the very same piece of evidence!
The reason is that Rocq treats as "the same" any two terms that are
_convertible_ according to a simple set of computation rules.
These rules, which are similar to those used by [Compute], include
evaluation of function application, inlining of definitions, and
simplification of [match]es. *)
Lemma four: 2 + 2 == 1 + 3.
Proof.
apply eq_refl.
Qed.
(** The [reflexivity] tactic that we have used to prove
equalities up to now is essentially just shorthand for [apply
eq_refl].
In tactic-based proofs of equality, the conversion rules are
normally hidden in uses of [simpl] (either explicit or implicit in
other tactics such as [reflexivity]).
But you can see them directly at work in the following explicit
proof objects: *)
Definition four' : 2 + 2 == 1 + 3 :=
eq_refl 4.
Definition singleton : forall (X:Type) (x:X), []++[x] == x::[] :=
fun (X:Type) (x:X) => eq_refl [x].
(** We can also pattern-match on an equality proof: *)
Definition eq_add : forall (n1 n2 : nat), n1 == n2 -> (S n1) == (S n2) :=
fun n1 n2 Heq =>
match Heq with
| eq_refl n => eq_refl (S n)
end.
(** By pattern-matching against [n1 == n2], we obtain a term [n]
that replaces [n1] and [n2] in the type we have to produce, so
instead of [(S n1) == (S n2)], we now have to produce something
of type [(S n) == (S n)], which we establish by [eq_refl (S n)]. *)
(** A tactic-based proof runs into some difficulties if we try to use
our usual repertoire of tactics, such as [rewrite] and
[reflexivity]. Those work with *setoid* relations that Rocq knows
about, such as [=], but not our [==]. We could prove to Rocq that
[==] is a setoid, but a simpler way is to use [destruct] and
[apply] instead. *)
Theorem eq_add' : forall (n1 n2 : nat), n1 == n2 -> (S n1) == (S n2).
Proof.
intros n1 n2 Heq.
Fail rewrite Heq. (* doesn't work for _our_ == relation *)
destruct Heq as [n]. (* n1 and n2 replaced by n in the goal! *)
Fail reflexivity. (* doesn't work for _our_ == relation *)
apply eq_refl.
Qed.
(** **** Exercise: 2 stars, standard (eq_cons)
Construct the proof object for the following theorem. Use pattern
matching on the equality hypotheses. *)
Definition eq_cons : forall (X : Type) (h1 h2 : X) (t1 t2 : list X),
    h1 == h2 -> t1 == t2 -> h1 :: t1 == h2 :: t2 :=
  (* Pattern-match on each equality as soon as possible: the first
     collapses [h1] and [h2] to a single [h], the second collapses
     [t1] and [t2] to a single [t], leaving the reflexive goal. *)
  fun X h1 h2 t1 t2 Hh Ht =>
    match Hh with
    | eq_refl h =>
        match Ht with
        | eq_refl t => eq_refl (h :: t)
        end
    end.
(** [] *)
(** **** Exercise: 2 stars, standard (equality__leibniz_equality)
The inductive definition of equality implies _Leibniz equality_:
what we mean when we say "[x] and [y] are equal" is that every
property on [P] that is true of [x] is also true of [y]. Prove
that. *)
Lemma equality__leibniz_equality : forall (X : Type) (x y: X),
  x == y -> forall (P : X -> Prop), P x -> P y.
Proof.
  intros X x y Heq.
  (* Collapse [x] and [y] to one variable _before_ introducing [P],
     so the remaining goal is just [P z -> P z]. *)
  destruct Heq as [z].
  intros P HPz. apply HPz.
Qed.
(** [] *)
(** **** Exercise: 2 stars, standard (equality__leibniz_equality_term)
Construct the proof object for the previous exercise. All it
requires is anonymous functions and pattern-matching; the large
proof term constructed by tactics in the previous exercise is
needlessly complicated. Hint: pattern-match as soon as possible. *)
Definition equality__leibniz_equality_term : forall (X : Type) (x y: X),
    x == y -> forall P : (X -> Prop), P x -> P y :=
  (* Match on the equality first: in the [eq_refl z] branch the
     obligation becomes [forall P, P z -> P z], i.e. the identity. *)
  fun X x y Heq =>
    match Heq with
    | eq_refl z => fun (P : X -> Prop) (HPz : P z) => HPz
    end.
(** [] *)
(** **** Exercise: 3 stars, standard, optional (leibniz_equality__equality)
Show that, in fact, the inductive definition of equality is
_equivalent_ to Leibniz equality. Hint: the proof is quite short;
about all you need to do is to invent a clever property [P] to
instantiate the antecedent. *)
Lemma leibniz_equality__equality : forall (X : Type) (x y: X),
  (forall P:X->Prop, P x -> P y) -> x == y.
Proof.
  intros X x y H.
  (* The clever property: "being [==] to [x]".  Its instance at [x]
     is reflexivity, and its instance at [y] is exactly the goal. *)
  apply (H (fun z => x == z)).
  apply eq_refl.
Qed.
(** [] *)
End EqualityPlayground.
(* ================================================================= *)
(** ** Inversion, Again *)
(** We've seen [inversion] used with both equality hypotheses and
hypotheses about inductively defined propositions. Now that we've
seen that these are actually the same thing, we're in a position
to take a closer look at how [inversion] behaves.
In general, the [inversion] tactic...
- takes a hypothesis [H] whose type [P] is inductively defined,
and
- for each constructor [C] in [P]'s definition,
- generates a new subgoal in which we assume [H] was
built with [C],
- adds the arguments (premises) of [C] to the context of
the subgoal as extra hypotheses,
- matches the conclusion (result type) of [C] against the
current goal and calculates a set of equalities that must
hold in order for [C] to be applicable,
- adds these equalities to the context (and, for convenience,
rewrites them in the goal), and
- if the equalities are not satisfiable (e.g., they involve
things like [S n = O]), immediately solves the subgoal. *)
(** _Example_: If we invert a hypothesis built with [or], there are
two constructors, so two subgoals get generated. The
conclusion (result type) of the constructor ([P \/ Q]) doesn't
place any restrictions on the form of [P] or [Q], so we don't get
any extra equalities in the context of the subgoal. *)
(** _Example_: If we invert a hypothesis built with [and], there is
only one constructor, so only one subgoal gets generated. Again,
the conclusion (result type) of the constructor ([P /\ Q]) doesn't
place any restrictions on the form of [P] or [Q], so we don't get
any extra equalities in the context of the subgoal. The
constructor does have two arguments, though, and these can be seen
in the context in the subgoal. *)
(** _Example_: If we invert a hypothesis built with [eq], there is
again only one constructor, so only one subgoal gets generated.
Now, though, the form of the [eq_refl] constructor does give us
some extra information: it tells us that the two arguments to [eq]
must be the same! The [inversion] tactic adds this fact to the
context. *)
(* ################################################################# *)
(** * Rocq's Trusted Computing Base *)
(** One question that arises with any automated proof assistant
is "why should we trust it?" -- i.e., what if there is a bug in
the implementation that renders all its reasoning suspect?
While it is impossible to allay such concerns completely, the fact
that Rocq is based on the Curry-Howard correspondence gives it a
strong foundation. Because propositions are just types and proofs
are just terms, checking that an alleged proof of a proposition is
valid just amounts to _type-checking_ the term. Type checkers are
relatively small and straightforward programs, so the "trusted
computing base" for Rocq -- the part of the code that we have to
believe is operating correctly -- is small too.
What must a typechecker do? Its primary job is to make sure that
in each function application the expected and actual argument
types match, that the arms of a [match] expression are constructor
patterns belonging to the inductive type being matched over and
all arms of the [match] return the same type, and so on. *)
(** There are a few additional wrinkles:
First, since Rocq types can themselves be expressions, the checker
must normalize these (by using the computation rules) before
comparing them.
Second, the checker must make sure that [match] expressions are
_exhaustive_. That is, there must be an arm for every possible
constructor. To see why, consider the following alleged proof
object: *)
Fail Definition or_bogus : forall P Q, P \/ Q -> P :=
fun (P Q : Prop) (A : P \/ Q) =>
match A with
| or_introl H => H
end.
(** All the types here match correctly, but the [match] only
considers one of the possible constructors for [or]. Rocq's
exhaustiveness check will reject this definition.
Third, the checker must make sure that each recursive function
terminates. It does this using a syntactic check to make sure
that each recursive call is on a subexpression of the original
argument. To see why this is essential, consider this alleged
proof: *)
Fail Fixpoint infinite_loop {X : Type} (n : nat) {struct n} : X :=
infinite_loop n.
Fail Definition falso : False := infinite_loop 0.
(** Recursive function [infinite_loop] purports to return a
value of any type [X] that you would like. (The [struct]
annotation on the function tells Rocq that it recurses on argument
[n], not [X].) Were Rocq to allow [infinite_loop], then [falso]
would be definable, thus giving evidence for [False]. So Rocq rejects
[infinite_loop]. *)
(** Note that the soundness of Rocq depends only on the
correctness of this typechecking engine, not on the tactic
machinery. If there is a bug in a tactic implementation (which
does happen occasionally), that tactic might construct an invalid
proof term. But when you type [Qed], Rocq checks the term for
validity from scratch. Only theorems whose proofs pass the
type-checker can be used in further proof developments. *)
(* ################################################################# *)
(** * More Exercises *)
(** Most of the following theorems were already proved with tactics in
[Logic]. Now construct the proof objects for them
directly. *)
(** **** Exercise: 2 stars, standard (and_assoc) *)
Definition and_assoc : forall P Q R : Prop,
    P /\ (Q /\ R) -> (P /\ Q) /\ R :=
  (* Re-associate the nested pair via a nested constructor pattern. *)
  fun P Q R H =>
    match H with
    | conj HP (conj HQ HR) => conj (conj HP HQ) HR
    end.
(** [] *)
(** **** Exercise: 3 stars, standard (or_distributes_over_and) *)
Definition or_distributes_over_and : forall P Q R : Prop,
    P \/ (Q /\ R) <-> (P \/ Q) /\ (P \/ R) :=
  fun P Q R =>
    conj
      (* Forward: a proof of [P] serves both conjuncts; a pair
         [Q /\ R] is split across the two right injections. *)
      (fun H =>
         match H with
         | or_introl HP => conj (or_introl HP) (or_introl HP)
         | or_intror (conj HQ HR) => conj (or_intror HQ) (or_intror HR)
         end)
      (* Backward: if either side of the pair carries a proof of [P]
         we are done; otherwise both carry [Q] and [R] respectively.
         Patterns are tried in order, so the wildcards are safe. *)
      (fun H =>
         match H with
         | conj (or_introl HP) _ => or_introl HP
         | conj _ (or_introl HP) => or_introl HP
         | conj (or_intror HQ) (or_intror HR) => or_intror (conj HQ HR)
         end).
(** [] *)
(** **** Exercise: 3 stars, standard (negations) *)
Definition double_neg : forall P : Prop,
    P -> ~~P :=
  (* [~~P] unfolds to [(P -> False) -> False]: feed the proof of [P]
     to the alleged refutation. *)
  fun P (HP : P) (HnP : ~ P) => HnP HP.
Definition contradiction_implies_anything : forall P Q : Prop,
    (P /\ ~P) -> Q :=
  (* [HnP HP : False]; the empty match on it then yields any [Q]. *)
  fun P Q H =>
    match H with
    | conj HP HnP => match HnP HP with end
    end.
Definition de_morgan_not_or : forall P Q : Prop,
    ~ (P \/ Q) -> ~P /\ ~Q :=
  (* Refute each disjunct separately by injecting it back into
     [P \/ Q] and applying the given refutation. *)
  fun P Q HnPQ =>
    conj (fun HP => HnPQ (or_introl HP))
         (fun HQ => HnPQ (or_intror HQ)).
(** [] *)
(** **** Exercise: 2 stars, standard (currying) *)
Definition curry : forall P Q R : Prop,
    ((P /\ Q) -> R) -> (P -> (Q -> R)) :=
  (* Package the two separate hypotheses into the pair [f] expects. *)
  fun P Q R f HP HQ => f (conj HP HQ).
Definition uncurry : forall P Q R : Prop,
    (P -> (Q -> R)) -> ((P /\ Q) -> R) :=
  (* Split the pair and apply [f] to its components in order. *)
  fun P Q R f H =>
    match H with
    | conj HP HQ => f HP HQ
    end.
(** [] *)
(* ################################################################# *)
(** * Proof Irrelevance (Advanced) *)
(** In [Logic] we saw that functional extensionality could be
added to Rocq. A similar notion about propositions can also
be defined (and added as an axiom, if desired): *)
Definition propositional_extensionality : Prop :=
forall (P Q : Prop), (P <-> Q) -> P = Q.
(** Propositional extensionality asserts that if two propositions are
equivalent -- i.e., each implies the other -- then they are in
fact equal. The _proof objects_ for the propositions might be
syntactically different terms. But propositional extensionality
overlooks that, just as functional extensionality overlooks the
syntactic differences between functions. *)
(** **** Exercise: 1 star, advanced (pe_implies_or_eq)
Prove the following consequence of propositional extensionality. *)
Theorem pe_implies_or_eq :
  propositional_extensionality ->
  forall (P Q : Prop), (P \/ Q) = (Q \/ P).
Proof.
  intros Hpe P Q.
  (* By extensionality it suffices to show the two disjunctions are
     logically equivalent, which is just commutativity of [\/]. *)
  apply Hpe. split.
  - intros [HP | HQ].
    + right. apply HP.
    + left. apply HQ.
  - intros [HQ | HP].
    + right. apply HQ.
    + left. apply HP.
Qed.
(** [] *)
(** **** Exercise: 1 star, advanced (pe_implies_true_eq)
Prove that if a proposition [P] is provable, then it is equal to
[True] -- as a consequence of propositional extensionality. *)
Lemma pe_implies_true_eq :
  propositional_extensionality ->
  forall (P : Prop), P -> True = P.
Proof.
  intros Hpe P HP.
  (* [True <-> P]: left-to-right discards [I] and uses the given
     proof [HP]; right-to-left is trivially [I]. *)
  apply Hpe. split.
  - intros _. apply HP.
  - intros _. apply I.
Qed.
(** [] *)
(** **** Exercise: 3 stars, advanced (pe_implies_pi)
(Acknowledgment: this theorem and its proof technique are inspired
by Gert Smolka's manuscript Modeling and Proving in Computational
Type Theory Using the Coq Proof Assistant, 2021.) *)
(** Another, perhaps surprising, consequence of propositional
extensionality is that it implies _proof irrelevance_, which
asserts that all proof objects for a proposition are equal. *)
Definition proof_irrelevance : Prop :=
forall (P : Prop) (pf1 pf2 : P), pf1 = pf2.
(** Prove that fact. Use [pe_implies_true_eq] to establish that the
proposition [P] in [proof_irrelevance] is equal to [True]. Leverage
that equality to establish that both proof objects [pf1] and
[pf2] must be just [I]. *)
Theorem pe_implies_pi :
  propositional_extensionality -> proof_irrelevance.
Proof.
  intros Hpe P pf1 pf2.
  (* Use [pe_implies_true_eq] to turn [P] into [True]; once both
     proof objects live in [True], case analysis forces each to be
     the constructor [I]. *)
  assert (HTP : True = P) by exact (pe_implies_true_eq Hpe P pf1).
  subst P.
  destruct pf1, pf2. reflexivity.
Qed.
(** [] *)
(* 2026-01-07 13:18 *)

375
ProofObjectsTest.v Normal file
View File

@@ -0,0 +1,375 @@
Set Warnings "-notation-overridden,-parsing".
From Stdlib Require Export String.
From LF Require Import ProofObjects.
Parameter MISSING: Type.
Module Check.
Ltac check_type A B :=
match type of A with
| context[MISSING] => idtac "Missing:" A
| ?T => first [unify T B; idtac "Type: ok" | idtac "Type: wrong - should be (" B ")"]
end.
Ltac print_manual_grade A :=
match eval compute in A with
| Some (_ ?S ?C) =>
idtac "Score:" S;
match eval compute in C with
| ""%string => idtac "Comment: None"
| _ => idtac "Comment:" C
end
| None =>
idtac "Score: Ungraded";
idtac "Comment: None"
end.
End Check.
From LF Require Import ProofObjects.
Import Check.
Goal True.
idtac "------------------- eight_is_even --------------------".
idtac " ".
idtac "#> ev_8".
idtac "Possible points: 1".
check_type @ev_8 ((ev 8)).
idtac "Assumptions:".
Abort.
Print Assumptions ev_8.
Goal True.
idtac " ".
idtac "#> ev_8'".
idtac "Possible points: 1".
check_type @ev_8' ((ev 8)).
idtac "Assumptions:".
Abort.
Print Assumptions ev_8'.
Goal True.
idtac " ".
idtac "------------------- conj_fact --------------------".
idtac " ".
idtac "#> Props.conj_fact".
idtac "Possible points: 2".
check_type @Props.conj_fact (
(forall (P Q R : Prop) (_ : and P Q) (_ : and Q R), and P R)).
idtac "Assumptions:".
Abort.
Print Assumptions Props.conj_fact.
Goal True.
idtac " ".
idtac "------------------- or_commut' --------------------".
idtac " ".
idtac "#> Props.or_commut'".
idtac "Possible points: 2".
check_type @Props.or_commut' ((forall (P Q : Prop) (_ : or P Q), or Q P)).
idtac "Assumptions:".
Abort.
Print Assumptions Props.or_commut'.
Goal True.
idtac " ".
idtac "------------------- ex_ev_Sn --------------------".
idtac " ".
idtac "#> Props.ex_ev_Sn".
idtac "Possible points: 2".
check_type @Props.ex_ev_Sn ((@ex nat (fun n : nat => ev (S n)))).
idtac "Assumptions:".
Abort.
Print Assumptions Props.ex_ev_Sn.
Goal True.
idtac " ".
idtac "------------------- ex_match --------------------".
idtac " ".
idtac "#> Props.ex_match".
idtac "Possible points: 2".
check_type @Props.ex_match (
(forall (A : Type) (P Q : forall _ : A, Prop)
(_ : forall (x : A) (_ : P x), Q x) (_ : @ex A (fun x : A => P x)),
@ex A (fun x : A => Q x))).
idtac "Assumptions:".
Abort.
Print Assumptions Props.ex_match.
Goal True.
idtac " ".
idtac "------------------- p_implies_true --------------------".
idtac " ".
idtac "#> Props.p_implies_true".
idtac "Possible points: 1".
check_type @Props.p_implies_true ((forall (P : Type) (_ : P), Props.True)).
idtac "Assumptions:".
Abort.
Print Assumptions Props.p_implies_true.
Goal True.
idtac " ".
idtac "------------------- ex_falso_quodlibet' --------------------".
idtac " ".
idtac "#> Props.ex_falso_quodlibet'".
idtac "Possible points: 1".
check_type @Props.ex_falso_quodlibet' ((forall (P : Type) (_ : Props.False), P)).
idtac "Assumptions:".
Abort.
Print Assumptions Props.ex_falso_quodlibet'.
Goal True.
idtac " ".
idtac "------------------- eq_cons --------------------".
idtac " ".
idtac "#> EqualityPlayground.eq_cons".
idtac "Possible points: 2".
check_type @EqualityPlayground.eq_cons (
(forall (X : Type) (h1 h2 : X) (t1 t2 : list X)
(_ : @EqualityPlayground.eq X h1 h2)
(_ : @EqualityPlayground.eq (list X) t1 t2),
@EqualityPlayground.eq (list X) (@cons X h1 t1) (@cons X h2 t2))).
idtac "Assumptions:".
Abort.
Print Assumptions EqualityPlayground.eq_cons.
Goal True.
idtac " ".
idtac "------------------- equality__leibniz_equality --------------------".
idtac " ".
idtac "#> EqualityPlayground.equality__leibniz_equality".
idtac "Possible points: 2".
check_type @EqualityPlayground.equality__leibniz_equality (
(forall (X : Type) (x y : X) (_ : @EqualityPlayground.eq X x y)
(P : forall _ : X, Prop) (_ : P x),
P y)).
idtac "Assumptions:".
Abort.
Print Assumptions EqualityPlayground.equality__leibniz_equality.
Goal True.
idtac " ".
idtac "------------------- equality__leibniz_equality_term --------------------".
idtac " ".
idtac "#> EqualityPlayground.equality__leibniz_equality_term".
idtac "Possible points: 2".
check_type @EqualityPlayground.equality__leibniz_equality_term (
(forall (X : Type) (x y : X) (_ : @EqualityPlayground.eq X x y)
(P : forall _ : X, Prop) (_ : P x),
P y)).
idtac "Assumptions:".
Abort.
Print Assumptions EqualityPlayground.equality__leibniz_equality_term.
Goal True.
idtac " ".
idtac "------------------- and_assoc --------------------".
idtac " ".
idtac "#> and_assoc".
idtac "Possible points: 2".
check_type @and_assoc ((forall (P Q R : Prop) (_ : and P (and Q R)), and (and P Q) R)).
idtac "Assumptions:".
Abort.
Print Assumptions and_assoc.
Goal True.
idtac " ".
idtac "------------------- or_distributes_over_and --------------------".
idtac " ".
idtac "#> or_distributes_over_and".
idtac "Possible points: 3".
check_type @or_distributes_over_and (
(forall P Q R : Prop, iff (or P (and Q R)) (and (or P Q) (or P R)))).
idtac "Assumptions:".
Abort.
Print Assumptions or_distributes_over_and.
Goal True.
idtac " ".
idtac "------------------- negations --------------------".
idtac " ".
idtac "#> double_neg".
idtac "Possible points: 1".
check_type @double_neg ((forall (P : Prop) (_ : P), not (not P))).
idtac "Assumptions:".
Abort.
Print Assumptions double_neg.
Goal True.
idtac " ".
idtac "#> contradiction_implies_anything".
idtac "Possible points: 1".
check_type @contradiction_implies_anything ((forall (P Q : Prop) (_ : and P (not P)), Q)).
idtac "Assumptions:".
Abort.
Print Assumptions contradiction_implies_anything.
Goal True.
idtac " ".
idtac "#> de_morgan_not_or".
idtac "Possible points: 1".
check_type @de_morgan_not_or (
(forall (P Q : Prop) (_ : not (or P Q)), and (not P) (not Q))).
idtac "Assumptions:".
Abort.
Print Assumptions de_morgan_not_or.
Goal True.
idtac " ".
idtac "------------------- currying --------------------".
idtac " ".
idtac "#> curry".
idtac "Possible points: 1".
check_type @curry (
(forall (P Q R : Prop) (_ : forall _ : and P Q, R) (_ : P) (_ : Q), R)).
idtac "Assumptions:".
Abort.
Print Assumptions curry.
Goal True.
idtac " ".
idtac "#> uncurry".
idtac "Possible points: 1".
check_type @uncurry (
(forall (P Q R : Prop) (_ : forall (_ : P) (_ : Q), R) (_ : and P Q), R)).
idtac "Assumptions:".
Abort.
Print Assumptions uncurry.
Goal True.
idtac " ".
idtac "------------------- pe_implies_or_eq --------------------".
idtac " ".
idtac "#> pe_implies_or_eq".
idtac "Advanced".
idtac "Possible points: 1".
check_type @pe_implies_or_eq (
(forall (_ : propositional_extensionality) (P Q : Prop),
@eq Prop (or P Q) (or Q P))).
idtac "Assumptions:".
Abort.
Print Assumptions pe_implies_or_eq.
Goal True.
idtac " ".
idtac "------------------- pe_implies_true_eq --------------------".
idtac " ".
idtac "#> pe_implies_true_eq".
idtac "Advanced".
idtac "Possible points: 1".
check_type @pe_implies_true_eq (
(forall (_ : propositional_extensionality) (P : Prop) (_ : P),
@eq Prop True P)).
idtac "Assumptions:".
Abort.
Print Assumptions pe_implies_true_eq.
Goal True.
idtac " ".
idtac "------------------- pe_implies_pi --------------------".
idtac " ".
idtac "#> pe_implies_pi".
idtac "Advanced".
idtac "Possible points: 3".
check_type @pe_implies_pi ((forall _ : propositional_extensionality, proof_irrelevance)).
idtac "Assumptions:".
Abort.
Print Assumptions pe_implies_pi.
Goal True.
idtac " ".
idtac " ".
idtac "Max points - standard: 28".
idtac "Max points - advanced: 33".
idtac "".
idtac "Allowed Axioms:".
idtac "functional_extensionality".
idtac "FunctionalExtensionality.functional_extensionality_dep".
idtac "plus_le".
idtac "le_trans".
idtac "le_plus_l".
idtac "add_le_cases".
idtac "Sn_le_Sm__n_le_m".
idtac "O_le_n".
idtac "".
idtac "".
idtac "********** Summary **********".
idtac "".
idtac "Below is a summary of the automatically graded exercises that are incomplete.".
idtac "".
idtac "The output for each exercise can be any of the following:".
idtac " - 'Closed under the global context', if it is complete".
idtac " - 'MANUAL', if it is manually graded".
idtac " - A list of pending axioms, containing unproven assumptions. In this case".
idtac " the exercise is considered complete, if the axioms are all allowed.".
idtac "".
idtac "********** Standard **********".
idtac "---------- ev_8 ---------".
Print Assumptions ev_8.
idtac "---------- ev_8' ---------".
Print Assumptions ev_8'.
idtac "---------- Props.conj_fact ---------".
Print Assumptions Props.conj_fact.
idtac "---------- Props.or_commut' ---------".
Print Assumptions Props.or_commut'.
idtac "---------- Props.ex_ev_Sn ---------".
Print Assumptions Props.ex_ev_Sn.
idtac "---------- Props.ex_match ---------".
Print Assumptions Props.ex_match.
idtac "---------- Props.p_implies_true ---------".
Print Assumptions Props.p_implies_true.
idtac "---------- Props.ex_falso_quodlibet' ---------".
Print Assumptions Props.ex_falso_quodlibet'.
idtac "---------- EqualityPlayground.eq_cons ---------".
Print Assumptions EqualityPlayground.eq_cons.
idtac "---------- EqualityPlayground.equality__leibniz_equality ---------".
Print Assumptions EqualityPlayground.equality__leibniz_equality.
idtac "---------- EqualityPlayground.equality__leibniz_equality_term ---------".
Print Assumptions EqualityPlayground.equality__leibniz_equality_term.
idtac "---------- and_assoc ---------".
Print Assumptions and_assoc.
idtac "---------- or_distributes_over_and ---------".
Print Assumptions or_distributes_over_and.
idtac "---------- double_neg ---------".
Print Assumptions double_neg.
idtac "---------- contradiction_implies_anything ---------".
Print Assumptions contradiction_implies_anything.
idtac "---------- de_morgan_not_or ---------".
Print Assumptions de_morgan_not_or.
idtac "---------- curry ---------".
Print Assumptions curry.
idtac "---------- uncurry ---------".
Print Assumptions uncurry.
idtac "".
idtac "********** Advanced **********".
idtac "---------- pe_implies_or_eq ---------".
Print Assumptions pe_implies_or_eq.
idtac "---------- pe_implies_true_eq ---------".
Print Assumptions pe_implies_true_eq.
idtac "---------- pe_implies_pi ---------".
Print Assumptions pe_implies_pi.
Abort.
(* 2026-01-07 13:18 *)
(* 2026-01-07 13:18 *)

38
README.md Normal file
View File

@@ -0,0 +1,38 @@
# Software Foundations 1
This is a template repository for the code found in [Software Foundations 1](https://softwarefoundations.cis.upenn.edu/lf-current/index.html).
It contains an additional nix-shell and envrc file to easily get started.
All the HTML has been removed; most of the content is inlined in the files anyway, and the rendered HTML is available online if you need it.
Fork me, and go ham.
![Deps](./deps.svg)
---
## Original README
```
#########################################################################
SOFTWARE FOUNDATIONS
#########################################################################
This directory contains both Rocq scripts (.v files) and generated HTML
files for Volume 1 of the Software Foundations electronic textbook
series.
- Preface.v or Preface.html
The place to start reading, including details on how to install
required software
- index.html
The book's cover page and navigation starting point
- deps.html
Overview of the ordering of chapters
- LICENSE
Explanation of how these files may be redistributed
```

412
Rel.v Normal file
View File

@@ -0,0 +1,412 @@
(** * Rel: Properties of Relations *)
(** This short (and optional) chapter develops some basic definitions
and a few theorems about binary relations in Rocq. The key
definitions are repeated where they are actually used (in the
[Smallstep] chapter of _Programming Language Foundations_),
so readers who are already comfortable with these ideas can safely
skim or skip this chapter. However, relations are also a good
source of exercises for developing facility with Rocq's basic
reasoning facilities, so it may be useful to look at this material
just after the [IndProp] chapter. *)
Set Warnings "-notation-overridden".
From LF Require Export IndProp.
(* ################################################################# *)
(** * Relations *)
(** A binary _relation_ on a set [X] is a family of propositions
parameterized by two elements of [X] -- i.e., a proposition about
pairs of elements of [X]. *)
(* A binary relation on [X]: a curried two-argument proposition over [X]. *)
Definition relation (X: Type) := X -> X -> Prop.
(** Somewhat confusingly, the Rocq standard library hijacks the generic
term "relation" for this specific instance of the idea. To
maintain consistency with the library, we will do the same. So,
henceforth, the Rocq identifier [relation] will always refer to a
binary relation _on_ some set (between the set and itself),
whereas in ordinary mathematical English the word "relation" can
refer either to this specific concept or the more general concept
of a relation between any number of possibly different sets. The
context of the discussion should always make clear which is
meant. *)
(** An example relation on [nat] is [le], the less-than-or-equal-to
relation, which we usually write [n1 <= n2]. *)
Print le.
(* ====> Inductive le (n : nat) : nat -> Prop :=
le_n : n <= n
| le_S : forall m : nat, n <= m -> n <= S m *)
Check le : nat -> nat -> Prop.
Check le : relation nat.
(** (Why did we write it this way instead of starting with [Inductive
le : relation nat...]? Because we wanted to put the first [nat]
to the left of the [:], which makes Rocq generate a somewhat nicer
induction principle for reasoning about [<=].) *)
(* ################################################################# *)
(** * Basic Properties *)
(** As anyone knows who has taken an undergraduate discrete math
course, there is a lot to be said about relations in general,
including ways of classifying relations (as reflexive, transitive,
etc.), theorems that can be proved generically about certain sorts
of relations, constructions that build one relation from another,
etc. For example... *)
(* ----------------------------------------------------------------- *)
(** *** Partial Functions *)
(** A relation [R] on a set [X] is a _partial function_ if, for every
[x], there is at most one [y] such that [R x y] -- i.e., [R x y1]
and [R x y2] together imply [y1 = y2]. *)
(* [R] relates each [x] to at most one [y]: any two images of [x] coincide. *)
Definition partial_function {X: Type} (R: relation X) :=
forall x y1 y2 : X, R x y1 -> R x y2 -> y1 = y2.
(** For example, the [next_nat] relation is a partial function. *)
(* [next_nat n m] holds exactly when [m] is the successor of [n]. *)
Inductive next_nat : nat -> nat -> Prop :=
| nn n : next_nat n (S n).
Check next_nat : relation nat.
Theorem next_nat_partial_function :
partial_function next_nat.
Proof.
unfold partial_function.
intros x y1 y2 H1 H2.
(* Each hypothesis can only come from [nn], forcing y1 = S x and y2 = S x. *)
inversion H1. inversion H2.
reflexivity. Qed.
(** However, the [<=] relation on numbers is not a partial
function. (Assume, for a contradiction, that [<=] is a partial
function. But then, since [0 <= 0] and [0 <= 1], it follows that
[0 = 1]. This is nonsense, so our assumption was
contradictory.) *)
Theorem le_not_a_partial_function :
~ (partial_function le).
Proof.
unfold not. unfold partial_function. intros Hc.
(* A partial function would collapse the two images 0 and 1 of 0. *)
assert (0 = 1) as Nonsense. {
apply Hc with (x := 0).
- apply le_n.
- apply le_S. apply le_n. }
discriminate Nonsense. Qed.
(** **** Exercise: 2 stars, standard, optional (total_relation_not_partial_function)
Show that the [total_relation] defined in (an exercise in)
[IndProp] is not a partial function. *)
(** Copy the definition of [total_relation] from your [IndProp]
here so that this file can be graded on its own. *)
(* [total_relation] relates every pair of naturals: its single
   constructor applies to arbitrary [n] and [m]. *)
Inductive total_relation : nat -> nat -> Prop :=
  | total_rel (n m : nat) : total_relation n m
.
(* Not a partial function: [0] is related to both [0] and [1], so a
   partial-function assumption would yield the absurd equation [0 = 1]. *)
Theorem total_relation_not_partial_function :
  ~ (partial_function total_relation).
Proof.
  unfold not, partial_function. intros Hc.
  assert (0 = 1) as Nonsense.
  { apply Hc with (x := 0).
    - apply total_rel.
    - apply total_rel. }
  discriminate Nonsense.
Qed.
(** [] *)
(** **** Exercise: 2 stars, standard, optional (empty_relation_partial_function)
Show that the [empty_relation] defined in (an exercise in)
[IndProp] is a partial function. *)
(** Copy the definition of [empty_relation] from your [IndProp]
here so that this file can be graded on its own. *)
(* [empty_relation] relates no pair of naturals: it has no constructors,
   so no proof of [empty_relation n m] can ever be built. *)
Inductive empty_relation : nat -> nat -> Prop :=
.
(* Vacuously a partial function: inverting the (impossible) hypothesis
   [empty_relation x y1] closes the goal. *)
Theorem empty_relation_partial_function :
  partial_function empty_relation.
Proof.
  unfold partial_function. intros x y1 y2 H1 H2.
  inversion H1.
Qed.
(** [] *)
(* ----------------------------------------------------------------- *)
(** *** Reflexive Relations *)
(** A _reflexive_ relation on a set [X] is one for which every element
of [X] is related to itself. *)
(* Every element of [X] is related to itself. *)
Definition reflexive {X: Type} (R: relation X) :=
forall a : X, R a a.
(* Immediate from the [le_n] constructor. *)
Theorem le_reflexive :
reflexive le.
Proof.
unfold reflexive. intros n. apply le_n. Qed.
(* ----------------------------------------------------------------- *)
(** *** Transitive Relations *)
(** A relation [R] is _transitive_ if [R a c] holds whenever [R a b]
and [R b c] do. *)
(* Relatedness composes: a chain [a R b R c] yields [a R c]. *)
Definition transitive {X: Type} (R: relation X) :=
forall a b c : X, (R a b) -> (R b c) -> (R a c).
(* Proved by induction on the derivation of [m <= o]: each [le_S]
   layer on top of [m <= o] is replayed on top of [n <= m]. *)
Theorem le_trans :
transitive le.
Proof.
intros n m o Hnm Hmo.
induction Hmo.
- (* le_n *) apply Hnm.
- (* le_S *) apply le_S. apply IHHmo. Qed.
(* Reduces to [le_trans]: [n < m] is [S n <= m] by definition of [lt],
   and [S n <= m] weakens to [S n <= S m], which chains with [S m <= o]. *)
Theorem lt_trans:
transitive lt.
Proof.
unfold lt. unfold transitive.
intros n m o Hnm Hmo.
apply le_S in Hnm.
apply le_trans with (a := (S n)) (b := (S m)) (c := o).
apply Hnm.
apply Hmo. Qed.
(** **** Exercise: 2 stars, standard, optional (le_trans_hard_way)
We can also prove [lt_trans] more laboriously by induction,
without using [le_trans]. Do this. *)
Theorem lt_trans' :
  transitive lt.
Proof.
  (* Prove this by induction on evidence that [m] is less than [o]. *)
  unfold lt. unfold transitive.
  intros n m o Hnm Hmo.
  induction Hmo as [| m' Hm'o IH].
  - (* le_n: [o = S m], so weaken [Hnm : S n <= m] by one step. *)
    apply le_S. apply Hnm.
  - (* le_S: weaken the induction hypothesis [S n <= m'] by one step. *)
    apply le_S. apply IH.
Qed.
(** [] *)
(** **** Exercise: 2 stars, standard, optional (lt_trans'')
Prove the same thing again by induction on [o]. *)
Theorem lt_trans'' :
  transitive lt.
Proof.
  (* This time, by induction on [o] rather than on the derivation. *)
  unfold lt. unfold transitive.
  intros n m o Hnm Hmo.
  induction o as [| o' IHo'].
  - (* o = 0: [S m <= 0] is impossible. *)
    inversion Hmo.
  - (* o = S o': invert [S m <= S o'] and rebuild the bound with [le_S]. *)
    inversion Hmo; subst.
    + apply le_S. assumption.
    + apply le_S. apply IHo'. assumption.
Qed.
(** [] *)
(** The transitivity of [le], in turn, can be used to prove some facts
that will be useful later (e.g., for the proof of antisymmetry
below)... *)
(* Weakening on the left: [n <= S n] chains with [S n <= m] via [le_trans]. *)
Theorem le_Sn_le : forall n m, S n <= m -> n <= m.
Proof.
intros n m H. apply le_trans with (S n).
- apply le_S. apply le_n.
- apply H.
Qed.
(** **** Exercise: 1 star, standard, optional (le_S_n) *)
Theorem le_S_n : forall n m,
  (S n <= S m) -> (n <= m).
Proof.
  intros n m H. inversion H; subst.
  - (* le_n: the indices coincide, so [n = m]. *)
    apply le_n.
  - (* le_S: we obtain [S n <= m]; weaken it with [le_Sn_le]. *)
    apply le_Sn_le. assumption.
Qed.
(** [] *)
(** **** Exercise: 2 stars, standard, optional (le_Sn_n_inf)
Provide an informal proof of the following theorem:
Theorem: For every [n], [~ (S n <= n)]
A formal proof of this is an optional exercise below, but try
writing an informal proof without doing the formal proof first.
Proof: *)
(* FILL IN HERE
[] *)
(** **** Exercise: 1 star, standard, optional (le_Sn_n) *)
Theorem le_Sn_n : forall n,
  ~ (S n <= n).
Proof.
  intros n. induction n as [| n' IHn'].
  - (* [S 0 <= 0] is impossible. *)
    intros H. inversion H.
  - (* Strip one successor with [le_S_n] and reuse the IH. *)
    intros H. apply IHn'. apply le_S_n. apply H.
Qed.
(** [] *)
(** Reflexivity and transitivity are the main concepts we'll need for
later chapters, but, for a bit of additional practice working with
relations in Rocq, let's look at a few other common ones... *)
(* ----------------------------------------------------------------- *)
(** *** Symmetric and Antisymmetric Relations *)
(** A relation [R] is _symmetric_ if [R a b] implies [R b a]. *)
(* Relatedness is insensitive to argument order. *)
Definition symmetric {X: Type} (R: relation X) :=
forall a b : X, (R a b) -> (R b a).
(** **** Exercise: 2 stars, standard, optional (le_not_symmetric) *)
Theorem le_not_symmetric :
  ~ (symmetric le).
Proof.
  unfold not, symmetric. intros H.
  (* Symmetry would turn [0 <= 1] into the impossible [1 <= 0]. *)
  assert (Nonsense : 1 <= 0).
  { apply H. apply le_S. apply le_n. }
  inversion Nonsense.
Qed.
(** [] *)
(** A relation [R] is _antisymmetric_ if [R a b] and [R b a] together
imply [a = b] -- that is, if the only "cycles" in [R] are trivial
ones. *)
(* Mutual relatedness is only permitted between equal elements. *)
Definition antisymmetric {X: Type} (R: relation X) :=
forall a b : X, (R a b) -> (R b a) -> a = b.
(** **** Exercise: 2 stars, standard, optional (le_antisymmetric) *)
Theorem le_antisymmetric :
  antisymmetric le.
Proof.
  unfold antisymmetric.
  (* Induction on [a], keeping [b] general. *)
  intros a. induction a as [| a' IHa'].
  - (* a = 0: [b <= 0] forces [b = 0]. *)
    intros b Hab Hba. inversion Hba. reflexivity.
  - (* a = S a': [b] must also be a successor; recurse on predecessors. *)
    intros b Hab Hba. destruct b as [| b'].
    + inversion Hab.
    + f_equal. apply IHa'.
      * apply le_S_n. apply Hab.
      * apply le_S_n. apply Hba.
Qed.
(** [] *)
(** **** Exercise: 2 stars, standard, optional (le_step) *)
Theorem le_step : forall n m p,
  n < m ->
  m <= S p ->
  n <= p.
Proof.
  intros n m p Hnm Hmp.
  (* [n < m] unfolds to [S n <= m]; chain it with [m <= S p] by
     transitivity, then strip the outer successors with [le_S_n]. *)
  apply le_S_n.
  apply le_trans with m.
  - apply Hnm.
  - apply Hmp.
Qed.
(** [] *)
(* ----------------------------------------------------------------- *)
(** *** Equivalence Relations *)
(** A relation is an _equivalence_ if it's reflexive, symmetric, and
transitive. *)
(* Reflexive + symmetric + transitive. *)
Definition equivalence {X:Type} (R: relation X) :=
(reflexive R) /\ (symmetric R) /\ (transitive R).
(* ----------------------------------------------------------------- *)
(** *** Partial Orders and Preorders *)
(** A relation is a _partial order_ when it's reflexive,
_anti_-symmetric, and transitive. In the Rocq standard library
it's called just "order" for short. *)
(* Partial order: reflexive + antisymmetric + transitive. *)
Definition order {X:Type} (R: relation X) :=
(reflexive R) /\ (antisymmetric R) /\ (transitive R).
(** A preorder is almost like a partial order, but doesn't have to be
antisymmetric. *)
(* Preorder: a partial order minus the antisymmetry requirement. *)
Definition preorder {X:Type} (R: relation X) :=
(reflexive R) /\ (transitive R).
(* [<=] is a partial order: assembled from the three lemmas above.
   (Antisymmetry is [le_antisymmetric], an exercise earlier in the file.) *)
Theorem le_order :
order le.
Proof.
unfold order. split.
- (* refl *) apply le_reflexive.
- split.
+ (* antisym *) apply le_antisymmetric.
+ (* transitive. *) apply le_trans. Qed.
(* ################################################################# *)
(** * Reflexive, Transitive Closure *)
(** The _reflexive, transitive closure_ of a relation [R] is the
smallest relation that contains [R] and that is both reflexive and
transitive. Formally, it is defined like this in the Relations
module of the Rocq standard library: *)
(* Least relation containing [R] ([rt_step]) that is closed under
   reflexivity ([rt_refl]) and transitivity ([rt_trans]). *)
Inductive clos_refl_trans {A: Type} (R: relation A) : relation A :=
| rt_step x y (H : R x y) : clos_refl_trans R x y
| rt_refl x : clos_refl_trans R x x
| rt_trans x y z
(Hxy : clos_refl_trans R x y)
(Hyz : clos_refl_trans R y z) :
clos_refl_trans R x z.
(** For example, the reflexive and transitive closure of the
[next_nat] relation coincides with the [le] relation. *)
Theorem next_nat_closure_is_le : forall n m,
(n <= m) <-> ((clos_refl_trans next_nat) n m).
Proof.
intros n m. split.
- (* -> *)
(* Each [le_S] layer becomes one [rt_step] glued on with [rt_trans]. *)
intro H. induction H.
+ (* le_n *) apply rt_refl.
+ (* le_S *)
apply rt_trans with m. apply IHle. apply rt_step.
apply nn.
- (* <- *)
(* A single [next_nat] step is one [le_S]; closure cases map directly. *)
intro H. induction H.
+ (* rt_step *) inversion H. apply le_S. apply le_n.
+ (* rt_refl *) apply le_n.
+ (* rt_trans *)
apply le_trans with y.
apply IHclos_refl_trans1.
apply IHclos_refl_trans2. Qed.
(** The above definition of reflexive, transitive closure is natural:
it says, explicitly, that the reflexive and transitive closure of
[R] is the least relation that includes [R] and that is closed
under rules of reflexivity and transitivity. But it turns out
that this definition is not very convenient for doing proofs,
since the "nondeterminism" of the [rt_trans] rule can sometimes
lead to tricky inductions. Here is a more useful definition: *)
(* Chain-shaped ("1n") reformulation: a path from [x] is either empty
   ([rt1n_refl]) or one [R]-step followed by a chain ([rt1n_trans]).
   Note the recursive occurrence restarts the chain at [y]. *)
Inductive clos_refl_trans_1n {A : Type}
(R : relation A) (x : A)
: A -> Prop :=
| rt1n_refl : clos_refl_trans_1n R x x
| rt1n_trans (y z : A)
(Hxy : R x y) (Hrest : clos_refl_trans_1n R y z) :
clos_refl_trans_1n R x z.
(** Our new definition of reflexive, transitive closure "bundles"
the [rt_step] and [rt_trans] rules into the single rule step.
The left-hand premise of this step is a single use of [R],
leading to a much simpler induction principle.
Before we go on, we should check that the two definitions do
indeed define the same relation...
First, we prove two lemmas showing that [clos_refl_trans_1n] mimics
the behavior of the two "missing" [clos_refl_trans]
constructors. *)
(* A single [R]-step is a one-element chain: step then empty rest. *)
Lemma rsc_R : forall (X:Type) (R:relation X) (x y : X),
R x y -> clos_refl_trans_1n R x y.
Proof.
intros X R x y H.
apply rt1n_trans with y. apply H. apply rt1n_refl. Qed.
(** **** Exercise: 2 stars, standard, optional (rsc_trans) *)
Lemma rsc_trans :
  forall (X:Type) (R: relation X) (x y z : X),
    clos_refl_trans_1n R x y ->
    clos_refl_trans_1n R y z ->
    clos_refl_trans_1n R x z.
Proof.
  (* Induction on the first chain, splicing the second onto its tail. *)
  intros X R x y z G H.
  induction G.
  - (* rt1n_refl: the first chain is empty. *)
    assumption.
  - (* rt1n_trans: keep the head step, splice recursively. *)
    apply rt1n_trans with y.
    + assumption.
    + apply IHG. assumption.
Qed.
(** [] *)
(** Then we use these facts to prove that the two definitions of
reflexive, transitive closure do indeed define the same
relation. *)
(** **** Exercise: 3 stars, standard, optional (rtc_rsc_coincide) *)
Theorem rtc_rsc_coincide :
  forall (X:Type) (R: relation X) (x y : X),
    clos_refl_trans R x y <-> clos_refl_trans_1n R x y.
Proof.
  intros X R x y. split.
  - (* ->: translate each constructor via [rsc_R] and [rsc_trans]. *)
    intros H. induction H.
    + apply rsc_R. apply H.
    + apply rt1n_refl.
    + apply rsc_trans with y.
      * apply IHclos_refl_trans1.
      * apply IHclos_refl_trans2.
  - (* <-: each head step is an [rt_step] chained on with [rt_trans]. *)
    intros H. induction H.
    + apply rt_refl.
    + apply rt_trans with y.
      * apply rt_step. apply Hxy.
      * apply IHclos_refl_trans_1n.
Qed.
(** [] *)
(* 2026-01-07 13:18 *)

68
RelTest.v Normal file
View File

@@ -0,0 +1,68 @@
Set Warnings "-notation-overridden,-parsing".
From Stdlib Require Export String.
From LF Require Import Rel.
(* Sentinel type used to detect definitions a student left unfinished. *)
Parameter MISSING: Type.
(* Helper tactics shared by the autograder scripts. *)
Module Check.
(* Report whether term [A] has the expected type [B]. *)
Ltac check_type A B :=
match type of A with
| context[MISSING] => idtac "Missing:" A
| ?T => first [unify T B; idtac "Type: ok" | idtac "Type: wrong - should be (" B ")"]
end.
(* Print the score and comment recorded for a manually graded exercise. *)
Ltac print_manual_grade A :=
match eval compute in A with
| Some (_ ?S ?C) =>
idtac "Score:" S;
match eval compute in C with
| ""%string => idtac "Comment: None"
| _ => idtac "Comment:" C
end
| None =>
idtac "Score: Ungraded";
idtac "Comment: None"
end.
End Check.
From LF Require Import Rel.
Import Check.
(* Grading transcript: prints the axiom allow-list and the (empty) summary.
   [Goal True]/[Abort] merely provides a proof context in which [idtac]
   can run; no theorem is proved here. *)
Goal True.
idtac " ".
idtac "Max points - standard: 0".
idtac "Max points - advanced: 0".
idtac "".
idtac "Allowed Axioms:".
idtac "functional_extensionality".
idtac "FunctionalExtensionality.functional_extensionality_dep".
idtac "plus_le".
idtac "le_trans".
idtac "le_plus_l".
idtac "add_le_cases".
idtac "Sn_le_Sm__n_le_m".
idtac "O_le_n".
idtac "".
idtac "".
idtac "********** Summary **********".
idtac "".
idtac "Below is a summary of the automatically graded exercises that are incomplete.".
idtac "".
idtac "The output for each exercise can be any of the following:".
idtac " - 'Closed under the global context', if it is complete".
idtac " - 'MANUAL', if it is manually graded".
idtac " - A list of pending axioms, containing unproven assumptions. In this case".
idtac " the exercise is considered complete, if the axioms are all allowed.".
idtac "".
idtac "********** Standard **********".
idtac "".
idtac "********** Advanced **********".
Abort.
(* 2026-01-07 13:18 *)
(* 2026-01-07 13:18 *)

1301
Tactics.v Normal file

File diff suppressed because it is too large Load Diff

275
TacticsTest.v Normal file
View File

@@ -0,0 +1,275 @@
Set Warnings "-notation-overridden,-parsing".
From Stdlib Require Export String.
From LF Require Import Tactics.
(* Sentinel type used to detect definitions a student left unfinished. *)
Parameter MISSING: Type.
(* Helper tactics shared by the autograder scripts. *)
Module Check.
(* Report whether term [A] has the expected type [B]. *)
Ltac check_type A B :=
match type of A with
| context[MISSING] => idtac "Missing:" A
| ?T => first [unify T B; idtac "Type: ok" | idtac "Type: wrong - should be (" B ")"]
end.
(* Print the score and comment recorded for a manually graded exercise. *)
Ltac print_manual_grade A :=
match eval compute in A with
| Some (_ ?S ?C) =>
idtac "Score:" S;
match eval compute in C with
| ""%string => idtac "Comment: None"
| _ => idtac "Comment:" C
end
| None =>
idtac "Score: Ungraded";
idtac "Comment: None"
end.
End Check.
From LF Require Import Tactics.
Import Check.
Goal True.
idtac "------------------- apply_exercise1 --------------------".
idtac " ".
idtac "#> rev_exercise1".
idtac "Possible points: 2".
check_type @rev_exercise1 (
(forall (l l' : list nat) (_ : @eq (list nat) l (@rev nat l')),
@eq (list nat) l' (@rev nat l))).
idtac "Assumptions:".
Abort.
Print Assumptions rev_exercise1.
Goal True.
idtac " ".
idtac "------------------- injection_ex3 --------------------".
idtac " ".
idtac "#> injection_ex3".
idtac "Possible points: 3".
check_type @injection_ex3 (
(forall (X : Type) (x y z : X) (l j : list X)
(_ : @eq (list X) (@cons X x (@cons X y l)) (@cons X z j))
(_ : @eq (list X) j (@cons X z l)),
@eq X x y)).
idtac "Assumptions:".
Abort.
Print Assumptions injection_ex3.
Goal True.
idtac " ".
idtac "------------------- discriminate_ex3 --------------------".
idtac " ".
idtac "#> discriminate_ex3".
idtac "Possible points: 1".
check_type @discriminate_ex3 (
(forall (X : Type) (x y z : X) (l _ : list X)
(_ : @eq (list X) (@cons X x (@cons X y l)) (@nil X)),
@eq X x z)).
idtac "Assumptions:".
Abort.
Print Assumptions discriminate_ex3.
Goal True.
idtac " ".
idtac "------------------- nth_error_always_none --------------------".
idtac " ".
idtac "#> nth_error_always_none".
idtac "Possible points: 3".
check_type @nth_error_always_none (
(forall (l : list nat)
(_ : forall i : nat, @eq (option nat) (@nth_error nat l i) (@None nat)),
@eq (list nat) l (@nil nat))).
idtac "Assumptions:".
Abort.
Print Assumptions nth_error_always_none.
Goal True.
idtac " ".
idtac "------------------- eqb_true --------------------".
idtac " ".
idtac "#> eqb_true".
idtac "Possible points: 2".
check_type @eqb_true ((forall (n m : nat) (_ : @eq bool (eqb n m) true), @eq nat n m)).
idtac "Assumptions:".
Abort.
Print Assumptions eqb_true.
Goal True.
idtac " ".
idtac "------------------- plus_n_n_injective --------------------".
idtac " ".
idtac "#> plus_n_n_injective".
idtac "Possible points: 3".
check_type @plus_n_n_injective (
(forall (n m : nat) (_ : @eq nat (Nat.add n n) (Nat.add m m)), @eq nat n m)).
idtac "Assumptions:".
Abort.
Print Assumptions plus_n_n_injective.
Goal True.
idtac " ".
idtac "------------------- gen_dep_practice --------------------".
idtac " ".
idtac "#> nth_error_after_last".
idtac "Possible points: 3".
check_type @nth_error_after_last (
(forall (n : nat) (X : Type) (l : list X) (_ : @eq nat (@length X l) n),
@eq (option X) (@nth_error X l n) (@None X))).
idtac "Assumptions:".
Abort.
Print Assumptions nth_error_after_last.
Goal True.
idtac " ".
idtac "------------------- combine_split --------------------".
idtac " ".
idtac "#> combine_split".
idtac "Possible points: 3".
check_type @combine_split (
(forall (X Y : Type) (l : list (prod X Y)) (l1 : list X)
(l2 : list Y)
(_ : @eq (prod (list X) (list Y)) (@split X Y l)
(@pair (list X) (list Y) l1 l2)),
@eq (list (prod X Y)) (@combine X Y l1 l2) l)).
idtac "Assumptions:".
Abort.
Print Assumptions combine_split.
Goal True.
idtac " ".
idtac "------------------- destruct_eqn_practice --------------------".
idtac " ".
idtac "#> bool_fn_applied_thrice".
idtac "Possible points: 2".
check_type @bool_fn_applied_thrice (
(forall (f : forall _ : bool, bool) (b : bool), @eq bool (f (f (f b))) (f b))).
idtac "Assumptions:".
Abort.
Print Assumptions bool_fn_applied_thrice.
Goal True.
idtac " ".
idtac "------------------- eqb_sym --------------------".
idtac " ".
idtac "#> eqb_sym".
idtac "Possible points: 3".
check_type @eqb_sym ((forall n m : nat, @eq bool (eqb n m) (eqb m n))).
idtac "Assumptions:".
Abort.
Print Assumptions eqb_sym.
Goal True.
idtac " ".
idtac "------------------- split_combine --------------------".
idtac " ".
idtac "#> Manually graded: split_combine".
idtac "Advanced".
idtac "Possible points: 3".
print_manual_grade manual_grade_for_split_combine.
idtac " ".
idtac "------------------- filter_exercise --------------------".
idtac " ".
idtac "#> filter_exercise".
idtac "Advanced".
idtac "Possible points: 3".
check_type @filter_exercise (
(forall (X : Type) (test : forall _ : X, bool) (x : X)
(l lf : list X) (_ : @eq (list X) (@filter X test l) (@cons X x lf)),
@eq bool (test x) true)).
idtac "Assumptions:".
Abort.
Print Assumptions filter_exercise.
Goal True.
idtac " ".
idtac "------------------- forall_exists_challenge --------------------".
idtac " ".
idtac "#> existsb_existsb'".
idtac "Advanced".
idtac "Possible points: 6".
check_type @existsb_existsb' (
(forall (X : Type) (test : forall _ : X, bool) (l : list X),
@eq bool (@existsb X test l) (@existsb' X test l))).
idtac "Assumptions:".
Abort.
Print Assumptions existsb_existsb'.
Goal True.
idtac " ".
idtac " ".
idtac "Max points - standard: 25".
idtac "Max points - advanced: 37".
idtac "".
idtac "Allowed Axioms:".
idtac "functional_extensionality".
idtac "FunctionalExtensionality.functional_extensionality_dep".
idtac "plus_le".
idtac "le_trans".
idtac "le_plus_l".
idtac "add_le_cases".
idtac "Sn_le_Sm__n_le_m".
idtac "O_le_n".
idtac "".
idtac "".
idtac "********** Summary **********".
idtac "".
idtac "Below is a summary of the automatically graded exercises that are incomplete.".
idtac "".
idtac "The output for each exercise can be any of the following:".
idtac " - 'Closed under the global context', if it is complete".
idtac " - 'MANUAL', if it is manually graded".
idtac " - A list of pending axioms, containing unproven assumptions. In this case".
idtac " the exercise is considered complete, if the axioms are all allowed.".
idtac "".
idtac "********** Standard **********".
idtac "---------- rev_exercise1 ---------".
Print Assumptions rev_exercise1.
idtac "---------- injection_ex3 ---------".
Print Assumptions injection_ex3.
idtac "---------- discriminate_ex3 ---------".
Print Assumptions discriminate_ex3.
idtac "---------- nth_error_always_none ---------".
Print Assumptions nth_error_always_none.
idtac "---------- eqb_true ---------".
Print Assumptions eqb_true.
idtac "---------- plus_n_n_injective ---------".
Print Assumptions plus_n_n_injective.
idtac "---------- nth_error_after_last ---------".
Print Assumptions nth_error_after_last.
idtac "---------- combine_split ---------".
Print Assumptions combine_split.
idtac "---------- bool_fn_applied_thrice ---------".
Print Assumptions bool_fn_applied_thrice.
idtac "---------- eqb_sym ---------".
Print Assumptions eqb_sym.
idtac "".
idtac "********** Advanced **********".
idtac "---------- split_combine ---------".
idtac "MANUAL".
idtac "---------- filter_exercise ---------".
Print Assumptions filter_exercise.
idtac "---------- existsb_existsb' ---------".
Print Assumptions existsb_existsb'.
Abort.
(* 2026-01-07 13:18 *)
(* 2026-01-07 13:18 *)

1
_CoqProject Normal file
View File

@@ -0,0 +1 @@
-Q . LF

387
deps.svg Normal file
View File

@@ -0,0 +1,387 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?xml-stylesheet href="common/css/sf.css" type="text/css"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<!-- Generated by graphviz version 2.42.3 (20191010.1750)
-->
<!-- Pages: 1 -->
<svg width="504pt" height="1065pt"
viewBox="0.00 21.60 504.00 1043.38" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g id="graph0" class="graph deps" transform="scale(1.02 1.02) rotate(0) translate(4 1055.94)">
<polygon fill="white" stroke="transparent" points="-4,4 -4,-1034 508,-1034 508,4 -4,4"/>
<!-- legend -->
<g id="node1" class="node">
<title>legend</title>
<g id="a_node1"><a xlink:title="Go to chapter" target="_top">
<polygon fill="WhiteSmoke" stroke="transparent" points="222,-1030 16,-1030 16,-936 222,-936 222,-1030"/>
<polygon fill="lightblue" stroke="transparent" points="20,-994 20,-1026 102,-1026 102,-994 20,-994"/>
<polygon fill="none" stroke="#7f7f7f" points="20,-994 20,-1026 102,-1026 102,-994 20,-994"/>
<text text-anchor="start" x="30.87" y="-1007" font-family="Times,serif" font-size="10.00"> Core chapters </text>
<polygon fill="lightyellow" stroke="transparent" points="112,-994 112,-1026 218,-1026 218,-994 112,-994"/>
<polygon fill="none" stroke="#7f7f7f" points="112,-994 112,-1026 218,-1026 218,-994 112,-994"/>
<text text-anchor="start" x="123.21" y="-1007" font-family="Times,serif" font-size="10.00">Contributed chapters</text>
<text text-anchor="start" x="25" y="-978" font-family="Times,serif" font-size="10.00" fill="gray"></text>
<text text-anchor="start" x="40.74" y="-978" font-family="Times,serif" font-size="10.00"> &#160;&#160;&#160;&#160;Chapter dependencies</text>
<text text-anchor="start" x="24.91" y="-953" font-family="Times,serif" font-size="10.00" fill="blue"></text>
<text text-anchor="start" x="40.65" y="-953" font-family="Times,serif" font-size="10.00"> &#160;&#160;&#160;&#160;Recommended path for semester course</text>
</a>
</g>
</g>
<!-- Preface -->
<g id="node2" class="node">
<title>Preface</title>
<g id="a_node2"><a xlink:href="Preface.html" xlink:title="Go to chapter" target="_top">
<polygon fill="lightblue" stroke="black" points="315.47,-1001 254.53,-1001 254.53,-965 315.47,-965 315.47,-1001"/>
<text text-anchor="middle" x="285" y="-979.7" font-family="Times,serif" font-size="11.00">Preface</text>
</a>
</g>
</g>
<!-- Basics -->
<g id="node4" class="node">
<title>Basics</title>
<g id="a_node4"><a xlink:href="Basics.html" xlink:title="Go to chapter" target="_top">
<polygon fill="lightblue" stroke="black" points="361,-900 209,-900 209,-858 361,-858 361,-900"/>
<text text-anchor="start" x="270.64" y="-883.2" font-family="Times,serif" font-size="11.00">Basics</text>
<text text-anchor="start" x="226.63" y="-867.8" font-family="Times,serif" font-size="9.00">Functional Programming in Rocq</text>
</a>
</g>
</g>
<!-- Preface&#45;&gt;Basics -->
<g id="edge18" class="edge">
<title>Preface&#45;&gt;Basics</title>
<path fill="none" stroke="blue" d="M285,-964.7C285,-949.92 285,-928.3 285,-910.61"/>
<polygon fill="blue" stroke="blue" points="288.5,-910.2 285,-900.2 281.5,-910.2 288.5,-910.2"/>
</g>
<!-- Postscript -->
<g id="node3" class="node">
<title>Postscript</title>
<g id="a_node3"><a xlink:href="Postscript.html" xlink:title="Go to chapter" target="_top">
<polygon fill="lightblue" stroke="black" points="405.89,-117 334.11,-117 334.11,-81 405.89,-81 405.89,-117"/>
<text text-anchor="middle" x="370" y="-95.7" font-family="Times,serif" font-size="11.00">Postscript</text>
</a>
</g>
</g>
<!-- Bib -->
<g id="node18" class="node">
<title>Bib</title>
<g id="a_node18"><a xlink:href="Bib.html" xlink:title="Go to chapter" target="_top">
<polygon fill="lightblue" stroke="black" points="411.5,-42 328.5,-42 328.5,0 411.5,0 411.5,-42"/>
<text text-anchor="start" x="362.55" y="-25.2" font-family="Times,serif" font-size="11.00">Bib</text>
<text text-anchor="start" x="346.75" y="-9.8" font-family="Times,serif" font-size="9.00">Bibliography</text>
</a>
</g>
</g>
<!-- Postscript&#45;&gt;Bib -->
<g id="edge14" class="edge">
<title>Postscript&#45;&gt;Bib</title>
<path fill="none" stroke="gray" d="M370,-80.75C370,-72.33 370,-61.94 370,-52.25"/>
<polygon fill="gray" stroke="gray" points="373.5,-52.14 370,-42.14 366.5,-52.14 373.5,-52.14"/>
</g>
<!-- Induction -->
<g id="node5" class="node">
<title>Induction</title>
<g id="a_node5"><a xlink:href="Induction.html" xlink:title="Go to chapter" target="_top">
<polygon fill="lightblue" stroke="black" points="336.5,-822 233.5,-822 233.5,-780 336.5,-780 336.5,-822"/>
<text text-anchor="start" x="264.42" y="-805.2" font-family="Times,serif" font-size="11.00">Induction</text>
<text text-anchor="start" x="251.5" y="-789.8" font-family="Times,serif" font-size="9.00">Proof by Induction</text>
</a>
</g>
</g>
<!-- Basics&#45;&gt;Induction -->
<g id="edge1" class="edge">
<title>Basics&#45;&gt;Induction</title>
<path fill="none" stroke="gray" d="M278.94,-857.63C278.28,-849.82 278.09,-840.73 278.38,-832.18"/>
<polygon fill="gray" stroke="gray" points="281.87,-832.35 278.96,-822.16 274.89,-831.94 281.87,-832.35"/>
</g>
<!-- Basics&#45;&gt;Induction -->
<g id="edge19" class="edge">
<title>Basics&#45;&gt;Induction</title>
<path fill="none" stroke="blue" d="M291.06,-857.63C291.72,-849.82 291.91,-840.73 291.62,-832.18"/>
<polygon fill="blue" stroke="blue" points="295.11,-831.94 291.04,-822.16 288.13,-832.35 295.11,-831.94"/>
</g>
<!-- Lists -->
<g id="node6" class="node">
<title>Lists</title>
<g id="a_node6"><a xlink:href="Lists.html" xlink:title="Go to chapter" target="_top">
<polygon fill="lightblue" stroke="black" points="357,-744 213,-744 213,-702 357,-702 357,-744"/>
<text text-anchor="start" x="274.3" y="-727.2" font-family="Times,serif" font-size="11.00">Lists</text>
<text text-anchor="start" x="230.75" y="-711.8" font-family="Times,serif" font-size="9.00">Working with Structured Data</text>
</a>
</g>
</g>
<!-- Induction&#45;&gt;Lists -->
<g id="edge2" class="edge">
<title>Induction&#45;&gt;Lists</title>
<path fill="none" stroke="gray" d="M278.94,-779.63C278.28,-771.82 278.09,-762.73 278.38,-754.18"/>
<polygon fill="gray" stroke="gray" points="281.87,-754.35 278.96,-744.16 274.89,-753.94 281.87,-754.35"/>
</g>
<!-- Induction&#45;&gt;Lists -->
<g id="edge20" class="edge">
<title>Induction&#45;&gt;Lists</title>
<path fill="none" stroke="blue" d="M291.06,-779.63C291.72,-771.82 291.91,-762.73 291.62,-754.18"/>
<polygon fill="blue" stroke="blue" points="295.11,-753.94 291.04,-744.16 288.13,-754.35 295.11,-753.94"/>
</g>
<!-- Poly -->
<g id="node7" class="node">
<title>Poly</title>
<g id="a_node7"><a xlink:href="Poly.html" xlink:title="Go to chapter" target="_top">
<polygon fill="lightblue" stroke="black" points="381.5,-666 188.5,-666 188.5,-624 381.5,-624 381.5,-666"/>
<text text-anchor="start" x="275.41" y="-649.2" font-family="Times,serif" font-size="11.00">Poly</text>
<text text-anchor="start" x="206.97" y="-633.8" font-family="Times,serif" font-size="9.00">Polymorphism and Higher&#45;Order Functions</text>
</a>
</g>
</g>
<!-- Lists&#45;&gt;Poly -->
<g id="edge3" class="edge">
<title>Lists&#45;&gt;Poly</title>
<path fill="none" stroke="gray" d="M278.94,-701.63C278.28,-693.82 278.09,-684.73 278.38,-676.18"/>
<polygon fill="gray" stroke="gray" points="281.87,-676.35 278.96,-666.16 274.89,-675.94 281.87,-676.35"/>
</g>
<!-- Lists&#45;&gt;Poly -->
<g id="edge21" class="edge">
<title>Lists&#45;&gt;Poly</title>
<path fill="none" stroke="blue" d="M291.06,-701.63C291.72,-693.82 291.91,-684.73 291.62,-676.18"/>
<polygon fill="blue" stroke="blue" points="295.11,-675.94 291.04,-666.16 288.13,-676.35 295.11,-675.94"/>
</g>
<!-- Tactics -->
<g id="node8" class="node">
<title>Tactics</title>
<g id="a_node8"><a xlink:href="Tactics.html" xlink:title="Go to chapter" target="_top">
<polygon fill="lightblue" stroke="black" points="337.5,-588 232.5,-588 232.5,-546 337.5,-546 337.5,-588"/>
<text text-anchor="start" x="270" y="-571.2" font-family="Times,serif" font-size="11.00">Tactics</text>
<text text-anchor="start" x="250.91" y="-555.8" font-family="Times,serif" font-size="9.00">More Basic Tactics</text>
</a>
</g>
</g>
<!-- Poly&#45;&gt;Tactics -->
<g id="edge4" class="edge">
<title>Poly&#45;&gt;Tactics</title>
<path fill="none" stroke="gray" d="M278.94,-623.63C278.28,-615.82 278.09,-606.73 278.38,-598.18"/>
<polygon fill="gray" stroke="gray" points="281.87,-598.35 278.96,-588.16 274.89,-597.94 281.87,-598.35"/>
</g>
<!-- Poly&#45;&gt;Tactics -->
<g id="edge22" class="edge">
<title>Poly&#45;&gt;Tactics</title>
<path fill="none" stroke="blue" d="M291.06,-623.63C291.72,-615.82 291.91,-606.73 291.62,-598.18"/>
<polygon fill="blue" stroke="blue" points="295.11,-597.94 291.04,-588.16 288.13,-598.35 295.11,-597.94"/>
</g>
<!-- Logic -->
<g id="node10" class="node">
<title>Logic</title>
<g id="a_node10"><a xlink:href="Logic.html" xlink:title="Go to chapter" target="_top">
<polygon fill="lightblue" stroke="black" points="326.5,-510 243.5,-510 243.5,-468 326.5,-468 326.5,-510"/>
<text text-anchor="start" x="272.67" y="-493.2" font-family="Times,serif" font-size="11.00">Logic</text>
<text text-anchor="start" x="261.75" y="-477.8" font-family="Times,serif" font-size="9.00">Logic in Rocq</text>
</a>
</g>
</g>
<!-- Tactics&#45;&gt;Logic -->
<g id="edge5" class="edge">
<title>Tactics&#45;&gt;Logic</title>
<path fill="none" stroke="gray" d="M278.94,-545.63C278.28,-537.82 278.09,-528.73 278.38,-520.18"/>
<polygon fill="gray" stroke="gray" points="281.87,-520.35 278.96,-510.16 274.89,-519.94 281.87,-520.35"/>
</g>
<!-- Tactics&#45;&gt;Logic -->
<g id="edge23" class="edge">
<title>Tactics&#45;&gt;Logic</title>
<path fill="none" stroke="blue" d="M291.06,-545.63C291.72,-537.82 291.91,-528.73 291.62,-520.18"/>
<polygon fill="blue" stroke="blue" points="295.11,-519.94 291.04,-510.16 288.13,-520.35 295.11,-519.94"/>
</g>
<!-- IndProp -->
<g id="node9" class="node">
<title>IndProp</title>
<g id="a_node9"><a xlink:href="IndProp.html" xlink:title="Go to chapter" target="_top">
<polygon fill="lightblue" stroke="black" points="362,-432 208,-432 208,-390 362,-390 362,-432"/>
<text text-anchor="start" x="267.28" y="-415.2" font-family="Times,serif" font-size="11.00">IndProp</text>
<text text-anchor="start" x="225.5" y="-399.8" font-family="Times,serif" font-size="9.00">Inductively Defined Propositions</text>
</a>
</g>
</g>
<!-- Maps -->
<g id="node11" class="node">
<title>Maps</title>
<g id="a_node11"><a xlink:href="Maps.html" xlink:title="Go to chapter" target="_top">
<polygon fill="lightblue" stroke="black" points="184.5,-354 67.5,-354 67.5,-312 184.5,-312 184.5,-354"/>
<text text-anchor="start" x="114.28" y="-337.2" font-family="Times,serif" font-size="11.00">Maps</text>
<text text-anchor="start" x="85.7" y="-321.8" font-family="Times,serif" font-size="9.00">Total and Partial Maps</text>
</a>
</g>
</g>
<!-- IndProp&#45;&gt;Maps -->
<g id="edge7" class="edge">
<title>IndProp&#45;&gt;Maps</title>
<path fill="none" stroke="gray" d="M236.77,-389.83C215.92,-380.26 191.7,-368.74 171.21,-358.65"/>
<polygon fill="gray" stroke="gray" points="172.59,-355.43 162.08,-354.13 169.48,-361.7 172.59,-355.43"/>
</g>
<!-- IndProp&#45;&gt;Maps -->
<g id="edge25" class="edge">
<title>IndProp&#45;&gt;Maps</title>
<path fill="none" stroke="blue" d="M248.85,-389.83C229.47,-380.17 205.21,-368.53 183.35,-358.38"/>
<polygon fill="blue" stroke="blue" points="184.7,-355.14 174.15,-354.13 181.76,-361.5 184.7,-355.14"/>
</g>
<!-- ProofObjects -->
<g id="node12" class="node">
<title>ProofObjects</title>
<g id="a_node12"><a xlink:href="ProofObjects.html" xlink:title="Go to chapter" target="_top">
<polygon fill="lightblue" stroke="black" points="367.5,-354 202.5,-354 202.5,-312 367.5,-312 367.5,-354"/>
<text text-anchor="start" x="256.48" y="-337.2" font-family="Times,serif" font-size="11.00">ProofObjects</text>
<text text-anchor="start" x="220.52" y="-321.8" font-family="Times,serif" font-size="9.00">The Curry&#45;Howard Correspondence</text>
</a>
</g>
</g>
<!-- IndProp&#45;&gt;ProofObjects -->
<g id="edge9" class="edge">
<title>IndProp&#45;&gt;ProofObjects</title>
<path fill="none" stroke="gray" d="M285,-389.63C285,-381.82 285,-372.73 285,-364.18"/>
<polygon fill="gray" stroke="gray" points="288.5,-364.16 285,-354.16 281.5,-364.16 288.5,-364.16"/>
</g>
<!-- Rel -->
<g id="node17" class="node">
<title>Rel</title>
<g id="a_node17"><a xlink:href="Rel.html" xlink:title="Go to chapter" target="_top">
<polygon fill="lightblue" stroke="black" points="504,-354 386,-354 386,-312 504,-312 504,-354"/>
<text text-anchor="start" x="437.36" y="-337.2" font-family="Times,serif" font-size="11.00">Rel</text>
<text text-anchor="start" x="403.76" y="-321.8" font-family="Times,serif" font-size="9.00">Properties of Relations</text>
</a>
</g>
</g>
<!-- IndProp&#45;&gt;Rel -->
<g id="edge11" class="edge">
<title>IndProp&#45;&gt;Rel</title>
<path fill="none" stroke="gray" d="M327.46,-389.83C347.7,-380.22 372.09,-368.63 393.39,-358.51"/>
<polygon fill="gray" stroke="gray" points="395.09,-361.58 402.62,-354.13 392.09,-355.26 395.09,-361.58"/>
</g>
<!-- Logic&#45;&gt;IndProp -->
<g id="edge6" class="edge">
<title>Logic&#45;&gt;IndProp</title>
<path fill="none" stroke="gray" d="M278.94,-467.63C278.28,-459.82 278.09,-450.73 278.38,-442.18"/>
<polygon fill="gray" stroke="gray" points="281.87,-442.35 278.96,-432.16 274.89,-441.94 281.87,-442.35"/>
</g>
<!-- Logic&#45;&gt;IndProp -->
<g id="edge24" class="edge">
<title>Logic&#45;&gt;IndProp</title>
<path fill="none" stroke="blue" d="M291.06,-467.63C291.72,-459.82 291.91,-450.73 291.62,-442.18"/>
<polygon fill="blue" stroke="blue" points="295.11,-441.94 291.04,-432.16 288.13,-442.35 295.11,-441.94"/>
</g>
<!-- Imp -->
<g id="node14" class="node">
<title>Imp</title>
<g id="a_node14"><a xlink:href="Imp.html" xlink:title="Go to chapter" target="_top">
<polygon fill="lightblue" stroke="black" points="205.5,-276 66.5,-276 66.5,-234 205.5,-234 205.5,-276"/>
<text text-anchor="start" x="127.64" y="-259.2" font-family="Times,serif" font-size="11.00">Imp</text>
<text text-anchor="start" x="84.76" y="-243.8" font-family="Times,serif" font-size="9.00">Simple Imperative Programs</text>
</a>
</g>
</g>
<!-- Maps&#45;&gt;Imp -->
<g id="edge8" class="edge">
<title>Maps&#45;&gt;Imp</title>
<path fill="none" stroke="gray" d="M122.62,-311.63C122.99,-303.82 124,-294.73 125.41,-286.18"/>
<polygon fill="gray" stroke="gray" points="128.88,-286.64 127.31,-276.16 122.01,-285.33 128.88,-286.64"/>
</g>
<!-- Maps&#45;&gt;Imp -->
<g id="edge26" class="edge">
<title>Maps&#45;&gt;Imp</title>
<path fill="none" stroke="blue" d="M134.74,-311.63C136.43,-303.82 137.81,-294.73 138.65,-286.18"/>
<polygon fill="blue" stroke="blue" points="142.14,-286.39 139.39,-276.16 135.16,-285.88 142.14,-286.39"/>
</g>
<!-- IndPrinciples -->
<g id="node13" class="node">
<title>IndPrinciples</title>
<g id="a_node13"><a xlink:href="IndPrinciples.html" xlink:title="Go to chapter" target="_top">
<polygon fill="lightblue" stroke="black" points="336.5,-276 233.5,-276 233.5,-234 336.5,-234 336.5,-276"/>
<text text-anchor="start" x="256.17" y="-259.2" font-family="Times,serif" font-size="11.00">IndPrinciples</text>
<text text-anchor="start" x="251.76" y="-243.8" font-family="Times,serif" font-size="9.00">More on Induction</text>
</a>
</g>
</g>
<!-- ProofObjects&#45;&gt;IndPrinciples -->
<g id="edge10" class="edge">
<title>ProofObjects&#45;&gt;IndPrinciples</title>
<path fill="none" stroke="gray" d="M285,-311.63C285,-303.82 285,-294.73 285,-286.18"/>
<polygon fill="gray" stroke="gray" points="288.5,-286.16 285,-276.16 281.5,-286.16 288.5,-286.16"/>
</g>
<!-- ImpParser -->
<g id="node15" class="node">
<title>ImpParser</title>
<g id="a_node15"><a xlink:href="ImpParser.html" xlink:title="Go to chapter" target="_top">
<polygon fill="lightblue" stroke="black" points="132,-198 0,-198 0,-156 132,-156 132,-198"/>
<text text-anchor="start" x="43.4" y="-181.2" font-family="Times,serif" font-size="11.00">ImpParser</text>
<text text-anchor="start" x="17.75" y="-165.8" font-family="Times,serif" font-size="9.00">Lexing and Parsing in Rocq</text>
</a>
</g>
</g>
<!-- Imp&#45;&gt;ImpParser -->
<g id="edge15" class="edge">
<title>Imp&#45;&gt;ImpParser</title>
<path fill="none" stroke="gray" d="M117.24,-233.63C109.33,-225.05 100,-214.91 91.48,-205.67"/>
<polygon fill="gray" stroke="gray" points="93.92,-203.14 84.57,-198.16 88.77,-207.89 93.92,-203.14"/>
</g>
<!-- ImpCEvalFun -->
<g id="node16" class="node">
<title>ImpCEvalFun</title>
<g id="a_node16"><a xlink:href="ImpCEvalFun.html" xlink:title="Go to chapter" target="_top">
<polygon fill="lightblue" stroke="black" points="301.5,-198 150.5,-198 150.5,-156 301.5,-156 301.5,-198"/>
<text text-anchor="start" x="195.33" y="-181.2" font-family="Times,serif" font-size="11.00">ImpCEvalFun</text>
<text text-anchor="start" x="168.51" y="-165.8" font-family="Times,serif" font-size="9.00">An Evaluation Function for Imp</text>
</a>
</g>
</g>
<!-- Imp&#45;&gt;ImpCEvalFun -->
<g id="edge17" class="edge">
<title>Imp&#45;&gt;ImpCEvalFun</title>
<path fill="none" stroke="gray" d="M160.12,-233.63C170.59,-224.79 183.01,-214.3 194.22,-204.84"/>
<polygon fill="gray" stroke="gray" points="196.74,-207.29 202.13,-198.16 192.23,-201.94 196.74,-207.29"/>
</g>
<!-- Auto -->
<g id="node19" class="node">
<title>Auto</title>
<g id="a_node19"><a xlink:href="Auto.html" xlink:title="Go to chapter" target="_top">
<polygon fill="lightblue" stroke="black" points="420,-198 320,-198 320,-156 420,-156 420,-198"/>
<text text-anchor="start" x="359" y="-181.2" font-family="Times,serif" font-size="11.00">Auto</text>
<text text-anchor="start" x="337.88" y="-165.8" font-family="Times,serif" font-size="9.00">More Automation</text>
</a>
</g>
</g>
<!-- Imp&#45;&gt;Auto -->
<g id="edge12" class="edge">
<title>Imp&#45;&gt;Auto</title>
<path fill="none" stroke="gray" d="M191.64,-233.91C223.9,-223.36 265.19,-210.13 302,-198 304.7,-197.11 307.46,-196.2 310.26,-195.27"/>
<polygon fill="gray" stroke="gray" points="311.47,-198.55 319.86,-192.08 309.27,-191.91 311.47,-198.55"/>
</g>
<!-- Imp&#45;&gt;Auto -->
<g id="edge27" class="edge">
<title>Imp&#45;&gt;Auto</title>
<path fill="none" stroke="blue" d="M205.61,-234.56C237.1,-224.86 274.93,-212.78 309.81,-201.35"/>
<polygon fill="blue" stroke="blue" points="311.15,-204.59 319.56,-198.15 308.97,-197.94 311.15,-204.59"/>
</g>
<!-- Extraction -->
<g id="node20" class="node">
<title>Extraction</title>
<g id="a_node20"><a xlink:href="Extraction.html" xlink:title="Go to chapter" target="_top">
<polygon fill="lightblue" stroke="black" points="128.5,-120 3.5,-120 3.5,-78 128.5,-78 128.5,-120"/>
<text text-anchor="start" x="43.59" y="-103.2" font-family="Times,serif" font-size="11.00">Extraction</text>
<text text-anchor="start" x="21.55" y="-87.8" font-family="Times,serif" font-size="9.00">Extracting ML From Stdlib</text>
</a>
</g>
</g>
<!-- ImpParser&#45;&gt;Extraction -->
<g id="edge16" class="edge">
<title>ImpParser&#45;&gt;Extraction</title>
<path fill="none" stroke="gray" d="M66,-155.63C66,-147.82 66,-138.73 66,-130.18"/>
<polygon fill="gray" stroke="gray" points="69.5,-130.16 66,-120.16 62.5,-130.16 69.5,-130.16"/>
</g>
<!-- Auto&#45;&gt;Postscript -->
<g id="edge13" class="edge">
<title>Auto&#45;&gt;Postscript</title>
<path fill="none" stroke="gray" d="M363.94,-155.63C363.21,-146.94 363.06,-136.67 363.49,-127.34"/>
<polygon fill="gray" stroke="gray" points="366.99,-127.56 364.23,-117.33 360,-127.04 366.99,-127.56"/>
</g>
<!-- Auto&#45;&gt;Postscript -->
<g id="edge28" class="edge">
<title>Auto&#45;&gt;Postscript</title>
<path fill="none" stroke="blue" d="M376.06,-155.63C376.79,-146.94 376.94,-136.67 376.51,-127.34"/>
<polygon fill="blue" stroke="blue" points="380,-127.04 375.77,-117.33 373.01,-127.56 380,-127.04"/>
</g>
</g>
</svg>

After

Width:  |  Height:  |  Size: 19 KiB

37
impdriver.ml Normal file
View File

@@ -0,0 +1,37 @@
open Imp
(* [explode s] is the list of characters of [s], in left-to-right order. *)
let explode s = List.init (String.length s) (String.get s);;
(* [test s] parses the Imp source text [s] and runs it with a fixed fuel
   bound, printing one of:
   - "Syntax error" when parsing fails,
   - a timeout notice when evaluation exhausts its fuel (e.g. an
     infinite loop),
   - otherwise the final values of the variables w, x, y and z. *)
let test s =
  print_newline ();
  (* Fixed typo in the banner: was "Propgram". *)
  print_endline ("Program: " ^ s);
  match parse (explode s) with
  | NoneE _ -> print_endline ("Syntax error")
  | SomeE c ->
    (* Fuel bounds the evaluator so nonterminating programs still return. *)
    let fuel = 1000 in
    (match ceval_step empty_st c fuel with
     | None ->
       print_endline
         ("Still running after " ^ string_of_int fuel ^ " steps")
     | Some res ->
       print_endline (
         "Result: ["
         ^ string_of_int (res ['w']) ^ " "
         ^ string_of_int (res ['x']) ^ " "
         ^ string_of_int (res ['y']) ^ " "
         ^ string_of_int (res ['z']) ^ " ...]"))
;;
(* Exercise the driver on a batch of sample Imp programs, in order. *)
let () =
  List.iter test
    [ "x:=1 ; y:=2";
      "true";  (* syntax error *)
      "skip";
      "skip;skip";
      "while true do skip end";
      "x:=3";
      "x:=3; while 0<=x do skip end";
      "x:=3; while 1<=x do y:=y+1; x:=x-1 end" ];;

14
shell.nix Normal file
View File

@@ -0,0 +1,14 @@
# Nix development shell for building these Rocq/Coq sources.
# Enter with `nix-shell`; uses the unstable nixpkgs channel.
{ pkgs ? import <nixpkgs-unstable> {} }:
pkgs.mkShell {
# Tools available inside the shell.
packages = with pkgs; [
# coq_8_20
coq
coqPackages.stdlib
# coqPackages.vscoq-language-server
];
# Mirror COQPATH into ROCQPATH and clear the old name.
# NOTE(review): presumably needed because newer Rocq reads ROCQPATH while
# the nixpkgs setup hook still exports COQPATH — confirm on the pinned channel.
shellHook = ''
export ROCQPATH="$COQPATH"
unset COQPATH
'';
}