Verified SAT Solver

Logic Verified Software

A verified tautology checker in the style of Boyer-Moore '79 — both soundness and completeness proved, with ordinal-based termination for the if-normalization algorithm.


Overview

This case study presents a complete, verified propositional logic decision procedure: a tautology checker that normalizes formulas into an if-normal form (unordered BDD-style) and then checks validity by case-splitting on variables. Both directions of correctness are established:

  • Soundness: if the checker says "tautology", the formula is true under every assignment
  • Completeness: if the checker says "not a tautology", a concrete falsifying assignment is produced

This follows the classic Boyer-Moore '79 approach, updated for ImandraX's modern tactic language. The complete development is self-contained IML you can paste into ImandraX.


The Model

Formulas

(* Propositional formulas. [If] is the only connective: [Var]s are
   indexed by integers, and all other connectives can be encoded as
   [If] terms (Boyer-Moore '79 style). *)
type formula =
  | True
  | False
  | Var of int
  | If of formula * formula * formula

Termination Measures

The normalizer may increase formula size (the transformation duplicates subformulas), so a simple structural measure doesn't work. Instead, we use an ordinal pair combining if-complexity and if-depth:

(* Depth of the chain of [If]s sitting in condition position.
   Only the condition [c] is recursed into; branches are ignored. *)
let rec if_depth (f : formula) =
  match f with
  | If (c, _, _) -> 1 + if_depth c
  | _ -> 0
 
(* [if_depth] is non-negative. Installed as a forward-chaining rule
   ([@@fc]) triggered on occurrences of [if_depth f]. *)
lemma if_depth_psd f =
 (if_depth f) [@trigger] >= 0
[@@by auto] [@@fc]
 
(* Multiplicative if-complexity: 1 on leaves, and
   complexity(c) * (complexity(l) + complexity(r)) on [If (c, l, r)].
   Strictly positive — see [if_complexity_pos] below. *)
let rec if_complexity (f : formula) =
  match f with
  | If (c, l, r) ->
    (if_complexity c) * (if_complexity l + if_complexity r)
  | _ -> 1
 
(* Arithmetic fact: a product of positives is positive. Used below
   (via [%use]) to discharge the nonlinear step in [arith_helper_1]. *)
lemma arith_helper_0 x y =
  (x > 0 && y > 0) ==> x * y > 0
 
(* Positivity of [x * (y + z)]; proved by instantiating
   [arith_helper_0] at [(x, y + z)]. Forward-chaining rule. *)
lemma arith_helper_1 x y z =
  x > 0 && y > 0 && z > 0 ==> x * (y + z) > 0
[@@by [%use arith_helper_0 x (y+z)] @> auto] [@@fc]
 
(* Contrapositive form of the previous fact: if [x * (y + z)] is
   non-positive, one of the factors/summands must be. *)
lemma arith_helper x y z =
  x * (y + z) <= 0 ==> x <= 0 || y <= 0 || z <= 0
[@@by auto] [@@fc]
 
(* [if_complexity] is strictly positive on every formula; proved by
   induction. Needed so the ordinal measure below is well-formed. *)
lemma if_complexity_pos f =
  (if_complexity f) [@trigger] > 0
[@@by induct ~otf:true ()] [@@fc]
 
(* Lexicographic termination measure for [normalise]: the ordinal
   pair (if_complexity f, if_depth f). *)
let n_measure (f : formula) =
  let o1 = Ordinal.of_int (if_complexity f) in
  let o2 = Ordinal.of_int (if_depth f) in
  Ordinal.pair o1 o2

The key insight:

  • When we push an If out of the condition position, if_depth decreases (the new condition is simpler)
  • When we recurse into branches, if_complexity may decrease while if_depth stays the same
  • The lexicographic ordering of (complexity, depth) always decreases

If-Normalization

The central transformation: any formula can be normalized so that If nodes never appear in the condition position of another If. This is achieved by pushing nested Ifs outward:

If (If (p, q, r), left, right)
  =
If (p, If (q, left, right), If (r, left, right))

Supporting lemmas for the termination proof:

(* The measure is a valid ordinal for every formula. Rewrite rule. *)
lemma n_measure_valid f =
  Ordinal.is_valid (n_measure f)
[@@by auto] [@@rw]
 
(* Characterizes ordinal comparison of measures as the lexicographic
   order on the (if_complexity, if_depth) pair, so the termination
   proof can reason about plain integers instead of ordinals. *)
lemma elim_n_measure f g =
  Ordinal.(n_measure f << n_measure g)
= (if_complexity f < if_complexity g
   || (if_complexity f = if_complexity g
       && if_depth f < if_depth g))
[@@by auto] [@@rw]
 
(* Keep [n_measure] and [Ordinal.is_valid] opaque from here on, so
   the prover relies on the two rewrite rules above rather than
   unfolding the definitions. *)
[@@@disable n_measure]
[@@@disable Ordinal.is_valid]

The normalizer itself:

(* If-normalization: rewrites [f] so that no [If] appears in the
   condition position of another [If]. The first case may duplicate
   [left]/[right] and grow the term, so termination is justified by
   the lexicographic ordinal measure [n_measure] rather than size. *)
let rec normalise (f : formula) =
  match f with
    (* Nested condition: distribute [left]/[right] over the inner
       branches, then renormalise the rewritten term (depth of the
       condition chain decreases). *)
    If (If (p, q, r), left, right) ->
    normalise
      (If (p,
           If (q, left, right),
           If (r, left, right)))
    (* Atomic condition: recurse into the branches only. *)
  | If (c, left, right) ->
    If (c, normalise left, normalise right)
  | _ -> f
[@@measure n_measure f]
[@@by auto]

Evaluation and Bindings

(* A single variable assignment: variable index paired with its
   boolean value. Environments are [binding list]s. *)
type binding = { var_id : int;
                 value  : bool }
 
(* Look up [var_id] in [bindings]; the first matching binding wins.
   Unbound variables default to [false]. *)
let rec look_up var_id bindings =
  match bindings with
  | [] -> false
  | b :: bs ->
     if b.var_id = var_id then b.value
     else look_up var_id bs
 
(* Evaluate a formula under a binding environment: [Var]s via
   [look_up], [If] by evaluating the condition and then the chosen
   branch. *)
let rec eval_ f bindings =
  match f with
  | Var n -> look_up n bindings
  | If (c,l,r) ->
     if eval_ c bindings then
       eval_ l bindings
     else
       eval_ r bindings
  | True -> true
  | False -> false
 
(* Whether [var_id] occurs (is bound) anywhere in [bindings]. *)
let rec is_assigned var_id bindings =
  match bindings with
  | [] -> false
  | b :: bs ->
     b.var_id = var_id
     || is_assigned var_id bs
 
(* Extend the environment by binding [var_id] to [true] at the
   front (shadows any later binding for the same variable). *)
let assume_true var_id bindings =
  let b = { var_id = var_id;
            value  = true }
  in
  b :: bindings
 
(* Extend the environment by binding [var_id] to [false] at the
   front (shadows any later binding for the same variable). *)
let assume_false var_id bindings =
  let b = { var_id = var_id;
            value  = false }
  in
  b :: bindings

Tautology Checker

The main checker for if-normal terms — case-splits on variables and checks all branches:

(* Tautology check for if-normal formulas under environment [bs].
   On [If (Var n, l, r)]: if [n] is already assigned, follow its
   value into the matching branch; otherwise case-split, requiring
   [l] to hold with [n := true] and [r] with [n := false].
   The final wildcard (non-normal condition) conservatively returns
   [false]. *)
let rec is_tautology' f bs =
  match f with
  | True -> true
  | False -> false
  | Var _ -> eval_ f bs
  | If (Var n, l, r) ->
     if is_assigned n bs then
       if eval_ (Var n) bs then
         is_tautology' l bs
       else is_tautology' r bs
     else
       if is_tautology' l (assume_true n bs)
       then is_tautology' r (assume_false n bs)
       else false
  | If (True, l, _) ->
     is_tautology' l bs
  | If (False, _, r) ->
     is_tautology' r bs
  | _ -> false
 
(* Top-level checker: normalise first, then check with an empty
   environment. *)
let is_tautology f =
  is_tautology' (normalise f) []

Normal Form Predicate

(* If-normal form predicate: no [If] in condition position, and both
   branches recursively normal. Leaves are normal. *)
let rec is_normal f =
  match f with
  | If (If _, _, _) -> false
  | If (_, p, q) ->
    if is_normal p then is_normal q else false
  | _ -> true

Verification: Soundness

Binding Stability Lemmas

These establish that extending a binding environment doesn't affect existing lookups:

(* Lookup in an appended environment [a @ b]: bindings in the prefix
   [a] take precedence; only unassigned variables fall through to
   [b]. Rewrite rule. *)
theorem look_up_stable var_id a b =
  look_up var_id (a @ b)
  =
    if is_assigned var_id a then
      look_up var_id a
    else
      look_up var_id b
[@@by auto] [@@rw]
 
(* A variable that looks up to [true] must be assigned (unbound
   variables look up to [false]). *)
lemma true_is_assigned x a =
  look_up x a ==> is_assigned x a
[@@by auto] [@@rw]
 
(* Consing a binding that agrees with the environment's current
   lookup result does not change evaluation. *)
lemma eval_stable f var_id value bs =
  value = look_up var_id bs
  ==>
  eval_ f ({ var_id; value } :: bs)
   = eval_ f bs
[@@by auto] [@@rw]

Key Soundness Lemma

If a formula is normal and passes the tautology check under bindings b, then it evaluates to true under any extension b @ a:

(* Key soundness lemma: a normal formula accepted by the checker
   under [b] evaluates to [true] under any extension [b @ a]. *)
theorem tautology_implies_eval f a b =
  is_normal f
  && is_tautology' f b
    ==>
   eval_ f (b @ a)
[@@by auto] [@@rw] [@@timeout 60]

Normalization Correctness

Three properties of the normalizer — it produces normal forms, is idempotent on normal forms, and preserves truth:

(* [normalise] always produces an if-normal formula. *)
theorem normalise_correct f =
  is_normal (normalise f)
[@@by auto] [@@rw]
 
(* [normalise] is the identity on formulas already in normal form
   (i.e. it is idempotent). *)
theorem normalise_same f =
  is_normal f ==> normalise f = f
[@@by auto] [@@rw]
 
(* Normalization is semantics-preserving: evaluation is unchanged
   under every environment. *)
theorem normalise_respects_truth f bs =
  eval_ (normalise f) bs = eval_ f bs
[@@by auto] [@@rw]

Final Soundness Theorem

(* Final soundness: if the checker accepts [f], then [f] is true
   under every environment. Proved by instantiating
   [tautology_implies_eval] at [(normalise f, bindings, [])]. *)
theorem tautology_theorem f bindings =
  is_tautology f
   ==>
  eval_ f bindings
[@@by [%use tautology_implies_eval (normalise f) bindings []]
   @> auto]

If the checker says "tautology", the formula evaluates to true under every binding environment.


Verification: Completeness

Completeness requires constructing a concrete falsifying assignment.

The Falsifier

The falsify' function mirrors the tautology checker but builds a witness assignment instead of returning a boolean:

(* Result of the falsifier: [falsified] says whether a counterexample
   was found; [bindings] carries the witness environment when it was
   (and is [] in the non-falsified leaf cases). *)
type falsify_result = {
  falsified : bool;
  bindings  : binding list;
}
 
(* Witness-building mirror of [is_tautology']: instead of a boolean
   it returns a [falsify_result] whose bindings falsify the formula
   when [falsified] is set. An unassigned [Var i] is falsified by
   binding it to [false]; an [If (Var i, l, r)] split first tries to
   falsify [l] with [i := true], then [r] with [i := false]. *)
let rec falsify' f bs : falsify_result =
  match f with
  | True -> {falsified = false; bindings = []}
  | False -> {falsified = true; bindings = bs}
  | Var i ->
     if is_assigned i bs
     then if eval_ (Var i) bs then
         {falsified = false; bindings = []}
       else {falsified = true; bindings = bs}
     else
      {falsified = true; bindings = ({ var_id = i; value = false } :: bs)}
  | If (Var i, l, r) ->
     if is_assigned i bs then
       if eval_ (Var i) bs then
         falsify' l bs
       else falsify' r bs
     else
       let res_l = falsify' l (assume_true i bs) in
       begin match res_l with
         (* Left branch unfalsifiable: try the right branch. *)
         | {falsified=false;_} -> falsify' r (assume_false i bs)
         | _ -> res_l
       end
  | If (True, l, _) ->
     falsify' l bs
  | If (False, _, r) ->
     falsify' r bs
  | _ -> {falsified=false; bindings = []}
 
(* Top-level falsifier: normalise, then search from the empty
   environment. *)
let falsify f =
  falsify' (normalise f) []

Non-Tautologies Are Falsified

If the checker rejects a normal formula, the falsifier produces a witness:

(* If the checker rejects a normal formula, the falsifier reports
   success. Proved by induction following [is_tautology']'s own
   recursion scheme. *)
theorem non_taut_is_falsified f bs =
  is_normal f
  && not (is_tautology' f bs)
   ==>
  (falsify' f bs).falsified = true
[@@by induct ~on_fun:[%id is_tautology'] ()]
[@@rw]

Binding Extension Lemma

The falsifier extends bindings conservatively:

(* The falsifier extends bindings conservatively: on a successful
   falsification, variables already assigned in [bs] keep the same
   lookup value in the result's bindings. *)
lemma falsify_extends_bindings f bs i =
   is_assigned i bs
    && (falsify' f bs).falsified
  ==>
   look_up i (falsify' f bs).bindings
    = look_up i bs
[@@by auto] [@@rw]

The Custom Induction Scheme for Completeness

The falsify_falsifies lemma — proving that the falsifying assignment actually falsifies the formula — requires a custom induction scheme that merges the recursion patterns of both falsify' and eval_:

(* Custom induction scheme for [falsify_falsifies], merging the
   recursion patterns of [falsify'] and [eval_]. On a variable-headed
   [If] it branches on how the condition evaluates under the
   falsifying assignment, supplying hypotheses for the taken branch
   both under [bs] and under the corresponding extended environment. *)
let rec ff_ind_hint x bs =
  match x with
  | If (Var i as c, a, b) ->
     if eval_ c (falsify' x bs).bindings then
       ff_ind_hint c bs
       && ff_ind_hint a bs
       && ff_ind_hint a (assume_true i bs)
     else
       ff_ind_hint c bs
       && ff_ind_hint b bs
       && ff_ind_hint b (assume_false i bs)
  | If (True, a, _) ->
     ff_ind_hint a bs
  | If (False, _, b) ->
     ff_ind_hint b bs
  | _ -> true

This scheme branches on the evaluated truth value of the condition under the falsifying assignment — ensuring the inductive hypothesis is available for both the tautology check path and the evaluation path simultaneously.

Falsification Is Correct

(* The witness really works: when the falsifier succeeds on a normal
   formula, evaluating under the returned bindings yields [false].
   Uses the merged induction scheme [ff_ind_hint]; [look_up] is kept
   opaque during the proof. *)
lemma falsify_falsifies x bs =
  is_normal x
  && (falsify' x bs).falsified
   ==>
   eval_ x (falsify' x bs).bindings = false
[@@by induct ~on_fun:[%id ff_ind_hint] ()]
[@@disable look_up]
[@@rw] [@@timeout 120]

Final Completeness Theorem

(* Bridge lemma: a rejected formula's normal form evaluates to
   [false] under the falsifier's bindings. Combines
   [falsify_falsifies] and [non_taut_is_falsified], both instantiated
   at [(normalise f, [])]. *)
lemma completeness_helper f =
  not (is_tautology f)
    ==>
  eval_ (normalise f) (falsify f).bindings = false
[@@by [%use falsify_falsifies (normalise f) []]
   @> [%use non_taut_is_falsified (normalise f) []]
   @> auto]
[@@disable normalise_respects_truth, is_normal, normalise]
 
(* Final completeness: if the checker rejects [f], the falsifier's
   bindings make [f] itself evaluate to [false]. Follows from
   [completeness_helper] plus semantics-preservation of
   normalisation. *)
theorem completeness f =
  not (is_tautology f)
    ==>
  eval_ f (falsify f).bindings = false
[@@by [%use completeness_helper f] @> auto]
[@@disable is_normal, normalise, eval_, falsify, is_tautology]

If the checker rejects a formula, the falsify function produces concrete bindings that make the formula evaluate to false.


Results

This is a substantial formal verification effort:

  • Ordinal termination — the normalizer uses Ordinal.pair to prove termination of a potentially size-increasing transformation
  • Two custom induction schemes — one following is_tautology', another merging falsify' and eval_
  • Forward chaining and rewriting — extensive use of [@@fc] and [@@rw] annotations to guide the prover
  • Complete decision procedure — the verified checker is a correct decision procedure for propositional tautologies

The proof establishes that the tautology checker is a trusted kernel — any formula it accepts is genuinely valid, and any formula it rejects has a concrete counterexample.


Soundness + Completeness

Both directions proved — the checker never gives wrong answers in either direction.

Ordinal Termination

Non-trivial termination argument using ordinal pairs for a potentially size-increasing normalization.

Classic Boyer-Moore

A faithful formalization of the Boyer-Moore '79 tautology checker — updated for ImandraX's modern tactic language.


References

  • Source: tautology.iml
  • Boyer, R.S. and Moore, J S. — A Computational Logic (1979)