%\documentclass[a4paper,12pt,openany,reqno,draft]{amsbook}
\documentclass[a4paper,12pt,openany,reqno]{amsbook}
%\documentclass[a4paper,openany,reqno,twoside]{amsbook}
%\usepackage{showlabels}
%\usepackage{layout}
\title{Introduction to Model Theory\\ 
and Mathematical Logic}
\author{David Pierce}
\date{\today}
\address{Mathematics Department, Middle East Technical University,
  Ankara 06531, Turkey} 
\email{dpierce@metu.edu.tr}

\usepackage[headings]{fullpage}
\usepackage[dvips]{graphicx}  % for the cover photo and the German script
\usepackage{verbatim}     % for the comment environment
\usepackage{paralist}     % for the list of ``fnsymbol''s.

\usepackage[polutonikogreek,english]{babel}  % this seems to cause
				% problems with the labelling of
				% appendices as such in the table of
				% contents; see comments below
\usepackage[sffont=cmr,oxonia]{psgreek}

\usepackage{upgreek}  % for \upomega for the von Neumann natural numbers
\usepackage{stmaryrd} % for \binampersand

\usepackage{textcomp}  % supposedly useful with \oldstylenums
\usepackage{hfoldsty} % this didn't work until I added missing
		      % brackets to some of the files.

%\usepackage[cmr]{psgreek}
%\usepackage{psgreek}
\usepackage[all]{xy}
%\pagestyle{headings}
%\usepackage{amsrefs}  I don't know why this does not work.

\usepackage{amssymb,amsmath,amsthm}
\usepackage{amscd}     % commutative diagram
\usepackage[mathscr]{euscript}
\usepackage{url}

\usepackage{pstricks,pst-node,pst-tree,pst-plot}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%   Theorem-like environments
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\newtheorem{theorem}{Theorem}[section]
%\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{lemma}{Lemma}[section]
\newtheorem{corollary}{Corollary}
\newtheorem*{corollary*}{Corollary}
\newtheorem{porism}[theorem]{Porism}
\newtheorem*{porism*}{Porism}
\newtheorem{notation}[theorem]{Notational Convention}

\theoremstyle{definition}
%\newtheorem{definition}[theorem]{Definition}
\newtheorem*{definition}{Definition}

%\newtheorem{example}[theorem]{Example}
%\newtheorem*{example*}{Example}
%\newtheorem{examples}[theorem]{Examples}
%\newtheorem*{examples*}{Examples}

\theoremstyle{remark}
\newtheorem{remark}[theorem]{Remark}


\begin{comment}



\newenvironment{exercises}[1][]%
  {\subsection*{Exercises}{#1}\begin{enumerate}}%
  {\end{enumerate}}

\newenvironment{exercise}%
  {\subsection*{Exercise}}%
  {}



\end{comment}

\newtheorem{xca}{Exercise}[chapter]

%\newcommand{\exercise}{\textbf{exercise}}
\newcommand{\exercise}{exercise}

\numberwithin{section}{chapter}
\numberwithin{equation}{section}
\renewcommand{\theequation}{\fnsymbol{equation}}
%\renewcommand{\theenumi}{\roman{enumi}}
%\renewcommand{\theenumi}{\alph{enumi}}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                             %
%  Abbreviations              %
%                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%\input{../../../../TeX/abbreviations}



\renewcommand{\leq}{\leqslant}
\renewcommand{\geq}{\geqslant}
\renewcommand{\setminus}{\smallsetminus}
\renewcommand{\emptyset}{\varnothing}
\renewcommand{\epsilon}{\varepsilon}

\newcommand{\defn}[2]{\index{#1}\textbf{#1#2}}
\newcommand{\defnplain}[2]{\textbf{#1#2}}
\newcommand{\tech}[2]{\index{#1}\textsl{#1#2}}
\newcommand{\techplain}[2]{\textsl{#1#2}}

% Set theory

\newcommand{\included}{\subseteq}      % [the name suggests the meaning here]
\newcommand{\pincluded}{\subset}       %  proper inclusion
\newcommand{\fincluded}{\included_\mathrm{f}}  % is a finite subset of

\newcommand{\size}[1]{\lvert#1\rvert}  % cardinality
\newcommand{\class}[1]{\mathbf{#1}}    % class
\newcommand{\on}{\class{ON}}           % class of ordinals
\newcommand{\cn}{\class{CN}}           % class of cardinals
\newcommand{\family}[1]{\mathcal{#1}}  % family (of sets)
\newcommand{\To}{\longrightarrow}
\newcommand{\setim}[1][]{''{#1}} % image of set under preceding function 
\newcommand{\setimb}[1]{[#1]} % image of set under preceding function 
\newcommand{\inv}{^{-1}}                % inverse
\newcommand{\id}{\operatorname{id}}          % identity-map

\newcommand{\comp}{^{\mathrm{c}}}      % set-theoretic complement
\newcommand{\pow}[1]{\mathscr{P}(#1)}  % power set

\newcommand{\dom}[1]{\operatorname{dom}(#1)}  % domain of relation
\newcommand{\conv}[1]{\breve{#1}}       % converse of a relation





\newcommand{\gpgen}[1]{\left\langle#1\right\rangle}% subgroup generated by #1

\newcommand{\scr}{\operatorname s}     % successor (for natural numbers)
%\newcommand{\scr}{\mathrm s}     % successor (for natural numbers)
\newcommand{\sscr}{^{\scr}}      % superscript successor

\newcommand{\pred}[1]{\operatorname{pred}(#1)}

% Symbolic logic

\DeclareMathOperator{\arity}{arity}    %
\newcommand{\qsep}{\;}                 % follows a quantified variable
\newcommand{\Forall}[1]{\forall{#1}\qsep }
\newcommand{\Exists}[1]{\exists{#1}\qsep }
\newcommand{\Existsgeq}[2]{\exists^{{}\geq #1}{#2}\qsep }
\newcommand{\Existsgeqn}[1]{\Existsgeq n{#1}}
\newcommand{\Existsgeqk}[1]{\Existsgeq k{#1}}
\newcommand{\Existsleq}[2]{\exists^{{}\leq #1}{#2}\qsep }
\newcommand{\Existsleqn}[1]{\Existsleq n{#1}}
\newcommand{\Existseq}[2]{\exists^{{}= #1}{#2}\qsep }
\newcommand{\existsunique}{\exists!}
\newcommand{\Existsunique}[1]{\exists!\,{#1}\qsep }
\newcommand{\Frall}[2]{(\forall#1\in#2)\qsep }
\newcommand{\Exsts}[2]{(\exists#1\in#2)\qsep }
\newcommand{\Iff}{}
\let\Iff\iff
\renewcommand{\iff}{\Leftrightarrow}
\newcommand{\lto}{\Rightarrow}
\newcommand{\eor}{\nLeftrightarrow}  % exclusive or
\newcommand{\amp}{\;{}\mathbin{\&}{}\;}
%\newcommand{\Enot}{\mathsf{not-}}
\newcommand{\Enot}{\text{\sf not-}}
%\renewcommand{\land}{\mathrel{\wedge}} %(otherwise the spacing is wrong)
\renewcommand{\land}{\mathrel{\binampersand}}
\newcommand{\lno}{{\lnot}}
\newcommand{\shstroke}{\mathrel{|}}    % Sheffer stroke
\newcommand{\psys}[1]{\mathcal{#1}}    % proof system
\newcommand{\proves}[1][]{\vdash_{\psys{#1}}}
\newcommand{\nproves}[1][]{\nvdash_{\psys{#1}}}
\newcommand{\fcom}{,\;}                % comma in list of formulas

\newcommand{\false}{\bot}              %
\newcommand{\true}{\top}               %
\newcommand{\False}{\mathsf F}
\newcommand{\True}{\mathsf T}
\newcommand{\Bcon}{\mathrel{\square}}  % arbitrary bin. Bool. connective 
\newcommand{\quant}{\mathsf Q}         % arbitrary quantifier
\newcommand{\Quant}[1]{\quant{#1}\qsep } % 
\newcommand{\Quanti}[2]{\quant_{#1}{#2}\qsep } % indexed arb. quant.
\newcommand{\syntactic}[1]{\mathbf{#1}} % for symbols of the syntax language
\newcommand{\sF}{\syntactic F}
\newcommand{\sG}{\syntactic G}
\newcommand{\sH}{\syntactic H}
\newcommand{\sK}{\syntactic K}
\newcommand{\sP}{\syntactic P}
\newcommand{\sQ}{\syntactic Q}
\newcommand{\sR}{\syntactic R}
\newcommand{\sA}{\syntactic A}
\newcommand{\sB}{\syntactic B}
\newcommand{\sX}{\syntactic X}

\newcommand{\ngt}{\operatorname{neg}}  % negation
\newcommand{\imp}{\operatorname{imp}}  % implication
\newcommand{\conj}{\operatorname{conj}}  % conjunction

% Model theory

\newcommand{\PFm}{\mathrm{PF}}  % set of propositional formulas
\newcommand{\PVar}{V}          % set of propositional variables
\newcommand{\interpretation}{\mathscr I}% interpretation (part of a
    %structure)
\newcommand{\Tm}[2]{\operatorname{Tm}^{#1}(#2)} % terms
\newcommand{\Tmol}{\Tm{0}{\lang}} % terms
%\newcommand{\Fm}[1]{\mathrm{Fm}^{#1}} % #1-ary formulas
%\newcommand{\Fm}[2]{\operatorname{Fm}^{#1}(#2)} % #1-ary formulas
\newcommand{\Fm}[2]{\operatorname{Fm}^{#1}_{#2}} % #1-ary formulas
%\newcommand{\Fmo}[1]{\mathrm{Fm}^{#1}_0} % #1-ary basic formulas
%\renewcommand{\Fm}[1]{\operatorname{Fm}^{#1}}
%\newcommand{\Fmla}{\operatorname{Fm}_{\lang}}
\newcommand{\Fmla}{\Fm{}{\lang}}


\newcommand{\Sn}[1][\lang]{\operatorname{Sn}_{#1}} % sentences
\newcommand{\fv}[1]{\operatorname{fv}(#1)}  % set of free variables in #1
\newcommand{\Cn}[2][\lang]{\operatorname{Con}_{#1}(#2)} % logical consequences
\newcommand{\conseq}[1]{\operatorname{Con}(#1)} % logical consequences
\newcommand{\LT}[2]{\operatorname{B}_{#1}(#2)}  % Lindenbaum--Tarski algebra
\DeclareMathOperator{\stone}{S}
\newcommand{\ts}[2]{\stone_{#1}(#2)}   % type-space
\newcommand{\tp}[2]{\operatorname{tp}_{#1}(#2)}  % complete type
\renewcommand{\models}{\vDash}   % for parallelism with the following:
\newcommand{\nmodels}{\nvDash}
\newcommand{\acl}[1]{\operatorname{acl}(#1)}  % algebraic closure
\newcommand{\dcl}[1]{\operatorname{dcl}(#1)}  % definable closure
\newcommand{\cl}[1]{\operatorname{cl}(#1)}   % closure

%\newcommand{\free}{\mathbin{\underset{\textstyle\smash\smallsmile}{\smash\shortmid}}}

%\newcommand{\free}{\mathbin%
%    {\thickspace\mid\negthickspace\negthickspace\negthinspace\cup}} 

\newcommand{\free}{\mathbin{\text{\makebox[0.8em]{\makebox[0pt]{$\mid$}\makebox[0pt]{$\cup$}}}}}



\newcommand{\lang}{\mathcal{L}}        % a language or signature
\newcommand{\eq}{^\mathrm{eq}}         % 
\DeclareMathOperator{\theory}{Th}      %
\newcommand{\Th}[1]{\theory(#1)}       % the theory of...
\DeclareMathOperator{\modelclass}{Mod}
\newcommand{\Mod}[2][\lang]{\modelclass_{#1}(#2)}  % the class of models of...
%\newcommand{\Mod}[1]{\modelclass^{\included}(#1)}  % the category of
				% models with embeddings
%\newcommand{\Model}[1]{\modelclass^{\elsub}(#1)}  % the category of
				% models with elementary embeddings

\newcommand{\diag}[1]{\operatorname{diag}(#1)}      % (Robinson) diagram
\newcommand{\eldiag}[1]{\Th{\str{#1}_{#1}}}
\newcommand{\str}[1]{\mathfrak{#1}}     % structure
\newcommand{\Substr}[1]{\mathrm{Ss}(#1)} % set of substructures
\newcommand{\elsub}{\preccurlyeq}      % is an elementary substructure of
%\newcommand{\elsubstr}{\preccurlyeq}   

%  ``Standard'' theories:

\newcommand{\thy}[1]{\mathrm{#1}}      % for the following:
\newcommand{\TO}{\thy{TO}}             % total orders (yes, same as linear)
\newcommand{\ACF}{\thy{ACF}}           % algebraically closed fields
\newcommand{\It}{\thy{Itr}}             % iterative structures...


% Standard structures
\newcommand{\vnn}{\upomega}
\newcommand{\stnd}[1]{\mathbb{#1}}
\newcommand{\N}{\stnd N}          % naturals
\newcommand{\Q}{\stnd{Q}}         % rationals
\newcommand{\R}{\stnd{R}}         % reals
\newcommand{\F}{\stnd{F}}         % a field (probably finite)
\newcommand{\Z}{\stnd{Z}}         % integers
\newcommand{\C}{\stnd{C}}         % complex numbers
\newcommand{\B}{\stnd{B}}         % the 2-element Boolean algebra
\newcommand{\Char}[1]{\operatorname{char}(#1)}       % characteristic



\newcommand{\tuple}[1]{\vec{#1}\,}
\newcommand{\length}[1]{\operatorname{\ell n}(#1)}  % length of a string
\newcommand{\concat}{\mathbin{\widehat{\ }}}
\newcommand{\ord}[1]{\operatorname{ord}(#1)}

%\newcommand{\bref}[1]{\textbf{\ref{#1}}}
\newcommand{\varble}{\mathsf x}
\renewcommand{\phi}{\varphi}


\newcommand{\divides}{\mathrel{\mid}}

\newcommand{\Aut}[1]{\operatorname{Aut}(#1)}

%\newcommand{\Eng}[1]{\textsf{#1}}
%\newcommand{\Eng}[1]{\textbf{\textsl{#1}}}
\newcommand{\Eng}[1]{\emph{#1}}
\newcommand{\Lat}[1]{\textsc{#1}} 
\newcommand{\Tur}[1]{\textsf{#1}} % for words in Turkish
\newcommand{\lett}[1]{\textsf{#1}}

\newcommand{\named}[1]{\widehat{#1}}

\newcommand{\Gk}[1]{\selectlanguage{polutonikogreek}#1\selectlanguage{english}}
%\newcommand{\Gkemph}[1]{\textbf{\textsf{#1}}}
\newcommand{\Gkemph}[1]{\underline{\rule[-0.45ex]{0ex}{1.5ex}#1}}

%\newcommand{\propositional}{propositional}  % I decided to call
				% Boolean connectives propositional


\makeindex

\begin{document}
\mainmatter
\thispagestyle{empty}
%\maketitle
\begin{center}
{\Huge Recursion and Induction}
\vfill
{\LARGE Notes on
Mathematical Logic\\
and Model Theory}
\vfill
{\Large David Pierce}
\vfill
\today
\vfill
\includegraphics[width=300pt,height=400pt]{priene.eps}

{\small Columns in the Ionic order, Priene, Ionia (S\"oke, Ayd\i n, Turkey)}

\vfill
\vfill
\end{center}


 \tableofcontents

\chapter*{Preface}
%\vfill
\begin{quotation}
  The beginner \emph{has in his head a definition of the science}; a
  childish definition perhaps, but still a definition; of the
  science's subject-matter he has \emph{no definition at all.}

Only the hope of a definition.  `I don't know what life is, but I
hope I shall when I have studied physiology for long enough.'

`That is true for a beginner in physiology; but for a master in
physiology the reverse is true; a master in physiology has found out
all that it can tell him and knows what life is.  A beginner in
physiology does not; for him physiology is definable and life as yet,
except in the language of hope, indefinable.'

A man ceases to be a beginner in any given science and becomes a
master in that science when he has learned that \emph{this expected
  reversal is never going to happen} and that he is going to be a
beginner all his life.

%\mbox{}

\mbox{}\hfill ---R. G. Collingwood, \emph{The New Leviathan}
\cite[1.43--46]{C-NL} 
\end{quotation}

%\mbox{}
%\vfill
These notes are for use in a course called Introduction to
Mathematical Logic and 
Model Theory, Math 406, given at METU in the fall of 2008.  They are
based on the notes that I prepared while teaching the course in the
fall of 2004, though with many changes and additions.

The title of these notes refers to the methods of 
\techplain{recursion}{}%
\index{recurs!---ion}
and 
\techplain{induction}.%
\index{inducti!---on, proof by ---on}  I 
have become increasingly aware of how these two methods are confused.
In these notes, recursion is a method of \emph{definition;} induction
is a method of \emph{proof.}  If a set is defined by recursion, then
properties of elements of the set can be proved by induction.
However, it does not then necessarily follow that functions \emph{on}
the set can be defined by recursion.  Logic provides examples of
this phenomenon and a way to understand it.

These notes are intended as a supplement to the
classroom experience, and not for independent study.  I say this
because the notes may not give full explanations of some matters; they
may give too much explanation of other matters; and they may have
mistakes and other features to be changed during the course.  
Various
examples and topics are left as exercises: investigation of some of
these will depend on the interest of the student.

%\newpage

I first learned logic from David Kueker and Chris Laskowski, and from
the notes that I took in their courses and that I still consult today.
Another influence on these notes is the book \cite{MR18:631a} of
Alonzo Church: as a student, I obtained a leftover display copy of
this at a meeting of the Association for Symbolic Logic.
References for current
model theory include Hodges \cite{MR94e:03002}, Marker
\cite{MR1924282}, and Rothmaler \cite{MR1800596}; but this is not a
complete list of the books consulted in the preparation of these
notes.  Shoenfield \cite[p.~iv]{MR1809685} is a good source---though
dated---for mathematical logic in general.  Concerning his practice of
attribution, he writes
\begin{quotation}
  I have made no attempt to credit each result to its author; the
  names attached to the principal theorems are there simply to give
  the reader some idea of the people who have created the subject.  I
  have also omitted all bibliographical references.
\end{quotation}
My practice is not so extreme.
Most of what is in these notes has been worked out only since
the late 1800s, so it is possible to track down the original sources.
I have done this in a few cases.  For sources in the other cases,
especially in model theory, Hodges \cite{MR94e:03002} would be the
place to look. 
 
%\vfill
\section*{Conventions}


The lemma called Lemma~5.3.2, for example, is the second
lemma in \S\ref{sect:implications} (namely, Section Three of
Chapter~Five).  Displayed expressions that will be
referred to later are
labelled from the sequence 
\begin{center}
\begin{inparaenum}\renewcommand{\theenumi}{\fnsymbol{enumi}}
  \item\mbox{}
  \item\mbox{}
  \item\mbox{}
  \item\mbox{}
  \item\mbox{}
  \item\mbox{}
  \item\mbox{}
  \item\mbox{}
  \item\mbox{}
\end{inparaenum}  
\end{center}
But the labels repeat, starting afresh with each section.  Hence a
reference to ($*$) is a reference to the \emph{last} displayed
expression labelled as ($*$).

Proofs begin with the word \emph{Proof} and end with a box
%$\qedsymbol$.  
$\square$.
If there is no proof given, then supplying it is an
exercise.  Other exercises are indicated in the text; these are
repeated, and more are added, at the ends of chapters.

I also put technical terms in \defnplain{boldface}{} when they are being
defined (perhaps only implicitly).  If they are only being emphasized
for some other reason, then they may be \techplain{slanted}.  
All such terms are listed in the index at the back.
Throughout the text, ordinary \emph{italics} and `quotation marks' are
used for the usual sorts of reasons.

%\vfill\vfill

\chapter{Introduction}\label{ch:intro}

\section{Building blocks}\label{sect:bb}

An 
\defnplain{ordered pair}{}%
\index{order!---ed pair}
is defined by the identity
\begin{equation*}
  (x,y)=\bigl\{\{x\},\{x,y\}\bigr\}.
\end{equation*}
The sole purpose of the definition is to ensure that
\begin{equation*}
  (x,y)=(a,b)\Iff x=a\amp y=b.
\end{equation*}
(The sign $\Iff$ is just an abbreviation of the English \emph{if and
  only if.})
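Note that, when $x=y$, the definition collapses: since
$\{x,x\}=\{x\}$, we have
\begin{equation*}
  (x,x)=\bigl\{\{x\}\bigr\};
\end{equation*}
but the desired equivalence still holds in this case.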
The \defnplain{Cartesian product}{}%
\index{Cartesian!--- product} of sets $A$ and $B$ is
the set
\begin{equation*}
  \{(x,y)\colon x\in A\amp y\in B\},
\end{equation*}
which is denoted by
\begin{equation*}
 A\times B.  
\end{equation*}
A \defn{relation}{} from $A$ to $B$ is just a subset of $A\times B$.  If
$R$ is such a relation, and $(a,b)\in R$, then we may also write
\begin{equation*}
  a\mathrel Rb.
\end{equation*}
The \defnplain{domain}{}%
\index{domain!--- of a function}
of $R$ is given by
\begin{equation*}
  \dom R=\{x\colon\Exists yx\mathrel Ry\};
\end{equation*}
that is, the domain of $R$ is the set of $a$ for which there is some $b$
such that $a\mathrel Rb$.
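For example, if $R$ is the relation $\{(1,2),(1,3),(2,3)\}$ from
$\{1,2,3\}$ to itself, then
\begin{equation*}
  \dom R=\{1,2\},
\end{equation*}
since $3$ is related to nothing.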

A relation $R$ from $A$ to $B$ is a \defn{function}{} from $A$ to $B$ if, for
every element $a$ of $A$, there is a unique element $b$ of $B$ such
that $a\mathrel Rb$.  If $f$ is a function from $A$ to $B$, then we
can express this by writing
\begin{equation*}
  f\colon A\To B.
\end{equation*}
The set $B$ is the \defn{codomain}{} of $f$, but (unlike the domain)
it is not determined by $f$ alone.
If $a\mathrel fb$, then we usually write
\begin{equation*}
  f(a)=b  
\end{equation*}
instead; also, $b$ is the \defn{image}{} of $a$ under $f$.
The function $f$ itself can also be written as
\begin{equation*}
  x\longmapsto f(x).
\end{equation*}
The function $f$ is 
\defnplain{injective}{}%
\index{function!injective ---}%
\index{injective function}
if for each $b$ in $B$ there is
at \emph{most} one element $a$ of $A$ such that $f(a)=b$; 
\defnplain{surjective},%
\index{surjective function}%
\index{function!surjective ---}
if for each $b$ in $B$ there is
at \emph{least} one such element $a$ of $A$; 
\defnplain{bijective},%
\index{bijective function}%
\index{function!bijective ---} if both
injective and surjective.  The set $\{y\colon \Exists xf(x)=y\}$ or
$\{f(x)\colon x\in A\}$ is the \defn{image}{} or \defn{range}{} of
$f$; so $f$ is surjective if and only if this image is $B$.  If
$C\included A$, then the \defn{restriction}{} of $f$ to $C$ is given
by
\begin{equation*}
  f\restriction{C}=f\cap(C\times B).
\end{equation*}
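For example, the squaring function $x\longmapsto x^2$ from the set
$\Z$ of integers to itself is neither injective (as $-1$ and $1$ have
the same image) nor surjective (as $2$ is not a square); its image is
the set $\{0,1,4,9,\dots\}$ of squares, and its restriction to $\N$ is
injective.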

A relation from $A$ to itself is a 
\defnplain{binary relation}{}%
\index{binary!--- relation}%
\index{relation!binary ---}
on $A$.  One such relation is the 
\defnplain{diagonal},%
\index{diagonal relation} given by
\begin{equation*}
  \Delta_A=\{(x,y)\colon x=y\amp x\in A\}.
\end{equation*}
This is a function from $A$ to itself, namely the \defn{identity};
considered as such, it may be denoted by 
\begin{equation*}
\id_A.
\end{equation*}

If $R\included A\times B$, then the 
\defnplain{converse}{}%
\index{converse of a relation} of $R$ is given
by
\begin{equation*}
  \conv R=\{(y,x)\colon x\mathrel Ry\}.
\end{equation*}
If also $S\included B\times C$, then the
\defnplain{composite}{}%
\index{composite of relations}%
\index{relation!composite}
of $R$ and $S$ is the relation from $A$ to $C$ given by
\begin{equation*}
  R/S=\{(x,z)\colon\Exists y(x\mathrel Ry\amp y\mathrel Sz)\}.
\end{equation*}
The point of these derived relations is to allow some clever
definitions of certain kinds of relations.  So, $R$ is a function from
$A$ to $B$ if and only if
$\Delta_A\included R/\conv R$ and $\conv R/R\included\Delta_B$.  Assuming
$f\colon A\to B$, we have that $f$ is injective if and only if
$f/\conv f\included\Delta_A$, and surjective if and only if
$\Delta_B\included\conv f/f$.  If also $g\colon B\to C$, then
\begin{equation*}
  g\circ f=f/g.
\end{equation*}
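For example, the first of these inclusions unpacks as follows:
$(x,x)\in R/\conv R$ means that, for some $y$, both $x\mathrel Ry$ and
$y\mathrel{\conv R}x$, which is to say $x\mathrel Ry$; so
$\Delta_A\included R/\conv R$ means precisely that every element of
$A$ is related by $R$ to at least one element of $B$.  Likewise,
$\conv R/R\included\Delta_B$ means that no element of $A$ is related
by $R$ to two distinct elements of $B$.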

A binary relation $R$ on $A$ is 
\defnplain{reflexive},%
\index{reflexive relation}%
\index{relation!reflexive ---}
if $\Delta_A\included R$; 
\defnplain{irreflexive},%
\index{irreflexive relation}%
\index{relation!irreflexive ---}
if $R\cap\Delta_A=\emptyset$; 
\defnplain{symmetric},%
\index{symmetric relation}%
\index{relation!symmetric ---}
 if $R=\conv R$;
\defnplain{antisymmetric},%
\index{antisymmetric relation}%
\index{relation!antisymmetric ---}
 if $R\cap\conv R\included\Delta_A$;
\defnplain{transitive},%
\index{transitive relation}%
\index{relation!transitive ---}
 if $R/R\included R$.  A relation is an
\defnplain{equivalence-relation},%
\index{equivalence!---{}-relation}%
\index{relation!equivalence, equivalence-{}---}
or just an 
\defn{equivalence}, 
if it is reflexive, symmetric, and transitive; an
\defnplain{ordering},%
\index{order!---ing}%
\index{relation|seealso{ordering}}
 if antisymmetric, transitive, and either reflexive
or irreflexive.  An irreflexive ordering is also called
\defnplain{strict}.%
\index{strict ordering}%
\index{order!---ing!strict ---}
  An ordering $R$ of $A$ is 
\defnplain{total}{}%
\index{total ordering}%
\index{order!---ing!total ---}
 if
$R\cup\conv R\cup\Delta_A=A\times A$; otherwise the ordering is
\defnplain{partial}.%
\index{partial ordering}%
\index{order!---ing!partial ---}%
\footnote{For some writers, `partial ordering' means ordering, and
  `ordering' means total ordering.}  If $R$ is an ordering of
$A$, then the pair $(A,R)$ is an 
\defn{order}.%
\index{structure|seealso{order}} 
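
For example, among the subsets of a two-element set $\{a,b\}$,
inclusion ($\included$) is a reflexive ordering; it is partial, since
neither of $\{a\}$ and $\{b\}$ includes the other.  The usual relation
$<$ on the set of integers is a strict total ordering.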

A subset of $A$ is a 
\defnplain{singulary\footnote{The word
    \defn{unary}{} is often used instead   of \Eng{singulary}.
    Following Quine, Church  \cite[\S~02, p.~12, n.~29]{MR18:631a}
    suggests          
  \Eng{singulary} as a more etymologically correct word than
  \Eng{unary}.  Indeed, whereas the first five Latin cardinal numbers are
  \Lat{un-}, \Lat{du-}, \Lat{tri-}, \Lat{quattuor}, \Lat{quinque}, the
  first five Latin \emph{distributive} numbers---corresponding to the
  Turkish \Tur{birer}, \Tur{iki\c ser}, \Tur{\"u\c cer},
  \Tur{d\"order}, \Tur{be\c ser} \cite{LatinDili}---are \Lat{singul-},
  \Lat{bin-}, \Lat{tern-}, \Lat{quatern-}, \Lat{quin-}.  It is the
  latter sequence that gives us \Eng{binary} and \Eng{ternary}---also
  \Eng{quaternary} and \Eng{quinary}, if these are desired.  So
  \Eng{singulary} appears to be a better word than \Eng{unary}.  In
  fact, \Eng{singulary} does not appear in the original \emph{Oxford English
Dictionary} \cite{OED}.  The word \Eng{unary} \emph{does} appear in
this dictionary, but it is considered
obsolete: only one use of the word, from 1576, was discovered in
English literature.  There, \Eng{unary} meant \emph{unit}, although
the word \Eng{unit} was not actually invented until 1570, when it was
introduced by [John] Dee to correspond to the Greek \Gk{monad-}.}
 relation}{}%
\index{singulary!--- relation}%
\index{relation!singulary ---}
on $A$.  A
\defn{ternary relation}{}%
%\index{ternary!--- relation}%
\index{relation!ternary ---}
on $A$ is a subset of $A\times A\times A$, and so forth.

A
\defnplain{singulary operation}{}%
\index{singulary!--- operation}%
\index{operation!singulary ---}
on $A$ is a function from $A$ to itself; a
\defnplain{binary operation}{}%
\index{binary!--- operation}%
\index{operation!binary ---}
on $A$ is a function from $A\times A$ to $A$.
Taking Cartesian products is itself a binary operation, 
\begin{equation*}
(A,B)\mapsto A\times B,
\end{equation*}
on the \tech{class}{} of sets;
 taking \defn{power-set}{s,}%
\index{set!power-{}---}
that is, sets of subsets, is a singulary operation,
\begin{equation*}
A\mapsto\pow A,
\end{equation*}
on the class of sets.
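For example,
\begin{equation*}
  \pow{\{a,b\}}=\bigl\{\emptyset,\{a\},\{b\},\{a,b\}\bigr\},
\end{equation*}
and in particular $\pow{\emptyset}=\{\emptyset\}$, which is not empty.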
An element of a set can be considered as a 
\defn{nullary operation}{}%
%\index{nullary!--- operation}%
\index{operation!nullary ---}
on the set.

\section{Model theory}\label{sect:what}

\begin{quotation}
  To think that physics or chemistry ought to be defined in terms of
  matter or physiology in terms of life is more than  an egregious
  blunder; it is a threat to the existence of science.

It implies that people know what matter is without studying physics or
chemistry, and what life is without studying physiology.

It implies that this non-scientific and pre-scientific knowledge
concerning the nature of matter or life is perfect and final, so far
as it goes, and can never be corrected by anything science can do.

It implies that, if anything scientists imagine themselves to have
discovered about matter or life or what not is inconsistent with
anything contained or implied in this non-scientific and
pre-scientific knowledge, the scientists have made a mistake.

It implies that, if they have made the mistake  by using (for example)
experimental methods, it is experimental methods that are at fault and
must be abandoned.

It implies that, if they have made the mistake by arguing logically,
it is logic that is at fault and must be abandoned.

It implies that any scientists who will not yield to persuasion and
confess the supremacy of non-scientific or pre-scientific knowledge
over all possible scientific inquiry must be made to yield by any
means that can be devised.

At one blow, by enunciating the apparently harmless proposition that
physics or chemistry is the science of matter, physiology the science
of life, or the like, we have evoked the whole apparatus of
\emph{scientific persecution}; I mean the persecution of scientists
for daring to be scientists.

In whose interest is such a persecution carried on?  Who stands to
gain by it?  The nominal beneficiary differs from time to time:
sometimes it is religion, sometimes statecraft, and so on.  None of
these has ever in fact gained a ha'porth of advantage. The actual
beneficiary has always been \emph{obsolete science.}

\hfill---R. G. Collingwood, \emph{The New Leviathan} \cite[1.5--58]{C-NL}
\end{quotation}

Model theory is whatever is taught in courses and books that have
model theory in their titles.  
Different writers will give different
definitions of what model theory is.  In my view, model theory is a
kind of
mathematics done self-consciously.  It is mathematics done while
paying attention to what it \emph{means} to do mathematics.  In particular,
model theory pays attention to the \emph{language} of mathematics.
For a simple example, the sets
commonly denoted by $\N$, $\Z$, $\Q$, $\R$, and $\C$ can all be talked
about in a language whose special symbols include $+$ and $\times$:
this is a model-theoretic observation.

Model
theory is the study of \tech{structure}s \emph{qu\^a}
\tech{model}s of
\techplain{theories}.\index{theory}  
A brief elucidation of the technical terms in this definition might
run as follows.

Examples of \tech{structure}s include
the sets like $\N$ that have just been mentioned, when these are
considered as being equipped with the named 
operations of addition and multiplication.  An order in the sense of
\S\ref{sect:bb}, or a group, is a structure.  A set by itself
is the simplest kind of structure.  In general, a \defn{structure}{} is
a set equipped with some (or no) operations and relations on it.  The
set itself may be called the \defn{universe}{} of the structure.  A
convention that I like to follow is that, if the universe is denoted
by a plain letter, as $A$ or $B$, then the whole structure is denoted by the
Fraktur form of that letter, as $\str A$ or $\str B$.  (See
Appendix~\ref{app:german}.)  But when the
universe is a standard set that is already denoted by a fancy letter,
as $\N$, then
this letter may also be understood to denote the whole structure with
that universe.  We may refer to an element of a structure when
we mean an element of its universe.

The word \emph{qu\^a}\label{qua} might be rendered literally in Turkish as
\emph{ondan} or \emph{neden,} though Redhouse \cite{Redhouse-Eng-Tur}
suggests \emph{s\i fat\i yle} and \emph{niteli\u ginde.}
It is a Latin relative pronoun in the ablative case, used
in technical English to mean \emph{in the capacity of.}  It is perhaps
originally a translation of the Greek \Gk{<~h|}, a relative pronoun in
the dative case, used for example by Aristotle \cite[IV,
  1003${}^{\mathrm a}$21]{Aristotle-Metaph-LCL-I} in referring to
\Gk{t`o >`on <~h| >'on}, \emph{being as such,} as the subject of the
work now known as the \emph{Metaphysics.}  (On the Greek alphabet, see Appendix~\ref{app:greek}.)

The special symbols for the operations and relations of a structure
(such as $+$ and $\times$ for $\N$ or
$\Q$) constitute its \defn{signature}.  \techplain{Sentence}s%
\index{sentence}
in this signature are either true or
false in the structure.  A \defn{model}{} of a set of sentences is a
structure in which all of the sentences are true.  
A set of sentences is a \defn{theory}{} if it contains all of the
sentences that are true in all of its models.
The theory \emph{of} a structure is the set of sentences that are true
in the structure.  This theory may have models that are fundamentally
different from the original structure: this is one feature that makes
model theory interesting.
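
For a simple example, in a signature containing $+$, the sentence
$\Forall x\Forall yx+y=y+x$ is true in $\N$, so it belongs to
$\Th{\N}$, and hence it is true in every model of $\Th{\N}$, even in
those models that are not isomorphic to $\N$.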

\section{Use and mention}

Of the following three sentences, the first two \defn{use}{} the word
{ice,} while the last two \defn{mention}{} this word.
\begin{enumerate}
  \item
Ice is frozen water.
\item\label{s:2}
{Ice} has one syllable.
\item
The English word for frozen water has one syllable.
\end{enumerate}
Note what happens when the sentences are translated into Turkish:
\begin{enumerate}
  \item
\emph{Buz donmu\c s sudur.}
\item
\emph{\emph{Ice}'in bir hecesi vard\i r.}
\item
\emph{Donmu\c s suyun \.Ingilizcesinin bir hecesi vard\i r.}
\end{enumerate}
In sentence~\eqref{s:2}, the word {ice} is used to mention itself.  This
self-referential use of a word may be shown typographically, by using
quotation-marks around the word, or setting it in a different font:
\begin{quotation}
`Ice' has one syllable.

\noindent\emph{Ice} has one syllable.
\end{quotation}
But there need be no typographical distinction at all, as long as
context makes the intended use of a word clear.

The distinction between use and mention of an expression can be seen
in mathematics, as in
\begin{enumerate}
  \item
$2+2=4$.
\item
The sign $+$ denotes addition.
\item
A sign resembling a Greek cross denotes addition.
\end{enumerate}

Occasionally a word is not precise enough to make its use clear.  For
example, David wrote \emph{A Treatise
of Human Nature,} and David is writing the present book: but these two
statements, as they stand, are confusing or misleading.  David Hume
wrote the former book \cite{Hume}, which was published in 1739--40,
while David Pierce is writing the latter.  Thus we attach tags
(surnames) to the name David to show which David is meant in each
case.

The same occasional need for greater precision arises in mathematics.
For example, a group-homomorphism is a function $f$ from one group to
another for which the equation
\begin{equation}\label{eqn:group-hom}
  f(x\cdot y)=f(x)\cdot f(y)
\end{equation}
is an identity.  Here it must be understood that the dot on the
left-hand side of the equation refers to multiplication in the domain of
$f$; and the dot on the right, to multiplication in the codomain.
labels to the dots, we can
write~\eqref{eqn:group-hom} more precisely as
\begin{equation*}
  f(x\cdot^G y)=f(x)\cdot^H f(y).
\end{equation*}

\section{The natural numbers}\label{sect:nat}

The \defn{natural numbers}{} compose a set
with the following five properties.
\begin{enumerate}
  \item\label{p1}
There is an 
\defnplain{initial element}.%
\index{initial!--- element}
\item\label{p2}
Every element has a unique \defn{successor}.
\item\label{p3}
A subset is the whole set if it contains the initial element and
contains the successor of each of its own elements.
\item\label{p4}
The initial element is not the successor of any element.
\item\label{p5}
Elements with the same successor are the same.
\end{enumerate}
These properties were identified by Richard
Dedekind \cite{MR0159773}; they were then written out in a new logical
notation by Giuseppe Peano \cite{Peano}, and they have come to be
called the \defn{Peano axioms}.\index{axiom!Peano ---s}  

Let us denote the set of natural numbers by $\N$;
its initial element, by $1$;
and the successor of an element $k$, by $\scr(k)$ or by $k\sscr$,
according to convenience.  Then we have a structure, which we may
denote by
\begin{equation}\label{eqn:N}
  (\N,1,\scr),
\end{equation}
whose universe is $\N$, and whose signature is
\begin{equation}\label{eqn:N-sig}
\{1,\scr\}.
\end{equation}
This is just what properties~\eqref{p1} and~\eqref{p2} give us.  Note
that, in~\eqref{eqn:N}, the symbols $1$ and $\scr$ refer to an element
and a singulary operation respectively, while in~\eqref{eqn:N-sig}, they refer
to themselves.  If we do not like this ambiguity, then we may
rewrite~\eqref{eqn:N} as
\begin{equation*}
  (\N,1^{\N},\scr^{\N}).
\end{equation*}
As noted in \S~\ref{sect:what}, the single letter $\N$ may also be
understood to denote this whole structure.
Property~\eqref{p3} is that this structure admits 
\defnplain{(proof by) induction}.%
\index{proof!--- by induction}%
\index{inducti!---on, proof by ---on}
The remaining two properties can be written out more formally thus:

\begin{enumerate}\setcounter{enumi}{3}
  \item
$\Forall x1\neq x\sscr$.
\item
$\Forall x\Forall y(x\sscr=y\sscr\lto x=y)$.
\end{enumerate}
This last property is just that the successor-operation is injective.

There are structures in
the signature $\{1,\scr\}$ that admit proof by induction, but do not
have properties~\eqref{p4} and~\eqref{p5}.  For example, suppose $A$ is a
three-element set, as $\{c,d,e\}$.  We obtain a structure $\str
A$ in the signature $\{1,\scr\}$ when we define $1^{\str A}$ as $c$
and define $\scr^{\str A}$ by the following table.
\begin{equation*}
  \begin{array}{c||c|c|c}
    x&c&d&e\\\hline
x\sscr&d&e&c
  \end{array}
\end{equation*}
Then $\str A$ admits proof by induction.  
Indeed, every subset of $A$ that contains $c$ and the successors of
its elements must contain $d$ and $e$, so it must be all of $A$.
But the initial element in $\str A$ is a successor.  We can define
a new structure $\str B$ with the same
universe $\{c,d,e\}$, and the same initial element $c$, but where
$\scr^{\str B}$ is as follows.
\begin{equation*}
\begin{array}{c||c|c|c}
    x&c&d&e\\\hline
x\sscr&d&e&d
  \end{array}  
\end{equation*}
Now $\str B$ admits proof by induction, and moreover, the initial
element is not a successor; but two distinct elements have the same
successor. 

Let us refer to all structures in the signature $\{1,\scr\}$ as
\defn{iterative structure}{s;}\footnote{This is my term, for want
  of a better; another possibility might be \emph{discrete dynamical
    systems.}}% 
%\index{iterative!--- structure}%
\index{structure!iterative ---}
and let us refer to iterative structures
that admit proof by induction as 
\defnplain{inductive structure}{s.}%
\index{inducti!---ve structure}%
\index{structure!inductive ---}
So $\N$ is an inductive structure, as are the other two structures
just defined.  A basic consequence of induction is the following.

\begin{theorem}\label{thm:1-or-succ}
  Every element of an inductive structure is either the initial
  element or a successor.
\end{theorem}

\begin{proof}
  Suppose $\str A$ is an inductive structure.  Let $M$ be the set of
  elements of $A$ that are either the initial element or successors.
  Then the initial element is in $M$, and so is the successor of every
  element of $M$, just because it is a successor.  By induction, $M=A$.
\end{proof}

A \defn{homomorphism}{} between two iterative
structures is a function from (the universe of) one to (the universe of)
the other that takes the initial element to the initial element and
takes the successor of every element to the successor of its image.
So, if $\str A$ and $\str B$ are arbitrary iterative structures,
and $f\colon A\to B$, then $f$ is a homomorphism from $\str A$ to
$\str B$ just in case
\begin{enumerate}
  \item
$f(1^{\str A})=1^{\str B}$;
\item
$f(\scr^{\str A}(c))=\scr^{\str B}(f(c))$ for all $c$ in $A$, that is,
$f\circ\scr^{\str A}=\scr^{\str B}\circ f$.
\end{enumerate}
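For example, there is no homomorphism from the structure $\str A$
defined above to the structure $\str B$ there.  Indeed, such a
homomorphism $f$ would have to satisfy $f(c)=c$, then
$f(d)=f(\scr^{\str A}(c))=\scr^{\str B}(f(c))=\scr^{\str B}(c)=d$, and
similarly $f(e)=e$; but then
\begin{equation*}
  c=f(c)=f(\scr^{\str A}(e))=\scr^{\str B}(f(e))=\scr^{\str B}(e)=d,
\end{equation*}
which is absurd.
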
An iterative structure $\str A$ admits 
\defnplain{recursion}{}
\index{recurs!---ion}
if, for every iterative structure
$\str B$, there is a \emph{unique} homomorphism from $\str A$ to $\str B$.
In this case, that unique homomorphism is said to be
\defnplain{recursively defined}.%
\index{recurs!---ively defined function}%
\index{defin!recursively ---ed function}
%\index{recurs!definition by ---ion} 

\begin{theorem}[Recursion]\label{thm:recursion-in-N}%
\index{theorem!Recursion Th---}%
\index{recurs!R---ion Theorem}
Every structure that satisfies the Peano axioms admits recursion. 
\end{theorem}

\begin{proof}
  Suppose $\str A$ meets the given five conditions, and $\str B$ is another
  iterative structure.  We show that there is a unique homomorphism
  from $\str A$ to $\str B$.

Assuming existence for the moment, we can prove uniqueness by
induction.  Indeed, suppose $f$ and $g$ are homomorphisms from $\str
A$ to $\str B$.  Let $M$ be the subset of $A$ comprising those $c$
such that $f(c)=g(c)$.  Because $f$ and $g$ are homomorphisms, we have
\begin{equation*}
f(1^{\str A})=1^{\str B}=g(1^{\str A}), 
\end{equation*}
so $1^{\str A}\in M$.
Suppose $d\in M$, so $f(d)=g(d)$.  Again since $f$ and $g$ are
homomorphisms, we 
have 
\begin{equation*}
f(\scr^{\str A}(d))=\scr^{\str B}(f(d))
=\scr^{\str B}(g(d))=g(\scr^{\str A}(d)),
\end{equation*}
so $\scr^{\str A}(d)\in
M$.  By induction, $M=A$, so $f=g$.

It remains to show that such a homomorphism $f$ exists at all. 
We want to say that $f$ is the set
\begin{equation*}
  \{(1^{\str A},1^{\str B}),
(\scr^{\str A}(1^{\str A}),\scr^{\str B}(1^{\str B})),
(\scr^{\str A}(\scr^{\str A}(1^{\str A})),
\scr^{\str B}(\scr^{\str B}(1^{\str B}))),\dots\}.
\end{equation*}
We can write this set as
$\{(1,1),(1\sscr,1\sscr),(1\sscr{}\sscr,1\sscr{}\sscr),\dots\}$, as
long as we understand that the left entry in each ordered pair is in
$\str A$; and the right, in $\str B$.  In any case, we have to give a
valid definition of this set, to ensure that it exists.
One way to do
this is to build up $f$ as the union of the sets 
\begin{equation}\label{eqn:11}
\begin{aligned}
&\{(1^{\str A},1^{\str B})\},\\
&\{(1^{\str A},1^{\str B}),
(\scr^{\str A}(1^{\str A}),\scr^{\str B}(1^{\str B}))\},\\
&\{(1^{\str A},1^{\str B}),
(\scr^{\str A}(1^{\str A}),\scr^{\str B}(1^{\str B})),
(\scr^{\str A}(\scr^{\str A}(1^{\str A})),
\scr^{\str B}(\scr^{\str B}(1^{\str B})))\},\\
&\parbox{4in}{\dotfill}
\end{aligned}
\end{equation}
That is, if $\class C$ is the set of all sets listed in~\eqref{eqn:11},
then we let $f=\bigcup\class C$.  But we still have to give a valid
criterion for membership in $\class C$ (and then prove $f$ is a
homomorphism). 

To be precise, we let $\class C$ comprise those subsets $D$ of
$A\times B$ such that, if
$(a,b)\in D$, then either
\begin{equation*}
(a,b)=(1^{\str A},1^{\str B})=(1,1),
\end{equation*}
or else 
\begin{equation*}
(a,b)=(\scr^{\str A}(c),\scr^{\str B}(d))=(c\sscr,d\sscr)
\end{equation*}
for some $c$ and $d$ such
that $(c,d)\in D$.  Now $\class C$ is well defined.  Let
$R=\bigcup\class C$.  Then $R$
is a well-defined relation from $A$ to $B$.  Moreover, since
$\{(1,1)\}\in\class C$,
we have
$1\mathrel R1$; and if $c\mathrel Rd$, so that $(c,d)\in D$ for some
$D$ in $\class C$, then $D\cup\{(c\sscr,d\sscr)\}\in\class C$, so
$c\sscr\mathrel Rd\sscr$. 
Thus $R$ is indeed a homomorphism from $\str A$ to $\str B$, provided
it is a
\emph{function} from $A$ to $B$.

We can already conclude that, for
every $a$ in $A$, there is $b$ in $B$ such that $a\mathrel Rb$.
It remains to prove, assuming $a\mathrel Rb$, that this $b$ is unique.
For this, we shall use
the additional properties of $\str A$.  Suppose $1\mathrel Rb$.  Then
$(1,b)\in D$ for some $D$ in $\class C$.  In $\str A$, we assume that
$1$ is not a successor.  Therefore, by definition of $\class C$, we
know that $b=1$.


Now suppose that, for some $c$ in $A$, there is a unique $d$ in $B$
such that $c\mathrel Rd$.  We know $c\sscr\mathrel Rd\sscr$.  Suppose
$c\sscr\mathrel Re$.  Then $(c\sscr,e)\in D$ for some $D$ in $\class
C$.  Since $1$ is not a successor in $\str A$, we must have $(k,\ell)\in
D$ for some $k$ and $\ell$ such that 
$(k\sscr,\ell\sscr)=(c\sscr,e)$.
Since the successor-operation is injective, we have $k=c$.  Since
$k\mathrel R\ell$, this means $c\mathrel R\ell$.  By uniqueness of
$d$, since $c\mathrel Rd$, we conclude $\ell=d$, so
$e=\ell\sscr=d\sscr$.

By induction, $R$ is a function from $A$ to $B$.  This completes the
proof. 
\end{proof}

The Recursion Theorem allows us to define the usual arithmetic
operations of addition and multiplication and exponentiation on $\N$.
In particular, we can write $\scr$ as $x\mapsto x+1$.
In fact, addition and multiplication can be
defined in any \emph{inductive} structure; but not exponentiation.
See Appendix~\ref{app:N}.  
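For example, for each fixed $m$ in $\N$, the operation $x\mapsto m+x$
can be recursively defined as the unique homomorphism from $\N$ to the
iterative structure $(\N,m\sscr,\scr)$; this homomorphism $h$
satisfies $h(1)=m\sscr$ and $h(n\sscr)=h(n)\sscr$, which are the usual
recursive rules
\begin{align*}
  m+1&=m\sscr;\\
  m+n\sscr&=(m+n)\sscr.
\end{align*}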


A modification of the Recursion Theorem is

  \begin{corollary*}\label{cor:recursion}
    Suppose $A$ is a set with an element $b$, and $F\colon\N\times
    A\to A$.  Then there is a {unique} function $G$ from $\N$ to
    $A$ such 
    that
    \begin{enumerate}
      \item
$G(1)=b$, and
\item
$G(n+1)=F(n,G(n))$ for all $n$ in $\N$.
    \end{enumerate}
  \end{corollary*}

  \begin{proof}
 %   Adjust the proof of Theorem~\ref{thm:rec}.
Let $f\colon \N\times A\to\N\times A$,
where $f(n,x)=(n+1,F(n,x))$.  By recursion, there is a unique function
$g$ from
$\N$ to $\N\times A$ such that $g(1)=(1,b)$ and $g(n+1)=f(g(n))$.  By
induction, the first entry in $g(n)$ is always $n$.  The desired
function $G$ is given by $g(n)=(n,G(n))$.  Indeed, we now have
$G(1)=b$; also, $g(n+1)=f(g(n))=f(n,G(n))=(n+1,F(n,G(n)))$, so
$G(n+1)=F(n,G(n))$.  By induction, $G$ is unique.
  \end{proof}

This allows taking factorials: again, see Appendix~\ref{app:N}.
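Indeed, in the corollary, let $A$ be $\N$, let $b=1$, and let
$F(n,x)=n\sscr\cdot x$; the resulting function $G$ satisfies $G(1)=1$
and $G(n+1)=(n+1)\cdot G(n)$, so $G(n)=n!$.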

An 
\defnplain{isomorphism}{}%
\index{isomorphi!---sm}%
\index{function|seealso{isomorphism}}
is a homomorphism whose underlying function is
a bijection whose inverse is also a homomorphism.  The Recursion
Theorem then has the following converse.

\begin{theorem}\label{thm:isom-to-N}
  Every iterative structure that admits recursion is
  isomorphic to $\N$ and therefore satisfies the Peano axioms; in
  particular, it admits induction.
\end{theorem}

\begin{proof}
  Suppose $\str A$ is an iterative structure that admits recursion.
  Then there are 
  homomorphisms $f$ from $\str A$ to $\N$ and $g$ from $\N$ to $\str
  A$.  Then $f\circ g$ is a homomorphism from $\N$ to itself.  But the
  identity on $\N$ is also a homomorphism from $\N$ to itself;
  therefore $f\circ g$ must be the identity.  For the same reason,
  $g\circ f$ is the identity on $A$.  Thus $f$ is invertible as a
  homomorphism, so it is an isomorphism.
\end{proof}


The sets of \tech{predecessor}s of natural numbers are defined
recursively.  That is, we have $x\mapsto\pred x\colon\N\to\pow{\N}$,
where
\begin{align*}
  \pred 1&=\emptyset;\\
\pred{n\sscr}&=\pred n\cup\{n\}.
\end{align*}
The elements of $\pred n$ are the \defn{predecessor}s of $n$.  We can
define the binary relation $<$ on $\N$ by
\begin{equation}\label{eqn:pred}
  x<y\Iff x\in\pred y.
\end{equation}
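For example, $\pred{1\sscr}=\pred1\cup\{1\}=\{1\}$, and
$\pred{1\sscr{}\sscr}=\{1,1\sscr\}$; so $1<1\sscr$, and both $1$ and
$1\sscr$ are less than $1\sscr{}\sscr$.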
Conversely, if $<$ is a binary relation on a set $A$,
then~\eqref{eqn:pred} defines a predecessor-function from $A$ to $\pow
A$.  Then the 
\defnplain{relational structure}{}%
\index{relation!---al structure}%
\index{structure!relational ---} $(A,<)$ admits
\defnplain{(proof by) induction}{}%
\index{proof!--- by induction}%
\index{inducti!---on, proof by ---on}
if, for every proper subset $B$ of $A$, there is an element $c$ of $A$
that $B$ does not contain, although $B$ contains all predecessors of
$c$.  This means a subset $C$ of $A$ can be proved equal to $A$, provided
that, from the
\defnplain{inductive hypothesis}{}%
\index{inducti!---ve hypothesis}%
\index{hypothesis!inductive ---}
that $\pred d\included C$, it can be proved that $d\in C$.

Note well that we now have \emph{two} kinds of induction.  Context
must be relied on to show which kind is meant.

\begin{theorem}[Induction]%
\index{theorem!Induction Th---}%
\index{inducti!I---on Theorem}
  $(\N,<)$ admits proof by induction.
\end{theorem}

\begin{proof}
  Suppose $B$ is a subset of $\N$ such that, if
  $\pred d\included B$, then $d\in B$.  We shall show $B=\N$.
    Let $C$ comprise those elements of $\N$ whose predecessors belong to
    $B$.  As~$1$ has no predecessors, all of its predecessors belong
    (vacuously) to $B$, so $1\in
    C$.  Suppose $n\in C$.  Then all predecessors of $n$ belong to
    $B$, so by assumption, $n\in B$.  Thus, all
    predecessors of $n\sscr$ belong to $B$, so $n\sscr\in C$.  By
    induction, $C=\N$.  In particular, for all $n$ in $\N$, we have
  $n\sscr\in C$, so $n$ (being a predecessor of $n\sscr$) belongs to
  $B$.  Thus $B=\N$. 
\end{proof}

There are relational structures $(A,<)$ that admit induction, although
$<$ is not transitive (\exercise).

\begin{theorem}
  The relation $<$ on $\N$ is transitive.
\end{theorem}

\begin{proof}
  We show
  \begin{equation*}
    x\in\pred n\implies \pred x\included\pred n
  \end{equation*}
(where $\implies$ stands for the English \emph{implies}).
The claim is vacuously true when $n=1$, since $\pred 1=\emptyset$.
Suppose the claim is true when $n=m$.  If $x\in\pred{m\sscr}$, then
either $x\in\pred m$, or else $x=m$.  In the former case, by inductive
hypothesis, we have $\pred x\included\pred m$;
in the latter case, $\pred x=\pred m$.  In either case, $\pred
x\included \pred m\included\pred{m\sscr}$.  By induction, we are done.
\end{proof}

\begin{lemma}\label{lem:trans-->strict}
Every transitive relational structure that admits induction is a strict
order.\hfill\qedsymbol
\end{lemma}
 
In particular, $(\N,<)$ is a strict order.  But
the ordering guaranteed by the lemma need not be total (\exercise).

\begin{lemma}\label{lem:1<=n}
  In $\N$, $1\leq n$.\hfill\qedsymbol
\end{lemma}

\begin{lemma}\label{lem:m<n}
  In $\N$, if $m<n$, then $m\sscr\leq n$.
\end{lemma}

\begin{proof}
  The claim is trivially true when $n=1$.  Suppose it is true when
  $n=k$.  Say $m<k\sscr$, that is, $m\in\pred k\cup\{k\}$.  If $m\in\pred
  k$, then $m<k$, so $m\sscr\leq k$ by inductive hypothesis, hence
  $m\sscr\leq k\sscr$.  If $m=k$, then $m\sscr=k\sscr$, so $m\sscr\leq
  k\sscr$. 
\end{proof}

\begin{theorem}
  $(\N,<)$ is a strict total order.
\end{theorem}

\begin{proof}
  We show
  \begin{equation*}
    m\not\leq n\implies n<m.
  \end{equation*}
The claim is trivially true when $m=1$, by Lemma~\ref{lem:1<=n}.
Suppose it is true when 
$m=k$.  Say $k\sscr\not\leq n$.  Then $k\not< n$ by the last lemma.
If $k\neq n$, then $k\not\leq n$, so $n<k<k\sscr$ by inductive
hypothesis.  If $k=n$, then $n=k<k\sscr$. 
\end{proof}




A relational structure $(A,<)$ admits 
\defnplain{recursion}{}%
\index{recurs!---ion}
if, for every set $B$ and function $f$ from $\pow B$ into $B$, there
is a unique function $g$ from $A$ to $B$ such that
\begin{equation}\label{eqn:strong-rec}
  g(c)=f(g\setimb{\pred c})
\end{equation}
for all $c$ in $A$.  Here $g\setimb X$ means $\{g(x)\colon x\in X\}$.
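For example, let $(A,<)$ be $(\N,<)$, let $B=\N$, and let $f(X)$ be
$\size X+1$ when $X$ is finite (and $1$, say, otherwise).  Then the
identity on $\N$ satisfies~\eqref{eqn:strong-rec}: if $g$ is the
identity, then $g\setimb{\pred c}=\pred c$, whose size is one less
than $c$.  By the following theorem, the identity is the \emph{unique}
such function.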

\begin{theorem}\label{thm:ord-ind-rec}
A strict order that admits induction admits
recursion. 
\end{theorem}

\begin{proof}
Suppose $(A,<)$ is a strict order that admits induction.  Let
$B$ be a set, and $f\colon\pow B\to B$.  Suppose there are functions
$h$ and $h'$ from $A$ to $B$ such that~\eqref{eqn:strong-rec} holds
for all $c$ in $A$ when $g$ is $h$ or $h'$.  If $h$ and $h'$ agree on
$\pred d$, then
\begin{equation*}
  h(d)=f(h\setimb{\pred d})
=f(h'\setimb{\pred d})
=h'(d),
\end{equation*}
so $h$ and $h'$ agree at $d$.  By induction, $h=h'$.

It remains to show that such a function $h$ exists at all.  Let
$\class C$ comprise the relations $R$ from $A$ to $B$ such that, if
$a\mathrel Rb$, then
\begin{enumerate}
  \item
$\pred a\included\dom R$;
\item
$b=f(\{y\colon\Exists x(x<a\amp x\mathrel Ry)\})$.
\end{enumerate}
Let $S=\bigcup\class C$.  We shall show that $S$ is the desired
function $h$.  Let
\begin{equation*}
  S_a=S\cap(\pred a\times B).
\end{equation*}
Our inductive hypothesis is that $S_a$ is a function $h_a$ from $\pred
a$ to $B$ such that, when $c<a$, then
\begin{align*}
  h_a(c)&=f(h_a\setimb{\pred c})\\
&=f(\{y\colon\Exists x(x<c\amp h_a(x)=y)\})\\
&=f(\{y\colon\Exists x(x<c\amp x\mathrel{S_a}y)\}).
\end{align*}
By transitivity of $<$, if $c<a$, then $\pred c\included\pred a$, so
$\pred c\included\dom{S_a}$.  Hence $S_a\in\class C$.  Letting
$b=f(h_a\setimb{\pred a})$, we have also $S_a\cup\{(a,b)\}\in\class
C$.  Therefore $a\mathrel Sb$.

Conversely, suppose $a\mathrel Sb'$.  Then $a\mathrel Rb'$ for some
$R$ in $\class C$.  But then $\pred a\included\dom R$, and
\begin{equation*}
  R\cap(\pred a\times B)\included S\cap(\pred a\times B)=S_a,
\end{equation*}
so $R\cap(\pred a\times B)=S_a$.  Therefore $b'=b$.  This completes
the induction and the proof.
\end{proof}

Hence $(\N,<)$ admits recursion.

A strict order $(A,<)$ is 
\defnplain{well-founded}{}%
\index{well-founded order}%
\index{order!well-founded ---}
if every non-empty
subset $B$ of $A$ has a \defn{minimal element}, that is, an element
$c$ than which no element of $B$ is less (this means we never have
$d<c$ if $d\in B$).
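
For example, every strict total order on a finite set is well-founded,
and so is $(\N,<)$, by the following theorem, since $(\N,<)$ admits
recursion.  By contrast, $(\Z,<)$ is not well-founded, since $\Z$
itself has no minimal element.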

\begin{theorem}
  Strict orders that admit recursion are well-founded.
\end{theorem}

\begin{proof}
  Suppose $(A,<)$ is a strict order that is not well-founded.  Then
  $A$ has a nonempty subset $B$ that has no minimal element.  Let $C$
  be the set of elements $a$ of $A$ such that $b\leq a$ for some $b$
  in $B$.  Then $C$ has no minimal element.  Define $g_1$ and $g_2$ on
  $A$ by
  \begin{equation*}
    g_1(x)=1;\qquad\qquad
 g_2(x)=
    \begin{cases}
      2,&\text{ if }x\in C;\\
1,&\text{ if }x\in A\setminus C.
    \end{cases}
  \end{equation*}
Define $f$ from $\pow{\{1,2\}}$ into $\{1,2\}$ by
\begin{equation*}
  f(X)=
  \begin{cases}
    2,&\text{ if }2\in X;\\
1,&\text{ otherwise}.
  \end{cases}
\end{equation*}
Then both $g_1$ and $g_2$ are functions $g$ from $A$ to $\{1,2\}$ such
that $g(x)=f(g\setimb{\pred x})$ for all $x$ in $A$.  But $g_1\neq
g_2$, since $B$ is non-empty and $B\included C$; so $(A,<)$ does not
admit recursion.
\end{proof}

A well-founded total order is usually said to be a
\defn{well-ordered set}.%
\index{order!well-{}---ed set}
So $(\N,<)$ is well-ordered.  We now complete the circle of implications
begun in the last two theorems.

\begin{theorem}\label{thm:wf-->ind}
  Well-founded strict orders admit induction.\hfill\qedsymbol
\end{theorem}

For orders then, induction and recursion are equivalent.  For
iterative structures, they are not.

\section{More building blocks}\label{sect:more-blocks}

Note that we have \emph{not} proved that $\N$ exists.  We might supply
this deficiency by offering the following 
\defnplain{recursive definition},%
\index{defin!recursive ---ition}%
\index{recurs!---ive definition} 
namely,
\begin{enumerate}
\item
  $1\in\N$;
\item
if $n\in\N$, then $n1\in\N$.
\end{enumerate}
By $n1$ is meant the result of writing $1$ to the right of $n$.
So the natural numbers are obtained as 
\defn{string}s $1\cdots1$, whose 
\defnplain{entries}{}%
\index{entry in a string} are vertical
strokes.  It is an \exercise{} to check that the set of these strings
is a model of the Peano axioms.

Note that our recursive definition of $\N$ is the recursive definition
of a \emph{set;} it should be 
distinguished from the recursive definition of a \emph{function.}

If $A\included \dom f$ and $f\setimb A\included A$, then $A$ is
\defnplain{closed}{}%
\index{closed!--- under operations}
under $f$.  One of the
axioms of set-theory is that there is a set $\Omega$ that contains
$\emptyset$ and is closed under the operation $x\mapsto x'$, where
$x'$ is the
\defnplain{(set-theoretic) successor}{}%
\index{set!---{}-theoretic successor}%
\index{successor!set-theoretic ---} 
of $x$ and is given by
\begin{equation*}
  x'=x\cup\{x\}.
\end{equation*}
So $(\Omega,\emptyset,{}')$ is an iterative structure.
The set $\vnn$ is defined as the intersection of all subsets of
$\Omega$ that contain $\emptyset$ and are closed under $x\mapsto x'$.
Then $(\vnn,\emptyset,{}')$ satisfies the Peano axioms (\exercise).  
Again we could offer a recursive definition:
\begin{enumerate}
\item
  $\emptyset\in\vnn$;
\item
if $n\in\vnn$, then $n\cup\{n\}\in\vnn$.
\end{enumerate}
Normally
$\emptyset$ is denoted by $0$; and $0'$, by $1$.  In general, when
$n\in\vnn$, then
\begin{equation*}
  n=\{0,1,\dots,n-1\}.
\end{equation*}
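For example,
\begin{equation*}
  3=\{0,1,2\}=
  \bigl\{\emptyset,\{\emptyset\},\bigl\{\emptyset,\{\emptyset\}\bigr\}\bigr\}.
\end{equation*}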
It is notationally convenient to consider the natural numbers to be
the elements of $\vnn$, in the following way.


The set of all functions from a set $A$ to a set $B$ can be denoted
by\footnote{Some people write ${}^AB$ instead.} 
\begin{equation}\label{eqn:map-set}
  B^A.
\end{equation}
If $n\in\vnn$, then the $n$th
\defnplain{Cartesian power}{}%
\index{Cartesian!--- power}
of $A$ is
\begin{equation*}
  A^n.
\end{equation*}
Thus, the $n$th Cartesian power of $A$ is the set of functions from
$\{0,1,\dots,n-1\}$ to $A$.  An element of $A^n$ can be written as any
one of 
\begin{center}
\hfill
  $(a_0,\dots,a_{n-1})$,\hfill
  $i\longmapsto a_i,$\hfill
  $\tuple a$;\hfill\mbox{}
\end{center}
it can be called an 
\defnplain{(ordered) $n$-tuple}{}% 
\index{order!---ed $n$-tuple}%
\index{tuple!ordered $n$-tuple}
from $A$.
Note well that 
\begin{equation*}
  A^0=\{\emptyset\}=\{0\}=1; 
\end{equation*}
this is true even if $A$
is empty.  Also, every element of $A^1$ is $\{(0,a)\}$ for some $a$ in
$A$.  So we have a bijection
\begin{equation}\label{eqn:A1}
  x\longmapsto\{(0,x)\}
\end{equation}
from $A$ to $A^1$.
We may sometimes treat this bijection as an \defn{identification}:
that is, we may neglect to distinguish between $a$ and
$\{(0,a)\}$.

For any $m$ and $n$ in $\vnn$, we have a bijection
\begin{equation}\label{eqn:concat}
  (\tuple x,\tuple y)\longmapsto \tuple x\concat\tuple y
\end{equation}
from $A^m\times
  A^n$ to $A^{m+n}$.
In this notation, $\tuple a\concat\tuple b$ is the $(m+n)$-tuple
\begin{equation*}
  (a_0,\dots,a_{m-1},b_0,\dots,b_{n-1}); 
\end{equation*}
this is the $(m+n)$-tuple 
 $\tuple c$ such
that
\begin{equation*}
  c_k=
  \begin{cases}
    a_k,&\text{ if }k<m;\\
    b_{k-m},&\text{ if }m\leq k<m+n.
  \end{cases}
\end{equation*}
We shall always treat the bijection in~\eqref{eqn:concat} as an
identification; in particular, we shall always write $(\tuple a,\tuple
b)$ instead of $\tuple a\concat\tuple b$.

An 
\defnplain{$n$-ary operation on $A$}{}%
\index{ary!$n$-{}--- operation}%
\index{operation!$n$-ary ---} is a function from $A^n$ to $A$.
The set of these is
\begin{equation*}
  A^{A^n}.
\end{equation*}
In particular, a $0$-ary or \defn{nullary operation}{} on $A$ is an element of
$A^1$; by the bijection in~\eqref{eqn:A1} then, we may identify a
nullary operation on $A$ with an element of $A$.  

An 
\defnplain{$n$-ary relation on $A$}{}%
\index{ary!$n$-{}--- relation}
\index{relation!$n$-ary ---}
is a subset of $A^n$; the set of
these is 
\begin{equation*}
  \pow{A^n}. 
\end{equation*}
In particular, a nullary relation is a subset of $A^0$, that is, of $1$
(or $\{0\}$); so a nullary relation is either $0$ or $1$.

An $n$-ary operation on $A$ is then a (certain kind of) subset of
$A^n\times A$, and this product can be identified with $A^n\times A^1$
and hence with $A^{n+1}$; so an $n$-ary operation on $A$ can be
thought of as an $(n+1)$-ary relation on $A$.  More precisely, if
$f\colon A^n\to A$, then one may refer to the $(n+1)$-ary relation
\begin{equation*}
  \{(\tuple x,f(\tuple x))\colon \tuple x\in A^n\}
\end{equation*}
as the 
\defnplain{graph}{}% 
\index{graph of a function}
of $f$; but there is a bijection between graphs in
this sense and operations.

\section{Well-ordered sets and cardinalities}\label{sect:ordinals}

The notion of
\techplain{cardinality}{}%
\index{cardinal!---ity} of sets can be developed with the help of
well-ordered sets.

An injection $h$ from one relational structure, $(A,R)$, to another,
$(B,S)$, is an 
\defn{embedding}{} if $x\mathrel Ry\Iff h(x)\mathrel Sh(y)$.  An
\defnplain{isomorphism}{}%
\index{isomorphi!---sm}
is then a surjective embedding.  An \defn{initial segment}{} of a
relational structure is a subset that contains all of the predecessors
of its elements.  The initial segment is 
\defnplain{proper}{}%
\index{proper!--- initial segment}
if it is not the whole order; otherwise it is \defnplain{improper}.%
\index{improper initial segment}  Only initial segments of
\emph{orders} will be of interest to us.

\begin{lemma}
  Of any two well-ordered sets, one is isomorphic to an initial segment of
  the other.
\end{lemma}

\begin{proof}
  Let $\str A$ and $\str B$ be well-ordered sets, and let $\infty$ be
  a non-element of $B$.  Define $h$ from $A$ to $B\cup\{\infty\}$ by
  \begin{equation*}
    h(x)=
    \begin{cases}
      \min(B\setminus h\setimb{\pred x}),& \text{ if this exists;}\\
\infty,& \text{ otherwise.}
    \end{cases}
  \end{equation*}
Let $A^*=\{x\in A\colon h(x)\neq\infty\}$ and $h^*=h\restriction{A^*}$.
Then $A^*$ is an initial segment of $\str A$, and~$h^*$ is an
isomorphism between this and an initial segment of $\str B$.
Moreover, one of these segments is improper.
\end{proof}

\begin{lemma}
  If $\str A$ and $\str B$ are well-ordered sets, and $\str A$ is
  isomorphic to a proper initial segment of $\str B$, then $\str B$ is
  not isomorphic to an initial segment of $\str A$.\hfill\qedsymbol
\end{lemma}

A set is 
\defnplain{transitive}{}%
\index{transitive!--- set}%
\index{set!transitive ---}
if it properly includes each of its elements.  So $A$ is transitive if
and only if $x\in A\implies x\pincluded A$, that is,
\begin{equation*}
  x\in A\amp y\in x\implies y\in A.
\end{equation*}

\begin{lemma}\label{lem:order-type}
On a well-ordered set $\str A$, let $h$ be defined by
\begin{equation*}
  h(x)=h\setimb{\pred x}.
\end{equation*}
Then the image of $h$ is transitive, is well-ordered by membership
($\in$), and, with this ordering, is isomorphic to $\str A$.
\hfill\qedsymbol
\end{lemma}

A set is an \defn{ordinal}{} if it is transitive and well-ordered by
membership.
Ordinals are often denoted by small letters from the beginning of the Greek
alphabet, as $\alpha$ and $\beta$.

\begin{theorem}
Every member of an ordinal is an ordinal.
\emph{On} an ordinal, membership is the same as proper inclusion
($\pincluded$).
The set-theoretic successor of an ordinal is an ordinal.  The union of
a set of ordinals is an ordinal.\hfill\qedsymbol
\end{theorem}

\begin{theorem}[Burali-Forti Paradox]%
\index{Burali-Forti Paradox}%
\index{theorem!Burali-Forti Paradox}
The class
  of ordinals is transitive and well-ordered by membership, which is
  the same as proper inclusion.\hfill\qedsymbol
\end{theorem}

The paradox is that, if the class of ordinals is a \defn{set}, then it
is an ordinal, so it belongs to itself and therefore properly includes
itself, which is absurd.  So the class of ordinals is \emph{not} a
set; it is `too big' to be a set; it is a
\defnplain{proper class}.%
\index{proper!--- class}%
\index{class!proper ---}
The class of ordinals can be understood to have the following
recursive definition.
\begin{enumerate}
  \item
$\emptyset$ is an ordinal;
\item
if $x$ is an ordinal, then so is $x'$;
\item
the union of a set of ordinals is an ordinal.
\end{enumerate}
In particular, $\vnn$ and its elements are ordinals.
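
For illustration only, the finite von Neumann ordinals can be built
quite literally on a computer.  The following Python sketch (the
representation and names are ours, not part of the formal
development) constructs $n$ as a \texttt{frozenset} and checks
transitivity.
\begin{verbatim}
def successor(x):
    """x' = x U {x}."""
    return x | frozenset([x])

def ordinal(n):
    """The finite ordinal n = {0, 1, ..., n-1} as a frozenset."""
    a = frozenset()              # 0 is the empty set
    for _ in range(n):
        a = successor(a)
    return a

def is_transitive(a):
    """a is transitive iff every element of a is a subset of a."""
    return all(x <= a for x in a)

assert all(is_transitive(ordinal(n)) for n in range(6))
\end{verbatim}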

If $\str A$ is a well-ordered set, then, by the lemmas, it is
isomorphic to a unique ordinal.
\begin{comment}


, which can be called the
\defnplain{order-type}{}% 
\index{order!---{}-type}%
\index{type!order-{}---}
of $\str A$.

and be denoted by
\begin{equation*}
  \ord{\str A}.
\end{equation*}



\end{comment}
Two sets have the same
\defnplain{cardinality}{}%
\index{cardinal!---ity} if there is a bijection between them.
One of the equivalent forms of the
\defnplain{Axiom of Choice}{}%
\index{axiom!A--- of Choice}%
\index{choice!Axiom of Ch---}
is that every set can be well-ordered.  Therefore every set has the
same cardinality as some ordinal.  The least ordinal with the same
cardinality as the set is the
\defnplain{cardinality}{}%
\index{cardinal!---ity}
of the set.  The cardinality of $A$ is denoted by
\begin{equation*}
  \size A.
\end{equation*}
An ordinal that is the cardinality of some set is a \defn{cardinal}.
We have $\size A\leq\size{\pow A}$, but there is no bijection between
$A$ and $\pow A$; therefore $\size A<\size{\pow A}$.  In
particular, there is no largest cardinality.  This allows us to
define, on the class of ordinals, the
function
\begin{equation*}
  \alpha\longmapsto\aleph_{\alpha},
\end{equation*}
where $\aleph_{\alpha}$ is the
least infinite cardinal greater than those $\aleph_{\beta}$ such that
$\beta<\alpha$.  In particular, 
\begin{equation*}
\aleph_0=\vnn. 
\end{equation*}
All of the finite ordinals are cardinals.  These and $\aleph_0$ are
\defn{countable}; the other cardinals are \defn{uncountable}.


\section{Structures}\label{sect:structures}

An informal definition of structure was given in~\ref{sect:what};
now we can give a more formal definition.
A \defn{structure}{} is an ordered pair $(A,\interpretation)$, also
referred to as $\str A$, where:
\begin{enumerate}
  \item
$A$ is a set, called the \defn{universe}{} of the structure;
\item
$\interpretation$ is a function, written also
  \begin{equation*}
    s\longmapsto s^{\str A},
  \end{equation*}
whose domain $\lang$ is called the \defn{signature}{} of the structure;
\item
$s^{\str A}$ is either an element of $A$ or an $n$-ary operation or
  relation on $A$ for some positive $n$, for each $s$ in $\lang$.
\end{enumerate}
Then $\str A$ is more precisely an
\defnplain{$\lang$-structure}{}%
\index{L-structure@$\lang$-structure} or a structure 
\defnplain{of}{}\index{structure!--- of $\lang$} 
$\lang$.
If $\lang=\{s_0,s_1,\dots\}$, then $\str A$ can be written
\begin{equation*}
  (A,s_0{}^{\str A}, s_{1}{}^{\str A},\dots).
\end{equation*}
The elements, operations, and relations $s^{\str A}$ may be called
\defnplain{basic}.% 
\index{basic elements, operations, and relations}

We have made use of the inductive structure $(\vnn,0,{}')$ to define
Cartesian powers and hence
structures in general.  Algebra provides a wealth of examples of
structures:
\begin{enumerate}
\item
a group $G$, or $(G,\cdot,{}\inv,1)$;
\item
an \emph{abelian} group $G$, or $(G,+,-,0)$;
\item
a unital ring $R$, or $(R,+,-,\cdot,0,1)$;
\item
the ring $\Z$, or $(\Z,+,-,\cdot,0,1)$;
\item
the field $\R$, or $(\R,+,-,\cdot,0,1)$;
\item
the two-element field $\F_2$, or $(\F_2,+,-,\cdot,0,1)$;
\item
a vector-space $V$ over a field $K$; here the signature of $V$ is
\begin{equation*}
  \{+,-,0\}\cup\{a\cdot{}\colon a\in K\}, 
\end{equation*}
where $a\cdot{}$ is the singulary
operation of multiplying by $a$.
\end{enumerate}
Further examples include:
\begin{enumerate}\setcounter{enumi}{7}
\item
an order $(\Omega,<)$;
\item
the \emph{ordered} field $\R$, or $(\R,+,-,\cdot,0,1,<)$.
\end{enumerate}
From a set $\Omega$ arises what we might call
the \defn{power-set}{} structure on $\Omega$, namely
\begin{equation}\label{eqn:pow}
  (\pow{\Omega},\cap,\cup,{}\comp,\emptyset,\Omega,\included).
\end{equation}
If $\Omega$ is the one-element set $\{0\}$, which is $1$, then we have
$\pow{\Omega}=\{0,1\}$, and we may write out the structure
in~\eqref{eqn:pow} as
\begin{equation*}
  (\B,\land,\lor,\lnot{}, 0,1,\models).
\end{equation*}
In particular,  $\B=\{0,1\}$.  I propose to refer to any
structure with universe $\B$ as
a \defnplain{truth}{-structure.}%
\index{truth!---{}-structure}\label{truth}  
In this context, we can
understand $1$ as truth, and $0$ as falsehood.
\defnplain{Propositional logic}{}%
\index{logic!propositional ---}%
\index{proposition!---al logic}
is the study of truth-structures.

With $\interpretation$ as above in the arbitrary structure
$(A,\interpretation)$: 
\begin{enumerate}
  \item
$s^{\str A}$ is the \defn{interpretation}{} in $\str A$ of $s$;
\item
$s$ is a \defn{symbol}{} for $s^{\str A}$.
\end{enumerate}
So $s$ is one of the following:
\begin{enumerate}
  \item
a \defn{constant},%
\index{symbol!constant}
if $s^{\str A}$ is an element of $A$;
\item
an 
\defnplain{$n$-ary function-symbol},%
\index{ary!$n$-{}--- function-symbol}%
\index{function!---{}-symbol}%
\index{symbol!function-{}---}
if $s^{\str A}$ is an $n$-ary operation on $A$;
\item
an 
\defnplain{$n$-ary predicate},%
\index{ary!$n$-{}--- predicate}%
\index{predicate!$n$-ary ---}%
\footnote{Or 
\techplain{relation-symbol}.%
\index{relation!---{}-symbol}} 
if
$s^{\str A}$ is an $n$-ary relation on $A$.  
\end{enumerate}
Since nullary operations on $A$ can be considered as elements of $A$,
a constant can be considered as a nullary function-symbol.

Suppose $\str A$ and $\str B$ are two structures with the same
signature $\lang$.  A \defn{homomorphism}{} from $\str A$ to $\str B$
is a function $h$ from $A$ to $B$ such that
\begin{enumerate}
  \item
$h(c^{\str A})=c^{\str B}$ for all constants $c$ in $\lang$;
\item
$h(f^{\str A}(a_0,\dots,a_{n-1}))=f^{\str B}(h(a_0),\dots,h(a_{n-1}))$
  for all $a_i$ in $A$ and
  $n$-ary function-symbols 
  $f$ in $\lang$, for all positive
  $n$;
\item
$(a_0,\dots,a_{n-1})\in R^{\str A}\implies(h(a_0),\dots,h(a_{n-1}))\in
  R^{\str B}$ for all $a_i$ in $A$, for all
  $n$-ary predicates $R$ in $\lang$, for all positive $n$.
\end{enumerate}
To say that $h$ is a homomorphism from $\str A$ to $\str B$, we may
write
\begin{equation*}
  h\colon\str A\longrightarrow\str B.
\end{equation*}
Then $h$ is an 
\defnplain{isomorphism}{}%
\index{isomorphi!---sm}
 if it is a bijection and its
inverse is a homomorphism.
If $A\included B$, and the inclusion of $A$ in $B$ is a homomorphism,
then we write
\begin{equation*}
  \str A\included\str B
\end{equation*}
and say that $\str A$ is a \defn{substructure}{}%
\index{structure|seealso{sub---}} 
 of $\str B$.  So a
substructure of $\str B$ is a structure whose universe is a subset of
$B$ that is 
\defnplain{closed}{}%
\index{closed!--- under operations}
under the basic operations of
$\str B$ (including the nullary operations). 

\section{Algebras}\label{sect:algebras}

A structure in a signature with no predicates is an \defn{algebra},
and the signature itself may be called \defnplain{algebraic}.%
\index{algebra!---ic signature}
In particular, iterative structures in the sense of \S\ref{sect:nat}
are algebras.  A substructure of an algebra can be called a
\defn{subalgebra}. 
An arbitrary algebra admits \defnplain{(proof by) induction}{}%
\index{inducti!---on, proof by ---on}%
\index{proof!--- by induction}
if it has no proper subalgebras.
An algebra $\str A$ in signature $\lang$ admits
\defnplain{recursion}{}%
\index{recurs!---ion}
 if, for each algebra $\str B$ of $\lang$, there is a
unique homomorphism from $\str A$ to $\str B$.  
The earlier definitions, for iterative structures, were just special
cases of these.

There are also two `degenerate' cases to consider.
If there are no constants, then only the empty structure admits
induction or recursion.  If there are no function-symbols (of positive
arity), an algebra admits induction if and only if its universe
consists entirely of interpretations of the constants; the algebra
admits recursion if and only if, in addition, distinct constants have
distinct interpretations. 

So the simplest \emph{interesting} cases of algebras that admit
induction or recursion are just the ones we have already considered,
namely the iterative structures, in a signature consisting of one
constant and one singulary function-symbol.

In the signature $\{1,s,t\}$ with one constant and \emph{two}
singulary function-symbols, let $\str A$ be the
structure whose universe comprises the binary numerals (starting with $1$),
where $1^{\str A}$ is $1$, and $s^{\str A}$ is adding $0$ on the
right, and $t^{\str A}$ is adding $1$ on the right.  Then
$1110=s^{\str A}(t^{\str A}(t^{\str A}(1)))=s(t(t(1)))=1^t{}^t{}^s$, and
(part of) $\str A$ might be depicted as follows. 
\begin{equation*}
  \xymatrix@C=0pt{
&&&&&&&1\ar@{-}[dllll]_s\ar@{-}[drrrr]^t&&&&&&&\\
&&&10\ar@{-}[dll]_s\ar@{-}[drr]^t&&&&&&&&11\ar@{-}[dll]_s\ar@{-}[drr]^t&&&\\
&100\ar@{-}[dl]_s\ar@{-}[dr]^t&&&&101\ar@{-}[dl]_s\ar@{-}[dr]^t&&&&110\ar@{-}[dl]_s\ar@{-}[dr]^t&&&&111\ar@{-}[dl]_s\ar@{-}[dr]^t&\\
1000&&1001&&1010&&1011&&1100&&1101&&1110&&1111
}
\end{equation*}
\begin{comment}
%Connector labeling isn't working properly in my system

\begin{center}
  \pstree{\TR{$1$}}
         {\pstree{\TR{$10$}^{$s$}}
                 {\pstree{\TR{$100$}}
                         {\TR{$1000$}
                          \TR{$1001$}
                         }
                  \pstree{\TR{$101$}}
                         {\TR{$1010$}
                          \TR{$1011$}
                         }
                 }
          \pstree{\TR{$11$}}
                 {\pstree{\TR{$110$}}
                         {\TR{$1100$}
                          \TR{$1101$}
                         }
                  \pstree{\TR{$111$}}
                         {\TR{$1110$}
                          \TR{$1111$}
                         }
                 }
         }
\end{center}


\end{comment}
This structure admits induction, since every numeral can be obtained
from $1$ by application of $s^{\str A}$ and $t^{\str A}$; it admits
recursion, since every numeral is so obtained in a unique way.
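
For illustration, this algebra is easily simulated.  In the Python
sketch below (assumptions ours: numerals are character strings), the
unique homomorphism into $(\N,1,n\mapsto2n,n\mapsto2n+1)$ recovers the
usual value of a binary numeral.
\begin{verbatim}
def s(w): return w + "0"     # append 0 on the right
def t(w): return w + "1"     # append 1 on the right

def h(w):
    """The unique homomorphism into (N, 1, 2n, 2n+1)."""
    if w == "1":
        return 1
    head, last = w[:-1], w[-1]
    return 2 * h(head) if last == "0" else 2 * h(head) + 1

assert h(s(t(t("1")))) == 14     # the numeral 1110
\end{verbatim}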

\begin{theorem}[Recursion]\label{thm:rec-alg}%
\index{theorem!Recursion Th---}%
\index{recurs!R---ion Theorem}
An algebra admits recursion, provided:
\begin{enumerate}
  \item
it admits induction, 
\item
its basic operations are injections, and 
\item
the ranges of these operations (including the nullary operations) are
pairwise disjoint. 
\end{enumerate}
\end{theorem}

\begin{proof}
  Suppose $\str A$ is an algebra, in a signature $\lang$, meeting the
  given conditions.  Let $\str B$ be another $\lang$-structure.  Let
  $\class C$ be the set of all relations $D$ from $A$ to $B$ such
  that, if $a\mathrel Db$, then either $a=c^{\str A}$ and $b=c^{\str
  B}$ for some constant $c$ in $\lang$, or $a=f^{\str
  A}(d_0,\dots,d_{n-1})$ and $b=f^{\str B}(e_0,\dots,e_{n-1})$ for
  some $n$-ary function-symbol $f$ in $\lang$, for some positive $n$, where
  $d_i\mathrel De_i$ for each $i$ in $n$.  Then $\bigcup\class C$ is a
  homomorphism from $\str A$ to $\str B$ (\exercise); it is unique by
  induction (\exercise).
\end{proof}

The converse to this theorem, in the signature $\{1,\scr\}$, is
Theorem~\ref{thm:isom-to-N}.  The proof has three parts:
\begin{inparaenum}[(1)]
  \item
all algebras in the signature that admit recursion are isomorphic;
\item
there is a particular algebra, namely $(\N,1,\scr)$, that admits
recursion \emph{and} satisfies the hypotheses of
Theorem~\ref{thm:rec-alg};   
\item
therefore all algebras in the signature that admit recursion must
satisfy the hypotheses. 
\end{inparaenum}
To follow this line of argument in arbitrary algebraic signatures, we
need to find, in every such signature, an example of an algebra that
admits recursion.  A first step in this direction is the following.

\begin{theorem}[Induction]\label{thm:alg-ind}%
\index{theorem!Induction Th---}%
\index{inducti!I---on Theorem}
  Every algebra has a unique subalgebra that admits induction.
\end{theorem}

\begin{proof}
Let $\str A$ be an algebra in a signature $\lang$.
The set of subalgebras of $\str A$ is nonempty and ordered by the
substructure-relation $\included$.  If $\class C$ is a set of
subalgebras of $\str A$, then $\bigcap\class C$ is also a subalgebra
of $\str A$ (\exercise).  Therefore the intersection of the
set of all subalgebras of $\str A$ is a subalgebra $\str B$ of $\str
A$.  Then $\str B$ has no proper subalgebras, so it admits induction;
and it is a subalgebra of every subalgebra of $\str A$, so it is the
only subalgebra of $\str A$ that admits induction.
\end{proof}

The subalgebra $\str B$ found in the proof can be understood as given
by the following 
\defnplain{recursive definition}:%
\index{recurs!---ive definition}
\begin{enumerate}
  \item
$c^{\str A}\in B$ whenever $c$ is a constant in $\lang$;
\item
for all positive $n$, for all $n$-ary function-symbols $f$ in $\lang$,
if $\tuple a\in B^n$, then $f^{\str A}(\tuple a)\in B$.
\end{enumerate}
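
For illustration, this recursive definition can be run directly when
the operations are computable.  The Python sketch below (the names and
the finiteness cap are ours) closes a set of constants under given
operations, stopping when nothing new appears.
\begin{verbatim}
from itertools import product

def least_subalgebra(constants, operations):
    """operations: a list of (arity, function) pairs."""
    B = set(constants)
    while True:
        new = {f(*args)
               for arity, f in operations
               for args in product(B, repeat=arity)} - B
        if not new:
            return B
        B |= new

# In (N, 0, x -> x + 2) the least subalgebra is the even numbers;
# capping at 20 keeps the computation finite.
evens = least_subalgebra({0}, [(1, lambda x: min(x + 2, 20))])
assert evens == set(range(0, 22, 2))
\end{verbatim}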

For every algebraic signature $\lang$, there is an algebra $\str A$ in
$\lang$ whose universe $A$ is
the set of all strings of symbols from $\lang$, and where
\begin{enumerate}
  \item
$c^{\str A}$ is just $c$, when $c$ is a constant in $\lang$;
\item
when $f$ is an $n$-ary function-symbol of $\lang$, then $f^{\str A}$
is the function that, from an $n$-tuple $(\sA_0,\dots,\sA_{n-1})$ of
strings in $A$, constructs the string $f\sA_0\dotsb\sA_{n-1}$.
\end{enumerate}
Let the least subalgebra of $\str A$ be denoted by
\begin{equation*}
  \Tmol;
\end{equation*}
this is the algebra of
\defnplain{constant term}s%
\index{constant!--- term}%
\index{term!constant ---}
of $\lang$.

An
\defnplain{initial segment}{}%
\index{initial!--- segment}%
\index{segment!initial ---}
of a string is a string obtained by deleting some (or no) entries on
the right.  The initial segment is 
\defnplain{proper}{}%
\index{proper!--- initial segment}
%\index{initial!proper --- segment}%
%\index{segment!proper initial ---}
if it results from deleting at least one entry.

\begin{lemma}\label{lem:pis-alg}
  No proper initial segment of an element of $\Tmol$ is an
  element of $\Tmol$.
\end{lemma}

\begin{proof}
  We prove by induction that every element of $\Tmol$ neither \emph{is}
  a proper initial segment of another element, nor \emph{has} another
  element as a proper initial segment.  This is true for all
  constants in $\lang$, since all other terms start with
  function-symbols that are not constants.
  Suppose the claim is true for terms $t_0$, \dots,
  $t_{n-1}$, and $f$ is an $n$-ary function-symbol in $\lang$.
  Suppose the term $ft_0\dotsb t_{n-1}$ is a proper initial segment of some
  other term.  This term must take the form $gu_0\dotsb u_{m-1}$.
  Then $g$ is $f$, and there is some $k$ such that either $t_k$ is a
  proper initial
  segment of $u_k$, or the other way around.  Either way contradicts
  the inductive hypothesis.  There is a similar contradiction if some
  proper initial segment of $ft_0\dotsb t_{n-1}$ is a term.
\end{proof}

\begin{theorem}\label{thm:tmol}
  $\Tmol$ admits recursion.
\end{theorem}

\begin{proof}
Use the Recursion Theorem (\ref{thm:rec-alg}).
  By construction, $\Tmol$ admits induction.  Its basic operations are
  injective, since if $ft_0\dotsb t_{n-1}$ is the
  same term as $fu_0\dotsb u_{n-1}$, then each $t_i$ must be the same
  as $u_i$, by the last lemma.  The basic operations have disjoint
  images, since elements of the image of $f$ all start with $f$.
\end{proof}

The converse of the Recursion Theorem (\ref{thm:rec-alg}) now follows
in the manner suggested.  Moreover, another method of proving the
Theorem itself now arises:  If $\str A$ and $\str B$ are algebras
with the same signature, then the product algebra $\str A\times\str B$
can be defined in the obvious way.  If $\str C$ is the subalgebra of
this that admits induction, and if $\str A$ meets the conditions of the
Recursion Theorem, then $C$ is just a homomorphism from $\str
A$ to $\str B$.


If
$\str A$ is an algebra with signature $\lang$, then the interpretation
$c\mapsto c^{\str A}$ in $\str A$ of the constants in $\lang$ extends
recursively to a function $t\mapsto t^{\str A}$ from $\Tmol$ into $A$,
once we require
\begin{equation*}
  (ft_0\dotsb t_{n-1})^{\str A}=f^{\str A}(t_0{}^{\str
  A},\dots,t_{n-1}{}^{\str A}).
\end{equation*}
\begin{comment}



For any algebra $\str A$ in $\lang$, we can understand the elements of
$A$ as new constants.  The result of adding these to $\lang$ is the
signature $\lang(A)$.  Then we can \defn{expand}{} $\str A$ to the
$\lang(A)$-structure $\str A_A$, in which the elements of $A$ are
interpreted as themselves.




\end{comment}

We now introduce a set  $\{\varble_k\colon k\in\vnn\}$ of new symbols,
to be called
\defnplain{(individual) variable}{s.}%
\index{individual variable}%
\index{variable!individual ---} 
We may add some or all of these to $\lang$ as new constants.
 For
$\Tm{0}{\lang\cup\{\varble_0,\dots,\varble_{n-1}\}}$, we write
\begin{equation*}
  \Tm n{\lang};
\end{equation*}
this is the set of 
\defnplain{$n$-ary term}s%
\index{ary!$n$-{}--- term}%
\index{term!$n$-ary ---}
of $\lang$.  The union of these sets is $\Tm{}{\lang}$.  If $t$ is an
$n$-ary term, and $\tuple a$ is an $n$-tuple from an $\lang$-structure
$\str A$, then we recursively obtain an element $t^{\str A}(\tuple a)$
of $A$ as follows:
\begin{enumerate}
\item
  $\varble_k{}^{\str A}(\tuple a)=a_k$;
\item
$c^{\str A}(\tuple a)=c^{\str A}$;
\item
$(ft_0\dotsb t_{n-1})^{\str A}(\tuple a)=f^{\str A}(t_0{}^{\str
  A}(\tuple a), \dots,t_{n-1}{}^{\str A}(\tuple a))$.
\end{enumerate}
We shall see a special case of the function $t\mapsto t^{\str
  A}(\tuple a)$ in \S\ref{sect:logical} and then develop it more
  generally in \S\ref{sect:terms}.
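
For illustration, this recursion is easy to implement.  In the Python
sketch below (the representation is ours), a term is a nested tuple
$(f,t_0,\dots,t_{n-1})$, a variable is \texttt{("x", k)}, and a
structure is a dictionary interpreting each symbol.
\begin{verbatim}
def evaluate(term, interp, a):
    """Compute t^A(a) by recursion on the term."""
    if term[0] == "x":                  # the variable x_k
        return a[term[1]]
    f, args = term[0], term[1:]
    if not args:                        # a constant
        return interp[f]
    return interp[f](*(evaluate(u, interp, a) for u in args))

# The term f x_0 c in the ring of integers, with f as + and c as 1:
interp = {"c": 1, "f": lambda x, y: x + y}
term = ("f", ("x", 0), ("c",))
assert evaluate(term, interp, (41,)) == 42
\end{verbatim}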


\section{Propositional logic}
\label{sect:prop}

%This section reviews propositional logic; but the subject will be
%treated more deeply in Chapter~\ref{ch:prop}. 

The function-symbols in the signature of a truth-structure can be
called 
\defnplain{propositional connective}{s.}%
\index{connective!propositional, Boolean ---}%
\index{proposition!---al connective}%
\footnote{Alternatively, they
  are \techplain{Boolean connectives}.}  
Possibilities include
\begin{enumerate}%\setcounter{enumi}{-1}
\item
the nullary connectives $0$ and $1$;
\item
the singulary connective $\lnot$;
\item
the binary connectives
$\land$, $\lor$, $\lto$, $\iff$, and $\eor$.
\end{enumerate}
Each of these has a standard interpretation as an operation on $\B$.
The interpretations of connectives with positive arity can be given by 
\defnplain{truth}{-tables:}%
\index{truth!---{}-table}%
\index{table!truth-{}---}
\begin{center}
\hfill
$  \begin{array}{c||c}
    P&\lnot P\\\hline
0&1\\
1&0
  \end{array}$
\hfill
$  \begin{array}
    {c|c||c|c|c|c|c}
    P & Q & P\land Q & P\lor Q &
    P\lto Q & P\iff Q & P\eor Q \\\hline
0&0&0&0&1&1&0\\
1&0&0&1&0&0&1\\
0&1&0&1&1&0&1\\
1&1&1&1&1&1&0
  \end{array}$
\hfill\mbox{}
\end{center}
There is an alternative approach to truth-structures.
We can first understand $\B$
as the two-element field $\F_2$, with the following addition- and
multiplication-tables.
\begin{center}
\hfill
  \begin{tabular}{c | cc}
$+$ & $0$ & $1$ \\ \hline
$0$ & $0$ & $1$ \\
$1$ & $1$ & $0$
  \end{tabular}\hfill
  \begin{tabular}{c | cc}
$\cdot$ & $0$ & $1$ \\ \hline
$0$ & $0$ & $0$ \\
$1$ & $0$ & $1$
  \end{tabular}\hfill\mbox{}
\end{center}
Then 
$\eor$ is another symbol for addition on this field; 
$\land$ is another symbol for multiplication; and
the remaining connectives are as follows.
\begin{center}
  \begin{tabular}{c|c}
symbol & interpretation\\\hline
$\lnot$ & $x\mapsto x+1$\\
$\lor$ & $(x,y)\mapsto x\cdot y+x+y$\\
$\lto$ & $(x,y)\mapsto x\cdot y+x+1$\\
$\iff$ & $(x,y)\mapsto x+y+1$
  \end{tabular}
\end{center}
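
These polynomial descriptions are easily checked against the
truth-tables above; the following Python lines (illustrative only)
verify all cases by brute force, with integers modulo $2$ standing
for the elements of $\F_2$.
\begin{verbatim}
for x in (0, 1):
    assert (x + 1) % 2 == (not x)                    # negation
    for y in (0, 1):
        assert (x * y) % 2 == (x and y)              # conjunction
        assert (x*y + x + y) % 2 == (x or y)         # disjunction
        assert (x*y + x + 1) % 2 == ((not x) or y)   # implication
        assert (x + y + 1) % 2 == (x == y)           # biconditional
        assert (x + y) % 2 == (x != y)               # exclusive or
\end{verbatim}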

As mentioned above (p.~\pageref{truth}), propositional logic is the
study of truth-structures.  A \emph{particular}
propositional logic, or a 
\defnplain{propositional calculus},%
\index{calculus!propositional ---}%
\index{proposition!---al calculus}
consists of:
\begin{enumerate}
  \item
for each $n$ in $\vnn$,  a set of strings called $n$-ary
\defnplain{(propositional) formula}{s;}%
\index{proposition!---al formula}%
\index{formula!propositional ---}
\item
a function $\sF\mapsto\named{\sF}$ that converts each $n$-ary
propositional formula
into an $n$-ary operation on $\B$;
\item
a set of formulas called \defn{axiom}{s;}
\item
some operations, called \defnplain{rules of inference},%
\index{rule of inference} on the set of all
formulas. 
\end{enumerate}
The axioms and rules of inference together constitute a
\defnplain{proof system}.%
\index{proof!--- system}%
\index{system!proof ---}
Desirable features of a propositional logic include the following.

The set of formulas should be defined 
\techplain{recursive}{ly,}%
\index{recurs!---ively defined set}
so that it
admits proof by 
\techplain{induction}.%
\index{inducti!---on, proof by ---on}  Moreover, it should admit recursion
itself.  One way to achieve this, as shown in the previous section, is
to let formulas be \tech{term}s in the sense defined there; but there
are alternatives.

The function $\sF\mapsto\named{\sF}$, the set of axioms, and the rules
of inference should then be recursively defined.

For every $n$, for every $n$-ary operation $g$ on $\B$, there should
be some $k$ and some $(n+k)$-ary formula $\sF$ such that
\begin{equation*}
  \named{\sF}(\tuple x,\tuple y)=g(\tuple x).
\end{equation*}
In particular, if $i<n$, then the \defn{projection}{} $\tuple x\mapsto
x_i$ from $\B^n$ to $\B$ will be $\named{\sF}$ for some formula $\sF$; most
naturally, this formula is just $P_i$, a
\defnplain{propositional variable}.%
\index{proposition!---al variable}%
\index{variable!propositional ---}  (This is a special case of
the individual variables introduced in the last section.)  

There will be propositional connectives, as mentioned above.  These
need not be formulas by themselves; but if $*$ is an $n$-ary
connective, then there is a corresponding interpretation $\named*$ as
an $n$-ary operation on $\B$.  If $\sF_0$, \dots, $\sF_{n-1}$ are
$m$-ary formulas, then there should be an $m$-ary formula $\sG$ such
that
\begin{equation*}
  \named{\sG}=\named*\circ(\named{\sF}_0,\dots,\named{\sF}_{n-1}).
\end{equation*}
This $\sG$ will presumably be some string in which $*$ appears as an
entry, and in which the $\sF_i$ appear as substrings.

If a substring of a formula is also a formula, and it is replaced by
another formula, then the result should still be a formula, and that
in a `natural' way.

If $\named{\sF}$ is constantly $1$, then $\sF$ is a 
\defnplain{tautology}.%
\index{tautolog!---y}
It is desirable that all axioms be tautologies, and that the set of
tautologies be closed under the rules of inference.  Moreover, it is
desirable that the set of tautologies be \emph{least} with these
properties.  Then the set of tautologies will be recursively defined
and so admit induction. 

In general, \tech{logic}{} begins as a way to understand ordinary
language and to
make it precise.  Propositional connectives correspond to
conjunctions and other `structural' words like \Eng{and, or, not,} and
\Eng{if\dots then.} 
For example, we interpret the connectives
$\lnot$ and $\lto$ as in the truth-tables above,
because:
\begin{enumerate}
  \item
we think\footnote{It is possible to think the other way, where
  $0$ is truth and $1$ is falsity; this is done, for example, in
  \cite[Ch.~4, Exercise~3.7, p.~178]{MR83e:04002}.} of $0$ as falsity
  and $1$ as truth;
\item
we take $\lnot$ to stand for a word like \Eng{not} that \emph{negates}
sentences, and we take $\lto$ to stand for the locution \Eng{if\dots then;}
\item
in our mathematical writing at any rate, 
\begin{enumerate}
  \item
a claim will be true
if and only if its negation is false, and 
\item
an implication
\Eng{If $A$, then $B$} will be false if and only if $A$ is true, but
$B$ is false.
\end{enumerate}
\end{enumerate}
The function $\sF\mapsto\named{\sF}$ assigns a `meaning' to formulas.
Hence anything to do with this function can be called \defn{semantic}.
By contrast, a proof system is
\defn{syntactic}, involving formulas only as strings.
(The etymologies of \emph{semantic} and \emph{syntactic} are discussed
in Appendix~\ref{app:synt}.)
Gottlob Frege is credited with the first proof system.
A bit of his peculiar notation 
(discussed in Appendix~\ref{app:Frege}) survives:  If $\sF$ is an
axiom or can be obtained from the axioms by (possibly repeated)
application of the rules of inference, then we write
\begin{equation*}
  \proves\sF.
\end{equation*}
By contrast, if $\sF$ is a tautology, then we may write
\begin{equation*}
  \models\sF.
\end{equation*}
It is easy to ensure that $\proves\sF$ implies $\models\sF$.  Forty-two years
after Frege, in 1921, Emil
Post published a proof  \cite[p.~169]{Post} that there are
proof systems in which
$\proves\sF$ if and only if $\models\sF$.

\section*{Exercises}
%\addcontentsline{toc}{section}{Exercises}

\begin{xca}
  Suppose $\str A$ is an inductive structure, and $\str B$ is another
  structure in the signature $\{1,\scr\}$, where $1^{\str A}=1^{\str
  B}$, and the two functions $\scr^{\str A}$ and $\scr^{\str B}$ agree on
  the intersection $A\cap B$ of their domains (that is, $\scr^{\str
  A}\restriction{A\cap B}=\scr^{\str B}\restriction{A\cap B}$).  Prove
  that $A\included B$. 
\end{xca}

\begin{xca}
  Prove Theorem~\ref{thm:recursion-in-N} by obtaining $f$ as an
  \emph{intersection} of relations from $A$ to $B$.
\end{xca}

\begin{xca}
\mbox{}
\begin{enumerate}
\item
  Find a relational structure $(A,<)$ that admits induction, although
  $<$ is not transitive.
\item
  Prove Lemma~\ref{lem:trans-->strict}.
\item
  Find a \emph{partial} order that admits induction.
\end{enumerate}
\end{xca}

\begin{xca}
  Prove Lemma~\ref{lem:1<=n}.
\end{xca}

\begin{xca}
  Prove Theorem~\ref{thm:wf-->ind}.
\end{xca}

\begin{xca}
  Prove that $\N$ as defined in \S\ref{sect:more-blocks} is indeed a
  model of the Peano axioms.
\end{xca}

\begin{xca}
  Prove that $\vnn$ is a
  model of the Peano axioms.
\end{xca}

\begin{xca}
Supply all missing details in \S\ref{sect:ordinals}.
\end{xca}

\begin{xca}
  Verify that the definition of isomorphism given in
  \S\ref{sect:ordinals} for relational structures agrees with that
  given in \S\ref{sect:structures} for arbitrary structures.
\end{xca}

\begin{xca}
  Supply the missing details in the proof of Theorem~\ref{thm:rec-alg}.
\end{xca}

\begin{xca}
  Prove that the intersection of a set of subalgebras of an algebra
  is a subalgebra.
\end{xca}

\begin{xca}
  Fill in the details of the alternative proof of the Recursion
  Theorem (\ref{thm:rec-alg}) mentioned after Theorem~\ref{thm:tmol}.
\end{xca}

%\input{prop}
\chapter{Propositional model theory}
\label{ch:prop}

%\setcounter{section}{-1}
\section{Propositional formulas}\label{sect:prop-form}

This chapter presents a kind of model theory of propositional logic.
It is inspired in part by Chang and Keisler
\cite[\S~1.2]{MR0409165}, who describe the subject as `\,``toy'' model
theory'.  
 In this toy model theory, the role of structures will be played by
\techplain{truth}{-assignments.}%  
\index{truth!---{}-assignment}
These will provide interpretations for 
propositional formulas and will serve as models for sets of
propositional formulas. 

Until \S\ref{sect:prop-sigs}, the official signature for our
propositional logic will be 
$\{\lnot,\lto\}$.  Our
\defnplain{propositional variable}s%
\index{proposition!---al variable}%
\index{variable!propositional ---}
 will compose the set $\{P_0,P_1,\dots\}$, or
$\{P_n\colon n\in\vnn\}$; we may also denote this set by
\begin{equation*}
  \PVar.
\end{equation*}
To denote arbitrary members of $\PVar$, we may use the boldface letters
  $\sP$, $\sQ$, and $\sR$.  These are in boldface as a reminder that
  they are not \emph{themselves} propositional variables.\footnote{In
  technical terms, they are
\techplain{syntactical variable}{s.}%
\index{syntactic!---al variable}
That is, they are certain symbols of
  the \tech{syntax language}.  This is the language---English, with
  some extra symbols---that we are using now.  We are using this
  syntax language
  to talk about the \tech{object language,} which in this case is the
  language of propositional logic.  See \cite[\S~8]{MR18:631a}.}
The set of
\defnplain{propositional formula}s%
\index{proposition!---al formula}%
\index{formula!propositional ---}
will be called
\begin{equation*}
\PFm.
\end{equation*}
We give this a recursive definition:
\begin{enumerate}
  \item
Every propositional variable belongs to $\PFm$;
\item
if $\sA$ belongs to $\PFm$, then so does $\lnot\sA$;
\item
if $\sA$ and $\sB$ belong to $\PFm$, then so does $(\sA\lto\sB)$.
\end{enumerate}
So the propositional formulas are among the strings, each of whose
entries is
\begin{enumerate}
  \item
an element of the set $\PVar$ of variables, or 
\item
one of the connectives $\lnot$ or $\lto$, or
\item
one of the parentheses $($ or $)$.
\end{enumerate}
We may refer to an arbitrary such string by 
$\sA$ or $\sB$, as we did in the definition of $\PFm$; we may refer to
a \emph{formula} by $\sF$, $\sG$, $\sH$, or $\sK$. 

A formula obtained as $\lnot\sF$ is a \defn{negation}; as
$(\sF\lto\sG)$, an \defn{implication}.
So negations begin with the negation-sign $\lnot$; implications,
with the left parenthesis $($.  Every other formula is simply a
variable.

We may also refer to the
\emph{operations} of forming $\lnot\sA$ from
$\sA$, and $(\sA\lto\sB)$ from $(\sA,\sB)$, as negation and
implication respectively.  We may denote the operation of negation
by $\ngt$; of 
implication, by $\imp$.  Then $(\PFm,P_0,P_1,P_2,\dots,\ngt,\imp)$ is
the algebra admitting induction whose existence is guaranteed by the
Induction Theorem~(\ref{thm:alg-ind}).

By induction one can prove for example that every propositional
formula has the same number of left as right parentheses.  A somewhat
more interesting induction will prove Theorem~\ref{thm:co-domain} below. 

Suppose $g\colon\PVar\to\PFm$.\label{substitution}  We can use this as a
  basis for substituting formulas for
  variables in a formula.  Indeed, suppose $\sF$ is an 
\defnplain{$n$-ary formula},%
\index{ary!$n$-{}--- formula}%
\index{formula!$n$-ary ---}
so that its variables appear in the list
  $(P_0,\dots,P_{n-1})$.  Then we can denote $\sF$ more precisely by
\begin{equation*}
  \sF(P_0,\dots,P_{n-1}).
\end{equation*}
Suppose $g(P_k)$ is $\sG_k$ for each $k$ in $\vnn$.  
If we go through $\sF$ entry by entry, replacing each variable $P_k$
with the formula $\sG_k$, then the resulting string can be denoted by
\begin{equation*}
  \sF(\sG_0,\dots,\sG_{n-1}),
\end{equation*}
or simply by
\begin{equation*}
  \sF(g).
\end{equation*}
This is the result of 
\defnplain{substitution with respect to $g$}.%
\index{substitution!--- with respect to $g$}
If exactly $m$ entries in $\sF$ are variables, so that $\sF$ can be
written as
\begin{equation*}
  \dots P_{k_0}\dots P_{k_1}\dots\dotsb\dots P_{k_{m-1}}\dots,
\end{equation*}
then the formula $\sF(g)$ is
\begin{equation*}
  \dots \sG_{k_0}\dots \sG_{k_1}\dots\dotsb\dots \sG_{k_{m-1}}\dots
\end{equation*}

\begin{theorem}\label{thm:co-domain}
  If $g\colon\PVar\to\PFm$, and $\sF$ is in $\PFm$, then so is $\sF(g)$.
\end{theorem}

\begin{proof}
By induction on formulas, we prove that the set of formulas $\sF$ such
that $\sF(g)$ is a formula is all of $\PFm$.
\begin{enumerate}
  \item
If $\sP$ is a variable, then $g(\sP)$ is assumed to be a formula; but
$\sP(g)$ is $g(\sP)$, so $\sP(g)$ is in $\PFm$.
\item
Suppose $\sF(g)$ is a formula $\sH$.  Then substitution with respect
to $g$ in $\lnot\sF$ results in
$\lnot\sH$, which is in $\PFm$ by its definition.
\item
Suppose $\sF(g)$ and $\sG(g)$ are formulas $\sH$ and $\sH'$
respectively.  Then $(\sF\lto\sG)(g)$ is 
$(\sH\lto\sH')$, which again is in $\PFm$ by definition.
\end{enumerate}
This completes the induction and the proof.
\end{proof}
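
Since formulas are strings, substitution is a single left-to-right
scan.  The following Python sketch (representation ours: a string is
a list of its entries) makes this concrete.
\begin{verbatim}
def substitute(formula, g):
    """Return F(g): replace each variable entry P by the string g[P]."""
    result = []
    for symbol in formula:
        if symbol in g:                  # the entry is a variable
            result.extend(g[symbol])
        else:
            result.append(symbol)
    return result

# Substituting (P0 -> P2) for P0 in (P0 -> P1):
F = ["(", "P0", "->", "P1", ")"]
g = {"P0": ["(", "P0", "->", "P2", ")"], "P1": ["P1"]}
assert substitute(F, g) == \
       ["(", "(", "P0", "->", "P2", ")", "->", "P1", ")"]
\end{verbatim}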

If the foregoing discussion of substitution seems too informal or
imprecise, let it be noted that the operation $\sF\mapsto\sF(g)$ can be
defined 
\techplain{recursive}{ly,}%
\index{recurs!---ively defined function}
by means of Theorem~\ref{thm:recursion}
below.  However, substitution makes sense for sets of strings that do
\emph{not} admit recursion or even induction.

\section{Recursion}\label{sect:recursion}

More is true than that $\PFm$ is defined recursively, so that it
admits proof by induction.  Every propositional 
formula carries the history of its construction, which can be
displayed in a \tech{tree}{} whose `trunk' or `root' is the formula, and
whose `leaves' are variables.
For example, the formula\label{example:tree}
$(P_0\lto(\lnot P_0\lto P_1))$ can be analyzed in the following way,
up\footnote{The English \emph{analyze} is from the Greek
  \Gk{>an'alusic}, which literally means a freeing \emph{up.}}
to the variables.
  \begin{equation*}
  \xymatrix@!0{
&& *+[F]{P_0} \ar@{-}[dr]&&&\\
&&&*+[F]{\lnot P_0} \ar@{-}[dr] && *+[F]{P_1} \ar@{-}[dl]\\
*+[F]{P_0} \ar@{-}[dr] &&&& *+[F]{(\lnot P_0\lto P_1)} \ar@{-}[dlll] &\\
& *+[F]{(P_0\lto(\lnot P_0\lto P_1))} &&&&
}
\end{equation*}
We can also draw this picture upside down, showing the formula as
built up from the variables.
\begin{equation*}
  \xymatrix@!0{
& *+[F]{(P_0\lto(\lnot P_0\lto P_1))} &&&&\\
&&&& *+[F]{(\lnot P_0\lto P_1)} \ar@{-}[ulll] &\\
&&&*+[F]{\lnot P_0} \ar@{-}[ur] &&&\\
*+[F]{P_0} \ar@{-}[uuur]&& 
*+[F]{P_0} \ar@{-}[ur]&&& *+[F]{P_1} \ar@{-}[uul]\\ 
}
\end{equation*}
By the formal definition, a \defn{tree}{}\label{tree} is an order $(T,<)$ such
  that, for each element of $T$, the set of predecessors of that
  element is well-ordered by $<$.  In the tree above, we have
  $\sF<\sG$ just in case
  $\sG$ is a proper \tech{subformula}{} of $\sF$.
Trees can be drawn for all formulas, because of
Lemma~\ref{lem:UR} below.  First, an analogue of
  Lemma~\ref{lem:pis-alg} is


\begin{lemma}\label{lem:pis}
  No proper initial segment of a propositional formula is a formula.
\end{lemma}

\begin{proof}
  We prove by induction that every formula neither \emph{has} a proper
  initial segment that is a formula, nor \emph{is} itself a proper initial
  segment of another formula.

By definition of $\PFm$, as we noted, every formula that is not just a
variable starts with $\lnot$ or $($.  So our claim holds for variables.

Suppose the claim holds for $\sF$.  Then it holds for $\lnot\sF$.
Indeed, if $\lnot\sF$ has an initial segment that is a formula,
then we can write this formula as $\lnot\sH$, where $\sH$ is a
formula.  But $\sH$ is an
initial segment of $\sF$, so by inductive hypothesis it must be $\sF$
itself.  Similarly, 
$\lnot\sF$ is not a proper initial segment of another formula.

Finally, suppose the claim holds for $\sF$ and $\sG$.  Then it holds
for $(\sF\lto\sG)$.  Indeed, suppose this has an initial segment that
is a formula; then we can write this formula as $(\sH\lto\sK)$ for
some formulas $\sH$ and $\sK$.  But then $\sH$ is an initial segment
of $\sF$, or the other way around.  Therefore $\sH$ is $\sF$.  Hence
$\sK$ is an initial segment of $\sG$, or the other way around, so
$\sK$ is $\sG$.  Similar considerations apply if $(\sF\lto\sG)$ is an
initial segment of a formula.
\end{proof}

\begin{lemma}[Unique Readability]\label{lem:UR}
The operations $\ngt$ and $\imp$ on $\PFm$ are injective, and their
images are disjoint from each other and from $\PVar$.
\end{lemma}

\begin{proof}
  We already know that the images are disjoint since negations start
  with $\lnot$; 
  implications, $($.  The operation of negation is injective, since if
  $\lnot\sF$ and $\lnot\sG$ are the same formula, then so are $\sF$
  and $\sG$.  Finally, if $(\sF\lto\sG)$ and $(\sH\lto\sK)$ are the
  same formula, then $\sF$ is an initial segment of $\sH$, or the
  other way around, so $\sF$ and $\sH$ are the same by
  Lemma~\ref{lem:pis}, and hence so are 
  $\sG$ and $\sK$; thus implication is injective.
\end{proof}
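
Unique readability is just what makes the obvious one-pass reader of
formulas well-defined.  The following Python sketch (illustrative
only; the representation is ours) recovers from a formula of $\PFm$,
given as a list of entries, the unique tree of its construction.
\begin{verbatim}
def read(symbols, i=0):
    """Parse one formula starting at index i;
    return (tree, next index)."""
    s = symbols[i]
    if s == "~":                          # a negation
        tree, j = read(symbols, i + 1)
        return ("~", tree), j
    if s == "(":                          # an implication
        left, j = read(symbols, i + 1)
        assert symbols[j] == "->"
        right, k = read(symbols, j + 1)
        assert symbols[k] == ")"
        return ("->", left, right), k + 1
    return s, i + 1                       # a propositional variable

tree, _ = read(["(", "P0", "->", "~", "P0", ")"])
assert tree == ("->", "P0", ("~", "P0"))
\end{verbatim}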

Hence an implication takes the form $(\sF\lto\sG)$ for some
\emph{unique} formulas $\sF$ and $\sG$.  We may refer to $\sF$ as the
\defn{antecedent}{} of the implication; to $\sG$, as its
\defn{consequent}{}.  By Theorem~\ref{thm:rec-alg}, we conclude

\begin{theorem}[Recursion]\label{thm:recursion}%
\index{theorem!Recursion Th---}%
\index{recurs!R---ion Theorem}
  Suppose $A$ is a set, and
  \begin{enumerate}
    \item
  $h_0\colon \PVar\to A$,
\item
$g_1$ is a singulary operation on $A$, and 
\item
$g_2$ a binary operation on $A$.  
  \end{enumerate}
Then there is a unique function $h$ on
  $\PFm$ such that 
  \begin{enumerate}
    \item
$h$ agrees with $h_0$ on $\PVar$;
\item
$h(\lnot\sF)=g_1(h(\sF))$, for all $\sF$;
\item
$h((\sF\lto\sG))=g_2(h(\sF),h(\sG))$, for all $\sF$ and
  $\sG$.\hfill\qedsymbol 
  \end{enumerate}
\end{theorem}

%\setcounter{corollary}0
\begin{corollary*}%\label{por:recursion}
  Suppose
  \begin{enumerate}
    \item
  $h_0\colon \PVar\to A$, 
\item
$g_1\colon \PFm\times A\to A$, and
\item
 $g_2\colon (\PFm\times A)^2\to A$.  
  \end{enumerate}
Then there is a unique function $h$ on
  $\PFm$ such that 
  \begin{enumerate}
    \item
$h$ agrees with $h_0$ on $\PVar$;
\item
$h(\lnot\sF)=g_1(\sF,h(\sF))$ for all $\sF$;
\item
$h((\sF\lto\sG))=g_2((\sF,h(\sF)),(\sG,h(\sG)))$ for all
 $\sF$ and $\sG$.\hfill\qedsymbol
  \end{enumerate}
\end{corollary*}

We used Unique Readability (Lemma~\ref{lem:UR}) to obtain the
Recursion Theorem (\ref{thm:recursion}) and its corollary.
Conversely, Unique Readability follows from Recursion by the general
method given in \S\ref{sect:algebras}.  It also
follows directly from the corollary.  Indeed,
using the notation of this corollary,
let $A$ be $\PFm$, let $h_0$ and $g_1$ be chosen arbitrarily,
and let $g_2$ be 
\begin{equation*}
  ((\sF,\sF'),(\sG,\sG'))\longmapsto\sF.
\end{equation*}
Let $h$ be the function guaranteed by the corollary.  Then
$h((\sF\lto\sG))=\sF$.  Thus $h$ selects, from an
implication, its antecedent.  Since $h$ is a function,
the antecedent must be unique.  Similarly for the consequent.

Note well that the Recursion Theorem is \emph{not} a consequence of
the Induction Theorem alone.  For example,
suppose we define $\PFm$ without using parentheses.  We shall still be
  able to use induction, but if we are not careful, we shall not have
  definitions by recursion.
  Indeed, say we define $\mathrm{nPF}$ (for `not $\PFm$') so that:
  \begin{enumerate}
    \item
each variable is in $\mathrm{nPF}$;
\item
if $\sA$ is in $\mathrm{nPF}$, then so is $\lnot\sA$;
\item
if $\sA$ and $\sB$ are in $\mathrm{nPF}$, then so is
$\sA\lto\sB$.
  \end{enumerate}
Then proof by induction in $\mathrm{nPF}$ is possible.  However, suppose we
try to define by recursion a function~$f$ from $\mathrm{nPF}$ into
$\PFm$ so as to 
send every element
of the former to its `equivalent' in the latter:
\begin{enumerate}
  \item
$f(\sP)=\sP$;
\item
$f(\lnot\sF)=\lnot f(\sF)$;
\item
$f(\sF\lto\sG)=(f(\sF)\lto f(\sG))$.
\end{enumerate}
This \emph{fails} as the definition of a function, since it implies
that $f(P_0\lto P_1\lto P_2)$ must be both $(P_0\lto(P_1\lto P_2))$
and $((P_0\lto P_1)\lto P_2)$, even though these are different
formulas. 

\section{Notation}\label{sect:notation}

A correct way to avoid using parentheses is to use 
\techplain{\L ukasiewicz}{}%
\index{Lukasiewicz@\L ukasiewicz notation}%
\index{notation!Polish or \L ukasiewicz ---} 
or 
\tech{Polish notation},
%\index{Polish!--- notation}
writing
$\mathord{\lto}\;\sF\;\sG$ 
instead of $(\sF\lto\sG)$.  This is just the notation used for terms
in \S\ref{sect:algebras}.  
Alternatively, without changing the order of symbols, we can remove
\emph{some} parentheses from the formulas in $\PFm$, obtaining a set
$\PFm'$ of formulas that still admits recursion.  To be precise,
every formula in $\PFm'$ will be a variable, a negation, or an
implication.  Then the recursive definition of $\PFm'$ is as follows.
\begin{enumerate}
\item
$\PVar$ is the set of variables in $\PFm'$.
  \item
If $\sA$ is a variable or a negation in $\PFm'$, then $\lnot \sA$ is a
negation in $\PFm'$. 
\item
If $\sA$ is an implication in $\PFm'$, then $\lnot(\sA)$ is a negation
in $\PFm'$.
\item
If $\sA$ is a variable or a negation in $\PFm'$, and $\sB$ is in
$\PFm'$, then $\sA\lto \sB$ is an 
implication in $\PFm'$.
\item
If $\sA$ is an implication in $\PFm'$, and $\sB$ is in $\PFm'$, then
$(\sA)\lto \sB$ is an implication in $\PFm'$.
\end{enumerate}
Thus, no formula in $\PFm'$ is enclosed in parentheses; but an
implication must be so enclosed when it is negated or used as the
\emph{antecedent} of another implication.  It is left to the reader to
formulate and prove an analogue of the Recursion
Theorem~(\ref{thm:recursion}), so
that the following can then be proved:

\begin{theorem}\label{thm:simpler}
  There is a unique bijection $\sF\mapsto\overline{\sF}$ from $\PFm$
  to $\PFm'$ such that
  \begin{enumerate}
    \item
$\overline{\sP}=\sP$ for all variables $\sP$;
\item
$\overline{\lnot\sF}=
  \begin{cases}
    \lnot\overline{\sF},&\text{ if $\sF$ is a variable or
  negation;}\\
    \lnot(\overline{\sF}),&\text{ if $\sF$ is an
  implication;}
  \end{cases}$
\item
$\overline{(\sF\lto\sG)}=
  \begin{cases}
      \overline{\sF}\lto\overline{\sG},&\text{ if $\sF$ is
  a variable or negation;}\\
      (\overline{\sF})\lto\overline{\sG},&\text{ if $\sF$
  is an implication.}
\end{cases}$
  \end{enumerate}
The inverse of this function is a function $\sF\mapsto\underline{\sF}$ from
$\PFm'$ to $\PFm$ such that
\begin{enumerate}
  \item
$\underline{\sP}=\sP$ for all variables $\sP$;
\item
$\underline{\lnot\sF}=\lnot\underline{\sF}$;
\item
$\underline{\lnot(\sF)}=\lnot\underline{\sF}$;
\item
$\underline{\sF\lto\sG}=(\underline{\sF}\lto\underline{\sG})$; 
\item
$\underline{(\sF)\lto\sG}=(\underline{\sF}\lto\underline{\sG})$.
\end{enumerate}
\end{theorem}

\begin{proof}
In the notation of the corollary to the Recursion Theorem for
Formulas, let $A$ be the set of 
strings of the symbols in $\PVar\cup\{\lto,\lnot,(,)\}$, let $h_0$ be
the inclusion of $\PVar$ in $A$, and let
\begin{align*}
  g_1(\sF,\sA)&=
  \begin{cases}
    \lnot\sA,&\text{ if $\sF$ is a variable or negation;}\\
    \lnot(\sA),&\text{ if $\sF$ is an implication;}
  \end{cases}\\
  g_2((\sF,\sA),(\sG,\sB))&=
  \begin{cases}
    \sA\lto\sB,&\text{ if $\sF$ is a variable or negation;}\\
    (\sA)\lto\sB,&\text{ if $\sF$ is an implication.}
  \end{cases}
\end{align*}
Then the function from $\PFm$ to $\PFm'$ exists uniquely as desired.
This function is bijective, with inverse as claimed 
(details are left to the reader). 
\end{proof}
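
For illustration, here is a Python sketch of the map
$\sF\mapsto\overline{\sF}$ (acting, for simplicity, on the trees of
formulas rather than on the strings themselves; the representation is
ours).
\begin{verbatim}
def is_implication(t):
    return isinstance(t, tuple) and t[0] == "->"

def bar(t):
    """Parentheses are restored only around a negated implication
    or an implication used as an antecedent."""
    if isinstance(t, str):                # a variable
        return t
    if t[0] == "~":                       # a negation
        b = bar(t[1])
        return "~(" + b + ")" if is_implication(t[1]) else "~" + b
    a = bar(t[1])                         # an implication
    if is_implication(t[1]):
        a = "(" + a + ")"
    return a + "->" + bar(t[2])

# (P0 -> (~P0 -> P1)) is abbreviated as P0 -> ~P0 -> P1:
assert bar(("->", "P0", ("->", ("~", "P0"), "P1"))) == "P0->~P0->P1"
\end{verbatim}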

Henceforth we may use formulas in $\PFm'$ to denote the corresponding
formulas in $\PFm$.  But the official formulas still belong to
$\PFm$.  This is an important point.  Substitution in formulas in
$\PFm'$ may have undesirable results.  For example, in $P_0\lto P_1$,
if we substitute $P_0\lto P_2$ for $P_0$, we get $P_0\lto P_2\lto
P_1$, which corresponds to the formula $(P_0\lto (P_2\lto
P_1))$ in $\PFm$; but this is not what we get by substituting
$(P_0\lto P_2)$ for $P_0$ in $(P_0\lto P_1)$.

\section{Truth}\label{sect:logical}

A 
\defnplain{truth}{-assignment}%
\index{truth!---{}-assignment}
is a function from $\PVar$ to $\B$.  Let
$\epsilon$ be such a function.  It determines a
substitution
$\sF\mapsto\sF(\epsilon)$ as on p.~\pageref{substitution}, although $0$
and $1$
are not formulas in $\PFm$.  By recursion, $\epsilon$
uniquely determines a function $h$ on $\PFm$ as
follows.
\begin{align*}
h(\sP)&=\epsilon(\sP);\\
h(\lnot\sF)&=
\begin{cases}
  1,&\text{ if }h(\sF)=0;\\
  0,&\text{ if }h(\sF)=1;
\end{cases}\\
h(\sF\lto\sG)&=
\begin{cases}
  0,&\text{ if }h(\sF)=1\text{ and }h(\sG)=0;\\
  1,&\text{ otherwise}.
\end{cases}
\intertext{Alternatively, using the considerations in
  \S\ref{sect:prop}, we have}
  h(\lnot\sF)&=h(\sF)+1;\\
h(\sF\lto\sG)&=h(\sF)\cdot h(\sG)+h(\sG)+1.
\end{align*}
That is, $h$ is the unique homomorphism from
$(\PFm,P_0,P_1,\dots,\ngt,\imp)$ into
\begin{equation*}
(\B,\epsilon(P_0),\epsilon(P_1),\dots,x\mapsto x+1,(x,y)\mapsto
x\cdot y+y+1).
\end{equation*} 
As such, it can be compared with the function $t\mapsto t^{\str
  A}(\tuple a)$ defined in \S\ref{sect:algebras}.
We can denote the homomorphism $h$ by
\begin{equation*}
 \sF\longmapsto\named{\sF}(\epsilon).
\end{equation*}
If $\sF$ is an $n$-ary formula, then $\named{\sF}(\epsilon)$ depends only on
the $n$-tuple 
$(\epsilon(P_0),\dots,\epsilon(P_{n-1}))$.
(This is obvious, but can be
confirmed by induction on formulas.)  
Denoting this $n$-tuple more briefly by $\tuple e$, we may write
\begin{equation*}
  \named{\sF}(\tuple e)
\end{equation*}
instead of $\named{\sF}(\epsilon)$, and we may refer to $\tuple e$ as
   an 
\defnplain{$n$-ary truth-assignment}.%
\index{ary!$n$-{}--- truth-assignment}%
\index{truth!---{}-assignment}%
\index{assignment!truth-{}---}
Then the $n$-ary operation
\begin{equation*}
  \tuple e\longmapsto\named{\sF}(\tuple e),
\end{equation*}
or just $\named{\sF}$, on $\B$ is the \defn{interpretation}{} of
$\sF$.
The number $\named{\sF}(\tuple e)$ is the 
\defnplain{truth}{-value}%
\index{truth!---{}-value}
of $\sF$ with respect to $\epsilon$ or
   $\tuple e$.  In particular, $\sF$ is \defn{true in}{} $\epsilon$ (or
   $\tuple e$) if
   $\named{\sF}(\epsilon)=1$; otherwise, $\sF$ is 
\defn{false in}{}
   $\epsilon$ (or $\tuple e$).
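
For illustration, the homomorphism $h$ is one short recursion in
Python (using the tree representation from the reader sketched in
\S\ref{sect:recursion}; the names are ours).
\begin{verbatim}
def value(formula, e):
    """The truth-value of the formula under the assignment e,
    computed over the two-element field."""
    if isinstance(formula, str):          # a propositional variable
        return e[formula]
    if formula[0] == "~":                 # negation: x + 1
        return (value(formula[1], e) + 1) % 2
    f, g = value(formula[1], e), value(formula[2], e)
    return (f * g + g + 1) % 2            # implication

# (P0 -> (~P0 -> P1)) is true under every assignment:
F = ("->", "P0", ("->", ("~", "P0"), "P1"))
assert all(value(F, {"P0": x, "P1": y}) == 1
           for x in (0, 1) for y in (0, 1))
\end{verbatim}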

The truth-values of an $n$-ary formula $\sF$ with respect to all
 $n$-ary truth-assignments can be given in a 
\defnplain{truth}{-table}%
\index{truth!---{}-table}
with $2^n$ rows and with one
   column for each entry in $\sF$ that is not a parenthesis.
For example, the table for $P_0\lto\lnot P_0\lto P_1$ is the
   following.
   \begin{equation*}
     \begin{array}{cccccc}
       P_0&\lto&\lnot&P_0&\lto&P_1\\\hline
0&1&1&0&0&0\\
1&1&0&1&1&0\\
0&1&1&0&1&1\\
1&1&0&1&1&1
     \end{array}
   \end{equation*}
In general,
we can think of the rows as indexed by the
   numbers less than $2^n$, written in binary notation.  Indeed, let
   us define the elements $e_i^{(k)}$ of $\B$, where $i<n$ and $k<2^n$, by
   \begin{equation*}
     k=\sum_{i=0}^{n-1}e_i^{(k)}\cdot2^i.
   \end{equation*}
Then row $k$ of the truth-table corresponds to the 
   truth-assignment $(e_0^{(k)},\dots,e_{n-1}^{(k)})$.  The corresponding
   truth-value for $\lnot\sF$ will be found in the column indexed by
   $\lnot$; for $\sF\lto\sG$, by $\lto$. 
The truth-table for $P_i$ is
   \begin{equation*}
     \begin{array}{c}
       e_i^{(0)}\\
\vdots\\
e_i^{(2^n-1)}
     \end{array}
   \end{equation*}
The truth-table for $\lnot\sF$ is $S\;T$, where $T$ is the table for
$\sF$; if the column of $T$ giving the values of $\sF$ is
   \begin{equation*}
     \begin{array}{c}
       f_0\\
\vdots\\
f_{2^n-1}
     \end{array}
   \end{equation*}
then $S$ is the column
   \begin{equation*}
     \begin{array}{c}
       f_0+1\\
\vdots\\
f_{2^n-1}+1
     \end{array}
   \end{equation*}
Similarly, the truth-table for $\sF\lto\sG$ takes the form
$T_0\;S\;T_1$, where $T_0$ is the table for $\sF$; $T_1$, for $\sG$; and
$S$ gives the values of $\sF\lto\sG$.
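
For illustration, the indexing of rows just described is simply the
binary expansion of $k$; in Python (names ours):
\begin{verbatim}
def row_assignment(k, n):
    """The n-ary truth-assignment (e_0, ..., e_{n-1}) of row k,
    where k = sum of e_i * 2^i."""
    return tuple((k >> i) & 1 for i in range(n))

assert [row_assignment(k, 2) for k in range(4)] == \
       [(0, 0), (1, 0), (0, 1), (1, 1)]
\end{verbatim}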


The following may seem obvious, once it is understood.

   \begin{theorem}[Associativity]\label{thm:associativity}%
\index{theorem!Associativity Th---}%
\index{Associativity Theorem}
     Suppose $\sF$ is an $n$-ary formula, and $\sH$ is a formula
     $\sF(\sG_0,\dots,\sG_{n-1})$, and $\tuple e$ and $\tuple f$ are
     truth-assignments (of appropriate arity) such that
     \begin{equation*}
       \named{\sG}_k(\tuple e)=f_k
     \end{equation*}
for each $k$ in $n$.  Then
\begin{equation*}
  \named{\sF}(\tuple f)=\named{\sH}(\tuple e).
\end{equation*}
   \end{theorem}

   \begin{proof}
     We use induction on $\sF$.  If $\sF$ is a variable, then it is
     $P_k$ for some $k$ in $n$, so $\sH$ is $\sG_k$, and
     \begin{equation*}
       \named{\sH}(\tuple e)=
       \named{\sG}_k(\tuple e)=f_k=\named{P}_k(\tuple f)=\named{\sF}(\tuple f).
     \end{equation*}
Suppose the claim is true when $\sF$ is $\sF_0$ or $\sF_1$.  If
$\sF$ is $\lnot\sF_0$, then $\sH$ is $\lnot\sH_0$, where $\sH_0$ is
$\sF_0(\sG_0,\dots,\sG_{n-1})$, so that
\begin{align*}
\named{\sH}(\tuple e)
&=1+\named{\sH}_0(\tuple e)\\
&=1+\named{\sF}_0(\tuple f)\qquad\text{[by inductive hypothesis]}\\
&=\named{\sF}(\tuple f).
   \end{align*}
The remaining case, where $\sF$ is $(\sF_0\lto\sF_1)$, is left to the
reader. 
   \end{proof}

A formula is a \defnplain{tautology}{}%
\index{tautolog!---y} if it is true in every
truth-assignment.  The Associativity Theorem immediately yields

\begin{theorem}[Substitution]%
\index{theorem!Substitution Th---}%
\index{substitution!S--- Theorem}
  If $\sF$ is an $n$-ary tautology, then $\sF(\sG_0,\dots,\sG_{n-1})$
  is a tautology. \hfill\qedsymbol
\end{theorem}

Two $n$-ary formulas $\sF$ and $\sG$ are
\defnplain{(logically) equivalent}{}%
\index{logic!---ally equivalent}%
\index{equivalent!logically ---}
if the operations $\named{\sF}$ and $\named{\sG}$ are the same.
Suppose $\sF$ appears as
a \defn{substring}{}%
\index{string!sub---}
of $\sH$, so that $\sH$ can be written as
\begin{equation*}
  \dots\sF\dots
\end{equation*}
We might expect to be able to 
\defnplain{replace}{}%
\index{replacement}
$\sF$ with $\sG$,
obtaining a new formula
\begin{equation*}
  \dots\sG\dots
\end{equation*}
or $\sH'$ such that, if $\sF$ and $\sG$ are equivalent, then so are
$\sH$ and $\sH'$.  However, this fails for $\PFm'$.  For example,
$P_0$, $P_0\lto P_0$, and $\lnot(P_0\lto P_0)$ are formulas in
$\PFm'$, but in the last, if we replace $P_0\lto P_0$ with $P_0$, we
get the non-formula $\lnot(P_0)$.

In $\PFm$, replacement does work in the obvious way.  First we define
\defn{subformula}s%
\index{formula!sub---}
recursively:  Every formula is a subformula of itself, and moreover,
\begin{enumerate}
  \item
subformulas of $\sF$ are subformulas of $\lnot\sF$;
\item
subformulas of $\sF$ or $\sG$ are subformulas of $(\sF\lto\sG)$.
\end{enumerate}

\begin{lemma}\label{lem:subformulas}
  In $\PFm$, a formula that is a substring of $\sF$ is a subformula of
  $\sF$. \hfill\qedsymbol
\end{lemma}

\begin{lemma}\label{lem:sub-->form}
  In a formula of $\PFm$, if a subformula is replaced with another
  formula, the result is a formula.
\end{lemma}

\begin{proof}
  The claim is trivially true when the original formula is a
  variable.  Suppose it is true when the original formula is $\sF$ or
  $\sG$.  Then it is true for $\lnot\sF$ and $(\sF\lto\sG)$ as well,
  since the proper subformulas of these are subformulas of $\sF$ or
  $\sG$.
\end{proof}

\begin{theorem}[Replacement]\label{thm:prop-replacement}%
\index{theorem!Replacement Th---}%
\index{replacement!R--- Theorem}
  In $\PFm$, suppose $\sF$ and $\sF'$ are equivalent, and $\sF$ is a
  subformula of $\sG$, and $\sG'$ is the result of replacing $\sF$ in
  $\sG$ with $\sF'$.  Then $\sG$ and $\sG'$ are equivalent.\hfill\qedsymbol
\end{theorem}

\section{Logical entailment}

We can think of truth as a \emph{relation}
from $\B^{\PVar}$ to $\PFm$, namely the
\defnplain{truth-relation},%
\index{truth!---{}-relation}%
\index{relation!truth-{}---}
$\models$, given by
\begin{equation}\label{eqn:models}
  \epsilon\models\sF\Iff\named{\sF}(\epsilon)=1.
\end{equation}
The complement of $\models$ can be denoted by $\nmodels$.
Hence we can express a fundamental fact as follows:

\begin{lemma}\label{lem:tf}
For all truth-assignments $\epsilon$ and formulas $\sF$,
  \begin{equation}
     \epsilon\models\sF\Iff \epsilon\nmodels\lnot\sF.
   \end{equation}
\end{lemma}

\begin{proof}
  Suppose $e\in\B$.  Then $e=1\Iff e\neq0\Iff e+1=0$.  Let $\sG$ be
  $\lnot\sF$.  Then 
  \begin{align*}
\epsilon\models\sF
&\Iff \named{\sF}(\epsilon)=1\\
&\Iff1+\named{\sF}(\epsilon)=0\\
&\Iff\named{\sG}(\epsilon)=0\\
&\Iff\named{\sG}(\epsilon)\neq1\\
&\Iff\epsilon\nmodels\sG\\
&\Iff\epsilon\nmodels\lnot\sF,
  \end{align*}
as desired.
\end{proof}
Immediately,
   \begin{equation*}
     \epsilon\nmodels\sF\Iff \epsilon\models\lnot\sF.
   \end{equation*}


From the truth-relation, we obtain three new functions, as follows.
A \defn{model}{} of a set of formulas is a truth-assignment in which
every element of the set is true.  If $\Sigma$ is a set of formulas,
let 
\begin{equation*}
  \Mod[]{\Sigma}
\end{equation*}
be the set of its models.  This is the set
\begin{equation*}
  \bigcap_{\sF\in\Sigma}\{\epsilon\in \B^{\PVar}\colon \epsilon\models\sF\}. 
\end{equation*}
We now have a function $\Sigma\mapsto\Mod[]{\Sigma}$ from $\pow{\PFm}$ to
$\pow{\B^{\PVar}}$. 
The \defn{theory}{} of a set of truth-assignments is the set of formulas
that are true in all of the truth-assignments.  If $E$ is a set of
truth-assignments, let
\begin{equation*}
  \Th E
\end{equation*}
be its theory.  This is the set
\begin{equation*}
  \bigcap_{\epsilon\in E}\{\sF\in\PFm\colon \epsilon\models\sF\}.
\end{equation*}
So we have a function $E\mapsto\Th E$ from $\pow{\B^{\PVar}}$ to
$\pow{\PFm}$. 
The 
\defnplain{logical consequence}s%
\index{logic!---al consequence} of a set of formulas are the formulas
that are true in every model of the set.  The logical
consequences of $\Sigma$ compose a set
\begin{equation*}
  \Cn[]\Sigma.
\end{equation*}
This is the set 
\begin{equation*}
\bigcap_{\epsilon\in\Mod[]\Sigma}\{\sF\in\PFm\colon
\epsilon\models\sF\},
\end{equation*}
which is
\begin{equation*}
  \Th{\Mod[]\Sigma}.
\end{equation*}
So we have a singulary operation $\Sigma\mapsto\Cn[]{\Sigma}$ on
$\pow{\PFm}$. 
If $T$ is a set of formulas that is the theory of \emph{some} set of
truth-assignments, then $T$ can be called a \defn{theory}, simply.
If $\sF$ is a logical consequence of $\Sigma$, we may say also that
$\Sigma$ 
\defnplain{logically entail}s%
\index{logic!---al entailment}
 $\sF$.  So we have several
ways\footnote{Another way might be $\Sigma\models\sF$, as suggested in
  \S\ref{sect:structures}; but this should
not be confused with the notation introduced
  in~\eqref{eqn:models}, which has a different meaning.}
 of
saying the same thing:
\begin{enumerate}
  \item
 $\sF$ is a logical consequence of $\Sigma$;
\item
$\Sigma$ logically entails $\sF$;
\item
$\sF\in\Cn[]\Sigma$.
\end{enumerate}
The logical consequences of $\emptyset$ are called 
\defnplain{tautologies};%
\index{tautolog!---y}
these are the formulas that are true in \emph{every} truth-assignment.

Note well that the definition of 
logical entailment is not recursive.  There is, at the moment, no
obvious way to prove by induction that a given set of formulas
contains all logical consequences of $\Sigma$ (or even all
tautologies). 
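Still, over a \emph{finite} stock of variables, the three functions
can be computed by brute force.  The following sketch (an illustration
only; it reuses \texttt{models} from the earlier sketch and restricts
$\PVar$ to $\{P_k\colon k<n\}$) makes the definitions concrete:
\begin{verbatim}
from itertools import product

def assignments(n):
    """All 2^n truth-assignments of the variables P_0, ..., P_{n-1}."""
    return [dict(enumerate(bits)) for bits in product((0, 1), repeat=n)]

def Mod(Sigma, n):
    """Models of Sigma among the n-ary truth-assignments."""
    return [e for e in assignments(n) if all(models(e, F) for F in Sigma)]

def Cn(Sigma, candidates, n):
    """Those candidate formulas true in every model of Sigma."""
    E = Mod(Sigma, n)
    return [F for F in candidates if all(models(e, F) for e in E)]

# {P0, P0 -> P1} logically entails P1:
P0, P1 = ('var', 0), ('var', 1)
assert P1 in Cn([P0, ('imp', P0, P1)], [P1], n=2)
\end{verbatim}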

\begin{lemma}\label{lem:reversing}
  The functions $\Sigma\mapsto\Mod[]{\Sigma}$ and $E\mapsto\Th E$ are
  inclusion-reversing, that is,
  \begin{enumerate}
    \item
$\Sigma\included\Gamma\implies\Mod[]\Gamma\included\Mod[]\Sigma$, and
\item
$D\included E\implies\Th E\included \Th D$.
  \end{enumerate}
The operations $\Sigma\mapsto\Cn[]{\Sigma}$ and $E\mapsto\Mod[]{\Th
  E}$ are increasing, that is,
\begin{enumerate}\setcounter{enumi}2
  \item\label{item:S-conseq}
$\Sigma\included\Cn[]{\Sigma}$;
\item\label{item:E-Mod-Th}
$E\included\Mod[]{\Th E}$.\hfill\qedsymbol 
\end{enumerate}
\end{lemma}

\begin{theorem}\label{thm:closure}
A subset $\Sigma$ of $\PFm$ is a theory if and only if
\begin{equation*}
  \Sigma=\Cn[]{\Sigma}.
\end{equation*}
\end{theorem}

\begin{proof}
If $\Sigma=\Cn[]{\Sigma}$, then $\Sigma$ is the theory of
$\Mod[]{\Sigma}$.  For the converse, note from the lemma that $\Th
E\included\Cn[]{\Th E}$ by~\eqref{item:S-conseq}, but
\begin{equation*}
  \Cn[]{\Th E}=\Th{\Mod[]{\Th E}}\included\Th E
\end{equation*}
by~\eqref{item:E-Mod-Th}, so $\Th E=\Cn[]{\Th E}$.
\end{proof}

See Appendix~\ref{app:compactness} for a discussion of the functions
$\Sigma\mapsto\Mod[]{\Sigma}$ and $E\mapsto\Th E$ in general
terms. 

\section{Compactness}\label{sect:compactness}

A set of formulas with a model can be called 
\defnplain{satisfiable}.%
\index{satisfi!---able}

\begin{lemma}\label{lem:sat}
$\Sigma$ logically entails $\sF$ if and only if
  $\Sigma\cup\{\lnot\sF\}$ is not satisfiable. 
\end{lemma}

\begin{proof}
Suppose $\Sigma$ does \emph{not} logically entail $\sF$.
Then $\Sigma$ has a model $\epsilon$ in which $\sF$ is false.  Hence
$\epsilon\models\lnot\sF$ by Lemma~\ref{lem:tf},  so
$\epsilon$ is a model of $\Sigma\cup\{\lnot\sF\}$.
Suppose conversely that $\Sigma\cup\{\lnot\sF\}$ has a model.  Then
$\sF$ is false in this model, again by Lemma~\ref{lem:tf}, so $\sF$ is
not a logical consequence of $\Sigma$.  
\end{proof}

A set of formulas whose every \emph{finite} subset has a model can be
called 
\defnplain{finitely satisfiable}.%
\index{finit!ely satisfiable}%
\index{satisfi!finitely ---able}

\begin{lemma}\label{lem:fin-sat}
  If $\Sigma$ is finitely satisfiable, then so is
  $\Sigma\cup\{\sF\}$ or $\Sigma\cup\{\lnot\sF\}$.
\end{lemma}

\begin{proof}
Suppose $\Sigma$ is finitely satisfiable, but $\Sigma\cup\{\sF\}$ is
  not.  Then there is a finite subset $\Gamma$ of $\Sigma$ such that
  $\Gamma\cup\{\sF\}$ has no model.  Then $\Gamma\cup\{\lnot\lnot\sF\}$
  has no model, so
  $\Gamma\models\lnot\sF$ by the last lemma.  Say $\Lambda$ is another finite
  subset of $\Sigma$.   Then $\Gamma\cup\Lambda$ is also a finite
  subset of $\Sigma$, so it has a model, and $\lnot\sF$ is true in
  each of its models.  Thus $\Lambda\cup\{\lnot\sF\}$ has a
model, by Lemma~\ref{lem:reversing}.  Hence $\Sigma\cup\{\lnot\sF\}$ is
  finitely satisfiable.
\end{proof}


\begin{theorem}[Compactness]\label{thm:prop-compactness}%
\index{theorem!Compactness Th---}%
\index{compact!C---ness Theorem}
Every finitely satisfiable set of formulas is satisfiable.
\end{theorem}

\begin{proof}
Let $\Sigma$ be finitely satisfiable.
By recursion (in the sense of Theorem~\ref{thm:ord-ind-rec}), we first
define a function $n\mapsto\sF_n$ from 
$\vnn$ into $\PFm$.  Suppose $\{\sF_k\colon k<n\}$ has been defined.  We then
let $\sF_n$ be $P_n$, if $\Sigma\cup\{\sF_k\colon k<n\}\cup\{P_n\}$ is
finitely satisfiable; otherwise, $\sF_n$ is $\lnot P_n$.  This completes
the recursive definition.

We now observe by induction that every set $\Sigma\cup\{\sF_k\colon k<n\}$
is finitely satisfiable.  Indeed, it is true by assumption when $n=0$;
and if it is true when $n=m$, then it is true when $n=m+1$, by the
last lemma and the definition of the $\sF_k$.

Now let $\epsilon$ be the truth-assignment given by
\begin{equation}
  \epsilon(P_k)=
  \begin{cases}
    1,& \text{ if }\sF_k=P_k;\\
0,& \text{ if }\sF_k=\lnot P_k.
  \end{cases}
\end{equation}
This is a model of $\Sigma$.  Indeed, suppose $\sG\in\Sigma$.  Then
$\sG$ is $n$-ary for some $n$.  The finite set
$\{\sG\}\cup\{\sF_k\colon k<n\}$ has a
model $\zeta$.  In particular, $\zeta$ must agree with $\epsilon$ on
$\{P_k\colon k<n\}$ (why?); so $\epsilon\models\sG$.
\end{proof}
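
In the special case of a finite $\Sigma$ over finitely many variables,
the construction in this proof can be carried out mechanically, since
finite satisfiability then reduces to satisfiability.  A sketch
(illustration only, reusing \texttt{Mod} from the earlier sketches):
\begin{verbatim}
def satisfiable(Sigma, n):
    return bool(Mod(Sigma, n))

def compactness_model(Sigma, n):
    """Choose P_k or its negation one variable at a time, as in the
    proof, and read off the resulting truth-assignment."""
    chosen = []
    for k in range(n):
        Pk = ('var', k)
        if satisfiable(list(Sigma) + chosen + [Pk], n):
            chosen.append(Pk)
        else:
            chosen.append(('not', Pk))
    return {k: 1 if lit[0] == 'var' else 0
            for k, lit in enumerate(chosen)}

P0, P1 = ('var', 0), ('var', 1)
assert compactness_model([('imp', P0, P1), P0], 2) == {0: 1, 1: 1}
\end{verbatim}
The interest of the theorem is of course the \emph{infinite} case,
where no such mechanical check is available.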

There are sets $\Sigma$ of formulas such that \emph{every} finite
subset of $\Sigma$ has a model that is not a model of $\Sigma$ itself.
For example, let $\Sigma_n$ comprise the formulas
  \begin{equation*}
P_0\lto P_1\lto\dotsb\lto P_k
  \end{equation*}
where $k<n$.  (The precise recursive definition of the sets $\Sigma_n$ is left
as an \exercise.)  So $\Sigma_0$ is empty, $\Sigma_1=\{P_0\}$, and we
have a chain
\begin{equation*}
  \Sigma_0\included\Sigma_1\included\Sigma_2\included\dotsb.
\end{equation*}
Let $\Sigma=\bigcup_{n\in\vnn}\Sigma_n$.  Then every finite subset of
$\Sigma$ is a subset of some $\Sigma_n$.  Let
$\epsilon_n$ be the truth-assignment such that
\begin{equation*}
  \epsilon_n(P_k)=1\Iff k<n.
\end{equation*}
Then $\epsilon_n$ is a model of $\Sigma_n$, but not of $\Sigma_{n+1}$
(why?), hence not of $\Sigma$.  But $\Sigma$ must have a model, by
Compactness.  In fact, $P_k\mapsto0$ is a model.


If a set $A$ is a \emph{finite} subset of a set $B$, we may denote
this by
\begin{equation*}
  A\fincluded B.
\end{equation*}\label{fincluded}
Now one consequence of the Compactness Theorem can be expressed as
follows: 
%\setcounter{corollary}0
\begin{corollary*}
  $\displaystyle\Cn[]{\Sigma}
  =\bigcup_{\Gamma\fincluded\Sigma}\Cn[]\Gamma$.  
\end{corollary*}

\begin{proof}
  By Theorem~\ref{thm:closure}, it is enough to show that
  \begin{equation*}
      \Cn[]{\Sigma}
      \included\bigcup_{\Gamma\fincluded\Sigma}\Cn[]\Gamma.  
  \end{equation*}
  Suppose $\sF$ is \emph{not} a member of the union.  Then,
  for each finite subset $\Gamma$ of $\Sigma$, the set $\Cn[]{\Gamma}$
  does not contain $\sF$, and so the set
  $\Gamma\cup\{\lnot\sF\}$ is satisfiable, by Lemma~\ref{lem:sat}.  This
  means $\Sigma\cup\{\lnot\sF\}$ is finitely satisfiable; so it is
  satisfiable, by the Compactness Theorem.  Therefore
  $\lnot\sF\notin\Cn[]\Sigma$, again by Lemma~\ref{lem:sat}. 
\end{proof}

\section{Syntactic entailment}\label{sect:syn-entail}

Logical entailment is one way to derive formulas from a given set of
formulas.  Another way is by 
\tech{formal proof}{}%
\index{proof!formal ---, deduction}
%\index{formal!--- proof} 
or
\techplain{deduction}.%
\index{deduc!---tion}

A 
\defnplain{proof system}{}%
\index{proof!--- system} 
consists of \tech{axiom}s and 
\techplain{rules of inference}.%
\index{rule of inference}%
\index{inference|see{rule of ---}}
An \defn{axiom}{} is a particular formula, and a
\defn{rule of inference}{} is a clearly defined way of obtaining one
formula from finitely many others.  Then a
\defnplain{formal proof}{}%
\index{formal!--- proof}%
\index{proof!formal ---}
from a set $\Sigma$ of formulas is a list of formulas, each of which
is an element of $\Sigma$, or is an axiom, or is obtainable by a rule
of inference from formulas appearing earlier on the list.  The last
formula on the list is the 
\defnplain{conclusion};%
\index{conclusion of a formal proof} 
the formulas in $\Sigma$ are 
\defnplain{hypotheses}{}%
\index{hypothesis!--- of a formal proof}
from which this conclusion is 
\defnplain{deducible}{}%
\index{deduc!---ible} 
in the system.  We
may say also that the conclusion is a
\defnplain{syntactic consequence}{}%
\index{syntactic!--- consequence}%
\index{consequence!syntactic ---}
of the hypotheses.

Our axioms will take any of the following three
forms:
\begin{enumerate}
  \item
$\sF\lto\sG\lto\sF$\hfill(\defn{Affirmation of the
    Consequent}{}%
\index{consequent!Affirmation of the C---}%
\index{axiom!Affirmation of the Consequent}%
\footnote{Church \cite[\S10, p.~73]{MR18:631a}
    uses this term for this axiom.  However, the term is also used for
    the fallacy of concluding $\sF$ from $\sG$ and $\sF\lto\sG$.}),
\item
$(\sF\lto\sG\lto
  \sH)\lto(\sF\lto\sG)\lto\sF\lto\sH$\hfill(\defn{Self-Distribution of
    Implication}{}),%
\index{implication!Self-Distribution of I---}%
\index{axiom!Self-Distribution of Implication} 
\item
$(\lnot\sF\lto\lnot\sG)\lto\sG\lto \sF$\hfill(\defn{Contraposition}{}).%
\index{axiom!Contraposition}
\end{enumerate}
That is, the axioms are $\sK_0(\sF,\sG)$,
$\sK_1(\sF,\sG,\sH)$, and
$\sK_2(\sF,\sG)$, where the formulas $\sK_i$ are respectively $P_0\lto
P_1\lto P_0$, $(P_0\lto P_1\lto P_2)\lto(P_0\lto P_1)\lto P_0\lto
P_2$, and $(\lnot P_0\lto\lnot P_1)\lto P_1\lto P_0$.
Our only rule of inference will be \defn{Detachment},%
\index{rule of inference!Detachment}
 or
\defnplain{\emph{Modus Ponens,}}{}%
\index{Modus Ponens@\emph{Modus Ponens}}%
\index{rule of inference!Modus Ponens@\emph{Modus Ponens}}
namely, that from formulas $\sF$ and $\sF\lto\sG$, the formula $\sG$
can be derived.
If $\sF$ is deducible from $\Sigma$, then we shall write
\begin{equation*}
  \Sigma\proves\sF.
\end{equation*}
If $\sF$ is deducible from $\emptyset$, then we may just write
\begin{equation*}
  \proves\sF.
\end{equation*}
(See Appendix~\ref{app:Frege} on the origin of this notation.)

\begin{lemma}\label{lem:FtoF}
$\proves\sF\lto\sF$.
\end{lemma}

\begin{proof}
  The following is a formal proof, with
justifications.
\begin{enumerate}
\item\label{l1}
$\sF\lto(\sF\lto\sF)\lto\sF$\hfill {}[Affirmation of the Consequent]
\item\label{l2}
$(\sF\lto(\sF\lto\sF)\lto\sF)\lto
  (\sF\lto\sF\lto\sF)\lto \sF\lto \sF$\hfill {}[Self-Distribution of $\lto$]
\item\label{l3}
$(\sF\lto\sF\lto\sF)\lto \sF\lto \sF$\hfill {}[Detachment from~\eqref{l1}
 and~\eqref{l2}]
\item\label{l4}
$\sF\lto\sF\lto\sF$\hfill {}[Affirmation of the Consequent]
\item
$\sF\lto\sF$\hfill {}[Detachment from~\eqref{l4} and~\eqref{l3}]
\end{enumerate}
Thus $\proves\sF\lto\sF$.
\end{proof}
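
Checking a purported formal proof is a mechanical matter: one
verifies, line by line, that each formula is a hypothesis, an instance
of an axiom, or obtainable by Detachment.  The following Python sketch
(an illustration only, with formulas as nested tuples as in the
earlier sketches) verifies the five-line proof just given.
\begin{verbatim}
def is_axiom(A):
    """Is A an instance of one of the three axiom forms?"""
    if A[0] != 'imp':
        return False
    p, q = A[1], A[2]
    # Affirmation of the Consequent:  F -> (G -> F)
    if q[0] == 'imp' and q[2] == p:
        return True
    # Self-Distribution:  (F -> (G -> H)) -> ((F -> G) -> (F -> H))
    if (p[0] == 'imp' and p[2][0] == 'imp' and q[0] == 'imp'
            and q[1] == ('imp', p[1], p[2][1])
            and q[2] == ('imp', p[1], p[2][2])):
        return True
    # Contraposition:  (~F -> ~G) -> (G -> F)
    if (p[0] == 'imp' and p[1][0] == 'not' and p[2][0] == 'not'
            and q == ('imp', p[2][1], p[1][1])):
        return True
    return False

def check_proof(lines, hypotheses=()):
    """Each line must be a hypothesis, an axiom, or follow from two
    earlier lines by Detachment."""
    for i, A in enumerate(lines):
        if not (A in hypotheses or is_axiom(A)
                or any(('imp', B, A) in lines[:i] for B in lines[:i])):
            return False
    return True

F = ('var', 0)
FF = ('imp', F, F)
proof = [('imp', F, ('imp', FF, F)),
         ('imp', ('imp', F, ('imp', FF, F)),
                 ('imp', ('imp', F, FF), FF)),
         ('imp', ('imp', F, FF), FF),
         ('imp', F, FF),
         FF]
assert check_proof(proof)
\end{verbatim}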

The next two lemmas are immediate.

\begin{lemma}\label{lem:immediate}
  If $\Sigma\proves\sF$, and $\Sigma\included
\Gamma$, then $\Gamma\proves\sF$.\hfill\qedsymbol
\end{lemma}

\begin{lemma}\label{lem:pis-proof}
  Every initial segment of a formal proof
is itself a formal proof.\hfill\qedsymbol
\end{lemma}

\begin{theorem}\label{thm:proves}
The set of syntactic consequences of a set $\Sigma$ is the set $\Gamma$
given recursively by the following rules:
\begin{enumerate}
\item
$\Sigma\included\Gamma$;
  \item
$\Gamma$ contains the axioms;
\item
if $\sF\in\Gamma$, and $\sF\lto\sG$ is in $\Gamma$, then
$\sG\in\Gamma$.
\end{enumerate}
Thus the set of syntactic consequences of $\Sigma$ admits induction.
\end{theorem}

\begin{proof}
Let $\Gamma'$ be the set of syntactic consequences of $\Sigma$.
We first prove $\Gamma\included\Gamma'$ by induction.
If $\sF$ belongs to $\Sigma$ or is an axiom, then $\sF$
is a one-line proof that $\Sigma\proves\sF$, so $\sF\in\Gamma'$.  If $\sF$
and $\sF\lto\sG$ are in $\Gamma'$, then
they have formal proofs 
\begin{equation*}
  (\sH_1,\dots,\sH_{m-1},\sF),\qquad(\sK_1,\dots,\sK_{n-1},\sF\lto\sG)
\end{equation*}
respectively; but then
\begin{equation*}
  (\sH_1,\dots,\sH_{m-1},\sF,\sK_1,\dots,\sK_{n-1},\sF\lto\sG,\sG)
\end{equation*}
is a formal proof of $\sG$, so $\sG\in\Gamma'$.  By induction,
$\Gamma\included\Gamma'$. 

Now we show $\Gamma'\included\Gamma$ by induction on the lengths of formal proofs.
Suppose $\sF\in\Gamma$ whenever $\sF$ has a formal proof from $\Sigma$
of length less than $n$.  Suppose $\sG$ has a formal proof from
$\Sigma$ of length $n$.  If $\sG$ is an element of $\Sigma$ or an
axiom, then $\sG\in\Gamma$ by definition.  The only other possibility
is that, in its proof, $\sG$ is
preceded by $\sF$ and $(\sF\lto\sG)$.  Then, by inductive hypothesis and
Lemma~\ref{lem:pis-proof}, both $\sF$ and $\sF\lto\sG$ are in $\Gamma$,
so that $\sG\in\Gamma$.
\end{proof}

The proof that $\Gamma'\included\Gamma$ is `really' by induction on
the \emph{set} of formal proofs, when this is ordered so that the predecessors
of a proof are its proper initial segments.  Indeed, the set of formal
proofs then becomes a tree, and trees admit proof by induction
(\exercise).  In particular, one shows by induction that, for all formal
proofs $(\sF_0,\dots,\sF_{n-1},\sG)$ from $\Sigma$, we have
$\sG\in\Gamma$. 


We can understand the last theorem as saying that formal proofs themselves
correspond to certain trees.  For example, the proof of $\sF\lto\sF$
can be written as the following tree. 
 \begin{equation*}
   \xymatrix@!0%
@C=3em
{
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 &&*+[F]{\sF\lto(\sF\lto\sF)\lto\sF} \ar@{-}[ddr] &&\\
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 &&&&{}\save[]+<3.5cm,0cm>
 *+[F]{(\sF\lto(\sF\lto\sF)\lto\sF)\lto
   (\sF\lto\sF\lto\sF)\lto \sF\lto \sF} \ar@{-}[dl] \restore\\
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 &&&*+[F]{(\sF\lto\sF\lto\sF)\lto \sF\lto \sF}
 \ar@{-}[ddll] &\\
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 *+[F]{\sF\lto\sF\lto\sF} \ar@{-}[dr] &&&&\\
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 &*+[F]{\sF\lto\sF}&&&}
 \end{equation*}
However, the formula $\sF\lto\sF$ does not appear to carry within
itself this tree: there is no obvious way to extract the tree from the
formula.  

Establishing syntactic entailment by formal proof is usually quite
tedious.  Theorem~\ref{thm:proves} allows some short-cuts, including
the following.

\setcounter{corollary}0
\begin{corollary}\mbox{}
\begin{enumerate}
\item
if $\sF\in\Sigma$, then $\Sigma\proves\sF$;
  \item
if $\sF$ is an axiom, then $\Sigma\proves\sF$;
\item
\defn{Detachment}:
if $\Sigma\proves\sF$, and $\Sigma\proves\sF\lto\sG$, then
$\Sigma\proves\sG$.\hfill\qedsymbol
\end{enumerate}
\end{corollary}

\begin{corollary}\mbox{}
\begin{enumerate}
\item
\defn{Affirmation of the Consequent}:
  If $\Sigma\proves\sF$, then $\Sigma\proves\sG\lto\sF$.
\item
\defn{Self-Distribution of Implication}:
  If $\Sigma\proves\sF\lto\sG$ and $\Sigma\proves\sF\lto\sG\lto\sH$,
  then $\Sigma\proves\sF\lto\sH$.
\item
\defn{Contraposition}:
  If $\Sigma\proves\lnot\sF\lto\lnot\sG$, then
  $\Sigma\proves\sG\lto\sF$.\hfill\qedsymbol
\end{enumerate}
\end{corollary}

More short-cuts are as follows.

\begin{theorem}[Deduction]\label{thm:deduction}%
\index{deduc!D---tion Theorem}%
\index{theorem!Deduction Th---}
$\Sigma\proves\sF\lto\sG\Iff\Sigma\cup\{\sF\}\proves\sG$.
\end{theorem}

\begin{proof}
The forward implication is an \exercise.
The reverse implication is by induction on the lengths of formal
proofs.  Suppose this implication holds for all $\sG$ that have formal
proofs shorter than the proof of $\sH$; and suppose
$\Sigma\cup\{\sF\}\proves\sH$.  With respect to the formal 
proof, there are three possibilities for $\sH$.

If $\sH$ is an axiom, or is one of the formulas in $\Sigma$, then
$\Sigma\proves\sH$; hence
$\Sigma\proves\sF\lto \sH$ by Affirmation of the Consequent.

If $\sH$ is $\sF$, then $\proves\sF\lto \sH$ by Lemma
\ref{lem:FtoF}, so $\Sigma\proves\sF\lto \sH$ by Lemma~\ref{lem:immediate}. 

The last possibility is that, in its formal proof, $\sH$ is preceded
by some formulas $\sK$ 
and $\sK\lto\sH$.  By Lemma~\ref{lem:pis-proof}, these formulas are
deducible from $\Sigma\cup\{\sF\}$, and the inductive
hypothesis applies to them.  Therefore
$\Sigma\proves\sF\lto\sK$ and
$\Sigma\proves\sF\lto\sK\lto\sH$.
By Self-Distribution of Implication,
$\Sigma\proves\sF\lto\sH$.  This completes the induction and the proof.
\end{proof}

Again, the proof is `really' by induction on the tree of formal proofs.

\begin{lemma}\label{lem:GSF}
  If $\Sigma\proves\sF$, and $\Gamma\proves\sG$ for all $\sG$ in
  $\Sigma$, then $\Gamma\proves\sF$.\hfill\qedsymbol
\end{lemma}

\begin{lemma}\label{lem:several}
The following are deducible from $\emptyset$:
\begin{enumerate}
\item\label{item:contrad}
$\lnot \sG\lto \sG\lto \sF$;
\item\label{item:double-neg}
$\lnot\lnot \sF\lto \sF$;
\item\label{item:other-way}
$\sF\lto\lnot\lnot \sF$;
\item\label{item:other-contrap}
$(\sF\lto \sG)\lto \lnot \sG\lto\lnot \sF$;
\item\label{item:imp}
$\sF\lto\lnot \sG\lto\lnot(\sF\lto \sG)$.
\item\label{item:two-cases}
$(\sF\lto \sG)\lto(\lnot \sF\lto \sG)\lto \sG$.
\end{enumerate}
\end{lemma}

\begin{proof}
We have
\begin{align*}
&\{\lnot\sG\}\proves\lnot\sG,&&\\
&\{\lnot\sG\}\proves\lnot\sF\lto\lnot\sG,
       &&\text{[Affirmation of the Consequent]}\\ 
&\{\lnot\sG\}\proves\sG\lto\sF,&&\text{[Contraposition]}\\
&\proves\lnot\sG\lto\sG\lto\sF,&&\text{[Deduction]}
\end{align*}
and thus~\eqref{item:contrad}.  As a special case of the
  penultimate conclusion,
\begin{align*}
&\{\lnot\lnot\sF\}\proves\lnot\sF\lto\lnot\lnot\lnot\sF,&&\\
&\{\lnot\lnot\sF\}\proves\lnot\lnot\sF\lto\sF,&&\text{[Contraposition]}\\
&\{\lnot\lnot\sF\}\proves\sF,&&\text{[Deduction]}\\
&\proves\lnot\lnot\sF\lto\sF,&&\text{[Deduction]}
\end{align*}
so~\eqref{item:double-neg}.
Part~\eqref{item:other-way} is an \exercise.
For~\eqref{item:other-contrap}, we have
\begin{align*}
  &\{\sF\lto\sG,\sF\}          \proves\sG,&&\text{[Detachment]}\\
  &\{\sF\lto\sG,\lnot\lnot\sF\}\proves\sG,&&\text{[Lemma~\ref{lem:GSF}
  and~\eqref{item:double-neg}]}\\
  &\{\sF\lto\sG,\lnot\lnot\sF\}\proves\lnot\lnot\sG,
  &&\text{[\eqref{item:other-way}]}\\
  &\{\sF\lto\sG\}              \proves\lnot\lnot\sF\lto\lnot\lnot\sG,
  &&\text{[Deduction]}\\
  &\{\sF\lto\sG\}              \proves\lnot\sG\lto\lnot\sF,
  &&\text{[Contraposition]}\\
                            &\proves(\sF\lto\sG)\lto\lnot\sG\lto\lnot\sF.
  &&\text{[Deduction]}
\end{align*}
The remaining~\eqref{item:imp} and~\eqref{item:two-cases} are an
\exercise. 
\end{proof}




\section{Completeness}\label{sect:complete}

An arbitrary proof system is
\begin{enumerate}
  \item
\defnplain{sound},%
\index{sound!--- proof system} 
if every set of formulas logically entails its syntactic consequences;
\item
\defnplain{complete},%
\index{complete!--- proof system}
if every set of formulas syntactically entails its logical consequences.
\end{enumerate}
We shall show that our proof system
is sound and complete.

\begin{theorem}[Soundness]\label{thm:sound}%
\index{theorem!Soundness Th---}%
\index{sound!S---ness Theorem}
  If $\Sigma\proves\sF$, then $\sF\in\Cn[]\Sigma$.
\end{theorem}

\begin{proof}
  We use induction on the set of syntactic consequences of $\Sigma$
  (that is, Theorem~\ref{thm:proves}) to
  show that it is a subset of $\Cn[]\Sigma$. 
All elements of $\Sigma$ are logical
  consequences of $\Sigma$.  Since all axioms are tautologies, they
  are logical consequences of $\Sigma$. 
Finally, suppose $\sF$ and $\sF\lto\sG$
  are logical consequences of $\Sigma$, and $\epsilon$ is a model of
  $\Sigma$.  Then $\named{\sF}(\epsilon)=1$.  Also, writing $\sH$ for
  $\sF\lto\sG$, we have
  \begin{equation*}
    1=\named{\sH}(\epsilon)=1+\named{\sF}(\epsilon)+
    \named{\sF}(\epsilon)\cdot\named{\sG}(\epsilon)
    =1+1+1\cdot\named{\sG}(\epsilon)= \named{\sG}(\epsilon),
  \end{equation*}
so $\epsilon\models\sG$.  This completes the induction and the proof.
\end{proof}

Proving completeness will take more work.\footnote{The following lemma
  corresponds to one found in Church \cite[*151, p.~98]{MR18:631a};
  the origin is not clear.}

\begin{lemma}\label{lem:eval}
Let $\tuple e$ be an $n$-ary truth-assignment, and for all $n$-ary
formulas $\sF$, let 
\begin{equation*}
  \sF'=
\begin{cases}
\sF,&\text{ if }\named{\sF}(\tuple e)=1;\\
\lnot \sF, &\text{ if }\named{\sF}(\tuple e)=0.
 \end{cases}
\end{equation*}
Then
\begin{equation*}
  \{P_0{}',\dots,P_{n-1}{}'\}\proves \sF'.
\end{equation*}
\end{lemma}


\begin{proof}
We use induction on $n$-ary
formulas.  Let $\Sigma=\{P_0{}',\dots,P_{n-1}{}'\}$.
If $\sF$ is a variable $P_k$, where $k<n$, then $\sF'$ is
in $\Sigma$, so $\Sigma\proves \sF'$. 
Suppose the claim holds when $\sF$ is $\sG$.
Let $\sF$ be $\lnot\sG$.  There are two cases to consider. 
\begin{enumerate}
  \item
If $\named{\sF}(\tuple e)=1$, then $\named{\sG}(\tuple
e)=0$, so $\sF'$ is $\sF$, but $\sG'$ is $\lnot\sG$, which is $\sF$,
that is, $\sF'$.  
\item
If
$\named{\sF}(\tuple e)=0$, then $\named{\sG}(\tuple e)=1$, so $\sG'$
is $\sG$, but $\sF'$ is $\lnot\sF$, which is $\lnot\lnot\sG$, that is,
$\lnot\lnot\sG'$. 
\end{enumerate}
Since we assume $\Sigma\proves\sG'$, we immediately
have $\Sigma\proves\sF'$ in the first case.  In the second case, we
have
$\Sigma\proves\sG$,
hence $\Sigma\proves\lnot\lnot\sG$, that
is, $\Sigma\proves\sF'$, by
Lemma~\ref{lem:several}\eqref{item:other-way} and Detachment.
Suppose finally that the claim holds when $\sF$ is $\sG$ or $\sH$.
Let $\sF$ be $\sG\lto\sH$.
There are three cases to
consider:
\begin{enumerate}
  \item
$\named{\sG}(\tuple e)=0$;
\item
$\named{\sH}(\tuple e)=1$;
\item
 $\named{\sG}(\tuple e)=1$ and $\named{\sH}(\tuple e)=0$.
\end{enumerate}
Details are left to the reader.  This completes the proof.
\end{proof}

\begin{theorem}[Completeness]\label{thm:completeness-prop}%
\index{theorem!Completeness Th---}%
\index{complete!C---ness Theorem}
If $\sF\in\Cn[]\Sigma$, then $\Sigma\proves\sF$.
\end{theorem}

\begin{proof}
Suppose $\sF\in\Cn[]\Sigma$.  By Compactness (rather, its corollary),
$\Sigma$ has a finite subset $\Gamma$ 
such that $\sF\in\Cn[]\Gamma$.  Write $\Gamma$ as
$\{\sF_0,\dots,\sF_{m-1}\}$, and $\sF$ as $\sF_m$.  Then the formula
\begin{equation*}
  \sF_0\lto\dotsb\lto\sF_m
\end{equation*}
is a tautology (\exercise).  Call this
tautology $\sG$, and suppose it is $n$-ary.
Let
$P_k{}'\in\{P_k,\lnot P_k\}$ for each $k$ in $n$.  By the previous
lemma, we have
\begin{equation}
\{P_0{}',\dots,P_{n-1}{}'\}\proves \sG.
\end{equation}
By the Deduction Theorem (\ref{thm:deduction}), remembering that
$P_{\ell}{}'$ can be either $P_{\ell}$ or $\lnot P_{\ell}$, we have
\begin{align*}
\{P_0{}',\dots,P_{n-2}{}'\}&\proves P_{n-1}\lto \sG,\\
\{P_0{}',\dots,P_{n-2}{}'\}&\proves \lnot P_{n-1}\lto \sG,
\end{align*}
so $\{P_0{}',\dots,P_{n-2}{}'\}\proves \sG$ by
Lemma~\ref{lem:several}\eqref{item:two-cases}.  Continuing this
elimination process, we
arrive at the conclusion $\proves\sG$, that is,
$\proves\sF_0\lto\dotsb\lto\sF_{m-1}\lto\sF$.  By Deduction in the
other direction,  
$\{\sF_0,\dots,\sF_{m-1}\}\proves\sF$.
\end{proof}

\section{Other propositional logics}\label{sect:prop-sigs}

An arbitrary signature $\lang$ for propositional logic may have
connectives of any arity.  Then the formulas in $\lang$ can be written
in Polish notation, 
as terms are in \S\ref{sect:algebras},
so that
\begin{enumerate}
  \item
each variable is a formula;
\item
$\mathop*\sF_0\dotsb \sF_{n-1}$ is a formula, if $*$ is an $n$-ary connective
  from $\lang$, and the $\sF_i$ are formulas.  (If $n=0$, then $*$ by
  itself is a formula.)
\end{enumerate}
Then the set of formulas admits recursion by Theorem~\ref{thm:tmol}.

Each connective is given an interpretation as
an operation on $\B$; from these, and a truth-assignment $\epsilon$,
a function $\sF\mapsto\named{\sF}(\epsilon)$ is determined as in
\S\ref{sect:logical}.
We may say then that an $n$-ary formula $\sF$
\defn{represent}s the $n$-ary operation $\tuple
e\mapsto\named{\sF}(\tuple e)$ or $\named{\sF}$ on $\B$.
The formal definition is recursive:
\begin{enumerate}
  \item
If $k<n$, then the formula $P_k$ is an $n$-ary formula and, as
such, represents 
the operation $\tuple e\mapsto e_k$.
\item
If $(\sF_0,\dots,\sF_{k-1})$ is a $k$-tuple of formulas, each of them
$n$-ary, and if $*$ is a $k$-ary connective in
$\lang$, then
the formula $\mathop*\sF_0\dotsb \sF_{k-1}$ represents the function
\begin{equation*}
  \tuple x\longmapsto g(\named{\sF_0}(\tuple x),\dots,
\named{\sF_{k-1}}(\tuple x))
\end{equation*}
from $\B^n$ to $\B$, where $g$ is the standard interpretation of $*$.
\end{enumerate}
In particular, if $*$ is $n$-ary, then its standard interpretation is
$\named{\sG}$, where $\sG$ is the formula $\mathop*P_0\dotsb P_{n-1}$.
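
Evaluation in an arbitrary propositional signature is the same
recursion as before, now driven by a table of interpretations.  A
sketch (illustration only; the two-connective signature below is an
assumed example, not notation of the text):
\begin{verbatim}
def named(F, eps, interp):
    """The operation represented by F, evaluated at eps; interp
    assigns to each connective its operation on B = {0, 1}."""
    if F[0] == 'var':
        return eps[F[1]]
    op = interp[F[0]]
    return op(*(named(G, eps, interp) for G in F[1:]))

# 'N' for negation and 'C' for implication, written Polish-style,
# so that C P0 P1 becomes ('C', P0, P1):
interp = {'N': lambda a: (1 + a) % 2,
          'C': lambda a, b: (1 + a + a * b) % 2}
P0, P1 = ('var', 0), ('var', 1)
assert named(('C', P0, P1), {0: 1, 1: 0}, interp) == 0
\end{verbatim}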

Each $n$-ary operation $g$ on $\B$ determines, for each $k$, the
$(n+k)$-ary operation
\begin{equation*}
  (\tuple x,\tuple y)\longmapsto g(\tuple x).
\end{equation*}
If $\sF$ is an $(n+k)$-ary formula representing this operation, let us
say also that $\sF$ represents $g$ itself.  Then
a signature $\lang$ for a propositional logic is \defn{adequate}{} if
each operation on $\B$ is represented by a formula of the logic.  The
following basic tool for establishing adequacy of a signature was 
proved by Emil Post in 1921 \cite{Post}: 

\begin{lemma}\label{lem:Post}
  A signature of propositional logic is adequate, provided that, in
  this signature, the following operations are represented:
  \begin{enumerate}
    \item
the nullary operations $0$ and $1$;
\item
the ternary operation $f$ given by the following table.
\begin{center}
  \begin{tabular}{c|c|c|c}
$e_0$ & $e_1$ & $e_2$ & $f(\tuple e)$\\ \hline
$0$&$0$&$0$&$0$\\
$1$&$0$&$0$&$1$\\
$0$&$1$&$0$&$0$\\
$1$&$1$&$0$&$1$\\
$0$&$0$&$1$&$0$\\
$1$&$0$&$1$&$0$\\
$0$&$1$&$1$&$1$\\
$1$&$1$&$1$&$1$
  \end{tabular}
\end{center}
  \end{enumerate}
\end{lemma}

\begin{proof}
  We use induction on the arity of operations.  The nullary operations
  are represented in the signature by assumption.
  Suppose all
  $n$-ary operations are represented, and $g$ is $(n+1)$-ary.  If
  $e\in\B$, let
  $h_e$ be the $n$-ary operation $\tuple x\mapsto g(\tuple x,e)$.
  By definition,
  \begin{equation*}
    f(e_0,e_1,e_2)=
    \begin{cases}
      e_0,&\text{ if }e_2=0;\\
      e_1,&\text{ if }e_2=1.
    \end{cases}
  \end{equation*}
Then for all $\tuple d$ in $\B^n$, we have
\begin{equation*}
  g(\tuple d,e)=h_e(\tuple d)=f(h_0(\tuple d),h_1(\tuple d),e).
\end{equation*}
Thus the operation $g$ is
  \begin{equation*}
    (\tuple x,y)\longmapsto f(h_0(\tuple x),h_1(\tuple x),y).
  \end{equation*}
By inductive hypothesis, each of the operations $h_e$ is represented by
some formula
\begin{equation*}
  \sH_e(P_0,\dots,P_{n-1},\dots); 
\end{equation*}
by assumption, $f$ is represented by
some formula $\sF(P_0,P_1,P_2,\dots)$.  Hence
$g$ is represented by
\begin{equation*}
  \sF(\sH_0(P_0,\dots,P_{n-1},\dots),
  \sH_1(P_0,\dots,P_{n-1},\dots),P_n,\dots).
\end{equation*}
By induction, the operations of all arities are represented. 
\end{proof}
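
The recursion in this proof is effective: from the table of an
arbitrary operation $g$, it builds a representing formula in the
signature $\{0,1,f\}$.  A sketch (illustration only, reusing
\texttt{named} from the preceding sketch; an $n$-ary operation is
given as a dictionary from tuples in $\B^n$ to $\B$):
\begin{verbatim}
from itertools import product

post = {'0': lambda: 0, '1': lambda: 1,
        'f': lambda a, b, c: a if c == 0 else b}

def represent(g, n):
    """Build a formula representing the n-ary operation g, by the
    recursion of the proof: split g on its last argument into h_0
    and h_1, and recombine the representing formulas with f."""
    if n == 0:
        return ('1',) if g[()] == 1 else ('0',)
    h0 = {x: g[x + (0,)] for x in product((0, 1), repeat=n - 1)}
    h1 = {x: g[x + (1,)] for x in product((0, 1), repeat=n - 1)}
    return ('f', represent(h0, n - 1), represent(h1, n - 1),
            ('var', n - 1))

# Check on the binary operation of addition in B (exclusive or):
xor = {e: (e[0] + e[1]) % 2 for e in product((0, 1), repeat=2)}
F = represent(xor, 2)
assert all(named(F, dict(enumerate(e)), post) == xor[e]
           for e in product((0, 1), repeat=2))
\end{verbatim}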

\begin{theorem}\label{thm:to-not}
The propositional signature  $\{\lnot,\lto\}$ is adequate.
\end{theorem}

\begin{proof}
By the lemma, it is enough to observe
that
  $P_0\lto P_0$ represents $1$, and
  $\lnot(P_0\lto P_0)$ represents $0$,
while the formula 
      $\lnot((\lnot P_2\lto P_0)\lto \lnot(P_2\lto P_1))$
has the truth-table
  \begin{center}
    \begin{tabular}{*{10}c}
$\lnot$ & $((\lnot$ & $P_2$ & $\lto$ & $P_0)$ & $\lto$ & $\lnot$ & $(P_2$ &
      $\lto$ & $P_1))$\\ \hline
$0$&$1$&$0$&$0$&$0$&$1$&$0$&$0$&$1$&$0$\\
$1$&$1$&$0$&$1$&$1$&$0$&$0$&$0$&$1$&$0$\\
$0$&$1$&$0$&$0$&$0$&$1$&$0$&$0$&$1$&$1$\\
$1$&$1$&$0$&$1$&$1$&$0$&$0$&$0$&$1$&$1$\\
$0$&$0$&$1$&$1$&$0$&$1$&$1$&$1$&$0$&$0$\\
$0$&$0$&$1$&$1$&$1$&$1$&$1$&$1$&$0$&$0$\\
$1$&$0$&$1$&$1$&$0$&$0$&$0$&$1$&$1$&$1$\\
$1$&$0$&$1$&$1$&$1$&$0$&$0$&$1$&$1$&$1$
    \end{tabular}
  \end{center}
and so represents the operation in the lemma.
\end{proof}

The propositional signature $\{\lnot,\lto\}$ may be \emph{adequate,}
but it is not very \emph{useful} for doing mathematics.  It is perhaps
more convenient to use the propositional signature
\begin{equation*}
  \{\lnot,\lto,\land,\lor,\iff\}
\end{equation*}
and use 
\defn{infix notation}{}%
%\index{infix!--- notation}%
\index{notation!infix ---}
for formulas, as we did for $\PFm$, so that
\begin{enumerate}
  \item
variables are formulas;
\item
if $\sA$ is a formula, then so is $\lnot\sA$;
\item
if $\sA$ and $\sB$ are formulas, then so is $(\sA*\sB)$, where $*$ is
$\lto$, $\land$, $\lor$, or $\iff$.
\end{enumerate}
The interpretations of the new connectives were given in \S\ref{sect:prop}.
Note then that the formula in the last theorem is equivalent to
$((\lnot P_2\lto P_0)\land(P_2\lto P_1))$.
In writing formulas, we may follow the conventions established in
\S\ref{sect:notation} for $\PFm'$, so that, for example, we may
omit the outer parentheses.  We may
also remove interior parentheses, with the understanding that $\land$
and $\lor$ are more binding than $\lto$ and $\iff$.  Then, for
example, instead of $((\sF\land\sG)\lto\sH)$, we may just write
$\sF\land\sG\lto\sH$.  Since the interpretation $\land$ (namely,
multiplication on $\B$) is
associative, the interpretation of $\sF\land\sG\land\sH$ is
unambiguous; likewise for $\sF\lor\sG\lor\sH$.

For the purposes of writing recursive definitions and inductive proofs,
it will be convenient to think of our official signature as
$\{\lnot,\land\}$.  We can do this, because

\begin{corollary*}
  The propositional signature $\{\lnot,\land\}$ is adequate.
\end{corollary*}

\begin{proof}
  $(\sF\lto\sG)$ is equivalent to $\lnot(\sF\land\lnot\sG)$. 
\end{proof}

\section*{Exercises}

\begin{xca}
Prove by induction that
  \begin{enumerate}
    \item
every propositional formula has the same number of left
  as right parentheses;
\item
an entry $\lnot$ is never preceded by a
variable in any formula.
  \end{enumerate}
\end{xca}

\begin{xca}\label{ex:trees}
As in the example on p.~\pageref{example:tree}, draw trees for some
formulas, such 
as 
\begin{enumerate}
  \item
$(P_0\lto(P_1\lto P_0))$,
\item
$((P_0\lto(P_1\lto P_2))\lto((P_0\lto P_1)\lto(P_0\lto P_2)))$,
\item
$((\lnot P_0\lto\lnot P_1)\lto(P_1\lto P_0))$.
\end{enumerate}
\end{xca}

\begin{xca}
  Supply the missing details in the proof of the Recursion Theorem
  (\ref{thm:recursion}) and its corollary. 
\end{xca}

\begin{xca}
Prove the Recursion Theorem for Formulas under the assumption
that all formulas
are written in \L ukasiewicz notation (see \S\ref{sect:notation}). 
\end{xca}

\begin{xca}
Formulate and prove an analogue of
Theorem~\ref{thm:recursion} for $\PFm'$.
\end{xca}

\begin{xca}
Complete the proof of Theorem~\ref{thm:simpler}.
\end{xca}

\begin{xca}
  Construct truth-tables for some formulas, such as those in
  Exercise~\ref{ex:trees}. 
\end{xca}

\begin{xca}
\mbox{}
\begin{enumerate}
\item
Complete the proof of Theorem~\ref{thm:associativity}.
\item
  Prove Lemma~\ref{lem:subformulas}.
\item
  Prove the Replacement Theorem (\ref{thm:prop-replacement}).
\end{enumerate}
\end{xca}

\begin{xca}
  Let $\Sigma$ be $\{P_0\lto P_1,P_1\lto P_2\}$.
  \begin{enumerate}
    \item
Find $\Mod[]{\Sigma}$.
\item
Find a formula $\sF$ such that
\begin{equation*}
  \Mod[]{\Sigma}=\Mod[]{\{\sF\}}.
\end{equation*}
\item
Find a formula $\sG$ such that $\sG\in\Cn[]{\Sigma}$ but
$\Mod[]{\{\sG\}}\neq\Mod[]{\Sigma}$. 
  \end{enumerate}
\end{xca}

\begin{xca}
Prove Lemma~\ref{lem:reversing}.
\end{xca}

\begin{xca}
Prove that $\Sigma$ is a theory if and only if $\Cn[]\Sigma=\Sigma$.
\end{xca}

\begin{xca}
Can you find a formula $\sF$ such that  $\Cn[]{\{\sF\}}$ is
\begin{enumerate}
  \item
$\PFm$?
\item
$\emptyset$?
\end{enumerate}
\end{xca}

\begin{minipage}[t]{\textwidth}
\begin{xca}
\mbox{}
  \begin{enumerate}
    \item
If $\Cn[]{\{\sF\}}\included\Cn[]{\{\sG\}}$, must $\sG$ logically entail
$\sF$?
\item
How is $\Cn[]{\{\sF\lto\sG\}}$ related to $\Cn[]{\{\lnot\sF\}}$ and
$\Cn[]{\{\sG\}}$?
\item
Can you find pairwise-inequivalent formulas $\sF$, $\sG$, and $\sH$
such that 
\begin{equation*}
\Cn[]{\{\sF\}}\cup\Cn[]{\{\sG\}}=\Cn[]{\{\sH\}}?
\end{equation*}
\item
If $\Sigma$ logically entails $\sF\lto\sG$, must it entail either
$\lnot\sF$ or $\sG$?
  \end{enumerate}
\end{xca}
\end{minipage}

\begin{xca}\label{exercise:caps}
\mbox{}
\begin{enumerate}
\item
Show that
\begin{equation*}
\bigcap_{i\in I}\Mod[]{\Sigma_i}=
\Mod[]{\bigcup_{i\in I}\Sigma_i}.
\end{equation*}
\item
Show that $\Mod[]{\{\sF\}}\cup\Mod[]{\{\sG\}}=\Mod[]{\{\lnot \sF\lto
  \sG\}}$.
\end{enumerate}
(This shows that the sets $\Mod[]{\Sigma}$ are the 
\techplain{closed}{}%
\index{closed!--- sets of a topology}
sets in a
topology for $\B^{\PVar}$.  Then the Compactness Theorem is that this
topology is compact.)
\end{xca}

\begin{xca}
Show that $\Mod[]{\{\sF\}}\comp=\Mod[]{\{\lnot \sF\}}$.
\end{xca}

\begin{xca}
If $\Gamma\cup\{\sF\}$ is not satisfiable, why is
$\Gamma\cup\{\lnot\lnot\sF\}$ not satisfiable?
\end{xca}

\begin{xca}
Why has the set $\{\sF,\lnot\sF\}$ no models?
\end{xca}

\begin{xca}
In the proof of the Compactness Theorem, why does $\zeta$ agree with
$\epsilon$ on $\{P_k\colon k<n\}$?
\end{xca}

\begin{xca}
In the example in \S\ref{sect:compactness}:
\begin{enumerate}
  \item
Give a precise recursive
definition of the sets $\Sigma_n$.
\item
Prove that
$\epsilon_n\in\Mod[]{\Sigma_n}\setminus\Mod[]{\Sigma_{n+1}}$.
\item
What is $\Cn[]{\Sigma}$?
\end{enumerate}
\end{xca}

\begin{xca}
Suppose $I$ is a set, and there is a function $i\mapsto\sF_i$ from
$I$ into $\PFm$, such that
\begin{equation*}
  \bigcup_{i\in I}\Mod[]{\{\sF_i\}}=\B^{\PVar}.
\end{equation*}
Prove that $I$ has a finite subset $J$ such that $\bigcup_{i\in
  J}\Mod[]{\{\sF_i\}}=\B^{\PVar}$. 
\end{xca}

\begin{xca}
Define a binary operation $*$ on $\PFm$ such that, for each formula
$\sF$, the function 
\begin{equation*}
  \sG\mapsto\sF*\sG
\end{equation*}
is recursively defined, and $\sF*(\sF\lto\sG)=\sG$.
\end{xca}

\begin{xca}
  Prove that all trees admit proof by induction.
\end{xca}

\begin{xca}
  Prove the corollaries of Theorem~\ref{thm:proves}.
\end{xca}

\begin{xca}
  Prove the forward implication of the Deduction Theorem.
\end{xca}

\begin{xca}
  Prove Lemma~\ref{lem:GSF}.
\end{xca}

\begin{xca}
Prove the remainder of Lemma~\ref{lem:several}.
\end{xca}

\begin{xca}
Complete the proof of Lemma~\ref{lem:eval}.
\end{xca}

\begin{xca}
Prove that, if
$\sF_m\in\Cn[]{\{\sF_0,\dots,\sF_{m-1}\}}$, then the formula
\begin{equation*}
\sF_0\lto\dotsb\lto\sF_m
\end{equation*}
is a tautology.
\end{xca}

\begin{xca}
  Prove that $\{\mid\}$ is adequate, where $\mid$ (the \tech{Sheffer
  stroke}{}) is given by
  \begin{equation*}
    \begin{array}{ccc}
      P&\mid&Q\\\hline
0&1&0\\
1&1&0\\
0&1&1\\
1&0&1
    \end{array}
  \end{equation*}
\end{xca}

\begin{xca}
  Prove that $\{\land, \lor\}$ is not adequate.
\end{xca}










\chapter{First-order logic}

Throughout this chapter, let $\str A$ stand for an arbitrary
structure (in the sense of \S\ref{sect:structures}); its signature will
be $\lang$.  So $\str A$ has  
universe $A$, which is just a set.  We shall use the letters
$c$, $R$ and $f$ to stand for arbitrary 
constants, predicates and function-symbols of $\lang$,
respectively.  The arity of $R$ and of $f$ will be $n$.

Instead of propositional variables, we shall use the set of
\defnplain{individual variable}{s,}%
\index{individual variable}%
\index{variable!individual ---}
\begin{equation*}
  \{\varble_k\colon k\in\vnn\},
\end{equation*}
introduced in \S\ref{sect:algebras}.  The definition of
\defn{term}s there makes sense for arbitrary structures.  Much of
the account of terms given below will be a retelling of the account in
\S\ref{sect:algebras}. 
Terms do not involve predicates, but symbolize the ways of combining basic
operations to get new operations.  The ways of combining these with
basic relations to get new relations will be symbolized by
\techplain{(first-order) formula}{s.}%
\index{first-order formula, logic}%
\index{formula!first-order ---}%
\index{logic!first-order ---}

Our logic will be \defnplain{first order}, because we shall use only
individual variables, and not 
\tech{set variable}{s.}%
%\index{set!--- variable}%
\index{variable!set ---} 
This means that the notions of induction and recursion cannot be
formulated in first-order logic.  This deficiency is compensated for by
the Compactness Theorem,~\ref{thm:compactness-1st} below, which fails
in second-order logic.

\section{Terms}\label{sect:terms}

If $k<n$, then there is an $n$-ary operation
\begin{equation}\label{eqn:variable}
  \tuple x\longmapsto x_k
\end{equation}
on $A$.
This operation is \defn{projection}{} onto the $k$th coordinate.
Each element $b$ of $A$ determines, for each positive $n$,
the constant $n$-ary operation
\begin{equation}\label{eqn:constant}
  \tuple x\longmapsto b.
\end{equation}
If $b$ is $c^{\str A}$, then we have the $n$-ary operation $\tuple
x\mapsto c^{\str A}$. 
More generally, if $\alpha$ is an $n$-ary operation on $A$, then there
is an $(n+k)$-ary operation on $A$, namely
\begin{equation*}
  (\tuple x,\tuple y)\longmapsto\alpha(\tuple x).
\end{equation*}
All operations on $A$ that are symbolized in $\lang$ can be composed
with one another and with
projections to give other operations on $A$. 
The \defn{term}s of $\lang$ symbolize these new operations.  The
symbols used in terms of $\lang$ are:
\begin{enumerate}
\item
the variables $\varble_i$, which will
symbolize the projections;
\item
the constants $c$ of $\lang$;
  \item
the function-symbols $f$ of $\lang$.
\end{enumerate}
Then the terms of $\lang$ are defined inductively thus:
\begin{enumerate}
  \item
Each individual variable is a term of $\lang$.
\item
Each constant in $\lang$ is a term of $\lang$.
\item
If $f$ is an $n$-ary function-symbol of $\lang$, and $t_0$, \dots,
$t_{n-1}$ are terms of $\lang$, then the string
\begin{equation*}
  ft_0\dotsb t_{n-1}
\end{equation*}
is a term of $\lang$.
\end{enumerate}
  (Note well that 
$ft_0\dotsb t_{n-1}$
 is not generally a string
of length $n+1$.  If $f$ is binary, then we may
unofficially write the term as
$(t_0\mathrel f t_1)$ instead of $ft_0t_1$.)
Let the set of terms of $\lang$ be denoted by
\begin{equation*}
  \Tm{}{\lang}.
\end{equation*}
This set admits recursion by Theorem~\ref{thm:tmol}.
If the variables in a term $t$ come from $\{\varble_k\colon k<n\}$, then $t$ is
\defnplain{$n$-ary};%
\index{ary!$n$-{}--- term}%
\index{term!$n$-ary ---}
the set of $n$-ary terms of $\lang$ can be denoted by
\begin{equation*}
  \Tm{n}{\lang}.
\end{equation*}
Note then
\begin{equation*}
  \Tm{0}\lang\included\Tm 1\lang\included\Tm2\lang\included\dotsb.
\end{equation*}
The nullary terms are the 
\defnplain{constant term}{s.}%
\index{constant!--- term}%
\index{term!constant ---}
By the recursive definition in \S\ref{sect:algebras}, a constant term
is interpreted in $\str A$ as an element of $A$; an $n$-ary term,
as an $n$-ary operation on $A$:
\begin{enumerate}
\item
  $\varble_k{}^{\str A}$ is $\tuple x\mapsto x_k$ (as in
  \eqref{eqn:variable}), if $k<n$. 
\item
$c^{\str A}$ is $\tuple x\mapsto c^{\str A}$ 
(as in \eqref{eqn:constant}; 
here $c$ is understood respectively as term
  and constant).
\item
$(ft_0\dotsb t_{n-1})^{\str A}$ is 
  \begin{equation*}
      \tuple x\longmapsto f^{\str A}(t_0{}^{\str
  A}(\tuple x), \dots,t_{n-1}{}^{\str A}(\tuple x)),
  \end{equation*}
 that is,
  $f^{\str A}\circ(t_0{}^{\str A},\dots,t_{n-1}{}^{\str A})$.
\end{enumerate}
We may say that $t$ \defn{represent}s the operation $t^{\str A}$ on $A$.
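
This recursion, too, is directly computable.  A sketch (illustration
only): terms become tuples, a structure becomes a dictionary of
interpretations, and the example structure $(\Z,+,-,0)$ is assumed
just for the test at the end.
\begin{verbatim}
def term_value(t, xs, A):
    """Interpret the term t at the tuple xs in the structure A:
    a variable is a projection, a constant is a constant operation,
    and a function-symbol is composed with its argument terms."""
    if t[0] == 'var':                 # ('var', k) is the variable v_k
        return xs[t[1]]
    if t[0] == 'const':               # ('const', c) is a constant
        return A[t[1]]
    f = A[t[1]]                       # ('app', f, t_0, ..., t_{n-1})
    return f(*(term_value(u, xs, A) for u in t[2:]))

Z = {'+': lambda a, b: a + b, '-': lambda a: -a, '0': 0}
# The binary term +v0-v1, unofficially (v0 + (-v1)), at (5, 3):
t = ('app', '+', ('var', 0), ('app', '-', ('var', 1)))
assert term_value(t, (5, 3), Z) == 2
\end{verbatim}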

For example, say $\lang$ is the signature $\{+,-,\cdot,0,1\}$ of
fields, and $\str A$ is an infinite field (such as $\Q$ or $\R$ or
$\C$).  If $t$ is a term of $\lang(A)$, then $t^{\str A}$ is a
\defn{polynomial}{} over $A$.  But a difficulty arises when $\str A$ is
a finite field, such as $\F_2$.  In this case, if $t$ is either
  $\varble_0\cdot(\varble_0+1)$ or $0$, then
  $t^{\str A}(a)=0$ for both elements $a$ of $A$.  However, the two
terms represent different polynomials over larger fields, such as $\F_4$
(which can be defined as $\F_2[X]/(X^2+X+1)$). 

If $t$ is an $n$-ary term, and $u_0$, \dots, $u_{n-1}$ are $m$-ary
terms, then (as in \S\ref{sect:prop-form}), by substituting $u_i$ for
$\varble_i$, we obtain the $m$-ary term denoted by
\begin{equation*}
  t(u_0,\dots,u_{n-1}).
\end{equation*}
For example, if $t$ is $n$-ary, then $t$ is precisely the term
denoted by 
\begin{equation*}
  t(\varble_0,\dots,\varble_{n-1}).
\end{equation*}
We have a generalization of Theorem~\ref{thm:associativity}:

\begin{theorem}[Associativity]\label{thm:ass-1st}%
\index{theorem!Associativity Th---}%
\index{Associativity Theorem}
In a signature $\lang$, 
if $t$ is an $n$-ary term, and $u_0$, \dots, $u_{n-1}$ are $m$-ary
terms, then
\begin{equation*}
  t(u_0,\dots,u_{n-1})^{\str A}=t^{\str A}\circ(u_0{}^{\str
  A},\dots,u_{n-1}{}^{\str A})
\end{equation*}
for all $\lang$-structures $\str A$.\hfill\qedsymbol
\end{theorem}
An important special case arises as follows.
Suppose $\lang\included\lang'$.  An \defn{expansion}{} of $\str A$ to
$\lang'$ is a structure $\str A'$ whose signature is $\lang'$, and
whose universe is $A$, such that
\begin{equation*}
  s^{\str A'}=s^{\str A}
\end{equation*}
for all $s$ in $\lang$.  Then $\str A$ is the \defn{reduct}{} of $\str
A'$ to $\lang$.
For example,
  the ring $(\Z,+,-,\cdot,0,1)$ is an expansion of $(\Z,+,-,0)$, an
  abelian group; the latter is a reduct of the former. 

We can treat the elements of $A$ as
  new constants\footnote{If $a\in 
  A$, some writers prefer to denote by $c_a$ the new constant whose
  interpretation in $\str A$ is $a$.} (not belonging to
$\lang$); adding these to $\lang$ gives the signature $\lang(A)$.
Then $\str A$ has a natural expansion $\str A_A$ to this signature,
so that
\begin{equation*}
  a^{\str A_A}=a
\end{equation*}
for all $a$ in $A$.
\begin{comment}


In fact, when it comes to interpreting terms (and, later, formulas),
we always treat $\str A$ as if it were $\str A_A$.  This means that
every $n$-ary term $t$ of $\lang(A)$ has an interpretation $t^{\str
  A}$ in $\str A$
according to the definition above, provided we understand $a^{\str A}$
as $a$ itself when $a\in A$.  In other contexts, however, it will be
important to distinguish clearly between $\str A$ and $\str A_A$.  

\end{comment}
We shall also want to speak of expansions $\str A_X$ of $\str A$, where
$X$ is an arbitrary subset of $A$.

An $n$-tuple $\tuple a$ from $\str A$ determines a function $t\mapsto
t(\tuple a)$ from $\Tm n\lang$ to $\Tm 0{\lang(A)}$.
The tuple $\tuple a$ also determines the function $g\mapsto g(\tuple a)$ 
from $A^{A^n}$ to $A$.  Then we have two functions, $t\mapsto t^{\str
  A}(\tuple a)$ and $t\mapsto t(\tuple a)^{\str A}$,
from $\Tm n{\lang}$ into $A$.  These can be understood as two paths in
the following diagram.
\begin{equation*}
  \begin{CD}
    \Tm n\lang@>{\tuple a}>>\Tm 0{\lang(A)}\\
@V{\interpretation}VV @VV{\interpretation}V\\
A^{A^n} @>>{\tuple a}> A
  \end{CD}
\end{equation*}
By Associativity, it doesn't matter which way one
moves around this diagram:
\begin{equation}\label{eqn:tAa=taA}
t^{\str A}(\tuple a)=t(\tuple a)^{\str A_A}.
\end{equation}
In a word, the diagram 
\defnplain{commutes}.%
\index{diagram!commutative ---}%
\index{commutative diagram}

%\section{Formulas}
\section{Atomic formulas}\label{sect:atomic}

As terms symbolize operations, so \tech{formula}s will symbolize
relations.  Each $n$-ary
formula $\phi$ of $\lang$ will, for each $\lang$-structure $\str A$,
have an \tech{interpretation}{} $\phi^{\str A}$ as an $n$-ary relation
on $A$.  A nullary formula will be a \defn{sentence}.  Hence, if
$\sigma$ is a sentence of $\lang$, then $\sigma^{\str A}\in\B$.  If
$\sigma^{\str A}=1$, then $\sigma$ is \defn{true in}{} $\str A$, and
we write
\begin{equation*}
  \str A\models\sigma.
\end{equation*}
In practice, it will be easier to define truth \emph{before}
defining interpretations in general.

So-called polynomial equations are examples of 
\techplain{atomic formulas},%
\index{atomic!--- formula}%
\index{formula!atomic ---}
which are the first kinds of formulas to be defined.  From these, we
shall define 
\tech{open formula}{s,}%
%\index{open!--- formula}%
\index{formula!open ---}
and then arbitrary 
\tech{formula}{s.}

%\subsection*{Atomic formulas and their interpretations}

The \defnplain{atomic formulas}{}%
\index{atomic!--- formula}%
\index{formula!atomic ---}
of $\lang$ are of two kinds:
\begin{enumerate}
\item
If $t_0$ and $t_1$ are terms of $\lang$, then the
\defn{equation}{}%
\index{formula!equation}
$t_0=t_1$ is an atomic
formula of $\lang$.\footnote{Some writers prefer to use a symbol like
$\equiv$ instead of $=$.}  
  \item
If $R$ is an $n$-ary predicate of $\lang$, and $t_0$, \dots, $t_{n-1}$
are terms of $\lang$, then $Rt_0\dotsb t_{n-1}$ is an atomic formula
of $\lang$.  (If $R$ is binary, then we may unofficially write
$(t_0\mathrel Rt_1)$ instead of $Rt_0t_1$.)
\end{enumerate}
An atomic formula $\alpha$ can be called 
\defnplain{$k$-ary}{}%
\index{ary!$k$-{}--- atomic formula}
if the terms it is made from are $k$-ary.  

A polynomial equation over $\R$ has a solution-set,
 which can be considered as the \tech{interpretation}{} of the
 equation in $\R$.  Likewise, arbitrary atomic formulas have solution-sets,
 which are their interpretations:  If
$\alpha$ is a $k$-ary atomic formula of $\lang$, then the
 \defn{interpretation}{} in $\str A$ of $\alpha$ is the 
$k$-ary relation $\alpha^{\str A}$ on $A$ defined as follows.
 (Strictly, the validity of the definition depends on unique
 readability, given by Theorem~\ref{thm:UR-formulas} below.)
 \begin{equation*}%\label{eqn:=R-int}
\begin{aligned}
   (t_0=t_1)^{\str A}
&=\{\tuple x\in A^k\colon t_0{}^{\str A}(\tuple x)
               =t_1{}^{\str A}(\tuple x)\};\\ 
(Rt_0\dotsb t_{n-1})^{\str A}
&= \{\tuple x\in A^k\colon (t_0{}^{\str A}(\tuple
    x),\dots, t_{n-1}{}^{\str A}(\tuple x))\in R^{\str A}\}.
\end{aligned}
 \end{equation*}
As a special case, if $k=0$, we have
\begin{equation}\label{eqn:truth-=R}
  \begin{aligned}
      (t_0=t_1)^{\str A}=1
&\Iff t_0{}^{\str A}=t_1{}^{\str A};\\
(Rt_0\dotsb t_{n-1})^{\str A}=1
&\Iff (t_0{}^{\str A},\dots,t_{n-1}{}^{\str
    A})\in R^{\str A}.
\end{aligned}
\end{equation}
Note that the atomic formula $t_0=t_1$ can be considered as the
special case of $Rt_0\dotsb t_{n-1}$ when $n=2$ and $R$ is $=$.  We
treat the special case separately because we consider the equals-sign to
be \emph{always} available for use in formulas, and we
\emph{always} interpret it as equality.




\section{Open formulas}\label{sect:open}
\setcounter{equation}1

An
\defnplain{open}{}%
\index{open formula}%
\index{formula!open ---}
or
\defnplain{quantifier-free formula}{}%
\index{quantifier!---{}-free formula}%
\index{formula!quantifier-free ---}
 is obtained from a propositional formula by substituting atomic
 formulas for the propositional variables.   
Let $\sF$ be an $n$-ary
 propositional formula, and let $\sigma_0$, \dots, $\sigma_{n-1}$ be
 atomic \emph{sentences.}
Then the 
\defn{interpretation}{}
of the open sentence 
 $\sF(\sigma_0,\dots,\sigma_{n-1})$ in $\str A$ is given by
 \begin{equation*}
        \sF (\sigma_0,           \dots,\sigma_{n-1}) ^{\str A}
=\named{\sF}(\sigma_0{}^{\str A},\dots,\sigma_{n-1}{}^{\str A}). 
 \end{equation*}
Now suppose more generally that $\phi_0$, \dots, $\phi_{n-1}$ are
$m$-ary atomic formulas.  Let $\theta$ be the $m$-ary open formula
$\sF(\phi_0,\dots,\phi_{n-1})$.  If $\tuple a\in A^m$, then
$\theta(\tuple a)$ has the obvious meaning: it is the result of
substituting $a_i$ for $\varble_i$ in $\theta$, for each $i$ in $m$.
Then $\theta$ and $\str A$ determine the $m$-ary relation
\begin{equation*}
  \{\tuple a\in A^m\colon\theta(\tuple a)^{\str A_A}=1\}
\end{equation*}
on $A$.
Defining the interpretation of an open formula this way is like
defining the interpretation of a term $t$ as $\tuple a\mapsto t(\tuple
a)^{\str A_A}$.  As terms allow also another approach, shown
in~\eqref{eqn:tAa=taA}, so with open formulas we can proceed
recursively as follows.  We have defined $\phi^{\str A}$ when
$\phi$ is atomic.  Suppose we have defined $\phi^{\str A}$ and
$\psi^{\str A}$ for two $m$-ary open formulas $\phi$ and $\psi$.  Then
\begin{equation}\label{eqn:int-not-and}
  \begin{aligned}
  (\lnot\phi)^{\str A}
&=(\phi^{\str A})\comp
=A^m\setminus \phi^{\str A};\\ 
(\phi\land\psi)^{\str A}
&=\phi^{\str A}\cap\psi^{\str A}.
\end{aligned}
\end{equation}
By induction,
\begin{equation*}
 \theta^{\str A}=\{\tuple a\in A^m\colon\theta(\tuple a)^{\str A_A}=1\}
\end{equation*}
for all open $\theta$.

Yet another way to understand interpretations of open formulas is by
the equation 
\begin{equation*}
  \sF(\phi_0,\dots,\phi_{n-1})^{\str A}=\named{\sF}(\phi_0{}^{\str
  A},\dots,\phi_{n-1}{}^{\str A}),
\end{equation*}
where the right-hand side has the same formal definition as
$\named{\sF}(\tuple e)$ in \S\ref{sect:logical}, with adding $1$ in
$\B$ now replaced with complementation in $\pow{A^m}$, and
multiplication in $\B$ replaced with intersection in $\pow{A^m}$.




\section{Formulas in general}
\setcounter{equation}2

Formulas in general may contain the 
\defnplain{existential quantifier}{}%
\index{existential!--- quantifier}%
\index{quantifier!existential ---}
$\exists$. 
The inductive definition of \defn{formula}s is:
\begin{enumerate}
  \item
atomic formulas are formulas;
\item
if $\phi$ and $\psi$ are formulas, then so are $\lnot\phi$ and
$(\phi\land\psi)$; 
\item
if $\phi$ is a formula, and $x$ is a variable, then $\Exists x\phi$ is
a formula.
\end{enumerate}

The possibility of defining the foregoing interpretations of open
formulas depends on the following.

\begin{theorem}[Unique Readability]\label{thm:UR-formulas}
\index{theorem!Unique Readability Th---}%
\index{Unique Readability Theorem}
  Every formula of $\lang$ is \emph{uniquely} one of the following:
  \begin{enumerate}
    \item
$t_0=t_1$, for some terms $t_0$ and $t_1$ of $\lang$;
\item
$Rt_0\dotsb t_{n-1}$ for some terms $t_k$ and
$n$-ary predicate $R$ of $\lang$, for some positive $n$;
\item
$\lnot\phi$ for some formula $\phi$;
\item
$(\phi\land\psi)$ for some formulas $\phi$ and $\psi$;
\item
$\Exists x\phi$ for some formula $\phi$ and
some variable $x$.\hfill\qedsymbol
  \end{enumerate}
\end{theorem}

In order to define interpretations of arbitrary formulas, we can still
use~\eqref{eqn:int-not-and} above to define
$(\lnot\phi)^{\str A}$ and 
$(\phi\land\psi)^{\str A}$ in terms of $\phi^{\str A}$ and $\psi^{\str
  A}$.  
We should also define  $(\Exists
{x}\phi)^{\str A}$ in terms of $\phi^{\str A}$; but for this, we need
a notion of \tech{arity}{} of arbitrary formulas.  Ultimately, if $\phi$
is $(n+1)$-ary, and $x$ is $\varble_n$, then we shall have
\begin{equation}\label{eqn:E-prelim}
\tuple a\in(\Exists x\phi)^{\str A}\Iff
(\tuple a,b)\in\phi^{\str A}\text{ for
    some $b$ in $A$.}
\end{equation}
But complications arise if $x$ is $\varble_k$, where $k<n$.  When one
takes care of these things, then, for every $n$-ary formula $\phi$
of $\lang$,
there will be an $n$-ary relation $\phi^{\str A}$ on $A$; this relation is
\defnplain{defined by}{}%
\index{defin!---ed by}
$\phi$, and the relation can be
called a 
\defnplain{$0$-definable}{}%
\index{defin!---able relation!$0$---}
 relation of $\str A$.  The
\defnplain{definable}{}%
\index{defin!---able relation}
relations are those defined by formulas of
$\lang(A)$; more generally, if $X\included A$, then the $X$-definable
relations are those defined by formulas of $\lang(X)$.  (Singulary
definable relations can just be called 
\defnplain{definable set}{s.})%
\index{defin!---able set}

If $X$ and $Y$ are $k$-ary definable relations of $\str A$, then so
are $X\comp$, 
$X\cap Y$, $X\cup Y$, \&c.  In short, all \defn{Boolean combination}s
of definable relations are definable, since we work in an
adequate signature for propositional logic.

If $\phi$ is an $n$-ary formula, defining as such the $n$-ary
relation $X$, then we can also treat $\phi$ as $(n+1)$-ary,
defining the relation $X\times A$ on $A$.  This relation is the set
\begin{equation*}
  \{(\tuple x,y)\in A^{n+1}\colon \tuple x\in X\}.
\end{equation*}
This set is also $\pi\inv\setimb X$, where $\pi$ is the function
\begin{equation}\label{eqn:pi}
  (\tuple x,y)\longmapsto\tuple x
\end{equation}
from $A^{n+1}$ to $A^n$;
this function is \defn{projection}{} onto the first $n$ coordinates.
In short then, \emph{inverse images} of definable sets under projections
are definable.  By~\eqref{eqn:E-prelim}, 
\emph{images} under projections will be definable.

Let the set of formulas of $\lang$ be 
\begin{equation*}
\Fm{}{\lang}.
\end{equation*}
We recursively
define a function
\begin{equation*}
  \phi\longmapsto \fv\phi
\end{equation*}
from $\Fm{}{\lang}$ to $\pow{\{\varble_k\colon
  k\in\vnn\}}$
as follows:
\begin{enumerate}
  \item
$\fv\alpha$ is the set of variables in $\alpha$, if $\alpha$ is
    atomic; 
\item
$\fv{\lnot\phi}=\fv\phi$;
\item
$\fv{(\phi\land\psi)}=\fv\phi\cup\fv\psi$;
\item
$\fv{\Exists x\phi}=\fv\phi\setminus\{x\}$.
\end{enumerate}
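In code (a sketch only, with first-order formulas as tuples
\texttt{('eq', t0, t1)}, \texttt{('rel', R, t0, ...)},
\texttt{('not', p)}, \texttt{('and', p, q)},
\texttt{('exists', x, p)}, and terms as in the term sketch above),
the recursion reads:
\begin{verbatim}
def term_vars(t):
    """The set of variables occurring in the term t."""
    if t[0] == 'var':
        return {t}
    if t[0] == 'app':
        return set().union(*(term_vars(u) for u in t[2:]))
    return set()                      # a constant has no variables

def fv(phi):
    """The free variables of phi, by the recursion just given."""
    tag = phi[0]
    if tag in ('eq', 'rel'):          # atomic: all of its variables
        terms = phi[1:] if tag == 'eq' else phi[2:]
        return set().union(*(term_vars(t) for t in terms))
    if tag == 'not':
        return fv(phi[1])
    if tag == 'and':
        return fv(phi[1]) | fv(phi[2])
    return fv(phi[2]) - {phi[1]}      # 'exists' binds its variable

v0, v1 = ('var', 0), ('var', 1)
assert fv(('exists', v0, ('eq', v0, v1))) == {v1}
\end{verbatim}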
Then $\fv\phi$ is the set of 
\defnplain{free variable}s%
\index{free!--- variable} 
of $\phi$.  If
$\fv\phi=\emptyset$, then $\phi$ is a \defn{sentence}; the set of
sentences of $\lang$ can be denoted by
\begin{equation*}
  \Sn.
\end{equation*}
So an atomic
sentence $\alpha$ is a nullary atomic formula; in this case, we can
define
\begin{equation}\label{eqn:truth-atomic}
  \str A\models\alpha\Iff \alpha^{\str A}=1;
\end{equation}
when $\alpha^{\str A}=1$, the sentence $\alpha$ is \defn{true}{} in
$\str A$.  Otherwise, $\alpha$ is \defn{false in}{} $\str A$, and we
can write
\begin{equation*}%\label{eqn:false-sentence}
  \str A\nmodels\alpha.
\end{equation*}
We can also define
\begin{equation}\label{eqn:def-neg-and}
\begin{aligned}
  \str A\models\lnot\sigma&\Iff \str A\nmodels\sigma;\\
\str A\models\sigma\land\tau&\Iff \str A\models\sigma\amp
\str A\models\tau; 
\end{aligned}
\end{equation}
provided $\sigma$ and $\tau$ are sentences for which truth and falsity
in $\str A$ have been defined.  
To define $\str A\models\Exists x\phi$, we need a notion of
\tech{substitution}, whereby to convert $\phi$ to a sentence; but then
we should
assume that we have been working with formulas of $\lang(A)$ all
along.
For formulas $\phi$, if $x$ is a variable and $t$ is a term, we
define the formula
\begin{equation*}
  (\phi)_t^{x}
\end{equation*}
recursively:
\begin{enumerate}
  \item
If $\alpha$ is atomic, then $(\alpha)_t^{x}$ is the result of
substituting $t$ for $x$ in $\alpha$;
\item
$(\lnot\phi)_t^{x}$ is $\lnot(\phi)_t^{x}$;
\item
$((\phi\land\psi))_t^{x}$ is $((\phi)_t^{x}\land(\psi)_t^{x})$;
\item
$(\Exists{x}\phi)_t^{x}$ is $\Exists{x}\phi$ (no change);
\item
$(\Exists u\phi)_t^{x}$ is $\Exists u(\phi)_t^{x}$, if $u$
  is not $x$. 
\end{enumerate}
Then $(\phi)_t^x$ is the result of replacing each 
\defnplain{free occurrence}{}%
\index{free!--- occurrence}%
\index{occurrence!free ---}
of $x$ in $\phi$ with $t$.
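The five clauses translate directly into code (a sketch only,
continuing the tuple representation of the earlier sketches):
\begin{verbatim}
def subst_term(u, x, t):
    """Substitute t for the variable x in the term u."""
    if u == x:
        return t
    if u[0] == 'app':
        return u[:2] + tuple(subst_term(v, x, t) for v in u[2:])
    return u                          # another variable, or a constant

def subst(phi, x, t):
    """The formula (phi)_t^x, by the five clauses above."""
    tag = phi[0]
    if tag in ('eq', 'rel'):          # substitute in every term
        k = 1 if tag == 'eq' else 2
        return phi[:k] + tuple(subst_term(u, x, t) for u in phi[k:])
    if tag == 'not':
        return ('not', subst(phi[1], x, t))
    if tag == 'and':
        return ('and', subst(phi[1], x, t), subst(phi[2], x, t))
    if phi[1] == x:                   # 'exists' over x itself:
        return phi                    #   no free occurrence; no change
    return ('exists', phi[1], subst(phi[2], x, t))

v0, v1, a = ('var', 0), ('var', 1), ('const', 'a')
phi = ('exists', v0, ('eq', v0, v1))
assert subst(phi, v1, a) == ('exists', v0, ('eq', v0, a))
assert subst(phi, v0, a) == phi
\end{verbatim}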
Now we can define
\begin{equation}\label{eqn:truth-E}
  \str A\models\Exists{x}\phi\Iff\str
  A\models(\phi)_a^{x}\text{ for some $a$ in $A$.}
\end{equation}
We have now completed the definition of truth; it is expressed by
\eqref{eqn:truth-=R},
\eqref{eqn:truth-atomic}, 
%\eqref{eqn:false-sentence},
\eqref{eqn:def-neg-and},
and~\eqref{eqn:truth-E}.  
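
Over a \emph{finite} universe, the completed truth definition is an
algorithm.  In the following sketch (illustration only), a parameter
$a$ from the universe enters a formula as a term \texttt{('elt', a)},
playing the role of the new constants of $\lang(A)$; the function
\texttt{subst} of the preceding sketch leaves such terms unchanged.
\begin{verbatim}
def tval(t, A):
    """The value of a constant term of L(A) in the structure A."""
    if t[0] == 'elt':
        return t[1]                   # a parameter names itself
    if t[0] == 'const':
        return A[t[1]]
    return A[t[1]](*(tval(u, A) for u in t[2:]))

def holds(sigma, U, A):
    """Truth of the sentence sigma in the structure with universe U
    and interpretation dict A, clause by clause as in the text.
    A predicate R is interpreted by A[R], a set of tuples."""
    tag = sigma[0]
    if tag == 'eq':
        return tval(sigma[1], A) == tval(sigma[2], A)
    if tag == 'rel':
        return tuple(tval(t, A) for t in sigma[2:]) in A[sigma[1]]
    if tag == 'not':
        return not holds(sigma[1], U, A)
    if tag == 'and':
        return holds(sigma[1], U, A) and holds(sigma[2], U, A)
    # 'exists': some element of U is a witness
    return any(holds(subst(sigma[2], sigma[1], ('elt', a)), U, A)
               for a in U)

# In (Z/2Z, +, 0), some element differs from 0:
U, A = {0, 1}, {'+': lambda a, b: (a + b) % 2, '0': 0}
v0 = ('var', 0)
assert holds(('exists', v0, ('not', ('eq', v0, ('const', '0')))), U, A)
\end{verbatim}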

%\subsection*{Interpretations}

If $\fv{\phi}\included\{\varble_k\colon k<n\}$, then $\phi$ can be
called $n$-ary, 
and we can write $\phi$ as
\begin{equation*}
  \phi(\varble_0,\dots,\varble_{n-1}).
\end{equation*}
Then, instead of
$(\dotsb((\phi)_{a_0}^{\varble_0})\dotsb)_{a_{n-1}}^{\varble_{n-1}}$, we can write 
\begin{equation*}
  \phi(a_0,\dots,a_{n-1})
\end{equation*}
or $\phi(\tuple a)$.  Note well that $\tuple a$ is a tuple of
\emph{constants}.  We can let it be a tuple $(t_0,\dots,t_{n-1})$ of
arbitrary terms; but then we must ensure that
$\phi(t_0,\dots,t_{n-1})$ is the result of \emph{simultaneously}
substituting each $t_k$ for the free instances of the corresponding
variable $\varble_k$.
Now we can define
\begin{equation*}
  \phi^{\str A}=\{\tuple a\in A^n\colon \str A\models\phi(\tuple a)\}
\end{equation*}
for all formulas $\phi$.
\pagebreak
%\begin{minipage}[t]{\textwidth}
\begin{theorem}\label{thm:definable-sets}
  Let $\phi$ be an $n$-ary formula of $\lang$.
  \begin{enumerate}
\item
If $\phi$ is $\lnot\psi$, then 
$\phi^{\str A}=A^n\setminus\psi^{\str A}$.
\item
If $\phi$ is $(\chi\land\psi)$, then
$\phi^{\str A}=\chi^{\str A}\cap\psi^{\str A}$.
\item
If $\phi$ is $\Exists{\varble_n}\psi$, then
$\phi^{\str A}=\pi\setimb{\psi^{\str A}}$,
where $\pi$ is as in~\eqref{eqn:pi}. \hfill\qedsymbol
  \end{enumerate}
\end{theorem}
%\end{minipage}

If $\fv\phi=\{u_0,\dots,u_{n-1}\}$, and
\begin{equation*}
  \str A\models\Exists{u_0}\dotsb\Exists{u_{n-1}}\phi,
\end{equation*}
then $\phi$ is 
\defnplain{satisfied}{}%
\index{satisfi!---ed, ---es}
 in $\str A$, or
$\str A$ \defnplain{satisfies}{} $\phi$.

In a formula of $\lang(A)$, any constants from $A$ can be called
\defn{parameter}{s.}  So the definable relations of $\str A$ are, more
fully, the relations definable \tech{with parameters}.
\techplain{Algebraic geometry}{}%
\index{algebra!---ic geometry}%
\index{geometry!algebraic ---}
studies the definable relations of $\C$ and of
  other fields.  
See Appendix~\ref{app:definable-sets} for more on definable sets in
general. 

Strictly, interpretations and the truth-relation $\models$ have been
defined for a particular signature.  However, expanding a structure
does not change the interpretation of a formula or the truth-value of
a sentence in that structure.

\begin{theorem}
  Say $\lang\included\lang'$, and $\str A$ is an $\lang$-structure
  with an expansion $\str A'$ to $\lang'$.  If $\phi$ is a formula of
  $\lang$, then
  \begin{equation*}
    \phi^{\str A'}=\phi^{\str A}.
  \end{equation*}
In particular, if $\sigma$ is a sentence of $\lang$, then
\begin{equation*}
  \str A'\models\sigma\Iff\str A\models\sigma.
\end{equation*}
\end{theorem}

\begin{proof}
  The definition of the interpretation of $\phi$ involves no symbols
  in $\lang'\setminus\lang$ or their interpretations, so $\tuple
  a\in\phi^{\str A'}\Iff\tuple a\in\phi^{\str A}$. 
\end{proof}

Two sentences are 
\defnplain{(logically) equivalent}{}%
\index{logic!---ally equivalent}%
\index{equivalent!logically ---}
if
each is a logical consequence of the other.  
We define subformulas as in propositional logic, and we have analogues
of Lemmas~\ref{lem:subformulas} and~\ref{lem:sub-->form}, and hence

\begin{theorem}[Replacement]\label{thm:replacement}%
\index{theorem!Replacement Th---}%
\index{replacement!R--- Theorem}
  If $\phi$ is a subformula of $\psi$, and $\phi'$ is another
  formula, then the result of replacing $\phi$ with $\phi'$ in $\psi$
  is a formula $\psi'$.  If also $\phi$ is equivalent to
  $\phi'$, then $\psi$ is equivalent to $\psi'$.\hfill\qedsymbol
\end{theorem}

We can use $(\phi\lto\psi)$ as equivalent to $\lnot(\phi\land\lnot\psi)$,
and $(\phi\iff\psi)$ as equivalent to $((\phi\lto\psi)\land(\psi\lto\phi))$.
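In particular, by~\eqref{eqn:def-neg-and},
\begin{equation*}
  \str A\models(\sigma\lto\tau)\Iff
  \text{$\str A\nmodels\sigma$ or $\str A\models\tau$}
\end{equation*}
for all sentences $\sigma$ and $\tau$ for which truth in $\str A$ has
been defined.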

\begin{theorem}\label{thm:log-equiv}
\mbox{}
\begin{enumerate}
\item
Two sentences $\sigma$ and $\tau$ of $\lang$
are equivalent if and only if
${}\models (\sigma\iff\tau)$.
\item
Logical equivalence is an equivalence-relation on
$\Sn$.\hfill\qedsymbol 
\end{enumerate}
\end{theorem}








\section{Logical entailment}

Having defined the truth-relation $\models$ for first-order logic, we have
related notions, as in propositional logic.  
If $\Sigma$ is a set of
sentences, each of which is true in $\str A$, then $\str A$ is a
\defn{model}{} of $\Sigma$, and we write
\begin{equation*}
  \str A\models\Sigma.
\end{equation*}
The $\lang$-structures that are models of $\Sigma$ compose
\begin{equation*}
\Mod{\Sigma};
\end{equation*}
if this is nonempty, then it is a 
\techplain{proper class}{}%
\index{proper!--- class}%
\index{class!proper ---}
(see \S\ref{sect:ordinals}).
The \defn{theory}{} of a class $\class K$ of structures is the set
\begin{equation*}
\Th{\class K}
\end{equation*}
of sentences that are true in each structure in $\class K$.  If
$\class K$ has a single element, $\str A$, then $\Th{\class K}$ can be
written as
\begin{equation*}
  \Th{\str A}.
\end{equation*}
Hence
\begin{equation*}
  \sigma\in\Th{\str A}\Iff\str A\models\sigma.
\end{equation*}
The set of 
\defnplain{logical consequence}s%
\index{logic!---al consequence}%
\index{consequence!logical ---}
of a set $\Sigma$ of sentences is $\Th{\Mod{\Sigma}}$, also denoted by
\begin{equation*}
  \Cn{\Sigma}.
\end{equation*}
Then $\Cn{\Sigma}$ is always a theory, namely the theory of
$\Mod{\Sigma}$. 
We now have analogues of Lemma~\ref{lem:reversing} and
Theorem~\ref{thm:closure}.

If $\sigma\in\Cn{\Sigma}$, then $\Sigma$
\defn{logically entail}s $\sigma$.  The logical consequences of
$\emptyset$ are the \defn{validities}.
In particular, if $\sF$ is a tautology of propositional logic, and the
$\sigma_i$ are sentences of $\lang$, then
$\sF(\sigma_0,\dots,\sigma_{n-1})$ is a validity of $\lang$.  Such a
validity is also called a
\defnplain{tautology};%
\index{tautolog!---y} 
but there are validities that are not tautologies: for example,
$\lnot\Exists xx\neq x$ is a validity, though as a propositional
formula it is merely the negation of a variable.

Instead of $\lnot\Exists v\phi$, we may write
\begin{equation*}
  \Forall v\lnot\phi.
\end{equation*}
Here $\forall$ is the
\defnplain{universal quantifier}.%
\index{universal!--- quantifier}%
\index{quantifier!universal ---}
Then $\lnot\Forall v\phi$ is equivalent to $\Exists v\lnot\phi$.
Let $P$ and $Q$ be singulary predicates.
  To prove that the sentence
  \begin{equation}\label{eqn:xPxQx}
      (\Forall x(Px\lto Qx)\lto(\Forall
xPx\lto\Forall xQx))
  \end{equation}
is a validity, it is enough to show that 
$\str A\models(\Forall xPx\lto\Forall xQx)$ whenever $\str
A\models\Forall x(Px\lto Qx)$.  So suppose 
\begin{equation}\label{eqn:ex1}
  \str A\models\Forall
x(Px\lto Qx).  
\end{equation}
It is now enough to show that, if also $\str
A\models\Forall xPx$, then $\str A\models\Forall xQx$.  So suppose
\begin{equation}\label{eqn:ex2}
  \str A\models\Forall xPx.
\end{equation}
Let $a\in A$.  Then $\str A\models Pa$, by~\eqref{eqn:ex2}.  But $\str
A\models (Pa\lto Qa)$, by~\eqref{eqn:ex1}.  Hence $\str A\models Qa$.
Since $a$ was arbitrary, we have $\str A\models\Forall xQx$.
Therefore~\eqref{eqn:xPxQx} is a validity.

The theory $\Cn{\Sigma}$ is
\defnplain{axiomatize}{ed}%
\index{axiom!---atized}
by $\Sigma$; the elements of $\Sigma$ are
\defn{axiom}s for this theory.
If $\fv\phi=\{u_0,\dots,u_{n-1}\}$, then the sentence
$\Forall{u_0}\dotsb\Forall{u_{n-1}}\phi$ is a 
\defn{generalization}{}
of $\phi$.  
We may use formulas to denote their generalizations.
For example,
group-theory in the signature $\{1,{}\inv,{}\cdot{}\}$ is axiomatized by
the (generalizations of the) following formulas.
\begin{equation}\label{eqn:group-axioms} 
  \begin{gathered}
    x\cdot(y\cdot z)=(x\cdot y)\cdot z,\\
    \begin{aligned}
      x\cdot 1&=x,\\
1\cdot x&=x,
    \end{aligned} \qquad
    \begin{aligned}
      x\cdot x\inv=1,\\
x\inv\cdot x=1.
    \end{aligned}
  \end{gathered}
\end{equation}

If $\Sigma$ has no models, then $\Cn{\Sigma}=\Sn$.  Thus $\Sn$ is the
only theory (of $\lang$) with no models.  A theory with models is
\defnplain{complete}{}% 
\index{complete!--- theory}
if, for every sentence $\sigma$ of its signature, the theory contains
either $\sigma$ or $\lnot\sigma$ (hence exactly one of these).


\begin{theorem}\label{thm:complete}
Every theory $\Th{\str A}$ is complete.  Every complete theory is
$\Th{\str A}$ for some $\str A$.
\end{theorem}

\begin{proof}
Since
\begin{equation*}
  \sigma\in\Th{\str A}\Iff \str A\models\sigma\Iff\str
  A\nmodels\lnot\sigma\Iff\lnot\sigma\not\in\Th{\str A},
\end{equation*}
$\Th{\str A}$ is complete.  If $T$ is a complete theory, then in
particular it has a model $\str A$.  Then $T\included\Th{\str A}$; but
also
\begin{equation*}
  \sigma\not\in T\implies\lnot\sigma\in T\implies\str
  A\models\lnot\sigma\implies \lnot\sigma\in\Th{\str
  A}\implies\sigma\not\in\Th{\str A},
\end{equation*}
by completeness of $T$, and hence $\Th{\str A}\included T$.
\end{proof}




\section*{Exercises}

\begin{xca}
\mbox{}
\begin{enumerate}
  \item
Prove the Associativity Theorem, \ref{thm:ass-1st}.
\item
What does this theorem have to do with
Theorems~\ref{thm:add}\eqref{item:+ass}
and~\ref{thm:mul}\eqref{item:.ass}? 
\end{enumerate}
\end{xca}

\begin{xca}
  Prove Theorem~\ref{thm:UR-formulas}.
\end{xca}

\begin{xca}
  Prove Theorem~\ref{thm:definable-sets}.
\end{xca}

\begin{xca}
Let $\lang=\{R\}$, where $R$ is a binary predicate, and let $\str A$
be the $\lang$-structure $(\Z,\leq)$.  Determine $\phi^{\str A}$ if
$\phi$ is:
\begin{enumerate}
  \item
$\Forall {x_1}(Rx_1x_0\lto Rx_0x_1)$;
\item
$\Forall {x_2}(Rx_2x_0\lor Rx_1x_2)$.
\end{enumerate}
\end{xca}

\begin{xca}
    Let $\lang$ be $\{S,P\}$, where $S$ and $P$ are binary
    function-symbols.  Then $(\R,+,\cdot)$ is an $\lang$-structure.
    Show that the following sets and relations are definable in this
    structure:
    \begin{enumerate}
\item
$\{0\}$;
\item
$\{1\}$;
\item
$\{x\in\R\colon 0< x\}$;
\item
$\{(x,y)\in\R^2\colon x<y\}$.
    \end{enumerate}
\end{xca}

\begin{xca}
    Show that the following sets are definable in
    $(\vnn,+,\cdot,\leq,0,1)$:
    \begin{enumerate}
      \item
the set of even numbers;
\item
the set of prime numbers.
    \end{enumerate}
\end{xca}

\begin{xca}
  Let $R$ be the binary
  relation 
  \begin{equation*}
      \{(x,x+1)\colon x\in \Z\}
  \end{equation*}
 on $\Z$.  Show that $R$ is $0$-definable in the structure $(\Z,<)$;
  that is, find a binary formula $\phi$ 
  in the signature $\{<\}$ such that $\phi^{(\Z,<)}=R$.
\end{xca}

\begin{xca}
  Find an \emph{open} sentence that is a validity, but not a tautology.
\end{xca}

\begin{xca}
Prove the \defnplain{Lemma on Constants}:%
\index{constant!Lemma on C---s}%
\index{theorem!Lemma!--- on Constants}
  Suppose $\Sigma$ is a set of sentences of $\lang$, and $c_k$ are
  constants not in $\lang$, and $\phi$ is an $n$-ary formula of
  $\lang$.  Then 
  \begin{equation*}
    \Sigma\models\Forall{\varble_0}\dotsb\Forall{\varble_{n-1}}\phi\Iff
\Sigma\models\phi(c_0,\dots,c_{n-1}).
  \end{equation*}
\end{xca}

\begin{xca}
  Prove the Replacement Theorem, \ref{thm:replacement}.
\end{xca}

\begin{xca}
  Prove Theorem~\ref{thm:log-equiv}.
\end{xca}
\pagebreak
\begin{xca}
Letting $P$ and $Q$ be singulary predicates, determine, from the
    definition of $\models$, whether the following hold.
    \begin{enumerate}
            \item
$\Exists xPx\lto \Exists xQx\models\Forall x(Px\lto Qx)$;
\item
$\Forall xPx\lto\Exists xQx\models\Exists x(Px\lto Qx)$;
\item
$\Exists x(Px\lto Qx)\models\Forall xPx\lto\Exists xQx$;
    \item
$\{\Exists x Px,\;\Exists xQx\}\models\Exists x(Px\land Qx)$;
\item
$\Exists xPx\lto\Exists yQy\models\Forall x\Exists y(Px\lto Qy)$.
    \end{enumerate}
\end{xca}

\begin{xca}
  Axiomatize group-theory in the signature $\{{}\cdot{}\}$.
\end{xca}



\chapter{Quantifier-elimination and complete theories}\label{ch:QE}
%\numberwithin{lemma}{chapter}
%\numberwithin{theorem}{chapter}

It is easy to show that a theory is \emph{not}
complete.
For example, the theory of groups is not complete, since the sentence 
  \begin{equation*}
    \Forall x\Forall y x\cdot y=y\cdot x
  \end{equation*}
is true only in abelian groups (by definition), but there are
non-abelian groups (such as the group of permutations of three
objects).  The theory of abelian groups is not complete either, since
(in the signature $\{+,-,0\}$) the sentence
  \begin{equation*}
      \Forall x (x+x=0\lto x=0) 
  \end{equation*}
is true in $(\Z,+,-,0)$, but false in $(\Z/2\Z,+,-,0)$.

To show that a theory \emph{is} complete, there are various
methods that can be tried.  One of these is
\tech{elimination of quantifier}{s,}%
%\index{elimination!--- of quantifiers}%
\index{quantifier!elimination of ---s}
which we shall perform in two examples.

\section{Total orders}

Let $\TO$ be the theory of {strict} total orders; this is
axiomatized by the generalizations of:
\begin{equation*}
  \begin{gathered}
  x\not< x,\\
x< y\lto y\not< x,
  \end{gathered}
\qquad\qquad
\begin{gathered}
x< y\land y< z\lto x< z,\\
x< y\lor y< x\lor x=y.
\end{gathered}
\end{equation*}
This theory is not complete, since $(\vnn,<)$ and $(\Z,<)$ are
models of $\TO$ with different complete theories (\exercise).

Let $\TO^*$ be the theory of 
\defn{dense total orders without endpoints}:
%\index{dense!--- total order without endpoints}
this means $\TO^*$ has the axioms of $\TO$, along with the
  generalizations of:
\begin{equation*}
  x<y\lto\Exists z (x<z\land z<y),
\qquad\qquad
\Exists y y<x,
\qquad\qquad
\Exists y x<y.
\end{equation*}
The theory $\TO^*$ has a model, namely $(\Q,<)$.
We shall show that $\TO^*$ is complete, hence equal to $\Th{\Q,<}$.
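By contrast, $(\Z,<)$ is a model of $\TO$, but not of $\TO^*$, since
density fails in it:
\begin{equation*}
  (\Z,<)\nmodels\Exists z(0<z\land z<1).
\end{equation*}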

Two formulas $\phi$ and $\psi$ are \defn{equivalent}{}
\defnplain{\emph{modulo}}{}%
\index{modulo@\emph{modulo}} 
a theory $T$, or equivalent \defnplain{in $T$}, if
 \begin{equation*}
   T\models\phi\iff\psi.
 \end{equation*}
Then $T$ admits
 \defnplain{(full) elimination of quantifiers}{}% 
\index{elimination of quantifiers}%
\index{full elimination of quantifiers}%
\index{quantifier!full elimination of ---s}
if, for every formula, there is an \emph{open} formula that is
 equivalent to it in $T$.
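For example, \emph{modulo} $\TO^*$, the formula
$\Exists z(x<z\land z<y)$ is equivalent to an open formula:
\begin{equation*}
  \TO^*\models\Exists z(x<z\land z<y)\iff x<y.
\end{equation*}
\emph{Modulo} $\TO$ alone, it is not: in $(\Z,<)$, the pairs $(0,1)$
and $(0,2)$ satisfy the same atomic formulas, hence the same open
formulas, but only the latter satisfies $\Exists z(x<z\land z<y)$.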

\begin{lemma}\label{lem:QE}
  An $\lang$-theory $T$ admits quantifier-elimination, provided that,
  if $\phi$ is an open formula, and $v$ is a variable, then $\Exists
  v\phi$ is equivalent \emph{modulo} $T$ to an open formula.
\end{lemma}

\begin{proof}
  Use induction on formulas.  
Every atomic formula is equivalent in $T$ to an open
formula, namely itself.
Now suppose $\phi$ and $\psi$ are equivalent in $T$ to
open formulas $\alpha$ and $\beta$ respectively.  Then
\begin{equation*}
T\models\lnot\phi\iff\lnot\alpha,\qquad\qquad
  T\models\phi\land\psi\iff\alpha\land\beta;
\end{equation*}
but $\lnot\alpha$ and $\alpha\land\beta$ are open.
Finally, $T\models\Exists v\phi\iff\Exists v\alpha$
(\exercise); but by
assumption, $\Exists v\alpha$ is equivalent to an open formula
$\gamma$; so $T\models\Exists v\phi\iff\gamma$ (\exercise).
This completes the induction. 
\end{proof}

The lemma can be improved slightly.  First, if $\Sigma$ is a set
$\{\phi_k\colon k<n\}$ of formulas, then for the 
\defnplain{disjunction}{}%
\index{disjunct!---ion}
$\phi_0\lor\phi_1\lor\dotsb\lor\phi_{n-1}$, we may write simply
\begin{equation*}
  \bigvee_{k<n}\phi_k,
\end{equation*}
or even $\bigvee\Sigma$.  The order in which the $\phi_k$ appear in
the original disjunction is unimportant.  Likewise, the
\defnplain{conjunction}{} 
\index{conjunction of formulas}
$\phi_0\land\phi_1\land\dotsb\land\phi_{n-1}$ can
be denoted by
\begin{equation*}
  \bigwedge_{k<n}\phi_k,
\end{equation*}
or even $\bigwedge\Sigma$.

\begin{theorem}
Every open formula is logically
equivalent to a formula
\begin{equation}\label{eqn:DNF}
  \bigvee_{i<m}\bigwedge \Sigma_i
\end{equation}
where $\Sigma_i$ is $\{\alpha_i^{(j)}\colon j<n_i\}$, and each
$\alpha_i^{(j)}$ is an atomic or a negated atomic formula.
\end{theorem}

\begin{proof}
  Exercise.
\end{proof}

The formula in~\eqref{eqn:DNF} is in 
\defnplain{disjunctive normal form}.%
\index{disjunct!---ive normal form}
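For example, in the signature $\{<\}$, the open formula
$\lnot(x<y\land\lnot x=y)$ is logically equivalent to
\begin{equation*}
  x\not<y\lor x=y,
\end{equation*}
which is in disjunctive normal form, with $m=2$ and each $\Sigma_i$ a
singleton.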
Note that
\begin{equation}\label{eqn:Ednf}
\models  \Exists v\bigvee_{i<m}\bigwedge \Sigma_i \iff
\bigvee_{i<m}\Exists v\bigwedge \Sigma_i
\end{equation}
(\exercise).
The formulas $\Exists v\bigwedge\Sigma_i$ are said to be
\techplain{primitive}.%
\index{primitive formula}
\index{formula!primitive ---}
In general, a \defnplain{primitive formula}{} is a formula
\begin{equation*}
  \Exists {u_0}\dotsb\Exists{u_{n-1}}\bigwedge \Sigma,
\end{equation*}
where $\Sigma$ is a {finite} non-empty set of atomic and negated atomic
formulas.
Using \eqref{eqn:Ednf}, we can adjust the induction in the proof above
to show 

\begin{lemma}
A theory admits quantifier-elimination, provided that
every primitive formula with one (existential) quantifier is equivalent,
\emph{modulo} the theory, to an open formula.\hfill\qedsymbol
\end{lemma}


Henceforth suppose $\lang$ is $\{<\}$, and $\TO\included T$; so $T$ is
\emph{a} theory of total orders.  Then we can
improve Lemma~\ref{lem:QE} even more.
Indeed, the atomic formulas of $\lang$ now are $x=y$ and $x<y$, where
$x$ and $y$ are variables.  Moreover,
\begin{gather*}
  \TO\models x\not<y\iff x=y\lor y<x,\\
\TO\models x\neq y\iff x<y\lor y<x.
\end{gather*}
Hence, in $\lang$, any formula is equivalent, \emph{modulo} $\TO$, to
the result of replacing each negated atomic subformula with the
appropriate disjunction of atomic formulas.  If this replacement is
done to a formula in disjunctive normal form, then the new formula
will have a disjunctive
normal form that involves no negations.  So $T$ admits
quantifier-elimination, 
provided that every formula
\begin{equation*}
  \Exists v\bigwedge\Sigma
\end{equation*}
is equivalent, \emph{modulo} $T$, to an open formula, where now
$\Sigma$ is a set of atomic formulas.
Using this criterion, we show:

\begin{theorem}\label{thm:TO-QE}
  $\TO^*$ admits elimination of quantifiers.
\end{theorem}

\begin{proof}
Let $\Sigma$ be a finite, non-empty set of atomic formulas of
$\lang$.  We shall eliminate the quantifier from the formula $\Exists
v\bigwedge\Sigma$. 
Let $X$ be a set $\{\varble_0,\dots,\varble_n\}$ containing
all variables appearing in formulas in $\Sigma$.
Suppose $\str A$ is an $\lang$-structure, and $\tuple a\in A^{n+1}$.
Then we can let 
\begin{equation*}
  \Sigma(\tuple a)=\{\alpha(\tuple a)\colon \alpha\in\Sigma\}.
\end{equation*}
Suppose in fact 
\begin{equation*}
  \str A\models\TO\cup\{\bigwedge\Sigma(\tuple a)\}.
\end{equation*}
Let us define
  $\Sigma_{(\str A,\tuple a)}$ as the set of atomic formulas $\alpha$
  such that $\fv{\alpha}\included X$ and
  $\str A\models\alpha(\tuple a)$.  Then
  \begin{equation*}
    \Sigma\included\Sigma_{(\str A,\tuple a)}.
  \end{equation*}
Moreover, once $\Sigma$ has been chosen, \emph{there are only finitely
  many possibilities for the set $\Sigma_{(\str A,\tuple a)}$.}  Let
  us list these possibilities as
  \begin{equation*}
    \Sigma_0,\dots,\Sigma_{m-1}.
  \end{equation*}
Now, possibly $m=0$ here.  In this case,
\begin{equation*}
  \TO\models\Exists v\bigwedge\Sigma\iff v\neq v,
\end{equation*}
so we are done.
Henceforth we may assume $m>0$.
If $\str B\models\TO\cup\{\bigwedge\Sigma(\tuple b)\}$, then
\begin{equation*}
  \str B\models\bigwedge\Sigma_i(\tuple b)
\end{equation*}
for some $i$ in $m$.
Therefore
\begin{equation*}
  \TO\models\bigwedge\Sigma\iff\bigvee_{i<m}\bigwedge\Sigma_{i},
\end{equation*}
and hence
\begin{equation*}
  \TO\models\Exists v\bigwedge\Sigma\iff \bigvee_{i<m}\Exists
  v\bigwedge\Sigma_i. 
\end{equation*}
Therefore, for our proof of quantifier-elimination, we may assume that
$\Sigma$ \emph{is} one of the sets $\Sigma_{(\str A,\tuple a)}$ (so
that, in particular, $m=1$).

Now partition $\Sigma$ as $\Gamma\cup\Delta$, where no formula in
$\Gamma$, but every formula in $\Delta$, contains~$v$.  
There are two extreme possibilities, where one of these sets is empty.
Suppose first $\Gamma=\emptyset$.  Then $X=\{v\}$ (since if $x\in
X\setminus\{v\}$, then $\Gamma$ contains $x=x$).   Also,
$\Sigma=\Delta=\{v=v\}$, so
\begin{equation*}
  \models\Exists v\bigwedge\Sigma\iff v=v,
\end{equation*}
and we are done in this case.  Now
suppose $\Delta=\emptyset$.  Then $v\notin X$, and
\begin{equation*}
  \models\Exists v\bigwedge\Sigma\iff\bigwedge\Sigma,
\end{equation*}
so we are done in \emph{this} case.
Henceforth, suppose neither $\Gamma$ nor $\Delta$ is empty.
Then
\begin{equation*}
  \models\Exists v\bigwedge\Sigma\iff\bigwedge\Gamma\land\Exists
  v\bigwedge\Delta. 
\end{equation*}
We shall show that 
\begin{equation}\label{eqn:goal}
  \TO^*\models\Exists
v\bigwedge\Sigma\iff\bigwedge\Gamma,
\end{equation}
 which will complete the proof.  To show \eqref{eqn:goal}, it is
 enough to show
 \begin{equation*}
   \TO^*\models\bigwedge\Gamma\lto\Exists v\bigwedge\Delta.
 \end{equation*}

Since $\Sigma$ is $\Sigma_{(\str A,\tuple a)}$, we have for all $i$ and
$j$ in $n+1$ that $a_i<a_j$ if and only if $\Sigma$ contains
$\varble_i<\varble_j$, and
$a_i=a_j$ if and only if $\Sigma$ contains $\varble_i=\varble_j$.
We also have $v\in X$.  We can relabel the elements of $X$ as necessary so
that $v$ is $\varble_{n}$ and
\begin{equation*}
  a_0\leq\dotsb\leq a_{n-1}.
\end{equation*}
Suppose $\str 
B\models\TO^*$, and $B^n$ contains $\tuple b$ such that $\str
B\models\bigwedge\Gamma(\tuple b)$.  We have to show that there is $c$
in $B$ such that $\str B\models\bigwedge\Delta(\tuple b,c)$.
Now, for all $i$ and $j$ in $n$, we have
\begin{equation*}
  b_i<b_j\Iff a_i<a_j,\qquad\qquad
b_i=b_j\Iff a_i=a_j.
\end{equation*}
Because $\str B$ is a model of $\TO^*$ (and not just $\TO$), we can
find $c$ as needed according to the relation of $a_n$ with the other
$a_i$: 
\begin{enumerate}
  \item
If $a_n=a_i$ for some $i$ in $n$, then let $c=b_i$.
\item
If $a_{n-1}<a_n$, then let $c$ be greater than $b_{n-1}$.
\item
If $a_n<a_0$, then let $c$ be less than $b_0$.
\item
If $a_k<a_n<a_{k+1}$, then we can let $c$ be such
that $b_k<c<b_{k+1}$.
\end{enumerate}
This completes the proof that $\TO^*$ admits quantifier-elimination.
\end{proof}

In the proof, we can let $X$ be \emph{precisely} the set of  variables
appearing in $\Sigma$.  Then we have that, \emph{modulo} $\TO^*$, the
formula $\Exists
v\bigwedge\Sigma$ is equivalent to $v\neq v$ or $v=v$ or an open
formula \emph{with the same free variables as $\Exists
  v\bigwedge\Sigma$}.  In the signature $\{<\}$, there are no open
sentences.  Therefore, \emph{modulo} $\TO^*$, every sentence is
equivalent to $v\neq v$ or $v=v$.
The former is an \defn{absurdity}{} (the negation of a validity),
which we can denote by $\bot$; and $v=v$ is a validity, which we can
denote by $\top$.

\begin{theorem}
  $\TO^*$ is a complete theory.
\end{theorem}

\begin{proof}
As we have just noted, every sentence is equivalent to an absurdity or a
  validity.  Suppose $\TO^*\models\sigma\iff\bot$.  But
  $\models(\sigma\iff\bot)\iff \lnot\sigma$; so
  $\TO^*\models\lnot\sigma$.  Similarly, if
  $\TO^*\models\sigma\iff\top$, then $\TO^*\models\sigma$.  Hence,
  for all sentences $\sigma$, if $\TO^*\nmodels\sigma$, then
  $\TO^*\models\lnot\sigma$.  Therefore $\TO^*$ is complete by
  Theorem~\ref{thm:complete}. 
\end{proof}

\section{The natural numbers}\label{sect:omega,0,s}

Let us now understand the signature of iterative
structures (in the sense of \S\ref{sect:nat}) as $\{0,{}'\}$.  In this
signature, let $\It$ be the theory axiomatized by (the generalizations of)
  \begin{gather}\notag
x'\neq0,\\\label{eqn:x'y'xy}
x'=y'\lto x=y;
  \end{gather}
let $\It^*$ be the theory with the same two axioms, along with
\begin{equation}\label{eqn:xy0y}
\Exists y(x=0\lor y'=x)
\end{equation}
and, for each positive $n$ in $\vnn$, the axiom
\begin{equation*}
  x^{(n)}\neq x,
\end{equation*}
where $x^{(n)}$ denotes $x$ followed by $n$ primes.
Note that all models of $\It$ that admit induction are isomorphic to
$\vnn$; models of $\It^*$ satisfy certain \emph{consequences} of
induction, namely Theorem~\ref{thm:1-or-succ} and the absence of
cycles.  (Without the no-cycle axioms, there would be unintended
models like $\vnn\cup\{c\}$, where $c'=c$.)

An \defn{embedding}{} of algebras is an injective homomorphism.

\begin{lemma}\label{lem:omega-embeds}
For every model $\str A$ of $\It^*$, for every $b$ in $A$, the unique
homomorphism from $\vnn$ into $(A,b,{}')$ is an embedding.\hfill\qedsymbol
\end{lemma}

\begin{theorem}\label{thm:itr-qe}
  The theory $\It^*$ admits elimination of quantifiers.
\end{theorem}

\begin{proof}
Recall that $x^{(n)}$ denotes
$x^{\prime\prime\dotsb\prime}$ with $n$ primes.
Let $\Sigma$ be a non-empty finite
  set of atomic and negated atomic formulas.  
If the variable $v$ does
  not appear in any formula in $\Sigma$, then $\Exists
  v\bigwedge\Sigma$ is equivalent \emph{modulo} $\It^*$ to
  $\bigwedge\Sigma$.
So suppose it does appear.  We consider the case where $v$ is the
\emph{only} variable in some formula in $\Sigma$.
If 
\begin{equation*}
\Sigma=\Lambda\cup\{v^{(m)}=v^{(m)}\}, 
\end{equation*}
then
$\Exists v\bigwedge\Sigma$ is equivalent in $\It$ to 
$\Exists v\bigwedge\Lambda$, while
if $\Sigma$ contains $v^{(m)}\neq v^{(m)}$, then
$\Exists v\bigwedge\Sigma$ is equivalent to $v\neq v$.  
By Lemma~\ref{lem:omega-embeds},
if $\Sigma$ contains
$v^{(m)}=v^{(n)}$ where $m\neq n$, then
$\Exists v\bigwedge\Sigma$ is equivalent to $v\neq v$, while if
$\Sigma=\Lambda\cup\{v^{(m)}\neq v^{(n)}\}$ where $m\neq n$, then
$\Exists v\bigwedge\Sigma$ is equivalent to
$\Exists v\bigwedge\Lambda$.

We may now assume that $v$ never appears on both sides of an equation
or inequation in $\Sigma$.
But suppose
  $\Sigma=\Lambda\cup\{v^{(m)}=t^{(n)}\}$, where $t$ is a
term not featuring $v$.  By the injectivity of $x\mapsto x'$ ensured
by~\eqref{eqn:x'y'xy}, we may assume
that $m$ or $n$ is $0$.  Suppose first $m=0$.  Let $\Lambda^*$ 
  be the result of replacing each $v$ appearing in a formula in
  $\Lambda$ with $t^{(n)}$.  Then 
  \begin{align*}
\It^*\models\Exists v\bigwedge\Sigma
&\Iff\Exists v\Bigl(\bigwedge\Lambda^*\land v=t^{(n)}\Bigr)\\
&\Iff\bigwedge\Lambda^*\land\Exists vv=t^{(n)}\\
&\Iff\bigwedge\Lambda^*.
  \end{align*}
Now suppose instead $n=0$.
Whenever $v$ appears in an equation or inequation in $\Lambda$,
we can add primes to both sides until $v$ has at least $m$
primes on it: the resulting formula is equivalent to the original
formula.  Then we can replace $v^{(m)}$ with $t$.
Thus we get $\Lambda^{*}$ such that
  \begin{align*}
\It^*\models\Exists v\bigwedge\Sigma
&\Iff\Exists v\Bigl(\bigwedge\Lambda^{*}\land v^{(m)}=t\Bigr)\\
&\Iff\bigwedge\Lambda^{*}\land\Exists vv^{(m)}=t.
  \end{align*}
But by~\eqref{eqn:xy0y} we have also
\begin{equation*}
  \It^*\models\Exists vv^{(m)}=t\iff\bigwedge_{k<m}t\neq0^{(k)}.
\end{equation*}
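For instance, with $m=1$, and with a variable $x$ as $t$, this gives
\begin{equation*}
  \It^*\models\Exists vv'=x\iff x\neq0.
\end{equation*}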

In the final case, $v$ appears only in \emph{inequations} in $\Sigma$
(and only on one side of each of these),
not in equations.  Let $\Sigma^{*}$ be the result of deleting those
inequations.  Then $\Exists v\bigwedge\Sigma$ is equivalent to
$\bigwedge\Sigma^{*}$.  To see this, note first that all models of
$\It^*$ are infinite.  We may assume that the
variables in $\Sigma$ form the list
$(\varble_0,\dots,\varble_n)$, where $\varble_n$ is $v$, so that
the variables in $\Sigma^{*}$ are on the list
$(\varble_0,\dots,\varble_{n-1})$.  Let $\str A\models \It^*$ and
$\str A\models\Sigma^{*}(\tuple a)$ for some $\tuple a$ from $A$.  The
inequations in $\Sigma(\tuple a,v)$ involving $v$ give a \emph{finite}
set of elements of $A$ that $v$ cannot be.  Choose $b$ from outside
this set.  Then
$\str A\models\bigwedge\Sigma(\tuple a,b)$, so
$\str A\models\Exists{v}\bigwedge\Sigma(\tuple a,v)$.  
\end{proof}

\begin{theorem}\label{thm:It*}
  $\It^*=\Th{\vnn,0,{}'}$; in particular, $\It^*$ is
  complete.\hfill\qedsymbol
\end{theorem}



\section*{Exercises}
%\addcontentsline{toc}{section}{Exercises}

\begin{xca}
Show that $(\vnn,<)$ and $(\Z,<)$ have different complete theories.
\end{xca}

\begin{xca}
  Supply the details of the proof of Lemma~\ref{lem:QE}.
\end{xca}

\begin{xca}
  Prove Lemma~\ref{lem:omega-embeds}.
\end{xca}

\begin{xca}
  Prove Theorem~\ref{thm:It*}.
\end{xca}

\begin{xca}
  Describe all models of $\It^*$.  In particular, find a one-to-one
  correspondence between the cardinal numbers and the
  isomorphism-classes of models of $\It^*$.  Why does this, together
  with Theorem~\ref{thm:It*}, not contradict
  Theorem~\ref{thm:isom-to-N}?
\end{xca}

\begin{xca}
  Let $A$ be an infinite set, that is, an infinite structure in the
  empty signature.  Find a complete axiomatization of $\Th A$.
\end{xca}

\begin{xca}
  Find a complete axiomatization of $\Th{\vnn,0,{}',<}$, and describe
  all models of this theory.
\end{xca}

\begin{xca}
  Describe all definable sets of a model of $\TO^*$.
\end{xca}

\begin{xca}
  Describe all definable sets of
  \begin{enumerate}
    \item
$(\vnn,0,{}')$;
\item
an arbitrary model of $\It^*$;
\item
$(\vnn,0,{}',<)$;
\item
an arbitrary model of
$\Th{\vnn,0,{}',<}$.
  \end{enumerate}
\end{xca}





\addtocontents{toc}{\newpage}

\chapter{Relations between structures}\label{ch:relations}
%\numberwithin{lemma}{section}
%\numberwithin{theorem}{section}

Given a signature $\lang$, we consider several relations on the class
of all $\lang$-structures, which is
\begin{equation*}
  \Mod{\emptyset}. 
\end{equation*}
Throughout this chapter,
let $\str A$ and $\str B$ be arbitrary $\lang$-structures.

\section{Basic relations}

If $h\colon A\to B$, then we may understand $h$ also as the function
from $A^n$ to $B^n$ given by
\begin{equation}\label{eqn:h}
  h(a_0,\dots,a_{n-1})=(h(a_0),\dots,h(a_{n-1})).
\end{equation}
Then $\str A$ and $\str B$ are 
\defnplain{isomorphic},%
\index{isomorphi!---c}
and we write
\begin{equation*}
  \str A\cong\str B,
\end{equation*}
if there is a bijection $h$ from $A$ to $B$ such that
\begin{enumerate}
  \item
$h(c^{\str A})=c^{\str B}$ for all constants $c$ in $\lang$,
\item
$h\circ f^{\str A}=f^{\str B}\circ h$ for all function-symbols $f$ in
  $\lang$,
\item
$h\setimb{R^{\str A}}=R^{\str B}$ for all predicates $R$ in $\lang$.
\end{enumerate}
In this case, we may write
\begin{equation*}
  h\colon\str A\overset{\cong}{\longrightarrow}\str B,
\end{equation*}
calling $h$ an \defnplain{isomorphism}.%
\index{isomorphi!---sm}
This is also the name of the relation $\cong$, which is an
equivalence-relation on $\Mod{\emptyset}$.
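For example, let $\lang$ consist of a single binary function-symbol.
Then the map $x\mapsto2^x$ is an isomorphism from $(\R,+)$ to
$(\R^{+},\cdot\,)$, where $\R^{+}$ is the set of positive real
numbers: the map is a bijection from $\R$ onto $\R^{+}$, and
\begin{equation*}
  2^{x+y}=2^x\cdot2^y.
\end{equation*}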

Another equivalence-relation on $\Mod{\emptyset}$ is 
\defnplain{elementary equivalence}.%
\index{elementar!---y equivalence}
The two structures $\str A$ and $\str B$ are
\defnplain{elementarily equivalent},%
\index{equivalent!elementarily ---}%
\index{elementar!---ily equivalent}
and we write
\begin{equation*}
  \str A\equiv\str B,
\end{equation*}
if they have the same theory, that is, $\Th{\str A}=\Th{\str B}$.
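For example, $(\Q,<)\equiv(\R,<)$, since each structure is a model of
the complete theory $\TO^*$ of Chapter~\ref{ch:QE}; but the two
structures are not isomorphic, having different cardinalities.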

The notion of
\tech{substructure}{}
was introduced in
\S\ref{sect:structures}. 
We say that $\str A$ is a
\defn{substructure}{} of $\str B$, and $\str 
B$ is an \defn{extension}{} of $\str A$, and we write
\begin{equation*}
  \str A\included\str B,
\end{equation*}
if $A\included B$ and
\begin{enumerate}
  \item
$c^{\str A}=c^{\str B}$ for all constants $c$ of $\lang$,
\item
$f^{\str A}=f^{\str B}\circ\id_{A^n}$  for all $n$-ary
  function-symbols $f$ of
  $\lang$, for all positive $n$ in~$\vnn$,
\item
$R^{\str A}=A^n\cap R^{\str B}$ for all $n$-ary predicates $R$ of
  $\lang$, for all positive $n$ in~$\vnn$.
\end{enumerate}
Then $\included$ is a reflexive ordering of $\Mod{\emptyset}$.
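For example, if $\lang$ consists of a single binary function-symbol,
then $(\vnn,+)\included(\Z,+)$, since $\vnn\included\Z$ and $\vnn$ is
closed under addition; but the set of odd integers, not being closed
under addition, is not the universe of a substructure of $(\Z,+)$.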

\section{Derived relations}\label{sect:additional}

If $\str A\included\str B$ and $\str A_A\equiv\str B_A$, then 
$\str A$ is an 
\defnplain{elementary substructure}{}%
\index{elementar!---y substructure}%
\index{substructure!elementary ---}
of $\str B$, and $\str B$ is an 
\defnplain{elementary extension}{}%
\index{elementar!---y extension}%
\index{extension!elementary ---}
of $\str A$, and 
  we write\footnote{Some people just write $\str A\prec\str
    B$.}   
  \begin{equation*}
    \str A\elsub\str B.
  \end{equation*}
Then $\elsub$ is also a reflexive ordering of $\Mod{\emptyset}$.
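For example, $(\Z,<)\included(\Q,<)$, but not $(\Z,<)\elsub(\Q,<)$,
since the sentence
\begin{equation*}
  \Exists x(0<x\land x<1)
\end{equation*}
of $\lang(\Z)$ is true in $(\Q,<)_{\Z}$, but false in $(\Z,<)_{\Z}$.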

Now suppose $h\colon\str A\overset{\cong}{\to}\str C$.  If
also $\str C\included\str B$, then $h$ is an
\defn{embedding}{} of $\str A$ in $\str B$, and we may write
\begin{equation*}
  h\colon\str A\longrightarrow\str B;
\end{equation*}
if in addition $\str C\elsub\str B$, then $h$ is an
\defnplain{elementary embedding}{}%
\index{elementar!---y embedding}%
\index{embedding!elementary ---}
of $\str A$ in $\str B$, and we may write
\begin{equation*}
  h\colon\str A\overset{\equiv}{\longrightarrow}\str B.
\end{equation*}
The notion of \tech{homomorphism}{} defined in \S\ref{sect:structures}
is weaker than embedding and will no longer be of much interest.


\section{Implications}\label{sect:implications}
\setcounter{equation}1

The 
\defnplain{(Robinson) diagram}{}%
\index{diagram!--- of a structure}%
\index{Robinson diagram} 
of $\str A$ is the set of \emph{open} sentences of $\Th{\str A_A}$; it
  can be denoted by
  \begin{equation*}
    \diag{\str A}.
  \end{equation*}
Then $\Th{\str A_A}$ may be called the
\defnplain{complete}{}%
\index{complete!--- diagram}%
\index{diagram!complete or elementary ---}
or the
\defnplain{elementary diagram}{}%
\index{elementary!--- diagram}
of $\str A$.

Isomorphic structures are practically the same.  We have already used
this implicitly, in Theorem~\ref{thm:isom-to-N} for example.  The
following makes this precise.

\begin{lemma}[Diagram Lemma]\index{theorem!Lemma!Diagram ---}%
\index{diagram!D--- Lemma}%
\label{lem:fundamental}
  Suppose $h\colon A\to B$, and $\str
B^*$ is the expansion of $\str B$ to $\lang(A)$ such that
\begin{equation}\label{eqn:B*}
  a^{\str B^*}=h(a)
\end{equation}
for all $a$ in $A$.
Then
\begin{gather}\label{eqn:diag}
  \str B^*\models\diag{\str A}\Iff h\colon\str A\longrightarrow\str
  B;\\ \label{eqn:eldiag} 
\str B^*\models\Th{\str A_A}\Iff h\colon\str
A\overset{\equiv}{\longrightarrow}\str B. 
\end{gather} 
Hence if $T$ is a theory admitting quantifier-elimination, then every
  embedding of one model of $T$ in another is an elementary embedding.
If $h$ is surjective, then
\begin{equation}\label{eqn:diag-eldiag}
    \str B^*\models\diag{\str A}\Iff h\colon\str
    A\overset{\cong}{\longrightarrow}\str B \Iff
\str B^*\models\Th{\str A_A}.
\end{equation}
If $A\included B$, then
\begin{gather*}
  \str B\models\diag{\str A}\Iff \str A\included\str B;\\
\str B\models\Th{\str A_A}\Iff \str A\elsub\str B.
\end{gather*}
\end{lemma}

\begin{proof}
  Suppose $h\colon A\to B$.  
Then 
\begin{equation}\label{eqn:*}
\str B^*\models\phi(\tuple a)\Iff\str
B_{h\setimb A}\models\phi(h(\tuple a)).  
\end{equation}
Assume first $\str
B^*\models\diag{\str A}$.  We want to show $h\colon \str A\to\str B$,
that is,
\begin{enumerate}
  \item
$h(c^{\str A})=c^{\str B}$ for all constants $c$ in $\lang$,
\item
$h\circ f^{\str A}=f^{\str B}\circ h$ for all function-symbols $f$ in
  $\lang$,
\item
$h\setimb{R^{\str A}}=h\setimb{A^n}\cap R^{\str B}$ for all $n$-ary
  predicates $R$ in $\lang$. 
\end{enumerate}
This follows by considering the open formulas $c=\varble_0$, $f\varble_0\dotsb
\varble_{n-1}=\varble_n$, and $R\varble_0\dots\varble_{n-1}$.  Indeed,
if $c^{\str A}=a$, then $\diag{\str A}$ contains $c=a$, so $c^{\str
  B}=h(a)$ by~\eqref{eqn:*}.  The remaining cases are similar.

Now assume conversely $h\colon\str A\to\str B$.  
We establish $\str B^*\models\diag{\str A}$ by showing
\begin{equation}\label{eqn:isom-alt}
  \tuple a\in\phi^{\str A}\Iff h(\tuple a)\in\phi^{\str B}
\end{equation}
for all $\tuple a$ from $A$,
  for all
  open formulas $\phi$ of $\lang$.
We first establish by
induction that 
\begin{equation}\label{eqn:isom-terms}
h\circ t^{\str A}=t^{\str B}\circ h
\end{equation}
for all terms $t$ of $\lang$.  The claim is true by definition of
  embedding if $t$ is a constant or variable.
If \eqref{eqn:isom-terms} is true when $t\in\{u_0,\dots,u_{n-1}\}$,
and now $t$ is $fu_0\dotsb u_{n-1}$, then
\begin{align*}
  h\circ t^{\str A}
&=h\circ f^{\str A}\circ(u_0{}^{\str A},\dots,u_{n-1}{}^{\str
    A})&&\text{[by def'n of $t^{\str A}$]}\\
&=f^{\str B}\circ h\circ(u_0{}^{\str A},\dots,u_{n-1}{}^{\str
    A})&&\text{[by def'n of embedding]}\\
&=f^{\str B}\circ (h\circ u_0{}^{\str A},\dots,h\circ u_{n-1}{}^{\str
    A})&&\text{[by \eqref{eqn:h}]}\\
&=f^{\str B}\circ (u_0{}^{\str B}\circ h,\dots,u_{n-1}{}^{\str B}\circ
  h)&&\text{[by inductive hyp.]}\\
&=f^{\str B}\circ (u_0{}^{\str B},\dots,u_{n-1}{}^{\str B})\circ h\\
&=t^{\str B}\circ h.&&\text{[by def'n of $t^{\str A}$]}
\end{align*}
Therefore \eqref{eqn:isom-terms} holds for all $t$.  
%
To prove~\eqref{eqn:isom-alt} for all $\tuple a$, for all open
formulas $\phi$, we again use induction. 
If $\phi$ is $t_0=t_1$ for some terms $t_i$, then
\begin{align*}
  \tuple a\in\phi^{\str A}
&\Iff t_0{}^{\str A}(\tuple a)=t_1{}^{\str A}(\tuple a) && \text{ [by
      definition of $\phi^{\str A}$]}\\
&\Iff h(t_0{}^{\str A}(\tuple a))=h(t_1{}^{\str A}(\tuple a)) &&
  \text{ [since $h$ is injective]}\\
&\Iff t_0{}^{\str B}(h(\tuple a))=t_1{}^{\str B}(h(\tuple a)) &&
  \text{ [by \eqref{eqn:isom-terms}]}\\
&\Iff
  h(\tuple a)\in\phi^{\str B}.&&\text{ [by definition of $\phi^{\str B}$]}
\end{align*}
If $\phi$ is $Rt_0\dotsb t_{n-1}$ for some terms $t_i$ and predicate
$R$, then:
\begin{align*}
  \tuple a\in \phi^{\str A}
&\Iff (t_0{}^{\str A}(\tuple a),\dots,t_{n-1}{}^{\str A}(\tuple a))\in
  R^{\str A}&&\text{ [by def'n of $\phi^{\str A}$]}\\
&\Iff h(t_0{}^{\str A}(\tuple a),\dots,t_{n-1}{}^{\str A}(\tuple a))\in
  R^{\str B}&&\text{ [by def'n of embedding]}\\
&\Iff
(t_0{}^{\str B}(h(\tuple a)),\dots,t_{n-1}{}^{\str B}(h(\tuple a)))\in
  R^{\str B}&&\text{ [by \eqref{eqn:isom-terms}]}\\
&\Iff h(\tuple a)\in \phi^{\str B}.&&\text{ [by def'n of
      $\phi^{\str B}$]}
\end{align*}
If \eqref{eqn:isom-alt} holds for all $\tuple a$ when $\phi$ is
$\psi$, and now $\phi$ is $\lnot\psi$, then:
\begin{align*}
  \tuple a\in\phi^{\str A}
&\Iff \tuple a\notin\psi^{\str A}&&\text{ [by def'n of $\phi^{\str
	A}$]}\\
&\Iff h(\tuple a)\notin\psi^{\str B}&&\text{ [by inductive
      hypothesis]}\\
&\Iff h(\tuple a)\in\phi^{\str B}.&&\text{ [by def'n of $\phi^{\str
	B}$]}
\end{align*}
Similarly, if \eqref{eqn:isom-alt} holds for all $\tuple a$ when
$\phi$ is $\chi$ or $\psi$, 
and now $\phi$ is $\chi\land\psi$, then:
\begin{align*}
  \tuple a\in\phi^{\str A}
&\Iff \tuple a\in\chi^{\str A}\amp\tuple a\in\psi^{\str A}&&\text{
    [by def'n of $\phi^{\str 
	A}$]}\\
&\Iff h(\tuple a)\in\chi^{\str B}\amp h(\tuple a)\in\psi^{\str
    B}&&\text{ [by inductive  hypothesis]}\\
&\Iff h(\tuple a)\in\phi^{\str B}.&&\text{ [by def'n of $\phi^{\str
	B}$]}
\end{align*}
We have now established~\eqref{eqn:diag}.

If $h\colon \str A\to\str B$ and $h$ is surjective, then the foregoing
proof serves
to establish~\eqref{eqn:isom-alt} for all $\tuple a$ and all
formulas $\phi$, once we add one more step.
Suppose \eqref{eqn:isom-alt} holds when $\phi$ is an $(m+1)$-ary
formula $\psi$, 
and now $\phi$ is the $m$-ary 
$\Exists{\varble_m}\psi$.  We have
\begin{align*}
  \tuple a\in\phi^{\str A}
&\Iff (\tuple a,b)\in\psi^{\str A}\text{ for some $b$ in $A$}\\
&\Iff (h(\tuple a),h(b))\in\psi^{\str B}\text{ for some $b$ in $A$}\\
&\Iff (h(\tuple a),c)\in\psi^{\str B}\text{ for some $c$ in $B$}\\
&\Iff h(\tuple a)\in\phi^{\str B}.
\end{align*}
(Note how the surjectivity of $h$ was used.)   This gives
us~\eqref{eqn:diag-eldiag} and then~\eqref{eqn:eldiag}.
\end{proof}

\begin{corollary*}
  If $\str A\cong\str B$, then $\str A\equiv\str B$.\hfill\qedsymbol
\end{corollary*}

\begin{lemma}[Tarski--Vaught Test]%
\index{Test|see{theorem}}
\index{theorem!Test!Tarski--Vaught ---}%
\index{Tarski!---{}--Vaught Test}%
\index{Vaught!Tarski--{}--- Test}%
%\index{test!Tarski-Vaught T---}%
\index{Lemma|see{theorem}}
  Suppose $\str B$ is an $\lang$-structure, $A\included B$, and for
  all singulary $\lang(A)$-formulas $\phi$, if $\str
  B_A\models\Exists{\varble_0}\phi$, then $\str B_A\models\phi(a)$ for
  some $a$ in $A$.  Then  $A$ is the universe of an elementary
  substructure of $\str B$.
\end{lemma}

\begin{proof}
  The assumption ensures that $A$ is the universe of a substructure
  $\str A$ of $\str B$.  We now want to show
  \begin{equation*}
    \str A_A\models\phi(\tuple a)\Iff\str B_A\models\phi(\tuple
    a),
  \end{equation*}
for all $\tuple a$ from $A$,
for all $\lang$-formulas $\phi$.
Since $\str A\included\str B$, we have the claim when $\phi$ is open.
Moreover, the set of $\phi$ for which the claim holds is closed under
negation and conjunction.  Finally, if the claim holds when $\phi$ is
an $(n+1)$-ary formula $\psi$, then
\begin{align*}
  \str A_A\models\Exists{x}\psi(\tuple a,x)
&\Iff\str A_A\models\psi(\tuple a,b)\text{ for some $b$ in $A$}\\
&\Iff\str B_A\models\psi(\tuple a,b)\text{ for some $b$ in $A$}\\
&\Iff\str B_A\models\Exists x\psi(\tuple a,x)
\end{align*}
by assumption.  This completes the induction.
\end{proof}







\section{Cardinalities}\label{sect:categoricity}

The \defnplain{cardinality}{}%
\index{cardinal!---ity} of a structure is the cardinality of its universe.
At the end of \S\ref{sect:additional}, it was noted that the same
theory may have models of different 
cardinalities.
Usually cardinals are denoted
by Greek letters like $\kappa$ and $\lambda$.
The cardinality $\size{\Sn}$ of the set of sentences is sometimes
denoted simply by $\size{\lang}$: so this is always
infinite, even though $\lang$ may be a finite signature.
Also,
\begin{equation*}
  \size{\lang(X)}=\max(\size{\lang}, \size X,\aleph_0).
\end{equation*}
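For example, if $\size{\lang}=\vnn$, then
\begin{equation*}
  \size{\lang(\R)}=\size{\R}.
\end{equation*}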


\begin{theorem}[Downward L\"owenheim--Skolem]\label{thm:DLS}%
\index{theorem!L\"owenheim--Skolem Th---!Downward ---}%
\index{Downward L\"owenheim--Skolem Theorem}%
\index{Lowenheim@L\"owenheim--Skolem Theorem!Downward ---}
  Suppose $\str B$ is an $\lang$-structure, $X\included B$, and
  \begin{equation*}
    \size{\lang(X)}\leq\kappa\leq\size B.
  \end{equation*}
Then $\str A\elsub\str B$ for some $\str A$ of cardinality $\kappa$
such that $X\included A$.
\end{theorem}

\begin{proof}
If $Y\included B$, let $Y'$ be a set such that $Y\included Y'\included
B$ and, for all singulary formulas $\phi$ of $\lang(Y)$, if $\str
B_Y\models\Exists{\varble_0}\phi$, then $\str B_Y\models\phi(a)$ for some
$a$ in $Y'$.  We may assume $\size{Y'}\leq\size{\lang(Y)}$.  Now let
$X_0=X$ and $X_{n+1}=X_n{}'$.  By the Tarski--Vaught Test,
$\bigcup\{X_n\colon n\in\vnn\}$ is the universe of an elementary
substructure $\str A$ of $\str B$ such that $X\included A$.  Also
$\size A\leq\size{\lang(X)}$.  We may assume also $\size X=\kappa$,
in which case $\size A=\kappa$.
\end{proof}
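For example, letting $\lang=\{<\}$, $X=\emptyset$, and $\kappa=\vnn$,
we obtain a countable $\str A$ such that
\begin{equation*}
  \str A\elsub(\R,<).
\end{equation*}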
\pagebreak
A theory $T$ is called
\defnplain{$\kappa$-categorical}{}%
\index{categorical@$\kappa$-categorical theory} if
\begin{enumerate}
  \item
$T$ has a model of cardinality $\kappa$;
\item
all models of $T$ of cardinality $\kappa$ are isomorphic (to each
other). 
\end{enumerate}
A theory is \defn{totally categorical}{} if it is $\kappa$-categorical
for each infinite $\kappa$.  There is an easy example.
In the empty signature, structures are pure sets, and isomorphisms are
just bijections.  Hence, if $\lang=\emptyset$, then
$\Cn{\emptyset}$ 
is totally categorical.

However, there are theories that are $\vnn$-categorical (that is,
$\aleph_0$-categorical), but not 
$\kappa$-categorical for any uncountable $\kappa$.
To give an example, we first note that
there are sentences $\sigma_n$ (where $n>0$) in the empty signature
such that, for all theories $T$ and structures $\str A$ of some common
signature,
\begin{equation*}
  \str A\models T\cup\{\sigma_n\colon n>0\}\Iff\str A\models T\amp\size
  A\geq\vnn. 
\end{equation*}
Indeed, let $\sigma_n$ be
\begin{equation*}
  \Exists{x_0}\dotsb\Exists{x_{n-1}}\bigwedge_{i<j<n}x_i\neq x_j. 
\end{equation*}
Moreover, for any singulary formula $\phi$,
if $n>1$, we can form the sentence
\begin{equation*}
  \Exists{x_0}\dotsb\Exists{x_{n-1}}\Bigl(\bigwedge_{i<j<n}x_i\neq x_j\land
  \bigwedge_{i<n}\phi(x_i)\Bigr);
\end{equation*}
this sentence can be
abbreviated by
\begin{equation*}
  \Existsgeqn x\;\phi.
\end{equation*}
Then
\begin{equation*}
  \str A\models\Existsgeqn x\phi\Iff\size{\phi^{\str A}}\geq n.
\end{equation*}
Now suppose $\lang=\{E\}$, where $E$ is a binary predicate, and let
$T$ be the theory of an equivalence-relation with exactly two classes,
both infinite.
So $T$ has as axioms the generalizations of
\begin{equation*}
\begin{gathered}
x\mathrel E x,\\
x \mathrel Ey\lto y\mathrel Ex,\\
x\mathrel Ey\land y\mathrel Ez\lto x\mathrel Ez,
\end{gathered}\qquad\qquad
\begin{gathered}
\Exists x\Exists y\lnot(x\mathrel Ey),\\
x\mathrel Ey\lor y\mathrel Ez\lor z\mathrel Ex,\\
\Existsgeqn yx\mathrel Ey.
\end{gathered}
\end{equation*}
(Note that there are infinitely many axioms.)
Then $T$ is $\vnn$-categorical, since in any countable model both
classes must be countably infinite, and bijections between the
respective classes of two such models combine to give an isomorphism.
However, if $\kappa$ is an \emph{uncountable} cardinal, then $T$ is
not $\kappa$-categorical.  For example, there is a model
in which both $E$-classes have size $\aleph_1$,
and a model in which one class has size $\aleph_1$, the other~$\vnn$.

The \defn{continuum}{} is $\R$, whose cardinality is
$\size{2^{\vnn}}$.  The
\defnplain{Continuum Hypothesis}{}%
\index{continuum!C--- Hypothesis}%
\index{hypothesis!Continuum H---} is that $\size{2^{\vnn}}=\vnn_1$;
but this is logically independent of the usual axioms of set-theory.
In a countable signature, there are at most continuum-many
non-isomorphic countable structures, because in such a structure $\str
A$, each symbol in the signature will be interpreted as a subset of
some $A^n$, and there are at most continuum-many of these.

For a given signature $\lang$, the \defn{spectrum-function}{} is
\begin{equation*}
  (T,\kappa)\longmapsto I(T,\kappa),
\end{equation*}
where $T$ is a theory, $\kappa$ is an infinite
cardinal, and $I(T,\kappa)$ is the number of non-isomorphic
$\lang$-structures of cardinality $\kappa$ that are models of
$T$.  If $T$ is included in another theory, $U$, of $\lang$, then
\begin{equation*}
  I(U,\kappa)\leq I(T,\kappa),
\end{equation*}
since every model of $U$ is a model of $T$.
Usually we are interested in $I(T,\kappa)$ only when $T$ is complete.

If $\size{\lang}=\vnn$, then a theory of $\lang$ is also called
\defn{countable}.  Let $T$ be such, with an infinite model.  By the
Downward L\"owenheim--Skolem Theorem, $T$ has a countable model. 
Therefore
\begin{equation}\label{eqn:IT}
  1\leq I(T,\vnn)\leq\size{2^{\vnn}}.
\end{equation}
Letting $T$ be the theory of an infinite set (in the empty signature)
shows that the lower bound cannot be improved when $T$ is complete. 
\defn{Vaught's Conjecture}{} 
\index{conjecture!Vaught's C---}
is that
\begin{equation*}
  I(T,\vnn)<\size{2^{\vnn}}\implies I(T,\vnn)\leq\vnn.
\end{equation*}
If the {Continuum Hypothesis} is accepted, then this implication is
trivial; the Conjecture is that the implication holds even if the
Continuum Hypothesis is rejected.


The upper bound of \eqref{eqn:IT} cannot be improved.
For example, let $\lang$ be $\{P_n\colon n\in\vnn\}$, where each $P_n$
is a singulary predicate.  Let $T$ have as axioms all sentences of the
form 
\begin{equation*}
  \Exists x\Bigl(\bigwedge_{i\in I}P_ix\land\bigwedge_{j\in J}\lnot
  P_jx\Bigr), 
\end{equation*}
where
$I$ and $J$ are finite disjoint subsets of $\vnn$.
Then $T$ admits quantifier-elimination and is complete (\exercise).
But $T$ has continu\-um-many countably infinite models.  To see this,
we start with the uncountable model $\str A$, where $A=2^{\vnn}$ and
\begin{equation*}
  P_n{}^{\str A}=\{\sigma\in A\colon \sigma(n)=1\}
\end{equation*}
for each $n$ in $\vnn$.
This has the substructure $\str B$, where $B$ comprises only those
elements of $2^{\vnn}$ that are eventually $0$; that is, $\sigma\in B$
if and only if, for some $k$, if $n\geq k$, then $\sigma(n)=0$.  Then
$\str B$ is a countable model of $T$.  Indeed, if $\sigma\in B$, let
$\sigma^*=\sigma\restriction{n+1}$, where $n$ is the greatest $k$
such that $\sigma(k)=1$ (and let $\sigma^*$ be the empty sequence if
there is no such $k$).  Then $\sigma\mapsto\sigma^*$ is an injection
from $B$ into $2^{{}<\vnn}$, where
\begin{equation*}
  2^{{}<\vnn}=\bigcup_{n\in\vnn}2^n.
\end{equation*}
This set is partially ordered by $\pincluded$ and is a tree, part of
which can be depicted as follows.  
\begin{equation*}
  \xymatrix@C=-7pt{
&&&&&&&(\;)\ar@{-}[dllll]\ar@{-}[drrrr]&&&&&&&\\
&&&(0)\ar@{-}[dll]\ar@{-}[drr]&&&&&&&&(1)\ar@{-}[dll]\ar@{-}[drr]&&&\\
&(0,0)\ar@{-}[dl]\ar@{-}[dr]&&&&(0,1)\ar@{-}[dl]\ar@{-}[dr]&&&&(1,0)\ar@{-}[dl]\ar@{-}[dr]&&&&(1,1)\ar@{-}[dl]\ar@{-}[dr]&\\
(0,0,0)&&(0,0,1)&&(0,1,0)&&(0,1,1)&&(1,0,0)&&(1,0,1)&&(1,1,0)&&(1,1,1)
}
\end{equation*}
\begin{comment}


\begin{center}
  \pstree{\TR{$(\;)$}}
         {\pstree{\TR{$(0)$}}
                 {\pstree{\TR{$(0,0)$}}
                         {\TR{$(0,0,0)$}
                          \TR{$(0,0,1)$}
                         }
                  \pstree{\TR{$(0,1)$}}
                         {\TR{$(0,1,0)$}
                          \TR{$(0,1,1)$}
                         }
                 }
          \pstree{\TR{$(1)$}}
                 {\pstree{\TR{$(1,0)$}}
                         {\TR{$(1,0,0)$}
                          \TR{$(1,0,1)$}
                         }
                  \pstree{\TR{$(1,1)$}}
                         {\TR{$(1,1,0)$}
                          \TR{$(1,1,1)$}
                         }
                 }
         }
\end{center}




\end{comment}
A
\defnplain{branch}{}%
\index{branch of a tree}%
\index{tree!branch}
of a tree is a maximal totally ordered subset; the
union of a branch of $2^{{}<\vnn}$ is an element of $2^{\vnn}$, and
conversely.  So $2^{{}<\vnn}$ is countable, but has continuum-many
branches. 
If $\sigma$ and $\tau$ are distinct elements of
$2^{\vnn}$, then 
$\sigma(n)\neq\tau(n)$ for some $n$ in $\vnn$, and then
\begin{equation*}
  \sigma\in P_n{}^{\str A}\Iff\tau\notin P_n{}^{\str A}.
\end{equation*}
Hence, if also $\sigma$ and $\tau$ are not in $B$, then
$B\cup\{\sigma\}$ and $B\cup\{\tau\}$ are the universes of
non-isomorphic models of $T$: in each model, the added element is the
only one lying in infinitely many of the sets $P_n{}^{\str A}$, so an
isomorphism would have to take $\sigma$ to $\tau$, though the two
disagree about some $P_n$.  Hence $T$ has at least (and therefore
exactly) continuum-many countable models.  

When $T=\Th{\vnn,0,{}'}$, we have $I(T,\vnn)=\vnn$.  Indeed, let $\str
A\models T$.  There is an equivalence-relation $\sim$ on $A$ such that
$a\sim b$ if and only if $a^{(m)}=b^{(n)}$ (in the notation of the
proof of Theorem~\ref{thm:itr-qe}) for some $m$ and $n$ in $\vnn$.
Let $[a]$ be the $\sim$-class of $a$.
If $a\sim 0^{\str A}$, then $([a],{}')$ is isomorphic to $(\vnn,{}')$; if
$a\not\sim 0^{\str A}$, then $([a],{}')$ is isomorphic to
$(\Z,x\mapsto x+1)$.  Thus $\str A$ is determined up to isomorphism by
$\size{A/\mathord{\sim}}$.  Moreover,
\begin{equation*}
  \size A=
  \begin{cases}
    \vnn,& \text{ if }\size{A/\mathord{\sim}}\leq\vnn;\\
\size{A/\mathord{\sim}},& \text{ otherwise.}
  \end{cases}
\end{equation*}
Therefore
\begin{equation*}
  I(\Th{\vnn,{}',0},\kappa)=
  \begin{cases}
    \vnn,&\text{ if }\kappa=\vnn;\\
1,&\text{ if }\kappa>\vnn.
  \end{cases}
\end{equation*}
Thus the theory of $(\vnn,0,{}')$ is 
\defnplain{uncountably categorical}.%
\index{categorical!uncountably ---}%
\index{uncountabl!---y categorical}

  We shall see in Theorem~\ref{thm:Cantor} that $\TO^*$ is
  $\vnn$-categorical; however, it is not $\kappa$-categorical if
  $\kappa>\vnn$. 
In a countable signature, the question of whether
$\kappa$-categoricity for \emph{one} uncountable $\kappa$ implies the
same for all was answered affirmatively by Michael Morley in his 1962
doctoral dissertation \cite{MR0175782}.
The question of which finite values can be taken by $I(T,\vnn)$ is
  treated below in Ch.~\ref{ch:numbers}.

\section*{Exercises}

\begin{xca}\label{ex:univ}
  An 
\defnplain{existential formula}{}%
\index{existential!--- formula}%
\index{formula!existential ---}
is a formula of the form $\Exists{x_0}\dotsb\Exists{x_{n-1}}\phi$,
where $\phi$ is open; a
\defnplain{universal formula}{}%
\index{universal!--- formula}%
\index{formula!universal ---} takes the form
 $\Forall{x_0}\dotsb\Forall{x_{n-1}}\phi$.
Suppose $\str A\included\str B$, in signature $\lang$.  Let $\sigma$
be a sentence of $\lang(A)$.  Prove that,
\begin{enumerate}
  \item
if $\sigma$ is universal, and $\str B\models\sigma$, then $\str
A\models\sigma$;
\item
if $\sigma$ is existential, and $\str A\models\sigma$, then $\str
B\models\sigma$. 
\end{enumerate}
\end{xca}

\begin{xca}
\mbox{}
\begin{enumerate}
  \item
Show that, in the signature $\{1,{}\inv,{}\cdot{}\}$, every
substructure of a group is a group.
\item
Show that this fails in the signature $\{1,{}\cdot{}\}$.
\item
Does it fail in the signature $\{{}\inv,{}\cdot{}\}$?
\end{enumerate}
\end{xca}

\begin{xca}
Supply missing details in the proof of the Diagram Lemma, and prove
its corollary.
\end{xca}

\begin{xca}\label{ex:TA}
  For any theory $T$, let $T_{\forall}$ be the set of universal
  consequences of $T$.  By Exercise~\ref{ex:univ}, every substructure
  of a model of $T$ is a model of $T_{\forall}$.  By
  Exercise~\ref{ex:TA-cont}, we shall have the converse.  Meanwhile,
  it is possible to identify $T_{\forall}$ when $T$ is the theory of
  fields.  That is, it is possible to find a theory $U$ such that
  every substructure of a field is a model of $U$, \emph{and} every
  model of $U$ extends to a field.
  Do this: find $U$.
\end{xca}

\begin{xca}
  Axiomatize the theory of \emph{infinite} sets and show that this theory is
  complete and totally categorical.
\end{xca}
\pagebreak
\begin{xca}
  In the signature $\{\sim,f\}$,
  where $\sim$ is a binary predicate, and $f$ is a singulary function-symbol,
 let $T$ be the theory saying that $\sim$ is
  an equivalence-relation with just two classes, and $f$ is an
  injective function taking every element to an inequivalent element.
  \begin{enumerate}
    \item
Find a theory $U$ such that $T\included U$ and $U$ is totally
categorical.
\item
Find a theory $T^*$ such that every model of $T$ extends to a model of
$T^*$ and $I(T^*,\vnn)=\vnn$.
  \end{enumerate}
\end{xca}

\begin{xca}
  In \S\ref{sect:categoricity}, prove that the theory in the signature
  $\{P_n\colon n\in\vnn\}$ is complete.
\end{xca}

\begin{xca}
For each $n$ in $\vnn$, let $E_n$ be a binary predicate.
  In the signature $\{E_n\colon n\in \vnn\}$, let $T$ be the theory
  saying that each $E_n$ is an equivalence-relation with infinitely
  many classes, and each
  equivalence-class of $E_{n+1}$ is included in an equivalence-class
  of $E_n$.
  \begin{enumerate}
    \item
Show that $T$ is complete.
\item
Show that $I(T,\vnn)=\size{2^{\vnn}}$.
  \end{enumerate}
\end{xca}

\begin{xca}
  Find $I(T,\kappa)$ for all infinite $\kappa$ when $T$ is the theory of:
  \begin{enumerate}
    \item
$(\vnn,0,{}',<)$;
\item
vector-spaces over a given scalar-field;
\item
the field $\C$ of complex numbers.
  \end{enumerate}
\end{xca}




\chapter{Compactness}
%\numberwithin{lemma}{chapter}
%\numberwithin{theorem}{chapter}

\section{Theorem}

A subset $\Sigma$ of $\Sn$ is
\begin{enumerate}
  \item
\defn{satisfiable}{} if it has a model;
  \item
\defn{finitely satisfiable}{} if every finite subset of $\Sigma$ has a model.
\end{enumerate}
We now aim to prove that every finitely satisfiable set is
 satisfiable: this is 
\defnplain{compactness}{}%
\index{compact!---ness} for first-order logic.


To prove the Compactness Theorem 
%(\ref{thm:prop-compactness}) 
for propositional logic, we used Lemma~\ref{lem:fin-sat}.  The following
is the same lemma for first-order logic and has the same proof.

\begin{lemma}
  If $\Sigma$ is finitely satisfiable, then so is
  $\Sigma\cup\{\sigma\}$ or $\Sigma\cup\{\lnot\sigma\}$.\hfill\qedsymbol
\end{lemma}

In propositional logic, we took a finitely satisfiable set
$\Sigma$ of propositional formulas and extended it to a set from which
we could obtain a model of $\Sigma$.
We can try to do something similar to prove compactness for
first-order logic.  Suppose $\Sigma$ is a 
\defnplain{maximal}{}%
\index{maximal!--- set of formulas}
 finitely
satisfiable set of first-order formulas in some signature $\lang$:
this means $\sigma\in\Sigma\Iff\lnot\sigma\notin\Sigma$.  We
can try to define an $\lang$-structure $\str A$ by letting:
\begin{enumerate}
  \item
$A$ be the set of constants in $\lang$;
\item
$c^{\str A}=c$ for every constant $c$ in $\lang$;
\item
$f^{\str A}(c_0,\dots,c_{n-1})=d\Iff (fc_0\dotsb
  c_{n-1}=d)\in\Sigma$;
\item
$(c_0,\dots,c_{n-1})\in R^{\str A}\Iff Rc_0\dotsb c_{n-1}\in\Sigma$.
\end{enumerate}
We want $\str A$ to be a model of $\Sigma$.
There are three potential problems:
\begin{enumerate}
  \item
The signature $\lang$ might not contain any constants.
\item
Suppose $\lang$ does contain constants $c$ and $d$.
We have 
\begin{equation*}
\str A\models c=d \Iff c^{\str A}=d^{\str A}\Iff c=d.  
\end{equation*}
So
$\str A$ can't be a model of $\Sigma$ unless either $\Sigma$ does not
contain the sentence $c=d$, or $c$ and $d$ are the same symbol.
\item
If $\str A\models(\lnot\phi)_c^x$ for every constant $c$ in $\lang$,
then $\str A\models\Forall x\lnot\phi$.  However, possibly $\Sigma$
contains all
of the formulas $(\lnot\phi)_c^x$, but also $\Exists x\phi$; an
example follows this list.
\end{enumerate}
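For an example of the last problem, let $\lang=\{P,c\}$, where $P$ is
a singulary predicate and $c$ is a constant, and let $\str A$ be an
$\lang$-structure in which $P^{\str A}$ is non-empty, although
$c^{\str A}\notin P^{\str A}$.  Then $\Th{\str A}$ is finitely
satisfiable and contains both $\Exists xPx$ and $(\lnot Px)_c^x$, that
is, $\lnot Pc$; the same is true of any maximal finitely satisfiable
set that includes $\Th{\str A}$.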

The solutions to these problems will be as follows.
\begin{enumerate}
  \item
We expand $\lang$ to a signature $\lang'$ that contains infinitely
many constants.  Then we enlarge $\Sigma$ to a maximal finitely
satisfiable subset $\Sigma'$ of $\Sn[\lang']$.
\item
Letting $C$ be the set of constants of $\lang'$, we define an
equivalence-relation $E$ on $C$ by
\begin{equation*}
  c\mathrel Ed\Iff (c=d)\in\Sigma'.
\end{equation*}
Then we let $A$ be, not $C$, but $C/E$.
\item
In enlarging $\Sigma$ to $\Sigma'$, we ensure that, if $\Exists
x\phi\in\Sigma'$, then $(\phi)_c^x\in\Sigma'$ for some $c$ in~$C$.
\end{enumerate}
The proof that these \emph{do} solve the problems will depend on
$\size{\lang}$.

\begin{theorem}[Compactness]\label{thm:compactness-1st}%
\index{theorem!Compactness Th---}%
\index{compact!C---ness Theorem}
  Every finitely satisfiable set of sentences (in some signature) is
  satisfiable.
\end{theorem}

\begin{proof}
  Suppose $\Sigma$ is a finitely satisfiable subset of $\Sn$.  Let $C$ be
a set of new constants (so $\lang\cap C=\emptyset$).
  For any
  $\lang$-structure $\str A$, there is some $a$ in $A$; so we can
  expand $\str A$ to an $\lang\cup C$-structure $\str A'$ by defining
  \begin{equation*}
    c^{\str A'}=a
  \end{equation*}
for all $c$ in $C$.  In particular, $\Sigma$ is still finitely
satisfiable \emph{as a set of sentences of $\lang\cup C$}.

Assume first that $\lang$ is countable, and let $C$ be countably
infinite.  Then we can enumerate
$\Sn[\lang\cup C]$ as 
$\{\sigma_n\colon n\in\vnn\}$, and $C$ as $\{c_n\colon n\in\vnn\}$.
We shall define a chain
\begin{equation*}
  \Sigma_0\included\Sigma_{1}\included\Sigma_2\included\dotsb,
\end{equation*}
where each $\Sigma_k$ is finitely satisfiable, and only finitely many
constants in $C$ appear in formulas in $\Sigma_k$.  The recursive
definition is the following:
\begin{enumerate}
  \item
$\Sigma_0=\Sigma$.  (By assumption, $\Sigma_0$ is finitely
    satisfiable, and it contains no constants of $C$.)
\item
Assume $\Sigma_{2n}$ has been defined as required.  Then define
\begin{equation*}
  \Sigma_{2n+1}=
  \begin{cases}
    \Sigma_{2n}\cup\{\sigma_n\},&\text{ if this is finitely
    satisfiable;}\\
\Sigma_{2n},&\text{ if not.}
  \end{cases}
\end{equation*}
Then $\Sigma_{2n+1}$ is as required.
\item
Suppose $\Sigma_{2n+1}$ has been defined as required.
Suppose also $\sigma_n\in\Sigma_{2n+1}$, and
$\sigma_n$ is $\Exists x\phi$ for some $\phi$.  The set of $m$ such
that $c_m$ does \emph{not} appear 
    in a formula in $\Sigma_{2n+1}$ has a least element, $k$.  Then
    the set $\Sigma_{2n+1}\cup\{(\phi)_{c_k}^x\}$ is
    finitely satisfiable.  For, if $\Gamma$ is a finite subset of
    $\Sigma_{2n+1}$, then so is $\Gamma\cup\{\Exists x\phi\}$, since
    $\Exists x\phi\in\Sigma_{2n+1}$; so this set has a model $\str A$.
    Then $\str A\models(\phi)_a^x$ for some $a$ in $A$; so we can make
    $\str A$ into a model of $\Gamma\cup\{(\phi)_{c_k}^x\}$ by
    interpreting $c_k$ as $a$, which affects nothing else, since
    $c_k$ does not appear in $\Gamma\cup\{\Exists x\phi\}$.  In this
    case we define
\begin{equation*}
  \Sigma_{2n+2}= \Sigma_{2n+1}\cup\{(\phi)_{c_k}^x\};
\end{equation*}
otherwise, let $\Sigma_{2n+2}=\Sigma_{2n+1}$.  In either case,
$\Sigma_{2n+2}$ is as desired.
\end{enumerate}
Now we define
\begin{equation*}
  \Sigma^*=\bigcup_{n\in\vnn}\Sigma_n.
\end{equation*}
This is finitely satisfiable, since each finite subset is a subset of
some $\Sigma_n$.  Suppose $\Sigma^*\cup\{\sigma\}$ is finitely
satisfiable.  Now, $\sigma$ is $\sigma_n$ for some $n$, and
$\Sigma_{2n}\cup\{\sigma\}$ is finitely satisfiable, so
$\sigma\in\Sigma_{2n+1}$, and $\sigma\in\Sigma^*$.  So $\Sigma^*$ is a
maximal finitely satisfiable set. 

We now define a structure $\str A$ of $\lang\cup C$ that will turn out
to be a model of $\Sigma$.
We first define
\begin{equation*}
  E=\{(c,d)\in C^2\colon (c=d)\in\Sigma^*\}.
\end{equation*}
Then $E$ is an equivalence-relation on $C$ (\exercise).  So,
we can let
\begin{equation*}
  A=C/E.
\end{equation*}
Let the $E$-class of $c$ be denoted by $[c]$.  We can define
\begin{equation*}
  c^{\str A}=[c].
\end{equation*}
If $R$ is an $n$-ary
predicate in $\lang$, we let $R^{\str A}$ consist of those
$([c_0],\dots,[c_{n-1}])$ such that $\Sigma^*$ contains $Rd_0\dotsb
d_{n-1}$ for some $d_i$ such that $d_i\mathrel Ec_i$ for each $i$ in
$n$.  Then
\begin{equation*}
([c_0],\dots,[c_{n-1}])\in R^{\str A}\Iff
  Rc_0\dotsb c_{n-1}\in\Sigma^*
\end{equation*}
(\exercise).  If $f$ is an $n$-ary function-symbol in $\lang$,
then $\Sigma^*$ contains $\Exists x fc_0\dotsb c_{n-1}=x$ (since this
sentence is true in every structure), so $\Sigma^*$ contains
$fc_0\dotsb c_{n-1}=d$ for some $d$ in $C$, by construction of
$\Sigma^*$.  Moreover, if $c_i\mathrel Ec_i'$ for each $i$ in $n$, and
$\Sigma^*$ contains both
$fc_0\dotsb c_{n-1}=d$ and
$fc_0'\dotsb c_{n-1}'=d'$, then $d\mathrel Ed'$
(\exercise).  
Hence we can define $f^{\str A}$ by
\begin{equation*}
  f^{\str A}([c_0],\dots,[c_{n-1}])=[d]\Iff (fc_0\dotsb
  c_{n-1}=d)\in\Sigma^* 
\end{equation*}
(\exercise).
This works, even if $f$ is nullary---is a constant $c$ of $\lang$.
That is, we define
\begin{equation*}
  c^{\str A}=[d]\Iff (c=d)\in\Sigma^*.
\end{equation*}
So we have $\str A$.  

It remains to show $\str A\models\Sigma^*$.  We
shall do this by showing
\begin{equation}\label{eqn:compactness}
  \str A\models\sigma\Iff\sigma\in\Sigma^*
\end{equation}
for all sentences $\sigma$ of $\lang\cup C$.  Rather, so that we can
use induction, we shall show
\begin{equation*}
  \str A\models\phi(\tuple c)\Iff\phi(\tuple c)\in\Sigma^*
\end{equation*}
for all $\tuple c$ from $C$, for all formulas $\phi$.
We need a preliminary observation:  If $t$ is a constant term, and
$c\in C$, then
\begin{equation*}
  t^{\str A}=[c]\Iff (t=c)\in\Sigma^*
\end{equation*}
(\exercise).
Now suppose $\sigma$ is the atomic sentence $Rt_0\dotsb t_{n-1}$, and
$t_i{}^{\str A}=[c_i]$ for each $i$ in $n$.  Then
\begin{align*}
  \str A\models \sigma&\Iff (t_0{}^{\str
  A},\dots,t_{n-1}{}^{\str A})\in R^{\str A}\\
&\Iff([c_0],\dots,[c_{n-1}])\in R^{\str A}\\
&\Iff Rc_0\dotsb c_{n-1}\in\Sigma^*\\
&\Iff \sigma\in\Sigma^*.
\end{align*}
If instead $\sigma$ is the equation $t_0=t_1$, then
\begin{align*}
  \str A\models\sigma&\Iff t_0{}^{\str A}=t_1{}^{\str A}\\
&\Iff [c_0]=[c_1]\\
&\Iff (c_0=c_1)\in\Sigma^*\\
&\Iff \sigma\in\Sigma^*.
\end{align*}
Now suppose that \eqref{eqn:compactness} holds when $\sigma$ is
$\tau$.
If $\sigma$ is $\lnot\tau$, then
\begin{equation*}
  \str A\models\sigma\Iff \str A\nmodels\tau\Iff
  \tau\notin\Sigma^*\Iff \sigma\in\Sigma^*
\end{equation*}
by maximality of $\Sigma^*$.
If \eqref{eqn:compactness} holds also when $\sigma$ is $\theta$, and
now $\sigma$ is $\tau\land\theta$, then
\begin{align*}
\str A\models\sigma&\Iff \str A\models\tau\amp\str
A\models\theta\\
&\Iff \tau\in\Sigma^*\amp\theta\in\Sigma^*\\
&\Iff \sigma\in\Sigma^*
\end{align*}
by maximality of $\Sigma^*$.
Finally, suppose~\eqref{eqn:compactness} holds whenever $\sigma$ is
$\phi(c)$ for some $c$.
If $\sigma$ is $\Exists x\phi$, then
\begin{align*}
  \str A\models\sigma
&\Iff \str A\models\phi(a)\text{ for some $a$ in
  $A$}\\
&\Iff \str A\models\phi(c)\text{ for some $c$ in
  $C$}\\
&\Iff \phi(c)\in\Sigma^* \text{ for some $c$ in
  $C$}\\
&\Iff\Exists x\phi\in\Sigma^*
\end{align*}
by definition of $\Sigma^*$.
By induction, \eqref{eqn:compactness} holds for all $\sigma$, so $\str
A\models\Sigma^*$.  This completes the proof when $\lang$ is
countable.  If $\size{\lang}=\kappa>\vnn$, then we can enumerate
$\Sn[\lang\cup C]$ as $\{\sigma_{\alpha}\colon \alpha\in\kappa\}$, and
$C$ as $\{c_{\alpha}\colon\alpha\in\kappa\}$.  We proceed as before,
only, if $\lambda$ is a limit ordinal in $\kappa$, then
$\Sigma_{\lambda}=\bigcup_{\alpha\in\lambda}\Sigma_{\alpha}$. 
\end{proof}

For the model $\str A$ of $\Sigma$ produced in the proof, we have
$\size A\leq\size C=\size{\lang}$. 

\section{Applications}

\begin{theorem}
  If $T$ is a theory such that, for all $n$ in $\vnn$, there is a
  model of $T$ of size greater than $n$, then $T$ has an infinite
  model. 
\end{theorem}

\begin{proof}
For each $n$ in $\vnn$, introduce a new constant $c_n$.
  Every model of the theory $T\cup\{c_i\neq c_j\colon i<j<\vnn\}$ is
  infinite.  Also this theory \emph{has} models, by Compactness, since
  the theory is finitely satisfiable.  Indeed, every finite subset of
  the theory is a subset of $T\cup\{c_i\neq c_j\colon i<j<n\}$ for some $n$.  We
  can expand a
  model of $T$ of size greater than $n$ to a model of
  the larger theory by interpreting each $c_i$ by a different element
  of the universe.
\end{proof}

For example, let $\class K$ be the class of finite fields (considered as
  structures in the signature $\{+,-,\cdot, 0,1\}$).  Then $\Th{\class
  K}$ has infinite models; these are called 
\defnplain{pseudo-finite}{}%
\index{pseudo-finite field}
  fields.  Every field $F$ has a \defnplain{characteristic}:%
\index{characteristic of a field}  
If
  \begin{equation*}
    F\models \underbrace {1+\dotsb+1}_p=0
  \end{equation*}
for some prime number $p$, then $p$ is the characteristic of $F$, or
$\Char F=p$; if
there is no such $p$, then $\Char F=0$.  The field $F$ is
\defnplain{perfect}{}%
\index{perfect field} if either:
\begin{enumerate}
  \item
$\Char F=0$; or
\item
$\Char F=p$ and every element of $F$ has a $p$-th root.
\end{enumerate}
Then the perfect fields are precisely the fields that satisfy, for
each prime number $p$, the axiom
\begin{equation*}
  \Forall x\Exists y(\underbrace{1+\dotsb+1}_p=0\lto y^p=x).
\end{equation*}
Now, if $F$ is finite, then $\Char F=p$ for some prime $p$, and the
function $x\mapsto x^p$ is an \defn{automorphism}{} of $F$, that is, an
isomorphism from $F$ to itself.  This shows $F$ is perfect.  Therefore
the pseudo-finite fields are also perfect.  In fact, axioms can be
written for the theory of pseudo-finite fields \cite{MR0229613}. 
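
(In detail: when $\Char F=p$, the map $x\mapsto x^p$ preserves
multiplication, and it preserves addition, since
\begin{equation*}
  (x+y)^p=\sum_{k=0}^p\binom pkx^ky^{p-k}=x^p+y^p,
\end{equation*}
the coefficients $\binom pk$ being divisible by $p$ when $0<k<p$.  The
map is injective, since $x^p=y^p$ implies $(x-y)^p=0$; and an
injective map from the finite set $F$ to itself is surjective, so
every element of $F$ has a $p$-th root.)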

\pagebreak
Another field-theoretic application of Compactness is to
\defnplain{ordered field}{s,}%
\index{order!---ed field}
namely, structures $\str F$ or $(F,+,-,\cdot,0,
  1, <)$ such that: 
  \begin{enumerate}
    \item
$(F,+,-,\cdot,0,1)$ is a
      field;
\item
$(F,<)$ is a total order;
\item
$\str F\models\Forall x\Forall y(0<x\land 0<y\lto 0<x+y\land 0<x\cdot
  y)$;
\item
$\str F\models\Forall x(x<0\lto 0<-x)$.
  \end{enumerate}
An ordered field must have characteristic $0$ (why?); hence $\Q$ can
be treated as a sub-field of it.
In an ordered field, the formula $0<x$ defines the set of
\defnplain{positive element}{s.}%
\index{positive element of an ordered field}  
The ordered field $\str F$ is
\defnplain{Archimedean}{}%
\index{Archimedean ordered field}
if, for all positive $a$ and $b$ in $F$, there is a
natural number $n$ such that
\begin{equation*}
  \str F\models a<\underbrace{b+\dotsb+b}_n.
\end{equation*}
Then $\R$ is an Archimedean ordered field.  However, there is an ordered field
$\str F$ such that $\str F\equiv\R$, but $\str F$ is not Archimedean.
Indeed, let $c$ be a new constant.  Then the theory
\begin{equation*}
  \Th{\R}\cup\{\underbrace{1+\dotsb+1}_n<c\colon n\in\vnn\}
\end{equation*}
is finitely satisfiable, since for every finite subset $\Sigma$ of
this theory, $\R$ itself expands to a model of $\Sigma$.  So the
theory has a model $\str F$, by Compactness; but this model is not
Archimedean. 

\begin{lemma}[L\"owenheim--Skolem]\label{lem:LS}
  Suppose $\str A$ is an infinite $\lang$-structure, and $\kappa$ is
  an infinite cardinal such that $\size{\lang}\leq\kappa$.
Then
  there is an $\lang$-structure
  $\str B$ such that $\size B=\kappa$ and $\str A\equiv\str B$. 
\end{lemma}

\begin{proof}
Introduce $\kappa$-many new constants $c_\alpha$ (where $\alpha<\kappa$).
  In the proof of the Compactness Theorem, let $\Sigma$
  be $\Th{\str A}\cup\{c_\alpha\neq c_\beta\colon \alpha<\beta<\kappa\}$.
  This set is finitely satisfiable.  Indeed, any finite subset is
  included in a subset $\Th{\str
  A}\cup\{c_{\alpha_i}\neq
  c_{\alpha_j}\colon i<j<n\}$ for some finite subset
  $\{\alpha_0,\dots,\alpha_{n-1}\}$ of $\kappa$.  Then $\str A$
  expands to a model of this set of sentences, once we interpret each
  constant $c_{\alpha_i}$ as a different element of $A$.  (Since $A$
  is infinite, we can do this.)  Therefore $\Sigma$ is finitely
  satisfiable. 
  The \emph{proof} of Compactness now produces a model of $\Sigma$
  of size $\kappa$.  
\end{proof}

\begin{theorem}[Upward L\"owenheim--Skolem]\label{thm:LST}%
\index{theorem!L\"owenheim--Skolem Th---!Upward ---}%
\index{Upward L\"owenheim--Skolem Theorem}%
\index{Lowenheim@L\"owenheim--Skolem Theorem!Upward ---}
If $\str A$ is an infinite $\lang$-structure, and
$\size{\lang(A)}\leq\kappa$, then $\str A$ has an elementary extension
of cardinality $\kappa$.  
\end{theorem}

\begin{proof}
  In the lemma, replace $\str A$ with $\str A_A$ and use
  the Diagram Lemma. 
\end{proof}


\begin{theorem}[\L o\'s--Vaught Test]\label{thm:Vaught}%
\index{theorem!Test!Los@\L o\'s--Vaught ---}%
\index{Los@\L o\'s--Vaught Test}%
\index{Vaught!\L o\'s--Vaught Test}
  Suppose $T$ is a satisfiable theory of $\lang$.  If
  \begin{enumerate}
    \item
$T$ has no finite models, and
\item
$T$ is $\kappa$-categorical for some $\kappa$ such that
  $\size{\lang}\leq\kappa$,
  \end{enumerate}
then $T$ is complete.
\end{theorem}

\begin{proof}
  Suppose $T$ is satisfiable, and has no finite models, but
  is not complete.  Then for 
  some sentence $\sigma$, neither $\sigma$ nor $\lnot\sigma$ is a
  consequence of $T$.  Hence, both $T\cup\{\lnot\sigma\}$ and
  $T\cup\{\sigma\}$ have models.  By Lemma~\ref{lem:LS}, they
  have models of size $\kappa$.  These models are not elementarily
  equivalent, so they are not isomorphic; this means $T$ is not
  $\kappa$-categorical. 
\end{proof}

Hence the theory of an equivalence-relation with just two classes,
both infinite (\S\ref{sect:categoricity}), being $\vnn$-categorical
and having no finite models, is complete.  Likewise for many other
examples given above.
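
For another instance, $\TO^*$ has no finite models, and it is
$\vnn$-categorical, by Cantor's Theorem (\ref{thm:Cantor}, proved
below); so, by the Test, $\TO^*$ is complete.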

\section*{Exercises}

\begin{xca}
Supply the missing details of the proof of Compactness.
\end{xca}

\begin{xca}
\mbox{}
\begin{enumerate}
\item
Show that every Archimedean ordered field is elementarily equivalent
to some \emph{countable, non-Archimedean} 
ordered field.  
\item
  Show that every non-Archimedean ordered field contains
  \defn{infinitesimal}{} elements, that is, positive elements $a$ that
  are less than every positive rational number.
\item
  Find an explicit example of a non-Archimedean ordered field.
\end{enumerate}
\end{xca}

\begin{xca}
The 
\defnplain{order}{}%
\index{order!--- of a group-element}
of an element $g$ of a group is the size of the
subgroup $\{g^n\colon n\in \Z\}$ that $g$ generates.  In a \defn{periodic
group}, all elements have finite order.  Suppose $G$ is a periodic
group in which there is no finite upper bound on the orders of
elements.  Show that $G\equiv H$ for some non-periodic group $H$.
\end{xca}

\begin{xca}
  Suppose $(X,<)$ is an infinite total order in which $X$ is
  \emph{well-ordered}
  by $<$.  Show that there is a total order $(X^*,<^*)$ such that
  \begin{equation*}
    (X,<)\equiv(X^*,<^*),
  \end{equation*}
but $X^*$ is \emph{not} well-ordered by $<^*$.
\end{xca}

\begin{xca}\label{ex:TA-cont}
  For any theory $T$, prove that $\str A\models T_{\forall}$ if and
  only if $\str A$ is a substructure of a model of $T$.  (See
  Exercise~\ref{ex:TA}.) 
\end{xca}

\begin{xca}
  Find a theory that is $\vnn$-categorical, but not complete.
\end{xca}

\begin{xca}
  Describe all fields $F$ such that the theory of vector-spaces over
  $F$ is complete.
\end{xca}

\begin{xca}
  Give a complete axiomatization of $\Th{\C}$. 
\end{xca}










%\input{completeness}
\chapter{Completeness}
\numberwithin{lemma}{section}
\numberwithin{theorem}{section}

\section{Introduction}

We aim now to establish a 
\techplain{sound}, 
\techplain{complete}{} 
\techplain{proof system}{}%
\index{proof!--- system}%
\index{sound!--- proof system}
\index{complete!--- proof system}
 for first-order logic.  The terminology is just as for
  propositional logic in \S\ref{sect:syn-entail}
  and \S\ref{sect:complete}.  But since we shall consider various
  possible proof-systems $\psys S$, we shall write
\begin{equation*}
  \Sigma\proves[S]\sigma
\end{equation*}
in case there is a formal proof, in the system $\psys S$, of $\sigma$ from
$\Sigma$.  If $\psys T$ is another proof system, which has the axioms
and rules of inference of $\psys S$ among its own axioms and rules of
inference, then we may write
\begin{equation*}
  \psys S\included\psys T.
\end{equation*}
To the basic observations of Lemmas~\ref{lem:immediate},
\ref{lem:pis-proof}, and~\ref{lem:GSF}, which hold quite generally, we
can add

\begin{lemma}\label{lem:gen}
\mbox{}  
\begin{enumerate}
\item
if $\Sigma\proves[S]\sigma$ and $\psys S\included\psys T$, then
$\Sigma\proves[T]\sigma$; 
\item
if $\Sigma\proves[S]\sigma$, then $\Sigma_0\proves[S]\sigma$ for some
\emph{finite} subset $\Sigma_0$ of $\Sigma$.\hfill\qedsymbol
\end{enumerate}
\end{lemma}

\section{Propositional logic}

A generalization of Theorem~\ref{thm:sound} is

\begin{lemma}\label{lem:sound}
  Let $\psys S$ be a proof system for propositional logic.  Then
  $\psys S$ is sound 
  if and only if:
  \begin{enumerate}
    \item\label{item:each-axiom}
each axiom of $\psys S$ is a tautology;
\item\label{item:Phi-phi}
$\Phi\models\phi$ whenever $\phi$ can be inferred from $\Phi$
  by one of the rules of inference of $\psys S$.
  \end{enumerate}
\end{lemma}

\begin{proof}
  Suppose $\psys S$ is sound.  If $\phi$ is an axiom of $\psys S$,
  then $\proves [S]\phi$ and therefore $\models\phi$.
Suppose that $\phi$ can be inferred from $\Phi$ by one of the
  rules of inference of $\psys S$.  Then there is a subset
  $\{\psi_0,\dots,\psi_{n-1}\}$ of $\Phi$ for which the sequence
  \begin{equation*}%\label{eqn:psi-phi}
    (\psi_0,\dots,\psi_{n-1},\phi)
  \end{equation*}
is a deduction of $\phi$ from $\Phi$ in $\psys S$.  Hence
$\Phi\proves[S]\phi$, and therefore $\Phi\models\phi$.

The converse can be proved by induction on deductions from $\Phi$.
Suppose~\eqref{item:each-axiom} and~\eqref{item:Phi-phi} hold.  Say
$\phi$ has a
  deduction $(\psi_0,\dots,\psi_{n-1},\phi)$ from $\Phi$ in $\psys S$.
As an inductive
  hypothesis, suppose $\Phi\models\psi_i$ for each $i$ in $n$.
If $\phi\in\Phi$, then
$\Phi\models\phi$ trivially.  
If $\phi$ is an axiom of $\psys
S$, then $\models\phi$ by assumption, so $\Phi\models\phi$.  The
remaining possibility is that $\phi$ can be
inferred, by a rule of inference of $\psys S$, from some subset
$\Phi_0$ of $\{\psi_0,\dots,\psi_{n-1}\}$.
Then $\Phi_0\models\phi$ by assumption, so
$\Phi\models\phi$ by Lemma~\ref{lem:GSF}. 
\end{proof}

Let us return again in this chapter to using the signature
$\{\lnot,\lto\}$ for propositional logic.
In Ch.~\ref{ch:prop} we established a sound and complete proof system
in this signature.  Henceforth, for propositional logic, let us
just use the system $\psys P$, in which all tautologies are axioms, and
Detachment
is the only rule of inference.  This too is sound and complete.
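
Indeed, $\psys P$ is sound by Lemma~\ref{lem:sound}, since its axioms
are tautologies, and $\{\sigma,\sigma\lto\tau\}\models\tau$.  For
completeness, suppose $\Sigma\models\sigma$.  By the Compactness
Theorem for propositional logic,
$\{\sigma_0,\dots,\sigma_{n-1}\}\models\sigma$ for some finite subset
$\{\sigma_0,\dots,\sigma_{n-1}\}$ of $\Sigma$; then
\begin{equation*}
  \sigma_0\lto\dotsb\lto\sigma_{n-1}\lto\sigma
\end{equation*}
is a tautology, hence an axiom of $\psys P$, and $n$ applications of
Detachment yield $\Sigma\proves[P]\sigma$.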




\section{Tautological completeness}

Let $\lang$ be a signature for first-order logic.
To \emph{prove} that a certain proof system for $\Sn$ is complete, we
shall use the method
first expounded by Leon Henkin, in \cite{MR0033781}.  (Henkin's proof
was a part of his doctoral thesis; see \cite{MR1396852}.  We have
already used Henkin's method to prove Compactness.)  The
particular treatment in these notes owes something to Shoenfield's in
\cite{MR1809685}.  I introduce the notions of 
\techplain{tautological}{}%
\index{tautolog!---ical completeness}%
\index{complete!tautological ---ness}
and
\techplain{deductive}{}%
\index{deduc!---tive completeness}%
\index{complete!deductive ---ness}
completeness merely to make our ultimate
proof system seem natural. 

Let us say that a proof-system $\psys S$ for $\Sn$ is
\defnplain{tautologically complete}{}%
\index{tautolog!---ically complete}%
\index{complete!tautologically ---}
if, from the assumption that
 \begin{equation}\label{eqn:prop-ent}
   \sF_k\in\Cn[]{\sF_0,\dots,\sF_{k-1}}
 \end{equation}
where the $\sF_i$ are $n$-ary {propositional}
 formulas, it follows that
\begin{equation}\label{eqn:1-ent}
\{\sF_0(\tuple{\sigma}),\dots,
  \sF_{k-1}(\tuple{\sigma})\}\proves[S]
  \sF_k(\tuple{\sigma})
\end{equation}
for all $n$-tuples $\tuple{\sigma}$ from $\Sn$.
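
For example, $P_1\in\Cn[]{P_0,P_0\lto P_1}$; so, taking
$(\sF_0,\sF_1,\sF_2)$ to be $(P_0,P_0\lto P_1,P_1)$, we see that a
tautologically complete system $\psys S$ must satisfy
$\{\sigma,\sigma\lto\tau\}\proves[S]\tau$ for all $\sigma$ and $\tau$
in $\Sn$.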


\begin{lemma}\label{lem:1}
  Let $\psys S$ be a proof system for $\Sn$.  Then $\psys S$ is
  tautologically complete if and only if:
  \begin{enumerate}
    \item\label{item:pSs}
  $\proves[S]\sigma$ for all tautologies $\sigma$ of $\Sn$, and 
\item\label{item:sst}
$\{\sigma,\sigma\lto\tau\}\proves[S]\tau$
for all $\sigma$ and $\tau$ in $\Sn$.
  \end{enumerate}
\end{lemma}

\begin{proof}
If $\psys S$ is tautologically complete, then
immediately~\eqref{item:pSs} follows;~\eqref{item:sst} follows since
$\{P_0,P_0\lto P_1\}\models P_1$. 

To prove the converse, we can take advantage of the completeness of
$\psys P$ and use induction in the tree of formal proofs.
Suppose we have \eqref{eqn:prop-ent}.
Then $\sF_k$ has a formal proof from $\{\sF_0,\dots,\sF_{k-1}\}$.  Say
this proof is
\begin{equation*}
  (\sG_0,\dots,\sG_{m-1},\sF_k),
\end{equation*}
and suppose~\eqref{eqn:1-ent} holds when $\sF_k$ is any of the $\sG_i$. 
There are three possibilities:
\begin{enumerate}
  \item
If $\sF_k\in\{\sF_0,\dots,\sF_{k-1}\}$, then trivially \eqref{eqn:1-ent}
follows.
\item
If $\sF_k$ is a tautology, then
$\proves[S]\sF_k(\tuple{\sigma})$ by assumption, so
\eqref{eqn:1-ent}. 
\item
If $\sG_j$ is $(\sG_i\lto \sF_k)$ for some $i$ and $j$ in $m$, then, by
inductive hypothesis, we have
\begin{equation*}
\{\sF_0(\tuple{\sigma}),\dots,
  \sF_{k-1}(\tuple{\sigma})\}\proves[S]
  \sG_i(\tuple{\sigma});\qquad\qquad
\{\sF_0(\tuple{\sigma}),\dots,
  \sF_{k-1}(\tuple{\sigma})\}\proves[S]
  \sG_j(\tuple{\sigma});
\end{equation*}
hence \eqref{eqn:1-ent} by assumption.  
\end{enumerate}
In all cases then, \eqref{eqn:1-ent} follows.
\end{proof}

It should be clear that a complete proof system is tautologically
complete.  The converse fails.  For example, the proof system in which
all tautologies are axioms and
  Detachment is the only rule of inference is not complete,
  since it cannot be used to prove the validity $\Exists x x=x$.

Let $\bot$ be the negation of a tautology, say
\begin{equation*}
  \lnot(\Exists xx=x\lto\Exists xx=x).
\end{equation*}
Henceforth, let $\Sigma\included\Sn$ and $\sigma\in\Sn$.

\begin{lemma}\label{lem:2}
  In a tautologically complete proof system $\psys S$, the following are
  equivalent:
  \begin{enumerate}
    \item
$\Sigma\proves[S]\lnot\sigma$ for some $\sigma$ in $\Sigma$;
\item
$\Sigma\proves[S]\sigma$ and $\Sigma\proves[S]\lnot\sigma$ for some $\sigma$
  in $\Sn$;
\item
$\Sigma\proves[S]\sigma$ for every $\sigma$ in $\Sn$;
\item
$\Sigma\proves[S]\bot$.\hfill\qedsymbol
  \end{enumerate}
\end{lemma}

If $\Sigma\proves[S]\bot$, then $\Sigma$ is
\defn{inconsistent}{} in $\psys S$; otherwise, it is \defn{consistent}.

\begin{lemma}
  In a complete proof system, every consistent subset of
  $\Sn$ has a model.
\end{lemma}

\begin{proof}
  If $\psys S$ is complete, but $\Sigma$ has no model, then
  $\Sigma\models\bot$, so
  $\Sigma\proves[S]\bot$ by completeness, so $\Sigma$ is inconsistent. 
\end{proof}

The converse of the lemma may fail, even if the proof system is
required to be tautologically complete (\exercise).  





\section{Deductive completeness}

Let a proof system $\psys S$ be called 
\defnplain{deductively complete}{}%
\index{deduc!---tively complete}%
\index{complete!deductively complete} 
if
$\Sigma\proves[S]\sigma\lto\tau$ whenever
$\Sigma\cup\{\sigma\}\proves[S]\tau$. 

\begin{lemma}\label{lem:4}
  A tautologically and deductively complete proof system in which
  every consistent set has a model is complete.
\end{lemma}

\begin{proof}
Suppose $\psys S$ is such a system,
 and  $\Sigma\cup\{\lnot\sigma\}$ is 
  inconsistent in $\psys S$.  Then
 $\Sigma\cup\{\lnot\sigma\}\proves[S] \sigma$ by 
  Lemma~\ref{lem:2}, so $\Sigma\proves[S]\lnot\sigma\lto\sigma$ by deductive
  completeness.
But $(\lnot\sigma\lto\sigma)\lto\sigma$ is a tautology, so
  $\Sigma\proves[S]\sigma$ by tautological completeness.  

Therefore, if $\Sigma\nproves[S]\sigma$, then
 $\Sigma\cup\{\lnot\sigma\}$ is 
  consistent, so it has a model by assumption; this shows
 $\Sigma\nmodels\sigma$. 
\end{proof}

\begin{lemma}\label{lem:5}
A tautologically complete proof system whose only rule of inference is
Detachment is deductively complete.\hfill\qedsymbol
\end{lemma}

\begin{lemma}\label{lem:6}
  Suppose $\Sigma$ is consistent in a
  tautologically and deductively complete proof system.
The following are equivalent:
\begin{enumerate}
  \item
If $\Sigma\included\Gamma\included\Sn$ and $\Gamma$ is consistent,
then $\Gamma=\Sigma$.
\item
$\lnot\sigma\in\Sigma\Iff\sigma\notin\Sigma$ for all $\sigma$ in
  $\Sn$. \hfill\qedsymbol
\end{enumerate}
\end{lemma}

A set $\Sigma$ meeting one of the conditions in the lemma can be
called 
\defnplain{maximally consistent}.%
\index{maximal!---ly consistent}

\section{Completeness}

By Lemma~\ref{lem:1}, we know of one tautologically complete
proof system, namely, 
the system whose axioms are the tautologies, and whose rule of
inference is Detachment.  Let $\psys S$ be this system.  Then
$\psys S$ is deductively
complete, by Lemma~\ref{lem:5}, and is sound, by
Lemma~\ref{lem:sound}.  Moreover, soundness
and deductive 
completeness are 
preserved if we add new valid
axioms to $\psys S$.  Now we shall see which valid axioms we can add
in order to ensure 
that every consistent set has a model; then we shall have a complete
system by Lemma~\ref{lem:4}. 

Assuming $\psys S'$ is obtained from $\psys S$ by adding valid axioms,
we try to follow the proof of the Compactness Theorem, replacing
`finitely satisfiable'
with `consistent in $\psys S'$.'  Assume that $\lang$ is countable.  Suppose
$\Sigma$ is a consistent subset of $\Sn$.
We introduce a countably infinite set $C$ of new constants and enumerate
$\Sn[\lang\cup C]$ as $\{\sigma_n\colon n\in\vnn\}$.  We construct a chain
\begin{equation*}
  \Sigma=\Sigma_0\included\Sigma_1\included\Sigma_2\included\dotsb
\end{equation*}
where
\begin{equation*}
  \Sigma_{2n+1}=
  \begin{cases}
    \Sigma_{2n}\cup\{\sigma_n\},& \text{ if this is consistent;}\\
\Sigma_{2n}, & \text{ otherwise.}
  \end{cases}
\end{equation*}
If $\sigma_n$ is $\Exists x\phi$, and this is in $\Sigma_{2n+1}$, then
we want to define $\Sigma_{2n+2}$ as 
\begin{equation*}
  \Sigma_{2n+1}\cup\{\phi_c^x\},
\end{equation*}
where $c$ is a constant not used in $\Sigma_{2n+1}$.  But we need to
know that this set is consistent.
For this, we assume that $\psys S'$ has, as axioms, the sentences
\begin{equation}\label{eqn:axiom-E}
  ((\phi)_c^x\lto\psi)\lto\Exists x\phi\lto \psi,
\end{equation}
where $c$ is a constant appearing neither in $\phi$ nor in $\psi$.  Note that these
axioms are valid.  We now have:

\begin{lemma}
  If $\Gamma$ is consistent in $\psys S'$ and contains $\Exists
  x\phi$, and $c$ does
  not appear in $\Gamma$, then $\Gamma\cup\{(\phi)_c^x\}$ is
  consistent in $\psys S'$.
\end{lemma}

\begin{proof}
Suppose it's not.  Then 
\begin{equation*}
  \{\psi_0,\dots,\psi_{k-1}\}\cup\{(\phi)_c^x\}\proves[S']\bot
\end{equation*}
for some $\psi_i$ in $\Gamma$.
By deductive completeness, 
\begin{equation*}
 \proves[S'] (\phi)_c^x\lto\psi_0\lto\dotsb\lto\psi_{k-1}\lto\bot.
\end{equation*}
From \eqref{eqn:axiom-E} and Detachment we have
\begin{equation*}
\proves[S']  \Exists x\phi\lto\psi_0\lto\dotsb\lto\psi_{k-1}\lto\bot.
\end{equation*}
Then $k+1$ applications of Detachment show
\begin{equation*}
  \Gamma\proves[S']\bot,
\end{equation*}
which contradicts the assumption that $\Gamma$ is consistent.
\end{proof}



So now, given a consistent subset $\Sigma$ of $\Sn$, we can construct
a consistent subset $\Sigma^*$ of 
$\Sn[\lang\cup C]$ such that
\begin{enumerate}
\item
$\Sigma\included\Sigma^*$;
\item
$\Sigma^*$ is maximally consistent;
  \item
if $(\Exists x\phi)\in\Sigma^*$, then $(\phi)_c^x\in\Sigma^*$ for some $c$
in $C$; that is, $\Sigma^*$ \defn{has witnesses}{}.
\end{enumerate}
As in the proof of Compactness, we want to use $\Sigma^*$ to define a
model $\str A$ of itself.
For the sake of defining the universe of $\str A$, we assume now that
$\psys S'$ also has the axioms
\begin{gather}\label{eqn:equality}
  c=c,\\ \label{eqn:more-equality}
c=c'\lto d=d'\lto c=d\lto c'=d',
\end{gather}
where $c$, $c'$, $d$ and $d'$ range over $C$.
Let $E$ be the relation
\begin{equation*}
  \{(c,d)\in C^2\colon (c=d)\in\Sigma^*\}.
\end{equation*}

\begin{lemma}
The relation
$E$ is an equivalence-relation.
\end{lemma}

\begin{proof}
We first show
\begin{gather}\label{eqn:=1}
  \proves[S'] c=c,\\ \label{eqn:=2}
\proves[S'] c=d\lto d=c,\\ \label{eqn:=3}
\proves[S'] c=d\lto d=e\lto c=e
\end{gather}
for all constants $c$, $d$ and $e$ in $C$.
We have \eqref{eqn:=1} trivially by \eqref{eqn:equality}.  An
  instance of 
  \eqref{eqn:more-equality} is
  \begin{equation*}
    c=d\lto c=c\lto c=c\lto d=c;
  \end{equation*}
then \eqref{eqn:=2} follows by tautological completeness.  Another
instance of \eqref{eqn:more-equality} is
\begin{equation*}
  c=c\lto d=e\lto c=d\lto c=e;
\end{equation*}
then \eqref{eqn:=3} follows by tautological completeness.
By its maximal consistency then, $\Sigma^*$ contains $c=c$; and if
$\Sigma^*$ contains $c=d$ and $d=e$, then it contains $d=c$ and $c=e$. 
\end{proof}
We define $A$ to
be $C/E$.  We now define $R^{\str A}$ (for each
$n$-ary predicate $R$ in $\lang$)
as the set
\begin{equation*}
  \{([c_0],\dots,[c_{n-1}])\in A^n\colon   (Rc_0\dotsb
  c_{n-1})\in\Sigma^*\}.
\end{equation*}
Then we have
\begin{equation*}
  (Rc_0\dotsb c_{n-1})\in\Sigma^*\implies
  ([c_0],\dots,[c_{n-1}])\in R^{\str A}, 
\end{equation*}
but perhaps not the converse.
Possibly then both
$Rc_0\dotsb c_{n-1}$ and $\lnot Rc_0'\dotsb c_{n-1}'$ are in
$\Sigma^*$, although $(c_k=c_k')\in\Sigma^*$ in each
  case.  To prevent this, we assume as axioms of $\psys S'$ the sentences
\begin{equation*}
  c_0=c_0'\lto \dotsb \lto c_{n-1}=c_{n-1}'\lto Rc_0\dotsb
  c_{n-1}\lto Rc_0'\dotsb c_{n-1}'.
\end{equation*}
We now have:

\begin{lemma}\label{lem:PR}
$([c_0],\dots,[c_{n-1}])\in R^{\str A}\Iff (Rc_0\dotsb
  c_{n-1})\in\Sigma^*$.\hfill\qedsymbol
\end{lemma}

Finally, suppose $f$ is an $n$-ary function-symbol (where possibly
$n=0$, in which case $f$ is a constant).  We want
to be able to
define $f^{\str A}$.  (If $c\in C$, then $c^{\str A}=[c]$; but there
might be constants of $\lang$ as well.)  To define $f^{\str A}$, we
first need some lemmas, which are 
based on another axiom:
%\setcounter{equation}0
%\renewcommand{\theequation}{\#\fnsymbol{equation}} 
\begin{equation}\label{eqn:t}
  \phi_t^x\lto \Exists x\phi,
\end{equation}
where $\fv{\phi}\included\{x\}$ and $t$ is a constant term.
Let us assume that this is also an axiom of $\psys S'$.
Then we have:


\begin{lemma}[Substitution]
If $\fv{\phi}\included\{x\}$, and the constant $c$ does not appear in
$\phi$, then
\begin{equation*}
  \proves[S'] (\phi)_c^x\lto(\phi)_t^x
\end{equation*}
for all constant terms $t$.
\end{lemma}

\begin{proof}
We have
  \begin{align*}
    &\proves[S'] (\lnot\phi)_t^x\lto\Exists x\lnot \phi, &&\text{[by
    \eqref{eqn:t}]}\\ 
&\proves[S'] \lnot\Exists x\lnot\phi\lto(\phi)_t^x, &&\text{[by tautological
    completeness]}\\
&\proves[S'] ((\lnot\phi)_c^x\lto\bot)\lto\Exists
    x\lnot\phi\lto\bot,&&\text{[by \eqref{eqn:axiom-E}]}\\
&\proves[S'] (\phi)_c^x\lto\lnot\Exists x\lnot \phi,&&\text{[by tautological
    completeness]}
  \end{align*}
and hence
$\proves[S'] (\phi)_c^x\lto(\phi)_t^x$ by tautological completeness.
\end{proof}

\begin{lemma}
  $\proves[S'] t=t$ for all constant terms $t$. 
\end{lemma}

\begin{proof}
  We have
  \begin{align*}
    &\proves[S'] c=c,&&\text{[by \eqref{eqn:equality}]}\\
&\proves[S'] c=c\lto t=t,&&\text{[by the Substitution Lemma]}
  \end{align*}
and hence
$\proves[S'] t=t$ by tautological completeness.
\end{proof}

\begin{lemma}
  $\proves[S'] \Exists xfc_0\dotsb c_{n-1}=x$.   
\end{lemma}

\begin{proof}
  We have
  \begin{align*}
  &  \proves[S'] fc_0\dotsb c_{n-1}=fc_0\dotsb c_{n-1},&&\text{[by the
  last lemma]}\\
&\proves[S'] fc_0\dotsb c_{n-1}=fc_0\dotsb c_{n-1}\lto \Exists x fc_0\dotsb
  c_{n-1}=x, &&\text{[by \eqref{eqn:t}]}
  \end{align*}
hence
$\proves[S'] \Exists xfc_0\dotsb c_{n-1}=x$ by tautological completeness.
\end{proof}


Finally, we assume as axioms of $\psys S'$ the sentences
\begin{equation}\label{eqn:f}
  c_0=c_0'\lto\dotsb\lto c_{n-1}=c_{n-1}'\lto fc_0\dotsb
c_{n-1}=fc_0'\dotsb c_{n-1}'.
\end{equation}
This enables us to define $f^{\str A}$:

\begin{lemma}
  For each $n$-ary function-symbol $f$, there is an $n$-ary operation
  $f^{\str A}$ on $A$ given by
\begin{equation*}
  f^{\str A}([c_0],\dots,[c_{n-1}])=[d]\Iff (fc_0\dotsb
  c_{n-1}=d)\in\Sigma^*. 
\end{equation*}
\end{lemma}

\begin{proof}
Since $\Sigma^*$ is maximally consistent, we now have
\begin{equation*}
  (\Exists xfc_0\dotsb c_{n-1}=x)\in\Sigma^*.
\end{equation*}
Since $\Sigma^*$ has witnesses, we have
  $(fc_0\dotsb c_{n-1}=d)\in\Sigma^*$
for some constant $d$.  This gives us a value for $f^{\str
  A}([c_0],\dots,[c_{n-1}])$; we have to show that this value is
  unique.  For this, it is enough to show
\begin{equation*}
  \proves[S'] c_0=c_0'\lto\dotsb\lto c_{n-1}=c_{n-1}'\lto d=d'\lto fc_0\dotsb
c_{n-1}=d\lto fc_0'\dotsb c_{n-1}'=d'
\end{equation*}
for all $c_k$ and $c_k'$ and $d$
and $d'$ in $C$.  By \eqref{eqn:f} and tautological completeness, it
is enough to show
\begin{equation*}
  \proves[S'] fc_0\dotsb c_{n-1}=fc_0'\dotsb c_{n-1}'\lto d=d'\lto
  fc_0\dotsb c_{n-1}=d\lto fc_0'\dotsb c_{n-1}'=d'.
\end{equation*}
In the axiom \eqref{eqn:more-equality}, we may assume that $c$ is not
one of the constants $c'$, $d$ or 
$d'$. Then by the
Substitution Lemma, we have
\begin{equation*}
  \proves[S']fc_0\dotsb c_{n-1}=c'\lto d=d'\lto fc_0\dotsb c_{n-1}=d\lto c'=d'.
\end{equation*}
We may also assume that $c'$ is not one of the constants $c_k$, $d$ or
$d'$.  Applying the Substitution Lemma again gives what we want.
\end{proof}

The structure $\str A$ is now determined and is a model of $\Sigma$,
by the proof of the Compactness Theorem.
In sum, what we have shown is:
\pagebreak
\begin{theorem}[Completeness]\label{thm:completeness}%
\index{theorem!Completeness Th---}%
\index{complete!C---ness Theorem}
  That proof system for $\Sn$ is complete whose only rule of inference
  is Detachment, and whose axioms are the following:
  \begin{enumerate}
    \item
the tautologies;
\item
$((\phi)_c^x\lto\psi)\lto\Exists x\phi\lto \psi$, where
  $c$ appears neither in $\phi$ nor in $\psi$;
\item
$c=c$;
\item
$c=c'\lto d=d'\lto c=d\lto c'=d'$;
\item
$c_0=c_0'\lto\dotsb\lto c_{n-1}=c_{n-1}'\lto Rc_0\dotsb c_{n-1}\lto
  Rc_0'\dotsb c_{n-1}'$;
\item
$\phi_t^x\lto\Exists x\phi$;
\item
$c_0=c_0'\lto\dotsb\lto c_{n-1}=c_{n-1}'\lto fc_0\dotsb
  c_{n-1}=fc_0'\dotsb c_{n-1}'$.
  \end{enumerate}
Here the notation is as follows:
\begin{itemize}
\item
$x$ is a variable;
  \item
$\phi$ is a formula such that $\fv{\phi}\included\{x\}$; 
\item
$\psi$ is a sentence;
\item
$t$ is a constant term;
\item
  $c$, $c'$, $c_k$, $c_k'$, $d$ and $d'$ are constants; 
\item
$n\in\vnn$;
\item
  $R$ is an $n$-ary predicate if $n>0$; and 
\item$f$ is an $n$-ary
  function-symbol (or a constant, if $n=0$).
\end{itemize}
\end{theorem}

\section*{Exercises}

\begin{xca}
  Prove Lemma~\ref{lem:gen}.
\end{xca}

\begin{xca}
Show that the proof system in which
all tautologies are axioms and
  Detachment is the only rule of inference cannot be used to prove the
  validity $\Exists x x=x$.
\end{xca}

\begin{xca}
  Prove Lemma~\ref{lem:2}.
\end{xca}

\begin{xca}
Let the axioms
of a proof system $\psys S$ be the tautologies, and 
  let the
  rules of inference be Detachment, along with the rule that
  $\bot$ can be inferred from every finite set that has no model.
  (Note however that this is not really a \emph{syntactical} rule.)
Show that, in $\psys S$, all consistent sets have models, although
  the validity $\Exists xx=x$ is not deducible in~$\psys S$.
\end{xca}

\begin{xca}
  Prove Lemma~\ref{lem:5}.
\end{xca}

\begin{xca}
  Prove Lemma~\ref{lem:6}.
\end{xca}

\begin{xca}
  Prove Lemma~\ref{lem:PR}.
\end{xca}

\begin{xca}
  Prove the Compactness Theorem from the Completeness Theorem. 
\end{xca}

%\input{types}
\chapter{Numbers of countable models}\label{ch:numbers}
%\renewcommand{\theequation}{\fnsymbol{equation}}
%\setcounter{equation}0

Our ultimate aim is to show that
\begin{equation}\label{eqn:ITN}
  I(T,\vnn)\neq 2
\end{equation}
whenever $T$ is a countable, \emph{complete} theory.  The proof will
require several interesting general results.

Note that proving \eqref{eqn:ITN} requires $T$ to be complete.  For
  example, let $P$ be a singulary predicate, and in the signature
  $\{P\}$,
  let $T$ be axiomatized by
  \begin{equation*}
    \Forall x\Forall y(Px\land Py\lto x=y).
  \end{equation*}
Then $T$ has non-isomorphic countably infinite models
$(\vnn,\emptyset)$ and 
$(\vnn,\{0\})$, and every countably infinite model is isomorphic to
one of these.
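(For instance, the sentence $\Exists xPx$ is true in the second of
these models, but not in the first; so neither this sentence nor its
negation is a consequence of $T$, and $T$ is not complete.)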

\section{Three models}

In the signature $\{<\}\cup\{c_n\colon n\in\vnn\}$, let $T_3$ be the theory
axiomatized by
\begin{equation*}
  \TO^*\cup\{c_{n+1}<c_n\colon n\in\vnn\}.
\end{equation*}
We shall see that $T_3$ is complete, and $I(T_3,\vnn)=3$.
Let
  \begin{align*}
    A_0&=\{x\in\Q\colon 0<x\}=\Q\cap(0,\infty),\\
A_1&=\Q\setminus\{0\},\\
A_2&=\Q.
  \end{align*}
Then each $A_k$ is the
universe of a model $\str A_k$ of $T_3$, where $<^{\str A_k}$ is the
usual ordering $<$, and
\begin{equation*}
  c_n{}^{\str A_k}=\frac 1{2^n}.
\end{equation*}
Then the set $\{c_n\colon n\in\vnn\}$ of elements has
\begin{enumerate}
  \item
no lower bound, in $\str A_0$;
\item
a lower bound, but no infimum, in $\str A_1$;
\item
an infimum, in $\str A_2$.
\end{enumerate}
\begin{center}
  \begin{pspicture}(-2,-0.5)(10,2.5)
  \psline{->}(0,2)(10,2)
  \psline{<->}(-2,1)(10,1)
  \psline{<->}(-2,0)(10,0)
\psset{dotsize=3pt 3}
%  \psdots(8,2)(4,2)(2,2)(1,2)(0.5,2)(0.25,2)(0.125,2)(0.0625,2)
%         (8,1)(4,1)(2,1)(1,1)(0.5,1)(0.25,1)(0.125,1)(0.0625,1)
%         (8,0)(4,0)(2,0)(1,0)(0.5,0)(0.25,0)(0.125,0)(0.0625,0)
  \parametricplot[plotpoints=10,plotstyle=dots]{1}{10}{16 2 t exp div 2}
  \parametricplot[plotpoints=10,plotstyle=dots]{1}{10}{16 2 t exp div 1}
  \parametricplot[plotpoints=10,plotstyle=dots]{1}{10}{16 2 t exp div 0}
\psdots(0,0)
\psdots[dotstyle=o](0,2)(0,1)
\uput[r](10,2){$\str A_0$}
\uput[r](10,1){$\str A_1$}
\uput[r](10,0){$\str A_2$}
\end{pspicture}
\end{center}
Hence the three structures are not isomorphic.  However, we shall be
able to show:
\begin{enumerate}
\item
if $\str B\models T_3$ and is countable, then $\str B\cong\str
A_k$ for some $k$ in $3$;
  \item
$T_3$ is complete.
\end{enumerate}
The proof of the first claim will be by the \defn{back-and-forth
  method}.  The following gives the prototypical example:  

\begin{theorem}[Cantor \cite{Cantor}]\label{thm:Cantor}%
\index{theorem!Cantor's Th---}%
\index{Cantor's Theorem}
  $\TO^*$ is $\vnn$-categorical.
\end{theorem}

\begin{proof}
  Suppose $\str A,\str B\models\TO^*$ and $\size A=\vnn=\size B$.  We
  shall show $\str A\cong\str B$.  
We can enumerate the universes:
\begin{equation*}
  A=\{a_n\colon n\in\vnn\},\qquad B=\{b_n\colon n\in\vnn\}.
\end{equation*}
We shall recursively define an order-preserving bijection $h$ from $A$
to $B$.  In particular, $h$ will be $\bigcup\{h_n\colon n\in\vnn\}$, where,
notationally, we shall have
\begin{equation*}
  h_n=\{(a_k,b_k')\colon k<n\}\cup\{(a_k',b_k)\colon k<n\}.
\end{equation*}
We let $h_0=\emptyset$.
Suppose we have $h_n$ so that the tuples
\begin{equation*}
  (a_0,a_0',\dots,a_{n-1},a_{n-1}'), 
\quad\text{ and }\quad (b_0',b_0,\dots,b_{n-1}',b_{n-1})
\end{equation*}
have the same 
\defnplain{order-type}.%
\index{order!---{}-type}  This means that, if we write these tuples
as $(c_0,\dots,c_{2n-1})$ and $(c_0',\dots,c_{2n-1}')$ respectively,
then
\begin{equation*}
  c_i<c_j\Iff c_i'<c_j'
\end{equation*}
for all $i$ and $j$ in $2n$.  Since $\str B$ is a dense total order
without endpoints, we can choose $b_n'$ so that
\begin{equation*}
  (a_0,a_0',\dots,a_{n-1},a_{n-1}',a_n) \quad\text{ and }\quad
  (b_0',b_0,\dots,b_{n-1}',b_{n-1},b_n') 
\end{equation*}
have the same order-type.  Likewise, we can choose $a_n'$ so that
\begin{equation*}
  (a_0,a_0',\dots,a_n,a_n'), \quad\text{ and }\quad
  (b_0',b_0,\dots,b_n',b_n) 
\end{equation*}
have the same order-type.  Now let
$h_{n+1}=h_n\cup\{(a_n,b_n'),(a_n',b_n)\}$. 
\end{proof}
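
In this construction, each $a_n$ is in the domain of $h_{n+1}$, and
each $b_n$ is in its range; so $h$ is defined on all of $A$, and its
range is all of $B$.  Since each $h_n$ preserves order in both
directions, $h$ is injective, and it is the desired isomorphism.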

\begin{corollary*}
  $I(T_3,\vnn)=3$.
\end{corollary*}

\begin{proof}
Suppose $\str B$ is a countable model of $T_3$.  The
interpretation in $\str B$ of each formula
\begin{equation*}
  c_{n+1}<x\land x<c_n
\end{equation*}
is (when equipped with the ordering induced from $\str B$) a countable
model of $\TO^*$.  The same
is true for the formula $c_0<x$.  Finally, the set
\begin{equation*}
  \bigcap_{n\in\vnn}\{b\in B\colon b<c_n\}
\end{equation*}
is one of the following:
\begin{enumerate}
  \item
empty;
\item
a countable model of $\TO^*$;
\item
a countable dense total order with a greatest point, but no least
point. 
\end{enumerate}
Then the previous theorem allows us to construct an isomorphism
between $\str B$ and $\str A_0$, $\str A_1$ or $\str A_2$
respectively.
\end{proof}

The following is really a corollary of Theorem~\ref{thm:TO-QE}:

\begin{theorem}\label{thm:T3-QE}
  $T_3$ admits elimination of quantifiers.
\end{theorem}

\begin{proof}
  Any formula $\phi(\tuple x)$ of $\{<,c_0,c_1,\dots\}$ can be
  considered as
  \begin{equation*}
    \theta(\tuple x,c_0,\dots,c_{n-1})
  \end{equation*}
for some formula $\theta$ of $\{<\}$.  By quantifier-elimination in
$\TO^*$, there is an open formula $\alpha$ of $\{<\}$ such that
\begin{equation*}
  \TO^*\models\Forall{\tuple x}\Forall {\tuple y}(\theta(\tuple
  x,\tuple y)\land\bigwedge_{i+1<n}y_{i+1}<y_i\iff \alpha(\tuple
  x,\tuple y)).
\end{equation*}
But $T_3\models c_{i+1}<c_i$, and $T_3\models\TO^*$; so 
\begin{equation*}
  T_3\models\Forall {\tuple x}(\theta(\tuple x,\tuple
  c)\iff\alpha(\tuple x,\tuple c)).
\end{equation*}
Thus $T_3$ admits quantifier-elimination.
\end{proof}

\begin{corollary*}
  $T_3$ is complete.
\end{corollary*}

\begin{proof}
  The three countable models $\str A_k$ form a chain:
  \begin{equation*}
    \str A_0\included\str A_1\included\str A_2.
  \end{equation*}
By quantifier-elimination (Theorem~\ref{thm:T3-QE}) and the Diagram
Lemma, the chain is
elementary:
\begin{equation*}
  \str A_0\elsub\str A_1\elsub\str A_2.
\end{equation*}
In particular, the three structures are elementarily equivalent.  Now,
if $\str B$ is an arbitrary model of $T_3$, then it is infinite, so
$\str B\equiv\str C$ for some countably infinite structure $\str C$ by
the Downward L\"owenheim--Skolem
Theorem (\ref{thm:DLS}).  But $\str C\cong\str A_k$ for some $k$, by
the corollary to Cantor's Theorem.   Hence $\str B\equiv\str A_0$.
Thus 
\begin{equation*}
  T_3\models\Th{\str A_0};
\end{equation*}
so $T_3$ is complete.
\end{proof}

\section{Omitting types}

Since there is a sound, complete proof system for first-order logic,
we may say that a set of sentences is \defn{consistent}{} if
it has a model.   
An 
\defnplain{$n$-type}{}%
\index{type!$n$-type} 
of a signature $\lang$ is a set of $n$-ary formulas
of $\lang$.  
An $n$-type $\Phi$ of $\lang$ is \defn{realized}{} by $\tuple a$ in an
$\lang$-structure $\str A$ if
\begin{equation*}
  \str A\models\phi(\tuple a)
\end{equation*}
for all $\phi$ in $\Phi$.  A type not realized in a structure is
\defnplain{omitted}{}%
\index{omit!type ---ted by structure} by the structure.
If a consistent theory $T$ of $\lang$ is specified, then
an \defnplain{$n$-type of}{} $T$ is an $n$-type $\Phi$ that is
\defnplain{consistent with}{} $T$:\index{consistent!--- with $T$}  This
means that $\Phi$ is realized in 
some model of $T$.  Equivalently, it means that, if $\tuple c$ is an
$n$-tuple of new constants, then the set
\begin{equation*}
  T\cup\{\phi(\tuple c)\colon \phi\in\Phi\}
\end{equation*}
is consistent.
  By Compactness, for $\Phi$ to be consistent with
$T$, it is sufficient that
\begin{equation*}
  T\cup\{\Exists{\tuple x}\bigwedge\Phi_0\}
\end{equation*}
be consistent for all finite subsets $\Phi_0$ of $\Phi$.
By Compactness also, for \emph{any} collection of
types consistent with $T$, there is a model of $T$ in which all of the
types are realized.
An $n$-type $\Phi$ of $T$ is 
\defnplain{isolated}{}%
\index{isolated type}
 in $T$ by an $n$-ary
formula $\psi$ if:
\begin{enumerate}
  \item
$T\cup\{\Exists{\tuple x}\psi\}$ is consistent;
\item
$T\models\Forall{\tuple x}(\psi\lto\phi)$
for all $\phi$ in $\Phi$.
\end{enumerate}
Hence, if $\psi$ is satisfied by $\tuple a$ in a model of $T$, then
$\tuple a$ realizes $\Phi$.  Also, if $T$ is complete, then
$T\models\Exists{\tuple x}\psi$, so $\Phi$ is realized in \emph{every}
model of $T$.
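
For example, the $1$-type $\{x<c_n\colon n\in\vnn\}$ of the theory
$T_3$ of the last section is realized in the models $\str A_1$ and
$\str A_2$, but omitted by $\str A_0$.  This type is not isolated in
$T_3$: by Theorem~\ref{thm:T3-QE}, any formula isolating it would be
equivalent in $T_3$ to an open formula in $x$ and finitely many of the
constants $c_n$, and no such formula that is consistent with $T_3$
implies $x<c_n$ for every $n$.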

A theory is 
\defnplain{countable}{}%
\index{countable!--- theory}%
\index{theory!countable ---}
 if, in
its signature, only countably many formulas are inequivalent in $T$.
It turns out that, in a \emph{countable} theory, being isolated is
the only barrier to being omitted by some model:

\begin{theorem}[Omitting Types]%
\index{theorem!Omitting Types Th---}%
\index{omit!O---ting Types Theorem}
  Suppose $T$ is a countable theory, and $\Phi$ is a non-isolated
  $1$-type of $T$.  Then $\Phi$ is omitted by some countable model of
  $T$.
\end{theorem}

\begin{proof}
  We adjust our proof of the Compactness Theorem.  As there, we
  introduce a set $C$ of new constants $c_n$ (where $n\in\vnn$).  We
  enumerate $\Sn[\lang\cup C]$ as $\{\sigma_n\colon n\in\vnn\}$.  We
  construct a chain
  \begin{equation*}
    T=\Sigma_0\included\Sigma_1\included\dotsb
  \end{equation*}
as follows.  Assume $\Sigma_{3n}$ is consistent.  Then let
\begin{equation*}
  \Sigma_{3n+1}=
  \begin{cases}
    \Sigma_{3n}\cup\{\sigma_n\},&\text{ if this is consistent;}\\
    \Sigma_{3n},                &\text{ otherwise.}
  \end{cases}
\end{equation*}
Now let
\begin{equation*}
  \Sigma_{3n+2}=
  \Sigma_{3n+1}\cup\{\phi(c_k)\},
\end{equation*}
where $k$ is minimal such that $c_k$ does not appear in
  $\Sigma_{3n+1}$, if $\sigma_n\in\Sigma_{3n+1}$ and $\sigma_n$ is
  $\Exists x\phi$; otherwise,
$\Sigma_{3n+2}=  \Sigma_{3n+1}$.  Finally, let
\begin{equation*}
  \Sigma_{3n+3}=\Sigma_{3n+2}\cup\{\lnot\psi(c_n)\},
\end{equation*}
where $\psi$ is an element of $\Phi$ such that
$\Sigma_{3n+2}\cup\{\lnot\psi(c_n)\}$ is consistent.   But we have to
check that there \emph{is} such a formula $\psi$ in $\Phi$.  \emph{If}
there is, then we can let
\begin{equation*}
  \Sigma^*=\bigcup_{n\in\vnn}\Sigma_n.
\end{equation*}
Then $\Sigma^*$ has a countable model $\str A$ (as in the proof of
Compactness) such that every element of $A$ is $c^{\str A}$ for some
$c$ in $C$.  But by construction, no such element can realize $\Phi$;
so $\str A$ omits $\Phi$.

Now, in the definition of $\Sigma_{3n+3}$, the formula $\psi$ exists
as desired because the set $\Sigma_{3n+2}\setminus T$ is
\emph{finite}.  In particular, the formulas in this set use only
finitely many constants from $C$.  We may assume that these constants
form a tuple $(c_n,\tuple d)$.  Then we can write
$\bigwedge(\Sigma_{3n+2}\setminus T)$ as a sentence
\begin{equation*}
  \phi(c_n,\tuple d),
\end{equation*}
where $\phi$ is a certain formula of $\lang$.  Now, if
\begin{equation*}
  \Sigma_{3n+2}\models\psi(c_n)
\end{equation*}
for some formula $\psi$, then (\exercise)
\begin{equation*}
  T\models\phi(c_n,\tuple d)\lto\psi(c_n),
\end{equation*}
and hence (\exercise)
\begin{equation*}
  T\models\Forall x(\Exists {\tuple y}\phi(x,\tuple y)\lto\psi(x)). 
\end{equation*}
Since $\Phi$ is not isolated in $T$, it is not isolated by
$\Exists{\tuple y}\phi$.  Therefore the set
$\Sigma_{3n+2}\cup\{\lnot\psi(c_n)\}$ must be consistent for some
$\psi$ in $\Phi$. 
\end{proof}

In the proof, it is essential that $\Sigma_n\setminus T$ is finite;
the proof can't be generalized to the case where $T$ is uncountable.
But the proof \emph{can} be generalized to yield the following:

\begin{porism*}
  Suppose $T$ is a countable theory, and $\Phi_k$ is a non-isolated
  $n$-type of $T$ for some $n$ (depending on $k$), for each $k$ in
  $\vnn$.  Then $T$ has a countable model omitting each $\Phi_k$.
\end{porism*}

An $n$-type $\Phi$ of a theory $T$ is called
\defnplain{complete}{}\index{complete!--- type}\index{type!complete ---} if
\begin{equation*}
  \phi\notin\Phi\Iff\lnot\phi\in\Phi
\end{equation*}
for all $n$-ary formulas $\phi$ of $\lang$.  Any $n$-tuple $\tuple a$
of elements of a model $\str A$ of $T$ determines a complete $n$-type
of $T$, namely
\begin{equation*}
  \{\phi\colon \str A\models\phi(\tuple a)\};
\end{equation*}
this is the \defnplain{(complete) type of $\tuple a$ in $\str
  A$}{}\index{complete!--- type}\index{type!complete ---} and can be
denoted by
\begin{equation*}
  \tp{\str A}{\tuple a}.
\end{equation*}
If $\Phi$ is an arbitrary $n$-type of $T$, then some $\tuple a$ from
some model $\str A$ of $T$ realizes $\Phi$, and therefore
\begin{equation*}
  \Phi\included  \tp{\str A}{\tuple a}.
\end{equation*}
In particular, every type of $T$ is included in a complete type of $T$.

The set of complete $n$-types of $T$ can
be denoted by
\begin{equation*}
  \ts nT;
\end{equation*}
then we can let $\bigcup_{n\in\vnn}\ts nT$ be denoted by
\begin{equation*}
  \ts{}T.
\end{equation*}
So the Omitting Types Theorem gives us that, if $T$ is countable and
$\size{\ts{}T}\leq\vnn$, then $T$ has a countable model that omits
\emph{all} non-isolated types of $T$.
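
For example, $\size{\ts 1{\TO^*}}=1$: the atomic formulas in the
single variable $x$ are $x<x$ and $x=x$, so, by
quantifier-elimination, every $1$-ary formula of $\{<\}$ is equivalent
in $\TO^*$ to $x=x$ or to its negation; hence there is just one
complete $1$-type consistent with $\TO^*$.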

A structure that realizes no non-isolated types of
its theory is called \defn{atomic}.  For example, the iterative structure
 $(\vnn,0,{}')$ is atomic, because each of its elements $k$ is the
interpretation of the term $0^{(k)}$, so that the complete type of any
tuple $(k_0,\dots,k_{n-1})$ is isolated by
$\varble_0=0^{(k_0)}\land\dotsb\land\varble_{n-1}=0^{(k_{n-1})}$. 

The order $(\vnn,<)$ is atomic, because, for each element $k$, there
is a formula $\phi_k$ such that $\phi_k{}^{(\vnn,<)}=\{k\}$ (\exercise).

\begin{theorem}\label{thm:types-embeddings}
  If $h\colon \str A\overset{\equiv}{\to}\str B$, and $\tuple a\in
  A^n$, then
  \begin{equation*}
\tp{\str B}{h(\tuple a)}=\tp{\str A}{\tuple a};    
  \end{equation*}
thus $\str B$ realizes all types realized in $\str A$.
\end{theorem}

\begin{proof}
  Let $\Phi=\tp{\str A}{\tuple a}$.  Then $\Phi$ is a complete type,
  and $\{\phi(\tuple a)\colon\phi\in\Phi\}\included\Th{\str A_A}$, so
  $h(\tuple a)$ realizes $\Phi$ in $\str B$ by
  the Diagram Lemma. 
\end{proof}

Hence for example if $h$ is an automorphism of $\str A$, then $\tuple
a$ and $h(\tuple a)$ have the same complete type.
For any two elements $a$ and $b$ of an infinite set, there is an
automorphism that takes $a$ to $b$.  Therefore the infinite set is
atomic (\exercise).

However, the theory in signature $\{P_n\colon n\in\vnn\}$ in
 \S\ref{sect:categoricity} has \emph{no} atomic
models.  Indeed, for every formula $\phi$ in this signature, there is
 a predicate $P_k$ that does not appear in $\phi$.  Then both
 $\phi\land P_k\varble_0$ and
 $\phi\land\lnot P_k\varble_0$ are consistent (\exercise), so $\phi$
 does not isolate a complete type.

\section{Prime structures}

A structure is 
\defnplain{prime}{}%
\index{prime structure, model} 
if it embeds elementarily in
every model of its theory; if that theory is $T$, then the structure
is a \defnplain{prime model of $T$}.  (Note then that only complete
theories can have prime models, simply because a prime model is
elementarily equivalent to all other models.)

  If $T$ admits quantifier-elimination, then by
  the Diagram Lemma, all embeddings of models
  of $T$ are elementary 
  embeddings.  Hence, for example, a countably infinite set is a prime
  model of the theory of infinite sets.  Also, $(\Q,<)$ embeds in
  every model of $\TO^*$, so it is a prime model.

By the Downward L\"owenheim--Skolem Theorem, a model of a countable
theory $T$ is 
prime, provided it embeds elementarily in all \emph{countable} models
of $T$.  In particular then, if $T$ is $\vnn$-categorical, then its
countable model is prime.



\begin{theorem}[Vaught]\label{thm:prime-atomic}%
\index{theorem!Vaught's Th---}%
\index{Vaught!---'s Theorem}
  Suppose $T$ is a countable complete theory.  Then the prime models
  of $T$ are precisely the countable atomic models of $T$.
\end{theorem}

\begin{proof}
Suppose $\str A\models T$.

  ($\Rightarrow$)  If $\str A$ is not countable,
  then $\str A$ cannot embed in countable models of $T$ (which must
  exist, by the Upward L\"owenheim--Skolem Theorem,~\ref{thm:LST}), so
  $\str A$ cannot be prime. 

If $\str A$ is not atomic, then $\str A$ realizes some non-isolated
type $\Phi$ of $T$.  But by the Omitting-Types Theorem, $T$ has a
countable model $\str B$ that omits $\Phi$.  Then $\str A$ cannot
embed elementarily in $\str B$, by Theorem~\ref{thm:types-embeddings}.

($\Leftarrow$)   Suppose $\str A$ is countable and atomic, and $\str
B\models T$.  We construct an elementary embedding of $\str A$ in
$\str B$ by the back-and-forth method, except that the construction is
in only one
direction. Write $A$ as
$\{a_n\colon n\in\vnn\}$.  Then each $\tp{\str A}{a_0,\dots,a_{n-1}}$ is
isolated in $T$ by some formula $\phi_n$.  Then we have
\begin{enumerate}
  \item
$T\models\Exists{\varble_0}\dotsb\Exists{\varble_{n-1}}\phi_n$;
\item
$T\models\Forall{\varble_0}\dotsb\Forall{\varble_{n-1}}
  (\phi_n\lto\Exists{\varble_n}\phi_{n+1})$.   
\end{enumerate}
Hence we can recursively find $b_k$ in $B$ so that
\begin{equation*}
  \str B\models\phi_n(b_0,\dots,b_{n-1})
\end{equation*}
for all $n$ in $\vnn$.
Now, every sentence in $\Th{\str A_A}$ is $\theta(a_0,\dots,a_{n-1})$
for some formula $\theta$ of $\lang$.  Then
\begin{equation*}
  T\models\Forall {\tuple{\varble}}(\phi_n\lto\theta),
\end{equation*}
so $\str B\models\theta(\tuple b)$.  Therefore the function $a_k\mapsto
b_k$ from $A$ to $B$ is an elementary embedding of $\str A$ in $\str B$.
\end{proof}

\begin{porism*}%\label{por:prime-isom}
  All prime models of a countable complete theory are isomorphic.
\end{porism*}

\begin{proof}
 In the proof that $\str A$ embeds elementarily in $\str B$, if we
 assume also that $\str B$ is countable and atomic, then the full
 back-and-forth method gives an isomorphism between the structures.  
\end{proof}

\begin{theorem}\label{thm:prime-existence}
Let $T$ be a countable complete theory.
\begin{enumerate}
  \item
  If $I(T,\vnn)\leq\vnn$, then $\size{\ts{}T}\leq\vnn$.
\item
If $\size{\ts{}T}\leq\vnn$, then $T$ has a prime
  model.
\end{enumerate}
\end{theorem}

\begin{proof}
Suppose $\size{\ts{}T}>\vnn$.  Since there are only countably many
formulas, there are only countably many isolated complete types;
hence there are uncountably many non-isolated complete types.  Let
$C$ be a countably infinite set of such types.  For every
subset $D$ of $C$, there is a countable model of $T$ that realizes
every type in $D$, but no type in $C\setminus D$.  There are
uncountably many choices for $D$, but different choices yield
non-isomorphic countable models of $T$.  Thus $I(T,\vnn)>\vnn$.  (In
fact, $I(T,\vnn)=\size{2^{\vnn}}$.)

If $\size{\ts{}T}\leq\vnn$, then $T$ has a countable atomic model by
Omitting Types, hence a prime model by Theorem~\ref{thm:prime-atomic}.
\end{proof}

Note however that $(\vnn,0,{}',<)$ is a prime model of its theory $T$,
but $I(T,\vnn)=\size{2^{\vnn}}$.

\section{Saturated structures}

A \defn{saturated structure}{} is the opposite of an atomic structure.
Atomic structures realize as \emph{few} types as possible.  Saturated
structures realize as \emph{many} types as possible; moreover, these
types are allowed to have parameters from the structure. 

To be precise, let $\str M$ be an infinite $\lang$-structure, and let
$A\included M$.  In this context, the set $\ts n{\Th{\str M_A}}$ can be
denoted by
\begin{equation*}
  \ts nA.
\end{equation*}
Consider the special case where $A$ is $M$ itself.  The set $\ts 1M$,
for example, contains types that include the type
\begin{equation*}
  \{x\neq a\colon a\in M\}.
\end{equation*}
These types cannot be realized in $\str M$.  So we say that $\str M$
is \defn{saturated}{}, provided that, whenever $A\included M$ and $\size
A<\size M$, each type in $\ts{}A$ is realized in $\str M$.  (In
particular, if
$\str M$ is countable here, then the sets $A$ should be finite.)
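
For example, $(\Q,<)$ is saturated.  Since the structure is countable,
the relevant sets $A$ are the finite subsets $\{a_0,\dots,a_{n-1}\}$
of $\Q$, where say $a_0<\dotsb<a_{n-1}$.  By quantifier-elimination, a
complete $1$-type over $A$ places $x$ either at some $a_i$ or in one
of the $n+1$ intervals into which the $a_i$ divide $\Q$; since the
ordering is dense and has no endpoints, every such type is realized in
$\Q$.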

We can construct saturated models by means of 
\defnplain{chain}{s,}%
\index{chain of structures}
namely, sequences $(\str A_n\colon n\in\vnn)$ of structures such that
$\str A_n\included\str A_{n+1}$.  Since $\included$ is transitive, we
may write the chain as
\begin{equation*}
  \str A_0\included \str A_1\included\str A_2\included\dotsb.  
\end{equation*}
If each inclusion is elementary, then the chain is called
\defnplain{elementary};
\index{elementar!---y chain of structures}%
\index{chain of structures!elementary ---}
since the relation $\elsub$ is
transitive (\exercise), we may write the elementary chain as
\begin{equation*}
  \str A_0\elsub \str A_1\elsub\str A_2\elsub\dotsb.  
\end{equation*}
The union of a chain is a structure of which all the links in the
chain are substructures. 

\begin{theorem}[Tarski--Vaught]%
\index{theorem!Tarski--Vaught Th---}%
\index{Tarski!---{}--Vaught Theorem}%
\index{Vaught!Tarski--{}--- Theorem}
  The union of an elementary chain is an elementary extension of all
  of the links.\hfill\qedsymbol
\end{theorem}

\begin{theorem}\label{thm:saturated}
  Suppose $T$ is countable and complete, and
  $\size{\ts{}T}\leq\vnn$.  Then
  $T$ has a 
  countable saturated model.
\end{theorem}

\begin{proof}
  Suppose $\str M$ is a countable model of $T$.  If $A$ is a finite
  subset $\{a_k\colon k<n\}$ of $M$, then each element of $\ts mA$
  is
  \begin{equation*}
    \{\phi(\varble_0,\dots,\varble_{m-1},a_0,\dots,a_{n-1})\colon \phi\in p\}
  \end{equation*}
for some $p$ in $\ts{m+n}{T}$.  Hence $\size{\ts{}A}$ is countable.
Therefore the set
\begin{equation*}
  \bigcup\{\ts{}A\colon A\text{ is a finite subset of }M\}
\end{equation*}
is countable.  So all of the types in this set are realized in a
countable elementary extension $\str M'$ of $\str M$ (by Compactness
and the L\"owenheim--Skolem Theorem).
Thus, if $\str M_0$ is a countable model of $T$, then we can form an
elementary chain
\begin{equation*}
  \str M_0\elsub \str M_1\elsub\str M_2\elsub\dotsb
\end{equation*}
where $\str M_{n+1}=\str M_n{}'$.  Let $\str N$ be the union of this
chain; by the Tarski--Vaught Theorem, $\str N$ is a countable
elementary extension of each $\str M_n$, and in particular a model of
$T$.  Every finite
subset $A$ of $N$ is a subset of some $M_n$, and so the types of
$\ts{}A$ are realized in $\str M_{n+1}$, hence in $\str N$.  So $\str
N$ is saturated.
\end{proof}

If $A$ is a finite subset $\{a_k\colon k<n\}$ of $M$, and $\tuple a$ is
$(a_0,\dots,a_{n-1})$, we can denote $\str M_A$ by
\begin{equation*}
  (\str M,\tuple a).
\end{equation*}
If $\str M$ is countable, then $\str M$ is called 
\defnplain{homogeneous}{}%
\index{homogeneous structure}
if
\begin{equation*}
  \tp{\str M}{\tuple a}=\tp{\str M}{\tuple b}\implies(\str M,\tuple
  a)\cong(\str M,\tuple b)
\end{equation*}
for all $n$-tuples $\tuple a$ and $\tuple b$ from $M$, for all $n$ in
$\vnn$. 

\begin{theorem}\label{thm:homog}
  Countable saturated structures are homogeneous.
\end{theorem}

\begin{proof}
  The back-and-forth method.  Suppose $\tp{\str M}{\tuple a}=\tp{\str
  M}{\tuple b}$.  Since $\str M$ is saturated, for each $c$ in $M$
  there is some $d$ in $M$ such that $\tp{\str M}{\tuple
  a,c}=\tp{\str M}{\tuple b,d}$, and symmetrically.  Listing the
  elements of $M$ and going back and forth, we extend $\tuple
  a\mapsto\tuple b$ to an isomorphism between $(\str M,\tuple a)$ and
  $(\str M,\tuple b)$.
\end{proof}

\section{One model}

For the sake of stating and proving the following theorem more easily,
we can use the following notation.  Suppose $T$ is a theory of
$\lang$.  Then equivalence in $T$ (where $\phi$ and $\psi$ are
equivalent in $T$ if $T\models\Forall{\tuple x}(\phi\iff\psi)$) is an
equivalence-relation on the set of $n$-ary
formulas of $\lang$.  Let the set of corresponding equivalence-classes
be denoted by
\begin{equation*}
  \LT nT.
\end{equation*}

\begin{theorem}[Ryll-Nardzewski]%
\index{theorem!Ryll-Nardzewski's Th---}%
\index{Ryll-Nardzewski's Theorem}
  Suppose $T$ is a countable complete theory.  The following
  statements are equivalent:
  \begin{enumerate}
    \item\label{item:RN1}
$I(T,\vnn)=1$.
\item\label{item:RN2}
All types of $T$ are isolated.
\item\label{item:RN3}
Each set $\LT nT$ is finite.
\item\label{item:RN4}
Each set $\ts nT$ is finite.
  \end{enumerate}
\end{theorem}

\begin{proof}
\eqref{item:RN1}$\Rightarrow$\eqref{item:RN2}:  If $\ts{}T$ contains a
  non-isolated type, then it is realized in some countable model of
  $T$ (by Compactness and L\"owenheim--Skolem), but omitted in another
  (by the Omitting Types Theorem), so $I(T,\vnn)>1$.

\eqref{item:RN2}$\Rightarrow$\eqref{item:RN1}:  If all types of $T$
   are isolated, then all 
   models of $T$ are atomic, so all \emph{countable} models of $T$ are
   prime and therefore isomorphic.

\eqref{item:RN3}$\Rightarrow$\eqref{item:RN4}:  Immediate.

\eqref{item:RN4}$\Rightarrow$\eqref{item:RN2}\&\eqref{item:RN3}:
   Suppose $\ts nT=\{p_0,\dots,p_{m-1}\}$.  For 
   each $i$ and $j$ in $m$, if $i\neq j$, then there is a formula
   $\phi_{ij}$ in $p_i\setminus p_j$.  Let $\psi_i$ be the formula
   \begin{equation*}
     \bigwedge_{j\in m\setminus\{i\}}\phi_{ij}.
   \end{equation*}
Then $\psi_i$ is in $p_j$ if and only if $j=i$.  If $\str A\models T$,
and $\tuple a$ is an $n$-tuple from $A$, then $\tuple a$ realizes some
unique $p_i$, and then
$\str A\models \psi_i(\tuple a)$.
Conversely, if $\str A\models\psi_i(\tuple a)$, then $\tuple a$ must
realize $p_i$.  Therefore $\psi_i$ isolates $p_i$.

If $\chi$ is an
arbitrary $n$-ary formula, let $I=\{i\in m\colon \chi\in p_i\}$.  Then
\begin{equation*}
  T\models\Forall{\tuple x}(\chi\iff \bigvee_{i\in I}\psi_i).
\end{equation*}
There are only finitely many possibilities for $I$, so $\LT nT$ is
finite. 

\eqref{item:RN2}$\Rightarrow$\eqref{item:RN4}:  Suppose infinitely
many complete $n$-types are isolated
in $T$.  Since there are only countably many formulas, there must be
countably many such types.  Say they compose the set
   $\{p_k\colon k\in\vnn\}$, where each $p_k$
   is isolated by $\phi_k$.  If $j\neq k$, then $\phi_k\notin p_j$
   (else $p_j$ would include, and hence equal, $p_k$), so
   $\lnot\phi_k\in p_j$.  Thus every finite subset of the type
   \begin{equation*}
     \{\lnot \phi_k\colon k\in\vnn\}
   \end{equation*}
is included in some $p_j$, so by Compactness the type is consistent
with $T$.  It is not included in any of the $p_k$, so it
must be included in a non-isolated complete type.
\end{proof}

\begin{corollary*}
If $\str A$ is a structure in a countable signature, and $\tuple a$ is
a tuple from $A$, and $\Th{\str A,\tuple a}$ is $\vnn$-categorical,
  then so is $\Th{\str A}$.\hfill\qedsymbol  
\end{corollary*}
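
For example, let $T$ be the theory of dense linear orders without
endpoints.  One can show that a complete $n$-type of $T$ is determined
by which of the formulas
\begin{equation*}
  \varble_i<\varble_j,\qquad \varble_i=\varble_j,\qquad
  \varble_j<\varble_i
\end{equation*}
it contains, for each $i$ and $j$ in $n$.  Hence each $\ts nT$ is
finite, and therefore $I(T,\vnn)=1$ by Ryll-Nardzewski's Theorem, in
agreement with the classical back-and-forth proof that all countable
dense linear orders without endpoints are isomorphic.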

\section{Not two models}

\begin{theorem}[Vaught]%
\index{theorem!Vaught's Th---}%
\index{Vaught!---'s Theorem}
  If $T$ is a countable complete theory, then $I(T,\vnn)\neq2$.
\end{theorem}

\begin{proof}
Suppose $2\leq I(T,\vnn)\leq\vnn$.  Then $T$ has a prime model $\str
A$ by Theorem~\ref{thm:prime-existence}, and a saturated model $\str
B$, by Theorem~\ref{thm:saturated}.  Moreover, since $I(T,\vnn)>1$,
some complete type of $T$ is non-isolated, by Ryll-Nardzewski's
Theorem; being saturated, $\str B$ realizes this type in some tuple
$\tuple b$.  Suppose $(\str C,\tuple c)\equiv(\str B,\tuple b)$.
If $\str C\cong\str B$, then $(\str C,\tuple c)\cong(\str
B,\tuple a)$ for some $\tuple a$.  But then the types of $\tuple a$,
$\tuple c$, and $\tuple b$ are the same, so $(\str B,\tuple a)\cong(\str
B,\tuple b)$ by Theorem~\ref{thm:homog}, and therefore $(\str
C,\tuple c)\cong(\str B,\tuple b)$.  Since $T$ is not
$\vnn$-categorical, neither is $\Th{\str B,\tuple b}$, by the
corollary to Ryll-Nardzewski's Theorem; so this theory has a countable
model $(\str D,\tuple d)$ such that $(\str D,\tuple d)\ncong(\str
B,\tuple b)$, and hence, by what has just been shown, $\str
D\ncong\str B$.  Also $\str D\ncong\str A$, since $\str D$ realizes
the non-isolated type $\tp{\str B}{\tuple b}$, which the atomic model
$\str A$ omits.  Thus $I(T,\vnn)\geq3$.
\end{proof}

\section*{Exercises}

\begin{xca}
For each finite $n$ greater than $2$, find a theory $T$ such that
$I(T,\vnn)=n$. 
\end{xca}

\begin{xca}
  Supply the missing details in the proof of the Omitting Types
  Theorem. 
\end{xca}

\begin{xca}
Show that, for each $k$ in $\vnn$, there
is a formula $\phi_k$ such that $\phi_k{}^{(\vnn,<)}=\{k\}$.
\end{xca}

\begin{xca}
  Verify that an infinite set (considered as a structure in the empty
  signature) is atomic.
\end{xca}

\begin{xca}
  Let $T$ be the theory in signature $\{P_n\colon n\in\vnn\}$ given in
 \S\ref{sect:categoricity}.  Suppose $P_k$ does not appear in the
 formula $\phi$ in this signature.  Show that both
 $\phi\land P_k\varble_0$ and
 $\phi\land\lnot P_k\varble_0$ are consistent with $T$.
\end{xca}

\begin{xca}
  Prove the Tarski--Vaught Theorem on unions of elementary chains.
\end{xca}

\begin{xca}
  Prove the corollary to Ryll-Nardzewski's Theorem.
\end{xca}

\begin{xca}
  Prove the theorem of Chang \cite{MR0103812} and \L o\'s and Suszko
  \cite{MR0089813} that a theory has $\forall\exists$ axioms (that is,
  axioms $\Forall{\tuple x}\Exists{\tuple y}\phi$, where $\phi$ is
  open) if and only if, for all chains of models of the theory, the
  union is also a model.  Conclude for example that the union of a
  chain of fields is a field.  
\end{xca}


\appendix

%\chapter*{Appendix}
\chapter{The German script}\label{app:german}

Writing in 1993, Wilfrid Hodges \cite[Ch.~1, p.~21]{MR94e:03002} observes
\begin{quotation}
  Until about a dozen years ago, most model theorists named structures
  in horrible Fraktur lettering.  Recent writers sometimes adopt a
  notation according to which all structures are named $M$, $M'$,
  $M^*$, $\bar M$, $M_0$, $M_i$ or occasionally $N$.  
%I hope I cause no offence by using a more freewheeling notation.
\end{quotation}
For Hodges, structures are $A$, $B$, $C$, and so forth; he refers to
their universes as
\defnplain{domain}s%
\index{domain!--- of a structure}
and denotes these by $\dom A$ and so forth.  This practice is
convenient if one is using a typewriter (as in the preparation of
another of Hodges's books \cite{Hodges-Building}, from 1985).
In 2002, David Marker \cite{MR1924282} uses `calligraphic' letters for
structures, so that $M$ is the universe of~$\mathcal M$.
I still prefer the Fraktur letters:
%In \AmS\ \LaTeX\ (by which these notes are typeset) these letters are:
\begin{equation*}
  \begin{array}{ccccccc}
\mathfrak A&\mathfrak B&\mathfrak C&\mathfrak D&\mathfrak E&\mathfrak F&\mathfrak G\\
\mathfrak H&\mathfrak I&\mathfrak J&\mathfrak K&\mathfrak L&\mathfrak M&\mathfrak N\\
\mathfrak O&\mathfrak P&\mathfrak Q&\mathfrak R&\mathfrak S&\mathfrak T&\mathfrak U\\
           &\mathfrak V&\mathfrak W&\mathfrak X&\mathfrak Y&\mathfrak Z&
  \end{array}
\qquad\qquad
  \begin{array}{ccccccc}
\mathfrak a&\mathfrak b&\mathfrak c&\mathfrak d&\mathfrak e&\mathfrak f&\mathfrak g\\
\mathfrak h&\mathfrak i&\mathfrak j&\mathfrak k&\mathfrak l&\mathfrak m&\mathfrak n\\
\mathfrak o&\mathfrak p&\mathfrak q&\mathfrak r&\mathfrak s&\mathfrak t&\mathfrak u\\
           &\mathfrak v&\mathfrak w&\mathfrak x&\mathfrak y&\mathfrak z&
  \end{array}
\end{equation*}
A way to write these by hand is seen in a textbook
of German from 1931 \cite{German}:
\begin{center}
\includegraphics[width=6in,height=4.17in]{german-script-cropped.eps}
\end{center}


\chapter{The Greek alphabet}\label{app:greek}


  \begin{center}
      \begin{tabular}{c c c c}
capital&minuscule&transliteration&name\\\hline
\Gk{A}& \Gk{a} & a & alpha \\ 
\Gk{B}& \Gk{b} & b & beta   \\ 
\Gk{G}& \Gk{g} & g & gamma \\ 
\Gk{D}& \Gk{d} & d & delta \\ 
\Gk{E}& \Gk{e} & e & epsilon\\ 
%&\Gk{\stigma}& stigma\\
\Gk{Z}& \Gk{z} & z & zeta   \\ 
\Gk{H}& \Gk{h} & \^e & eta \\ 
 \Gk{J}& \Gk{j} & th & theta \\
%\hline 
 \Gk{I}& \Gk{i} & i & iota\\ 
\Gk{K}& \Gk{k} & k & kappa\\ 
 \Gk{L}& \Gk{l} & l & lambda\\ 
 \Gk{M}& \Gk{m} & m & mu \\
 \Gk{N}& \Gk{n} & n & nu \\ 
 \Gk{X}& \Gk{x} & x & xi \\ 
 \Gk{O}& \Gk{o} & o & omicron\\ 
 \Gk{P}& \Gk{p} & p & pi\\ 
%&\Gk{\qoppa}& koppa\\
%\hline
 \Gk{R}& \Gk{r} & r & rho\\ 
 \Gk{S}& \Gk{s, c} & s & sigma \\
 \Gk{T}& \Gk{t} & t & tau \\ 
 \Gk{U}& \Gk{u} & y, u & upsilon \\ 
 \Gk{F}& \Gk{f} & ph & phi\\ 
 \Gk{Q}& \Gk{q} & ch & chi\\ 
 \Gk{Y}& \Gk{y} & ps & psi\\ 
 \Gk{W}& \Gk{w} & \^o & omega\\
%&\Gk{\sampi}& sampi\\
\hline
  \end{tabular}
  \end{center}

The following remarks pertain to \emph{ancient} Greek.
%Each Greek letter has a name, written here in Latin letters; the first
%letter or two of the Latin name provides a transliteration for the
%Greek letter.  
The vowels are 
\begin{center}
\Gk{a, e, h, i, o, u, w,} 
\end{center}
where
\Gk h is a long \Gk e, and \Gk w is a long \Gk o; the other vowels
(\Gk{a, i, u}) can be long or short.  
Some vowels may be given tonal accents
(\Gk{'a, ~a, `a}).  
An initial vowel takes either a rough\-{}-breathing mark (as in
\Gk{<a}) or a 
smooth\-{}-breathing mark (\Gk{>a}): the former mark is transliterated
by a preceding
\lett h; the latter can be ignored:
\begin{quote}
  \Gk{<uperbol'h} hyperbol\^e \emph{hyperbola;}\qquad
\Gk{>orjog'wnion} orthog\^onion \emph{rectangle.}
\end{quote}
Likewise, \Gk{<r} is
transliterated as \lett{rh}:  
\begin{quote}
  \Gk{<r'omboc} rhombos \emph{rhombus.}
\end{quote}
A long vowel may have an iota subscript, as in \Gk{<~h|} \emph{qu\^a}
(see p.~\pageref{qua}).
Of the two forms of minuscule sigma, the \Gk{c} appears at
the ends of words; elsewhere, \Gk{sv} appears: 
\begin{quote}
\Gk{b'asic} basis
\emph{base.} 
\end{quote}


\appendix               % Because of the preceding switching of
			% languages, this is needed to have
			% ``appendix'' rather than ``chapter'' appear
			% in the table of contents  
\setcounter{chapter}{2}
\chapter{The natural numbers}\label{app:N}

\numberwithin{theorem}{chapter}
\numberwithin{lemma}{chapter}

\setcounter{theorem}{0}

%The proofs of all unproved propositions here are left as exercises
%for the reader.  

  \begin{definition}[Addition]\label{def:add}
    For each $m$ in $\N$, the operation $x\mapsto m+x$ on $\N$ is the
    unique homomorphism from $(\N,1,\scr)$ to $(\N,m\sscr,\scr)$
    guaranteed by the Recursion Theorem (\ref{thm:recursion-in-N}).
    That is,  
    \begin{equation}\label{eqn:+}
      \begin{aligned}
      m+1&=m\sscr,\\
m+n\sscr&=(m+n)\sscr.
    \end{aligned}
    \end{equation}
  \end{definition}
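
For example, writing $2$ for $1\sscr$, $3$ for $2\sscr$, and $4$ for
$3\sscr$, the equations~\eqref{eqn:+} alone yield
\begin{equation*}
  2+2=2+1\sscr=(2+1)\sscr=2\sscr{}\sscr=3\sscr=4.
\end{equation*}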

  \begin{lemma}\mbox{}
%    For all $n$ and $m$ in $\N$,
    \begin{enumerate}
      \item
$1+n=n\sscr$;
\item
$m\sscr+n=(m+n)\sscr$.\hfill\qedsymbol
    \end{enumerate}
  \end{lemma}

  \begin{theorem}\label{thm:add}\mbox{}
%    For all $n$, $m$, and $k$ in $\N$,
    \begin{enumerate}
\item
$n+m=m+n$;
\item\label{item:+ass}
$(n+m)+k=n+(m+k)$.\hfill\qedsymbol
    \end{enumerate}
  \end{theorem}


Addition exists with
the foregoing properties in every inductive structure (in the sense of
\S\ref{sect:nat}).   
Edmund Landau \cite{MR12:397m} shows this implicitly.  See Leon
Henkin \cite{MR0120156} and Alexandre Borovik
\cite{Borovik-Shadows-0.62} for explicit discussion.  

  \begin{theorem}
    In any inductive structure
    there is an operation of addition satisfying~\eqref{eqn:+} and
    hence the lemma and theorem.
  \end{theorem}

  \begin{proof}
    Let $M$ be the set of $m$ in the structure for which there is a
    singulary operation $x\mapsto m+x$ as desired.  Then $1\in M$,
    since if we define $1+x$ as $x\sscr$, then
    \begin{gather*}
      1+1=1\sscr,\\
1+n\sscr=n\sscr{}\sscr=(1+n)\sscr.
    \end{gather*}
Suppose $k\in M$.  If we define $k\sscr+x$ as $(k+x)\sscr$, then
\begin{gather*}
  k\sscr+1=(k+1)\sscr=k\sscr{}\sscr,\\
k\sscr+n\sscr=(k+n\sscr)\sscr=(k+n)\sscr{}\sscr=(k\sscr+n)\sscr,
\end{gather*}
so $k\sscr\in M$.  By induction, all $m$ are in $M$.  The earlier
lemma and theorem can likewise be proved by induction alone.
  \end{proof}

  \begin{definition}[Multiplication]\label{def:mul}
    For each $m$ in $\N$, the operation $x\mapsto m\cdot x$ on $\N$ is
    the
unique homomorphism from $(\N,1,\scr)$ to $(\N,m,x\mapsto x+m)$
    guaranteed by the Recursion Theorem (\ref{thm:recursion-in-N}).
That is,
\begin{equation}\label{eqn:.}
      \begin{aligned}
      m\cdot1&=m,\\
m\cdot(n+1)&=m\cdot n+m.
    \end{aligned}
\end{equation}
  \end{definition}
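
For example, with the numerals as above, the equations~\eqref{eqn:.}
and~\eqref{eqn:+} yield
\begin{equation*}
  2\cdot2=2\cdot(1+1)=2\cdot1+2=2+2=4.
\end{equation*}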

  \begin{lemma}\mbox{}
%    For all $n$ and $m$ in $\N$,
    \begin{enumerate}
      \item
$1\cdot n=n$;
\item
$(m+1)\cdot n=m\cdot n+n$.\hfill\qedsymbol
    \end{enumerate}
  \end{lemma}

  \begin{theorem}\label{thm:mul}
    For all $n$, $m$, and $k$ in $\N$,
    \begin{enumerate}
\item
$n\cdot m=m\cdot n$;
\item
$n\cdot(m+k)=n\cdot m+n\cdot k$;
\item\label{item:.ass}
$(n\cdot m)\cdot k=n\cdot (m\cdot k)$.\hfill\qedsymbol
    \end{enumerate}
  \end{theorem}

As before, only induction has been required so far.

\begin{theorem}
    In any inductive structure (in the sense of \S\ref{sect:nat})
    there is an operation of multiplication satisfying~\eqref{eqn:.} and
    hence the lemma and theorem.  \hfill\qedsymbol
\end{theorem}

The next theorem \emph{does} need recursion.

  \begin{theorem}[Cancellation]%
\index{theorem!Cancellation Th---}
\index{Cancellation Theorem}
\mbox{}
    \begin{enumerate}
      \item
if $n+k=m+k$, then $n=m$;
\item
if $n\cdot m=1$, then $n=1$ and $m=1$;
\item
if $n\cdot k=m\cdot k$, then $n=m$.\hfill\qedsymbol
    \end{enumerate}
  \end{theorem}

  \begin{definition}[Exponentiation]
    For each $m$ in $\N$, the operation $x\mapsto m^x$ on $\N$ is the 
unique homomorphism from $(\N,1,\scr)$ to $(\N,m,x\mapsto x\cdot m)$
    guaranteed by the Recursion Theorem (\ref{thm:recursion-in-N}).
That is,
\begin{equation}\label{eqn:exp}
    \begin{aligned}
      m^1&=m,\\
m^{n+1}&=m^n\cdot m.
    \end{aligned}
\end{equation}
  \end{definition}
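
For example, continuing the computations above, the
equations~\eqref{eqn:exp} yield
\begin{equation*}
  2^3=2^{2+1}=2^2\cdot2=(2^1\cdot2)\cdot2=(2\cdot2)\cdot2=4\cdot2=8.
\end{equation*}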

  \begin{theorem}
\mbox{}
    \begin{enumerate}
\item
$1^n=1$;
\item
$n^{m+k}=n^m\cdot n^k$;
\item
$(n\cdot m)^k=n^k\cdot m^k$;
\item
$(n^m)^k=n^{m\cdot k}$.\hfill\qedsymbol
    \end{enumerate}
  \end{theorem}

Exponentiation requires more than induction, because of the following
theorem.  (See Don Zagier \cite{Zagier} for a different formulation.) 

\begin{theorem}
  Let $n\in\N$.  On the cyclic group $\Z/n\Z$ there is an operation of
  exponentiation satisfying~\eqref{eqn:exp} if and
  only if $n\in\{1,2,6,42,1806\}$.
\end{theorem}

\begin{proof}
The proof is an exercise in number theory, but it involves an interesting
recursive definition.  We always have exponentiation as a function
from $\Z/n\Z\times\N$ to $\Z/n\Z$.  We want to find those $n$ such
that
\begin{equation}\label{eqn:pmod}
  x^{n+1}\equiv x\pmod n
\end{equation}
for all integers $x$, or just all $x$ in $\{1,\dots,n\}$.
If $p^2\divides n$ for some prime $p$, 
and $x=n/p$, then $x^k\equiv 0\pmod n$ when $k>1$,
so~\eqref{eqn:pmod} fails.  So we may assume $n$ is squarefree.  It is
now equivalent to ensure
\begin{equation*}
  x^{n+1}\equiv x\pmod p
\end{equation*}
for all prime factors $p$ of $n$.  We have this when $p\divides x$;
and in the other case, it is equivalent to ensure
\begin{equation*}
  x^n\equiv 1\pmod p.
\end{equation*}
Since $x$ can be chosen as a primitive root of $p$, it is equivalent
to ensure
\begin{equation*}
  p-1\divides n
\end{equation*}
for all prime factors $p$ of $n$.  Then also $q-1\divides n$ for all
prime factors $q$ of $p-1$, and so forth.  Keeping in mind that $n$
must be squarefree, let us refer to a prime $p$ as 
\defnplain{good}{}%
\index{good prime} 
if
$p-1$ is squarefree and all prime factors of $p-1$ are good.  Then all
prime factors of $n$ must be good.  We obtain the good primes
recursively.  Trivially, $2$ is good.  For every other good prime $p$,
we must have $2$ as a factor of $p-1$.
Since $2+1=3$, which is prime,
$3$ is good.  And $2\cdot 3+1$ is $7$, which is prime and therefore
good.  But $2\cdot 7+1$ is not prime.  However, $2\cdot 3\cdot
7+1=43$, a good prime.  But there are no more possibilities:
\begin{gather*}
  2\cdot 43+1=87=3\cdot 29;\\
  2\cdot 3\cdot 43+1=259=7\cdot 37;\\
2\cdot 7\cdot 43+1=603=3^2\cdot 67;\\
2\cdot 3\cdot 7\cdot 43+1=1807=13\cdot 139.
\end{gather*}
So the set of good primes is $\{2,3,7,43\}$.  In this set, we have
\begin{equation*}
  p<q\Iff p\divides q-1.
\end{equation*}
Hence the set of desired $n$ is $\{1,2,2\cdot 3,2\cdot 3\cdot 7,2\cdot
3\cdot 7\cdot 43\}$, which is as claimed.
\end{proof}
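
As a check on the smallest non-trivial case, let $n=6$, whose prime
factors are $2$ and $3$; indeed $2-1$ and $3-1$ divide $6$.  For
instance,
\begin{equation*}
  2^7=128\equiv2\pmod 6,\qquad 5^7=78125\equiv5\pmod 6.
\end{equation*}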

  \begin{definition}[Factorial]
    The operation $x\mapsto x!$ on $\N$ is such that
    \begin{align*}
      1!&=1,\\
(n+1)!&=(n+1)\cdot n!.
    \end{align*}
Its uniqueness is guaranteed
    by the corollary to the Recursion Theorem.
    Indeed, the function $x\mapsto(x,x!)$ is the unique homomorphism from
    $(\N,1,\scr)$ into 
    \begin{equation*}
    (\N\times\N,(1,1),(x,y)\mapsto(x+1,(x+1)\cdot y)).
    \end{equation*}
  \end{definition}
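
For example, iterating the successor-operation of the latter structure
from $(1,1)$ gives
\begin{equation*}
  (1,1)\mapsto(2,2)\mapsto(3,6)\mapsto(4,24),
\end{equation*}
so $4!=24$.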

In \S\ref{sect:nat} we obtain the strict total ordering $<$, by which
$\N$ is well-ordered.

\begin{theorem}\mbox{}
  \begin{enumerate}
    \item
  $m<n\Iff\Exists xm+x=n$.
\item
$m<n\Iff m+k<n+k$.
\item
$m<n\Iff m\cdot k<n\cdot k$.\hfill\qedsymbol
  \end{enumerate}
\end{theorem}

Hence if $m+k=n$, then $k$ is unique and can be denoted by $n-m$.

\chapter{Syntax and semantics}\label{app:synt}

The Greek etymon for
\Eng{syntax,} namely \Gk{<h s'untaxic,
  -ewc,} refers originally to an \emph{arranging,} a \emph{putting
  together in order,} 
especially of soldiers.  In one passage of Plato's \emph{Republic}
\cite[591d]{Shorey}, it is \emph{wealth} that may be arranged.
In that passage,\footnote{Found with the help of the
  Liddell--Scott lexicon \cite{LSJ}.}
 the character of Socrates describes the wise man: 
\begin{quote}
  \Gk{O>uko~un, e>~ipon, ka`i t`hn >en t~h| t~wn qrhm'atwn kt'hsei
  \Gkemph{s'untax'in} te ka`i sumfwn'ian?  ka`i t`on >'ogkon to~u
  pl'hjouc o>uk >ekplhtt'omenoc <up`o to~u t~wn poll~wn makarismo~u
  >'apeiron a>ux'hsei, >ap'eranta kak`a >'eqwn?}

%\noindent
And will it not also be so, I said, with the \Gkemph{arranging} and
harmonizing of his 
possessions?  He will not let himself be dazzled by the felicitations
of the multitude and pile up the mass of his wealth without measure,
involving himself in measureless ills, will he?\footnote{The
  translation is adapted from Shorey's \cite{Shorey}.}
\end{quote}
The arranging implied by \Gk{s'untaxic} can also be \emph{grammatical,}
a putting together of \emph{words.}

The source of \Eng{semantics} is the Greek
adjective \Gk{shmantik'oc, -'h, -'on,} meaning \emph{significant} or
\emph{meaningful.}  Related words include the verb \Gk{shma'inw}
(signify) and the noun \Gk{t`o shme~ion} (sign).  In \emph{On
  Interpretation} \cite[16a19,
  b5]{Aristotle-LI}, Aristotle defines nouns and verbs:
\begin{quote}
  \Gk{>'Onoma m`en o>~un >est`i fwn`h \Gkemph{shmantik`h} kat`a sunj'hkhn
  >'aneu qr'onou, <~hc mhd`en m'eroc >est`i \Gkemph{shmantik`on}
  keqwrism'enon;

<R~hma d'e >esti t`o \Gkemph{prosshma~inon} qr'onon, o<~u m'eroc o>ud`en
  \Gkemph{shma'inei} qwr'ic, ka`i >'estin >ae`i t~wn 
kaj> <et'erou legom'enwn \Gkemph{shme~ion.}}

A noun is a sound, \Gkemph{meaningful} by convention, without [grammatical]
tense, of which no part separately is \Gkemph{meaningful.}

A verb is [a
  sound] \Gkemph{signifying} a tense besides; no part of it is
  \Gkemph{mean}\-\Gkemph{ingful}
%meaningful
separately; it is always a \Gkemph{sign} of \emph{things said of} something.
\end{quote}
The more basic \Gk{t`o s~hma, -atoc,} meaning \emph{sign, mark,
  token,} appears in Homer (\emph{Iliad,} X.465--468):
\begin{quote}
\Gk{<`Wc >~ar'' >ef'wnhsen, ka`i >ap`o <'ejen <uy'os'' >ae'irac\\
j~hken >an`a mur'ikhn; d'eelon d' >ep`i \Gkemph{s~hm'a} t'' >'ejhke\\
summ'aryac d'onakac mur'ikhc t'' >erijhl'eac >'ozouc,\\
m`h l'ajoi a>~utic >i'onte jo`hn di`a n'ukta m'elainan.}

With these words, he took the spoils and set them upon a tamarisk
tree, and they make a \Gkemph{mark} at the place by pulling up reeds
and gathering boughs of tamarisk, that they might not miss it as they
came back through the fleeting hours of darkness.\footnote{Text and Samuel
  Butler's translation are from \url{http://www.perseus.tufts.edu}.}
\end{quote}


\appendix               % Because of the preceding switching of
			% languages, this is needed to have
			% ``appendix'' rather than ``chapter'' appear
			% in the table of contents  
\setcounter{chapter}{4}

\chapter{Syntactic entailment}\label{app:Frege}

What we call syntactic entailment in \S\ref{sect:syn-entail} seems to
have its origin
in the \emph{Begriffsschrift} \cite{MR0263601} of Gottlob Frege,
published in 1879.  (The title can be rendered as `ideography' or `concept
writing'.)  In Frege's work, what we call formulas appear not as strings, but
as two-dimensional 
figures.  For example, our three axioms correspond to Frege's
Judgments~(1),~(2), and---almost---(28); he writes them as follows:
\setlength{\unitlength}{1.2pt}
\begin{center}
\hfill
  \begin{picture}(40,70)(0,-63)
  \put(0,0){\line(1,0){30}}
  \put(30,-3){$\;\sF$}
  \put(20,-10){\line(0,1){10}}  
  \put(20,-10){\line(1,0){10}}  
  \put(30,-13){$\;\sG$}
  \put(10,-20){\line(0,1){20}}
  \put(10,-20){\line(1,0){20}}
  \put(30,-23){$\;\sF$}
  \linethickness{1.5pt}
  \put(0,-5){\line(0,1){10}}
  \end{picture}
\hfill
  \begin{picture}(50,66)(0,-63)
  \put(0,0){\line(1,0){40}}
  \put(40,-3){$\;\sH$}
  \put(30,-10){\line(0,1){10}}
  \put(30,-10){\line(1,0){10}}
  \put(40,-13){$\;\sF$}
  \put(20,-20){\line(0,1){20}}
  \put(20,-20){\line(1,0){20}}
  \put(40,-23){$\;\sG$}
  \put(30,-30){\line(0,1){10}}
  \put(30,-30){\line(1,0){10}}
  \put(40,-33){$\;\sF$}
  \put(10,-40){\line(0,1){40}}
  \put(10,-40){\line(1,0){30}}
  \put(40,-43){$\;\sH$}
  \put(30,-50){\line(0,1){10}}
  \put(30,-50){\line(1,0){10}}
  \put(40,-53){$\;\sG$}
  \put(20,-60){\line(0,1){20}}
  \put(20,-60){\line(1,0){20}}
  \put(40,-63){$\;\sF$}
  \linethickness{1.5pt}
  \put(0,-5){\line(0,1){10}}
\end{picture}
\hfill
  \begin{picture}(40,66)(0,-63)
  \put(0,0){\line(1,0){30}}
  \put(30,-3){$\;\sF$}
  \put(20,-10){\line(0,1){10}}  
  \put(20,-10){\line(1,0){10}}  
  \put(30,-13){$\;\sG$}
  \put(10,-20){\line(0,1){20}}
  \put(10,-20){\line(1,0){20}}
  \put(25,-24){\line(0,1){4}}
  \put(30,-23){$\;\sG$}
  \put(20,-30){\line(0,1){10}}
  \put(20,-30){\line(1,0){10}}
  \put(25,-34){\line(0,1){4}}
  \put(30,-33){$\;\sF$}
  \linethickness{1.5pt}
  \put(0,-5){\line(0,1){10}}
  \end{picture}
\hfill\mbox{}
\end{center}
This style of writing formulas never caught on, except in the
following sense:  To assert a \Eng{judgment} whose \Eng{content} is $A$, Frege
writes
\begin{center}
  \begin{picture}(30,10)(0,-5)
  \put(0,0){\line(1,0){20}}
  \put(20,-3){$\;A$}
  \linethickness{1.5pt}
  \put(0,-5){\line(0,1){10}}
  \end{picture}  
\end{center}
The vertical bar here is the \Eng{judgment stroke,} while the horizontal
is merely the \Eng{content stroke.}
Frege's notation appears to be the origin of
our own symbol~$\proves$.

\chapter{Galois correspondences}\label{app:compactness}

For an arbitrary set $\Omega$,
a singulary operation $X\mapsto\cl X$ on
$\pow{\Omega}$ is a  
\defnplain{closure-operator},%
\index{closure, ---{}-operator}
or just a 
\defnplain{closure}, 
on $\Omega$ if it is:
\begin{enumerate}
  \item
\defnplain{increasing}:%
\index{increasing closure-operator} $A\included\cl A$; 
\item
\defnplain{monotone}:%
\index{monotone closure-operator} $\cl A\included\cl B$
  whenever $A\included B$; and
\item
\defnplain{idempotent}:%
\index{idempotent closure-operator}
 $\cl{\cl A}=\cl A$.
\end{enumerate}
The closure $X\mapsto\cl X$ is called 
\defnplain{finitary}{}%
\index{finit!---ary closure-operator} 
if
\begin{equation*}
  \cl A=\bigcup_{X\fincluded A}\cl X
\end{equation*}
for all $A$ in $\pow\Omega$.  (Here $X\fincluded A$ means $X$ is a
\emph{finite} subset of $A$, as on p.~\pageref{fincluded}.)
Examples include the following.
\begin{enumerate}
\item
On any set, the identity-function $X\mapsto X$ is trivially a finitary
closure.
\item
On $\PFm$, the function
$\sF\mapsto\Cn[]\sF$ is a closure, by
Theorem~\ref{thm:closure}; it is finitary, by the corollary to the
Compactness Theorem (\ref{thm:prop-compactness}).  
  \item
If $G$ is a group, the function $X\mapsto\gpgen X$ taking a subset of
$G$ to the group that it generates is a finitary closure on
$G$. 
\item
If $\Omega$ is a topological space, the function taking a subset of
$\Omega$ to its topological 
closure is a closure on $\Omega$ (usually not finitary).
\end{enumerate}
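
To see, for example, that the closure in the third example is
finitary, note that every element of $\gpgen X$ is a product
$x_1^{\pm1}\dotsm x_k^{\pm1}$ for some $x_1$,~\dots,~$x_k$ in $X$, and
so belongs already to $\gpgen{\{x_1,\dots,x_k\}}$; hence
\begin{equation*}
  \gpgen X=\bigcup_{Y\fincluded X}\gpgen Y.
\end{equation*}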

Closures can arise from a \tech{Galois correspondence}{}
between two sets.  Suppose $A$ and $B$ are sets, and $R$ is a relation
from $A$ to $B$.  
If $C\included A$, and $D\included B$, let
\begin{gather*}
  C'=\bigcap_{x\in C}\{y\in B\colon x\mathrel Ry\}=\{y\in B\colon \Forall x(x\in
  C\lto x\mathrel R y)\},\\
  D'=\bigcap_{y\in D}\{x\in A\colon x\mathrel Ry\}=\{x\in A\colon \Forall y(y\in
  D\lto x\mathrel R y)\}.
\end{gather*}
So we have functions $X\mapsto X'$ from $\pow A$ to $\pow B$ and from
$\pow B$ to $\pow A$.  These functions are inclusion-reversing; so the
operations $X\mapsto X''$
on $\pow A$ and $\pow B$ are inclusion-preserving (monotone). 
Moreover,
\begin{align*}
  C''
&=\{x\in A\colon \Forall y(y\in C'\lto x\mathrel R y)\}\\
&=\{x\in A\colon \Forall y(\Forall z(z\in
  C\lto z\mathrel R y)\lto x\mathrel R y)\},
\end{align*}
so 
\begin{equation}\label{eqn:CC''}
C\included C''; 
\end{equation}
similarly, 
\begin{equation}\label{eqn:DD''}
D\included D''.
\end{equation}
Thus the $X\mapsto X''$ are increasing.
Replacing $C$ with
$D'$ in~\eqref{eqn:CC''}, we get $D'\included D'''$;
but~\eqref{eqn:DD''} implies
$D'''\included D'$; therefore
\begin{equation*}
D'=D'''.
\end{equation*}
Likewise, $C'=C'''$.
Hence $C''=C''''$ and $D''=D''''$, so the $X\mapsto X''$ are
idempotent and are therefore closures.
Moreover, the functions $X\mapsto X'$ give bijections, not
(necessarily) between $\pow A$ and $\pow B$, but between $\{X'\colon
X\included B\}$ and
$\{X'\colon X\included A\}$.  In short, there is a
\defn{Galois correspondence}{} between these two sets.

The closures in the examples above are $X\mapsto X''$ on $\pow A$ when
\begin{enumerate}
  \item
$A$ is a set, $B$ is the same set, and $R$ is $\neq$;
\item
$A$ is $\PFm$, $B$ is $\B^{\PVar}$, and $R$ is the converse of
  $\models$;
\item
$A$ is $G$, $B$ is the set of subgroups of $G$, and $R$ is $\in$;
\item
$A$ is the space, $B$ is the set of closed subsets, and $R$ is $\in$.
\end{enumerate}

The original Galois correspondence arises in field-theory.  If $L/K$
is a finite normal separable extension of fields, then the fields $F$
such that $K\included F\included L$ are in bijection with the
subgroups of $\Aut{L/K}$.  This correspondence arises as above in case
$A$ is $L$, and $B$ is $\Aut{L/K}$, and
\begin{equation*}
  R=\{(x,\sigma)\in L\times\Aut{L/K}\colon x^{\sigma}=x\}.
\end{equation*}
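
A standard concrete instance: if $K$ is $\Q$, and $L$ is
$\Q(\sqrt2,\sqrt3)$, then $\Aut{L/K}$ is a four-element group,
generated by the automorphisms determined by $\sqrt2\mapsto-\sqrt2$
and $\sqrt3\mapsto-\sqrt3$.  Its three subgroups of order $2$
correspond to the three intermediate fields
\begin{equation*}
  \Q(\sqrt2),\qquad\Q(\sqrt3),\qquad\Q(\sqrt6);
\end{equation*}
the trivial subgroup corresponds to $L$ itself, and the whole group,
to $K$.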

\chapter{Definable sets}\label{app:definable-sets}


To define the interpretations of formulas in a structure recursively,
we start out as in \S\ref{sect:atomic} with the interpretations of
atomic formulas.  We define the interpretations of negations and
conjunctions as in \S\ref{sect:open}.  To deal with the existential
quantifier, we might proceed as follows.

If $I$ is a finite subset of $\vnn$, and if
$\{i\colon \varble_i\in\fv\phi\}\included I$, let us say that $\phi$
is $I$-ary.  In this case, suppose the interpretation $\phi^{\str A}$
has been defined as a subset of $A^I$.  If $j\in\vnn$, let $\pi_j^I$
be the function
\begin{equation*}
  (x_i\colon i\in I)\longmapsto (x_i\colon i\in I\setminus\{j\})
\end{equation*}
from $A^I$ to
$A^{I\setminus\{j\}}$.
Then we can define
\begin{equation*}
  (\Exists {\varble_j}\phi)^{\str A}=(\pi_j^I)\setimb{\phi^{\str A}}.
\end{equation*}
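
For a simple illustration, let $\phi$ be the formula
$\varble_0=\varble_1$, which is $I$-ary when $I=\{0,1\}$.  Then
$\phi^{\str A}$ is the diagonal of $A^I$, and
\begin{equation*}
  (\Exists{\varble_1}\phi)^{\str A}
  =(\pi_1^I)\setimb{\phi^{\str A}}=A^{\{0\}},
\end{equation*}
as one expects, since $\Exists{\varble_1}\phi$ is valid.
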
Suppose $J$ is another finite subset of $\vnn$, disjoint from $I$.
Then $\phi$ is also $I\cup J$-ary, and the interpretation of $\phi$ as
such is
\begin{equation*}
  \phi^{\str A}\times A^J,
\end{equation*}
which can be understood as the set of functions $h$ on $I\cup J$ such
that $h\restriction I\in \phi^{\str A}$ and $h\restriction J\in A^J$.

Without the requirement that $I$ and $J$ be disjoint, suppose
$\alpha\colon J\to I$.  We obtain the function $\alpha^*$ from $A^I$
to $A^J$, namely
\begin{equation*}
  (x_i\colon i\in I)\longmapsto(x_{\alpha(j)}\colon j\in J).
\end{equation*}
In particular, if $J=I\setminus\{j\}$, and $\alpha$ is the inclusion
of this in $I$, then $\alpha^*=\pi^I_j$.

Now suppose again that $I$ and $J$ are disjoint.  If
$I=\{i_0,\dots,i_{m-1}\}$, and $J=\{j_0,\dots,j_{n-1}\}$, then
$\alpha^*\setimb{\phi^{\str A}}$ is the interpretation of the formula
\begin{equation*}
  \Exists{x_{i_0}}\dotsb\Exists{x_{i_{m-1}}}(\phi\land
  x_{j_0}=x_{\alpha(j_0)}\land\dotsb\land x_{j_{n-1}}=x_{\alpha(j_{n-1})}),
\end{equation*}
written more simply as
\begin{equation*}
  \Exists{(x_i\colon i\in I)}(\phi\land\bigwedge_{j\in J}x_j=x_{\alpha(j)}).
\end{equation*}
If $\psi$ is $J$-ary, then $(\alpha^*)\inv\setimb{\psi^{\str A}}$ is
the interpretation of
\begin{equation}\label{eqn:xj}
  \Exists{(x_j\colon j\in J)}(\psi\land\bigwedge_{j\in
  J}x_j=x_{\alpha(j)}). 
\end{equation}
If $I$ and $J$ are not disjoint, then there is still some $K$,
disjoint from each of them, for which there are $\beta$ from $J$ to
$K$, and $\gamma$ from $K$ to $I$, such that
$\alpha=\gamma\circ\beta$.  Then $\alpha^*=\beta^*\circ\gamma^*$, so
$\alpha^*\setimb{\phi^{\str A}}$ and $(\alpha^*)\inv\setimb{\psi^{\str
    A}}$ are still definable sets.

Note that~\eqref{eqn:xj} might be written more simply as
\begin{equation*}
  \psi(x_{\alpha(j)}\colon j\in J).
\end{equation*}
But this is not necessarily the result of substituting $x_{\alpha(j)}$
for $x_j$ in $\psi$.


\backmatter
%\bibliographystyle{amsplain}
%\bibliography{../../../../TeX/references.bib}

\def\cprime{$'$}
\providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace}
\providecommand{\MR}{\relax\ifhmode\unskip\space\fi MR }
% \MRhref is called by the amsart/book/proc definition of \MR.
\providecommand{\MRhref}[2]{%
  \href{http://www.ams.org/mathscinet-getitem?mr=#1}{#2}
}
\providecommand{\href}[2]{#2}
\begin{thebibliography}{10}

\bibitem{Aristotle-LI}
Aristotle, \emph{Categories, on interpretation, and prior analytics}, Loeb
  Classical Library, vol. 325, Harvard University Press and William Heinemann
  Ltd, Cambridge, Massachusetts and London, 1973, with an English translation
  by H. P. Cooke and H. Tredennick.

\bibitem{Aristotle-Metaph-LCL-I}
\bysame, \emph{The metaphysics, books i--ix}, Loeb Classical Library, vol. 271,
  Harvard University Press and William Heinemann Ltd., Cambridge,
  Massachusetts, and London, 1980, With an English translation by Hugh
  Tredennick. First printed 1933.

\bibitem{MR0229613}
James Ax, \emph{The elementary theory of finite fields}, Ann. of Math. (2)
  \textbf{88} (1968), 239--271. \MR{MR0229613 (37 \#5187)}

\bibitem{Borovik-Shadows-0.62}
Alexandre~V. Borovik, \emph{Shadows of the truth: Metamathematics of elementary
  mathematics}, \url{http://www.maths.manchester.ac.uk/~avb/MEM.pdf}, August
  25, 2008.

\bibitem{Cantor}
Georg Cantor, \emph{Contributions to the founding of the theory of transfinite
  numbers}, Cosimo Classics, New York, 2007, translated, and provided with an
  introduction and notes, by Philip E. B. Jourdain; originally published 1915.

\bibitem{MR0409165}
C.~C. Chang and H.~J. Keisler, \emph{Model theory}, North-Holland Publishing
  Co., Amsterdam, 1973, Studies in Logic and the Foundations of Mathematics,
  Vol. 73. \MR{MR0409165 (53 \#12927)}

\bibitem{MR0103812}
Chen~Chung Chang, \emph{On unions of chains of models}, Proc. Amer. Math. Soc.
  \textbf{10} (1959), 120--127. \MR{MR0103812 (21 \#2576)}

\bibitem{MR18:631a}
Alonzo Church, \emph{Introduction to mathematical logic. {V}ol. {I}}, Princeton
  University Press, Princeton, N. J., 1956. \MR{18,631a}

\bibitem{C-NL}
R.~G. Collingwood, \emph{The new {L}eviathan, or {M}an, society, civilization,
  and barbarism}, Oxford, 2000, revised edition, edited and introduced by David
  Boucher.

\bibitem{MR0159773}
Richard Dedekind, \emph{Essays on the theory of numbers. {I}: {C}ontinuity and
  irrational numbers. {II}: {T}he nature and meaning of numbers}, authorized
  translation by Wooster Woodruff Beman, Dover Publications Inc., New York,
  1963. \MR{MR0159773 (28 \#2989)}

\bibitem{Redhouse-Eng-Tur}
Robert~Avery et~al. (ed.), \emph{{\.I}ngilizce--{T}{\"u}rk{\c c}e {R}edhouse
  s{\"o}zl{\"u}{\u g}{\"u}}, SEV Matbaac{\i}l{\i}k ve Yay{\i}nc{\i}l{\i}k,
  1974, 33rd printing, 2002.

\bibitem{German}
Roe-Merrill~S. Heffner, \emph{Brief {G}erman grammar}, D. C. Heath and Company,
  Boston, 1931.

\bibitem{MR0033781}
Leon Henkin, \emph{The completeness of the first-order functional calculus}, J.
  Symbolic Logic \textbf{14} (1949), 159--166. \MR{MR0033781 (11,487d)}

\bibitem{MR0120156}
\bysame, \emph{On mathematical induction}, Amer. Math. Monthly \textbf{67}
  (1960), 323--338. \MR{MR0120156 (22 \#10913)}

\bibitem{MR1396852}
\bysame, \emph{The discovery of my completeness proofs}, Bull. Symbolic Logic
  \textbf{2} (1996), no.~2, 127--158. \MR{MR1396852 (97c:03005)}

\bibitem{MR94e:03002}
Wilfrid Hodges, \emph{Model theory}, Encyclopedia of Mathematics and its
  Applications, vol.~42, Cambridge University Press, Cambridge, 1993.
  \MR{94e:03002}

\bibitem{Hodges-Building}
\bysame, \emph{Building models by games}, Dover Publications, Mineola, New
  York, 2006, original publication, 1985. \MR{MR812274 (87h:03045)}

\bibitem{Hume}
David Hume, \emph{A treatise of human nature}, second ed., Oxford, 1978,
  Edited, with an analytical index, by L. A. {S}elby-{B}igge, with text revised
  and variant readings by {P}. {H}. Nidditch.

\bibitem{MR12:397m}
Edmund Landau, \emph{Foundations of analysis. {T}he arithmetic of whole,
  rational, irrational and complex numbers}, third ed., Chelsea Publishing
  Company, New York, N.Y., 1966, translated by F. Steinhardt; first edition
  1951; first German publication, 1929. \MR{12,397m}

\bibitem{LSJ}
Henry~George Liddell and Robert Scott, \emph{A {G}reek-{E}nglish lexicon},
  Clarendon Press, Oxford, 1940, revised and augmented throughout by Sir Henry
  Stuart Jones.

\bibitem{MR0089813}
Jerzy {\L}o{\'s} and Roman Suszko, \emph{On the extending of models ({IV}):
  {I}nfinite sums of models}, Fund. Math. \textbf{44} (1957), 52--60.
  \MR{MR0089813 (19,724c)}

\bibitem{MR1924282}
David Marker, \emph{Model theory: an introduction}, Graduate Texts in
  Mathematics, vol. 217, Springer-Verlag, New York, 2002. \MR{1 924 282}

\bibitem{MR0175782}
Michael Morley, \emph{Categoricity in power}, Trans. Amer. Math. Soc.
  \textbf{114} (1965), 514--538. \MR{MR0175782 (31 \#58)}

\bibitem{OED}
Murray et~al. (eds.), \emph{The compact edition of the {O}xford {E}nglish
  {D}ictionary}, Oxford University Press, 1973.

\bibitem{LatinDili}
Filiz Oktem, \emph{Uygulamal{\i} {L}atin dili}, Sosyal Yay{\i}nlar, Eyl{\"u}l
  1996.

\bibitem{Peano}
Giuseppe Peano, \emph{The principles of arithmetic, presented by a new method},
  From {F}rege to {G}{\"o}del (Jean van Heijenoort, ed.), Harvard University
  Press, 1967, First published 1889.

\bibitem{Shorey}
Plato, \emph{Republic}, Loeb Classical Library, Harvard University Press and
  William Heinemann Ltd., Cambridge, Massachusetts, and London, 1980, with an
  English Translation by Paul Shorey, in two volumes.

\bibitem{Post}
Emil~L. Post, \emph{Introduction to a general theory of elementary
  propositions}, Amer. J. Math. \textbf{43} (1921), no.~3, 163--185.

\bibitem{MR1800596}
Philipp Rothmaler, \emph{Introduction to model theory}, Algebra, Logic and
  Applications, vol.~15, Gordon and Breach Science Publishers, Amsterdam, 2000,
  prepared by Frank Reitmaier, translated and revised from the 1995 German
  original by the author. \MR{MR1800596 (2001h:03002)}

\bibitem{Salinger}
J.~D. Salinger, \emph{Nine stories}, Little, Brown and Company, New York, 2001,
  originally published 1953.

\bibitem{MR1809685}
Joseph~R. Shoenfield, \emph{Mathematical logic}, Association for Symbolic
  Logic, Urbana, IL, 2001, reprint of the 1973 second printing. \MR{MR1809685
  (2001h:03003)}

\bibitem{MR83e:04002}
Robert~R. Stoll, \emph{Set theory and logic}, Dover Publications Inc., New
  York, 1979, corrected reprint of the 1963 edition. \MR{83e:04002}

\bibitem{MR0263601}
Jean van Heijenoort (ed.), \emph{Frege and {G}\"odel. {T}wo fundamental texts
  in mathematical logic}, Harvard University Press, Cambridge, Mass., 1970.
  \MR{MR0263601 (41 \#8202)}

\bibitem{Zagier}
Don Zagier, \emph{Problems posed at the {S}t {A}ndrews {C}olloquium, 1996},
  \url{http://www-groups.dcs.st-and.ac.uk/~john/Zagier/Problems.html}, accessed
  September 16, 2008.

\end{thebibliography}


\printindex

%\chapter*{}
\mbox{}
\vfill
\markboth{}{}
\begin{quotation}
  ``You're just being logical,'' Teddy said to him impassively.

``I'm just being what?'' Nicholson asked, with a little excess of
  politeness. 

``Logical.  You're just giving me a regular, intelligent answer,''
  Teddy said.  ``I was trying to help you.  You asked me how I get out
  of the finite dimensions when I feel like it.  I certainly don't use
  logic when I do it.  Logic's the first thing you have to get rid
  of.''

Nicholson removed a flake of tobacco from his tongue with his fingers.

``You know Adam?'' Teddy asked him.

``Do I know who?''

``Adam.  In the Bible.''

Nicholson smiled.  ``Not personally,'' he said drily.

Teddy hesitated.  ``Don't be angry with me,'' he said.  ``You asked me
a question, and I'm---''

``I'm not \emph{ang}ry with you, for heaven's sake.''

``Okay,'' Teddy said.  He was sitting back in his chair, but his head
was turned toward Nicholson.  ``You know that apple Adam ate in the
Garden of Eden, referred to in the Bible?'' he asked.  ``You know what
was in that apple?  Logic.  Logic and intellectual stuff.  That was all
that was in it.  So---this is my point---what you have to do is vomit
it up if you want to see things as they really are.  I mean if you
vomit it up, then you won't have any more trouble with blocks of wood
and stuff.  You won't see everything stopping \emph{off} all the
time.  And you'll know what your arm really is, if you're interested.
Do you know what I mean?  Do you follow me?''

%\mbox{}

\mbox{}\hfill ---J. D. Salinger, ``Teddy'' \cite[pp.~290f.]{Salinger}
\end{quotation}

\vfill
\vfill

%\layout

\end{document}






