--- /dev/null
+\xcalways\section{Cryptographic background}\x
+
+\xcalways\subsection{Groups}\x
+
+\begin{slide}
+ \resetseq
+ \head{Groups \seq: definition}
+ \topic{definitions}
+
+ A \emph{group} is a set $G$ of objects, and a binary operation $\op$, with
+ the following properties.
+ \begin{description}
+ \item [Closure] If $a$ and $b$ are elements of $G$, then so is $a \op b$.
+ \item [Associativity] For any $a$, $b$ and $c$ in $G$,
+ \[ a \op (b \op c) = (a \op b) \op c. \]
+ \item [Identity] There exists an element $e$ so that, for any $a$ in $G$,
+ \[ a \op e = a = e \op a. \]
+ \item [Inverses] For any $a$ in $G$, there exists a $b$ so that
+ \[ a \op b = e = b \op a. \]
+ \end{description}
+\end{slide}
+
+\begin{slide}
+ \head{Groups \seq: abelian groups}
+
+ A group $(G, \op)$ is \emph{commutative}, or \emph{abelian}, if it has the
+ following additional property.
+ \begin{description}
+ \item [Commutativity] For any $a$ and $b$ in $G$,
+ \[ a \op b = b \op a. \]
+ \end{description}
+ We usually write the operator of an abelian group using some familiar
+ symbol, like $+$ or $\times$.
+\end{slide}
+
+\begin{slide}
+ \head{Groups \seq: notation}
+
+ Let $(G, +)$ be a group written additively. For $A, B \in G$, we do the
+ obvious things and write $0_G$, or just $0$, for the identity, and $-A$ for
+ the inverse of $A$; we also write
+ \[ A - B \equiv A + (- B) \qquad
+ 2 A \equiv A + A \qquad
+ n A \equiv \underbrace{A + A + \cdots + A}_\text{$n$ times}.
+ \]
+ Similarly, if $(H, \times)$ is a group written multiplicatively, and
+ $\alpha, \beta \in H$, we do the obvious things and write $1_H$, or just
+ $1$, for the identity, and $\alpha^{-1}$ for the inverse of $\alpha$; we
+ also write
+ \[ \alpha \beta \equiv \alpha \times \beta \qquad
+ \alpha/\beta \equiv \alpha \times \beta^{-1} \qquad
+ \alpha^2 \equiv \alpha \times \alpha \qquad
+ \alpha^n \equiv \underbrace{\alpha \times \alpha \times \cdots
+ \times \alpha}_\text{$n$ times}.
+ \]
+ The number of elements of a group $G$ is written $|G|$ (or sometimes
+ $\#G$), and is called the \emph{order} of $G$.
+\end{slide}
+
+\begin{slide}
+ \head{Groups \seq: cyclic groups and discrete logs}
+ \topic{cyclic groups and discrete logs}
+
+ A \emph{cyclic} group $(G, +)$ consists only of elements $n P$ for a single
+ $P$ and varying $n \in \Z$. We call $P$ a \emph{generator} of the group.
+
+ Cyclic groups are always abelian.
+
+ If $G$ is finite, then for any $A \in G$, there is a \emph{unique} $a \in
+ \Nupto{|G|}$ for which $A = a P$. We call $a$ the \emph{discrete
+ logarithm} of $A$ to the base $P$.
+
+ This makes more sense if you consider a multiplicative group $(H, \times)$.
+ Then there's a $\gamma \in H$, and for any $\alpha$, a unique $a \in
+ \Nupto{|H|}$ such that $\alpha = \gamma^a$.
+
+ Computing discrete logs seems to be difficult in some kinds of groups.
+\end{slide}
+
+\begin{slide}
+ \head{Groups \seq: Diffie-Hellman}
+ \topic{Diffie-Hellman}
+
+ Fix some cyclic group $(G, +)$, with generator $P$.
+ \begin{protocol}
+ Alice & Bob \\[\medskipamount]
+ $a \getsr \Nupto{|G|}$; & $b \getsr \Nupto{|G|}$; \\
+ $A \gets a P$; & $B \gets b P$; \\
+ \send{->}{A}
+ \send{<-}{B}
+ $Z \gets a B$; & $Z \gets b A$;
+ \end{protocol}
+ This works because
+ \[ Z = a B = a (b P) = (a b) P = (b a) P = b (a P) = b A. \]
+ Computing $a b P$ from $a P$ and $b P$ seems hard. This is the
+ (computational) \emph{Diffie-Hellman} problem.
+
+ The best way we know is to work out $a$ or $b$ -- i.e., extract a discrete
+ logarithm.
+\end{slide}
+
+\begin{slide}
+ \head{Groups \seq: ElGamal encryption}
+ \topic{ElGamal encryption}
+
+ Suppose $H\colon G \to \Bin^n$ is a secure hash function. Alice has a
+ private key~$a \in \Nupto{|G|}$. Bob knows her public key~$A = a P$. He
+ has an $n$-bit message $m$ he wants to send her.
+ \begin{enumerate}
+ \item He chooses $r \inr \Nupto{|G|}$.
+ \item He computes $R = r P$.
+ \item He computes $Z = r A$.
+ \item He sends Alice the pair $(R, m \xor H(Z))$.
+ \end{enumerate}
+ Alice can decrypt because
+ \[ a R = (a r) P = r A = Z. \]
+\end{slide}
+
+\begin{slide}
+ \head{Groups \seq: Schnorr groups}
+ \topic{examples}
+
+ Let $p$ and $q$ be prime, with $p = q f + 1$. The residues modulo $p$ form
+ a \emph{finite field} $\F_p$. The nonzero elements form a cyclic group
+ $\F_p^*$ under multiplication mod~$p$. Let $\eta$ be a generator of
+ $\F_p^*$.
+
+ Set $\gamma = \eta^f$. Then
+ \[ G = \langle \gamma \rangle = \{\, \gamma^n \mid 0 \le n < q \,\} \]
+ is a \emph{Schnorr group} of $q$ elements. $G$ is cyclic, with generator
+ $\gamma$.
+
+ In practice, you don't need to find $\eta$. Pick $\alpha \in \F_p^*$
+ arbitrarily (2 is a good first choice); if $\alpha^f \ne 1$ then use
+ $\gamma = \alpha^f$.
+\end{slide}
+
+\begin{slide}
+ \head{Groups \seq: elliptic curves}
+
+ Let $q = p^f$ be a prime power. Then there is a finite field $\F_q$ with
+ $q$ elements.
+
+ Let $E$ be an \emph{elliptic curve} over $\F_q$:
+ \[ E(\F_q) = \{\, (x, y) \in \F_q^2 \mid y^2 = x^3 + a x + b \,\} \]
+ \begin{tabularx}{\linewidth}{@{}Xl}
+ \hrule height 0pt
+ The points of $E(\F_q)$ form a group with a peculiar kind of addition
+ (pictured right).
+ \parskip=1ex
+
+ If $\#E(\F_q)$ has a large prime factor then we can
+ use a cyclic subgroup of $E(\F_q)$. The details are complicated\dots
+ &
+ \vtop{\hrule height 0pt \hbox{
+ \ifpdf
+ \includegraphics[scale = 0.6]{ecc-0.pdf}
+ \else
+ \includegraphics[scale = 0.6]{ecc.0}
+ \fi
+ }}
+ \end{tabularx}
+\end{slide}
+
+
+\xcalways\subsection{Bilinear maps}\x
+
+\begin{slide}
+ \resetseq
+ \head{Bilinear maps \seq: definition}
+ \topic{definition}
+
+ Let $(G, +)$, $(G', +)$ and $(G_T, \times)$ be three cyclic groups with the
+ same order. Let $P$ and $P'$ generate $G$ and $G'$ respectively.
+
+ A map $\hat{e}\colon G \times G' \to G_T$ is \emph{bilinear} if
+ \begin{itemize}
+ \item for any $R \in G$ and $S', T' \in G'$,
+ \[ \hat{e}(R, S' + T') = \hat{e}(R, S') \, \hat{e}(R, T'), \]
+ and
+ \item for any $R, S \in G$ and $T' \in G'$,
+ \[ \hat{e}(R + S, T') = \hat{e}(R, T') \, \hat{e}(S, T'). \]
+ \end{itemize}
+ We say that $\hat{e}$ is \emph{non-degenerate} if
+ \[ \hat{e}(P, P') \ne 1. \]
+\end{slide}
+
+\begin{slide}
+ \head{Bilinear maps \seq: properties}
+ \topic{properties}
+
+ Define
+ \[ \gamma = \hat{e}(P, P'). \]
+ Then $\gamma$ generates $G_T$.
+
+ For any $a$ and $b$:
+ \[ \hat{e}(a P, b P') = \gamma^{a b}. \]
+ If $\hat{e}$ is easy to compute then this is like a Diffie-Hellman
+ computation combined with an isomorphism to a different group.
+\end{slide}
+
+\begin{slide}
+ \head{Bilinear maps \seq: the Bilinear Diffie-Hellman problem}
+ \topic{bilinear Diffie-Hellman problem}
+
+ The \emph{bilinear} Diffie-Hellman problem is this: given
+ \begin{itemize}
+ \item $a P$ and $b P$ in $G$, and
+ \item $a P'$ and $c P'$ in $G'$,
+ \end{itemize}
+ compute
+ \[ \zeta = \gamma^{abc}. \]
+
+ This can be done if the Diffie-Hellman problem can be solved in \emph{any}
+ of $G$, $G'$ or $G_T$.
+ \begin{itemize}
+ \item If DHP easy in $G$, then compute $\zeta = \hat{e}(a b P, c P')$.
+ \item If DHP easy in $G'$, then compute $\zeta = \hat{e}(b P, a c P')$.
+ \item If DHP easy in $G_T$, then compute $\alpha = \hat{e}(a P, P') =
+ \gamma^a$, $\beta = \hat{e}(b P, c P') = \gamma^{b c}$ and $\zeta$ from
+ $\alpha$ and $\beta$.
+ \end{itemize}
+\end{slide}
+
+\begin{slide}
+ \head{Bilinear maps \seq: Boneh-Franklin identity-based encryption}
+ \topic{Boneh-Franklin identity-based encryption}
+
+ We need two hashes: $H \colon G_T \to \Bin^n$ and $H_\text{id} \colon
+ \Bin^* \to G'$.
+
+ There is a \emph{trusted third party}. It has a private key $t \inr
+ \Nupto{|G|}$ and a public key $T = t P$.
+
+ Alice's public key is $A = H_\text{id}(\texttt{Alice})$. Her private key
+ is $K_A = t A$.
+
+ Bob wants to send Alice a message $m \in \Bin^n$.
+ \begin{enumerate}
+ \item He chooses $r \inr \Nupto{|G|}$.
+ \item He computes $R = r P$.
+ \item He computes $\psi = \hat{e}(T, A)^r$.
+ \item He sends Alice the pair $(R, m \xor H(\psi))$.
+ \end{enumerate}
+ Alice can decrypt because
+ \[ \hat{e}(R, K_A) = \hat{e}(r P, t A)
+ = \hat{e}(P, A)^{rt}
+ = \hat{e}(t P, A)^r
+ = \hat{e}(T, A)^r
+ = \psi. \]
+\end{slide}
+
+\begin{slide}
+ \head{Bilinear maps \seq: examples}
+ \topic{examples}
+
+ The major examples of bilinear maps are the \emph{Weil} and \emph{Tate
+ pairings} on torsion groups of elliptic curves.
+\end{slide}
+
+
+\xcalways\subsection{Zero-knowledge}\x
+
+\begin{slide}
+ \resetseq
+ \head{Zero-knowledge \seq: interactive proofs}
+ \topic{interactive proofs}
+
+ Prover and verifier exchange messages. Verifier decides whether to
+ \emph{accept} or \emph{reject} the proof.
+
+ An interactive proof system must have the following properties.
+ \begin{description}
+ \item [Completeness] If the prover is honest, the verifier (almost always)
+ accepts.
+ \item [Soundness] If the prover is \emph{dishonest}, the verifier
+ (sometimes) rejects.
+ \end{description}
+ Repeat protocol several times to reduce soundness error.
+\end{slide}
+
+\begin{slide}
+ \head{Zero-knowledge \seq: simulation}
+ \topic{simulation}
+
+ We have a prover $P$ and a verifier $V$. Write
+ \[ x \gets V^P() \]
+ to mean that $V$ outputs $x$ after interacting with $P$.
+
+ Suppose that there is a simulator~$S$ so that, for all $y$,
+ \[ |\Pr[x \gets V^P() : x = y] - \Pr[x \gets S() : x = y]| \le \epsilon. \]
+ If $\epsilon$ is small, then $V$ is unlikely to be able to persuade anyone
+ that he learned anything from $P$.
+
+ In particular, if $\epsilon = 0$ then we say that the proof has
+ \emph{perfect zero knowledge}.
+\end{slide}
+
+\begin{slide}
+ \head{Zero-knowledge \seq: an example}
+ \topic{example}
+
+ Prover $P$ knows that $\alpha = \beta^2 \in \Z/n\Z$ for some composite $n$.
+ \begin{protocol}
+ Prover $P$ & Verifier $V$ \\[\medskipamount]
+ $\gamma \getsr \Z/n\Z$; \\
+ \send{->}{\kappa = \gamma^2 \alpha}
+ \send{<-}{c \inr \Bin}
+ \send{->}{\rho = \gamma \beta^c}
+ & Check: $\kappa = \rho^2 \alpha^{1 - c}$
+ \end{protocol}
+
+ \begin{itemize}
+ \item Completeness:
+ \[ \rho^2 \alpha^{1 - c} = (\gamma \beta^c)^2 \alpha^{1 - c}
+ = \gamma^2 \beta^{2c} \alpha^{1 - c}
+ = \gamma^2 \alpha^c \alpha^{1 - c}
+ = \gamma^2 \alpha
+ = \kappa.
+ \]
+ \item Soundness: if $P^*$ convinces $V$ with probability $p > \frac{1}{2}$
+ then we can, in theory, run $P^*$ with (say) $c = 0$ and get $\gamma$.
+ We then \emph{rewind} $P^*$, give it $c = 1$, and get $\gamma \beta$,
+ from which we compute $\beta$. This works with probability at least $2
+ (p - \frac{1}{2})$.
+ \end{itemize}
+\end{slide}
+
+This statement could use some proof.
+
+Firstly, consider a simple abstract game. There is a secret table, with two
+columns and $r$ rows. Exactly $n$ of the entries in the table contain $1$;
+the remaining entries contain zero. We have to pick a row, and you win if
+both entries in that row contain $1$.
+
+Clearly, if $n \le r$, it's possible to arrange the ones so that each is in a
+different row. If we're playing against someone unscrupulous, we are
+guaranteed to lose.
+
+On the other hand, if $n > r$, there must be a winning row, because there are
+too many ones to fit otherwise. In fact, there must be at least $n - r$
+winning rows. So our probability of winning must be at least $(n - r)/r$.
+
+Apply this to the problem of our dishonest prover. The `table's rows are
+indexed by all the possible inputs to the prover -- the value $\alpha$ and
+its own internal coin tosses. (We only consider provers running in bounded
+time, so these are finite.) The columns are indexed by our coin $c$. We
+know that $2 r p$ of the entries contain $1$. Hence, the probability that we
+win is at least
+\[ \frac{2 r p - r}{r} = 2 \biggl( p - \frac{1}{2} \biggr). \]
+
+\begin{slide}
+ \head{Zero-knowledge \seq: an example (cont.)}
+
+ We now show zero-knowledge, by describing a simulator.
+ \begin{enumerate}
+ \item Simulator chooses $\rho \inr \Z/n\Z$ and $b \inr \Bin$ at random. It
+ runs $V$ and sends it $\kappa = \rho^2 \alpha^{1 - b}$.
+ \item Verifier runs, returns bit $c$.
+ \item If $b = c$ then simulator sends back $\rho$. If $b \ne c$ then
+ simulator gives up and tries again.
+ \item Verifier (presumably) accepts, because
+ \[ \rho^2 \alpha^{1 - c} = \rho^2 \alpha^{1 - b}. \]
+ \end{enumerate}
+ Simulator fails (and retries) with probability $\frac{1}{2}$, because $b$
+ is uniform and independent of $c$.
+\end{slide}
+
+\begin{slide}
+ \head{Aside on KCDSA}
+ \topic{KCDSA}
+
+ Let $(G, +)$ be a cyclic group of prime order $q$, with generator $P$. Let
+ $H\colon G \to \Bin^t$ and $H'\colon \Bin^* \to \Bin^t$ be cryptographic
+ hash functions.
+
+ Alice's private key is $a \inr \F_q$. Her public key is $A = a P$.
+ Suppose Alice wants to sign a message $m \in \Bin^*$.
+ \begin{enumerate}
+ \item She chooses a random $k \inr \F_q$. She computes $K = k P$, and $r =
+ H(K)$.
+ \item Computes $h = H'(m)$, and $u = (h \xor r) \bmod q$.
+ \item Finally, she computes $s = (k - u)/a$.
+ \end{enumerate}
+ Her signature on $m$ is $\sigma = (r, s)$.
+
+ To verify $(r, s)$, Bob can compute $h = H'(m)$; he checks that
+ \[ r = H(s A + u P). \]
+ Note that $s a = k - u$, so $s a + u = k$. Hence $s A + u P = (s
+ a + u) P = k P = K$.
+\end{slide}
+
+\begin{slide}
+ \head{Zero-knowledge \seq: KCDSA as identification protocol}
+
+ As before, let $(G, +)$ be a cyclic group, generated by $P$, of prime order
+ $q$, and let $H\colon G \to \Bin^t$ be a cryptographic hash function.
+ \begin{protocol}
+ Alice & Bob \\
+ (private key $a \inr \F_q$) & (public key $A = a P$) \\[\medskipamount]
+ $k \getsr \F_q$; $K \gets k P$; \\
+ \send{->}{r = H(K)}
+ \send{<-}{m \inr \Bin^{t}}
+ $u \gets (r \xor m) \bmod q$; & $u \gets (r \xor m) \bmod q$; \\
+ \send{->}{s = (k - u)/a}
+ & Check: $r = H(s A + u P)$
+ \end{protocol}
+\end{slide}
+
+\begin{slide}
+ \head{Zero-knowledge \seq: KCDSA as identification protocol (cont.)}
+
+ \begin{itemize}
+ \item Completeness: as for the KCDSA signature scheme.
+ \item Soundness: suppose some prover manages to impersonate Alice with
+ probability $\epsilon > 2^{-t}$. We choose some $m$ and send it to the
+ prover, getting $s$. Now we \emph{rewind} the prover and send it some
+ $m' \ne m$. With probability at least $\epsilon(\epsilon - 2^{-t})$, we
+ expect it to give us a valid $s'$. Let $u' = (m' \xor r) \bmod q$.
+ \begin{itemize}
+ \item If $s A + u P \ne s' A + u' P$ then we have a hash collision. So
+ assume $s a + u = s' a + u'$.
+ \item We chose $m \ne m'$, so $u = r \xor m \ne r \xor m' = u'$. We'd
+ notice if $a = 0$, so $s \ne s'$.
+ \item Therefore, we can derive
+ \[ a = \frac{u' - u}{s - s'}. \]
+ \end{itemize}
+ So our fake prover can compute discrete logs.
+ \end{itemize}
+\end{slide}
+It's worth proving the probability given above. I use the approach of
+K\"asper, Laur and Lipmaa [KLL06].
+
+Let's consider first a simple setting. Let $U$ and $V$ be two finite sets,
+and let $G \subseteq U \times V$ be a set of `good pairs'. Suppose that $|G|
+= \epsilon |U| |V|$, i.e.,
+\[ \Pr[(u, v) \getsr U \times V : (u, v) \in G] = \epsilon. \]
+We now perform the following simple experiment. Choose $u$ and $v$ uniformly
+at random from $U$ and $V$ respectively. If $(u, v) \notin G$, then give up.
+Otherwise, choose $v' \inr V$. If $(u, v') \in G$ and $v' \ne v$ then we
+win. What's the probability that we win?
+
+Some more notation would be handy. For each $u \in U$, let $G_u = \{\, v \in
+V \mid (u, v) \in G \,\}$, and $n_u = |G_u|$. Then
+\[ |G| = \sum_{u \in U} n_u. \]
+Suppose we're given $(u, v) \inr G$, and $v' \inr V$. We want $\Pr[v' \in
+G_u \setminus \{v\}]$. Well,
+\begin{eqnarray*}[rl]
+ \Pr[v' \in G_u \setminus \{v\}]
+ & = \sum_{u^* \in U} \Pr[v' \in G_{u^*} \setminus \{v\}] \Pr[u = u^*] \\
+ & \ge \sum_{u^* \in U} \frac{n_{u^*} - 1}{|V|} \frac{n_{u^*}}{|G|} \\
+ & = \sum_{u^* \in U} \frac{n_{u^*}^2 - n_{u^*}}{|V| |G|}
+\end{eqnarray*}
+
+This is unpleasant, but the following lemma will sort us out.
+\begin{lemma}
+ Let $s_0$, $s_1$, \dots, $s_{n-1}$ be given, such that $\sum_{0 \le i < n}
+ s_i = t$. Then $\sum_{0 \le i < n} s_i^2$ is minimum when $s_i = t/n$ for
+ all $0 \le i < n$.
+\end{lemma}
+\begin{proof}
+ The proof is by induction. If $n = 1$, the lemma is obviously true. Let
+ $s_i$ be given for $0 \le i \le n$, so that $\sum_i s_i = t$; we
+ claim that $\sum_i s_i^2$ is minimum when $s_i = t/(n + 1)$ for all $i$.
+
+ Without loss of generality, suppose that $s_n \ge s_i$ for $0 \le i < n$.
+ Using the induction hypothesis, $\sum_{0\le i<n} s_i^2$ is minimum when
+ $s_i = (t - s_n)/n$ for all $i < n$. Let $s = s_n$. Then
+ \begin{eqnarray*}[rl]
+ \sum_{0 \le i \le n} s_i^2
+ & = s^2 + \sum_{0 \le i < n} \biggl( \frac{t - s}{n} \biggr)^2\\
+ & = s^2 + n \biggl( \frac{t^2 - 2 s t + s^2}{n^2} \biggr) \\
+ & = \frac{1}{n} \bigl(t^2 - 2 s t + s^2 (1 + n)\bigr).
+ \end{eqnarray*}
+ This is minimum when
+ \[ \frac{\d}{\d s} \bigl( t^2 - 2 s t + s^2 (1 + n) \bigr) =
+ 2 s (1 + n) - 2 t = 0 \]
+ i.e., when
+ \[ s = \frac{t}{n + 1} \]
+ as claimed.
+\end{proof}
+
+Recall that $\sum_{u^*} n_{u^*} = |G|$. The lemma tells us that
+\[ \sum_{u^* \in U} n_{u^*}^2 \ge \frac{|G|^2}{|U|}. \]
+Returning to our calculation, then:
+\[ \Pr[v' \in G_u \setminus \{v\}]
+ \ge \sum_{u^* \in U} \frac{n_{u^*}^2 - n_{u^*}}{|V| |G|} \\
+ \ge \frac{|G|}{|U| |V|} - \frac{1}{|V|}\\
+ = \epsilon - \frac{1}{|V|}.
+\]
+
+Now we have to apply this to the KCDSA impersonation problem. Let $U$ be the
+set of choices of public key $A$, adversary's random decisions, and the
+answers to the adversary's random-oracle queries. This is finite, since the
+running time of the adversary is bounded. Let $V = \Bin^t$ be the space of
+our possible challenges. Let $G$ be the set of `good' pairs, where the
+adversary successfully outputs a forgery. The probability that the adversary
+impersonates the first time is $\epsilon$. If this happens, we rewind and
+give a different response. Since we hold everything else fixed, the
+adversary's $r$ is the same. We choose a new $m' \ne m$ and give it to the
+adversary. Finally, the analysis we did above tells us that the probability
+that the adversary successfully forges again is at least $\epsilon - 2^{-t}$.
+The stated result follows.
+
+\begin{slide}
+ \head{Zero-knowledge \seq: KCDSA as identification protocol (cont.)}
+
+ \begin{itemize}
+ \item Zero-knowledge: we need a simulator. We work in the random oracle
+ model. The simulator is allowed to make up bits of the random oracle.
+ \begin{enumerate}
+ \item Simulator chooses $r \inr \Bin^t$ at random, and gives $r$ to the
+ verifier.
+ \item Simulator runs verifier, and is given $m$.
+ \item Simulator computes $u = (r \xor m) \bmod q$, and chooses $s \inr
+ \F_q$ at random.
+ \item If verifier has already queried $H(s A + u P)$, then we abort and
+ try again. Otherwise, it arranges that $H(s A + u P) = r$.
+ \end{enumerate}
+ If the verifier makes $n$ random oracle queries, the failure only occurs
+ with probability $n/q$.
+ \end{itemize}
+\end{slide}
+
+
+\begin{slide}
+ \head{Zero-knowledge \seq: complexity theory and ZK}
+
+ A \emph{language} $L \subseteq \Bin^*$ is a set of bit-strings.
+
+ A \emph{polynomial-time Turing machine} is a machine for which there is a
+ polynomial $p(\cdot)$ such that, on any input $x \in \Bin^*$, it always
+ halts within $p(|x|)$ steps.
+
+ A language $L \in \mathbf{P}$ if there is a polynomial-time Turing machine
+ $M$ such that $M(x) = 1$ iff $x \in L$.
+
+ A language $L \in \mathbf{NP}$ if there is a language $L' \in \mathbf{P}$
+ and, for any $x$, a $w$ such that $|w| \le \poly(|x|)$ and $(x, w) \in L'$
+ iff $x \in L$. The $w$ is a \emph{witness}.
+
+ Zero-knowledge proof systems exist for all languages in $\mathbf{NP}$.
+ Example statements:
+ \begin{itemize}
+ \item Two ciphertexts are encryptions of the same plaintext.
+ \item A ciphertext is the encryption of a given plaintext.
+ \end{itemize}
+\end{slide}
+
+\xcalways\subsection{Key-exchange security}\x
+
+\begin{slide}
+ \resetseq
+ \head{Key-exchange \seq: introduction}
+ \topic{introduction}
+
+ Key-exchange seems very difficult. Many protocols have subtle weaknesses.
+ It would be nice to `prove' security. There are two approaches:
+ \begin{enumerate}
+ \item Express messages and beliefs of the participants in a formal logic,
+ and use inference rules to show that the participants believe the right
+ things at the end [BAN88, Jac97]. This works if the inference rules are
+ correct, and the protocol is translated correctly.
+ \item Define a model for communication and a notion of security, and bound
+ the probability that any adversary within the model can defeat the
+ security of the protocol [BR93, BR95, BJM97, BM97, BCK98, Sho99, CK01].
+ OK if the security notion is right, and the adversary considered is
+ sufficiently powerful.
+ \end{enumerate}
+ Second approach can give useful, quantitative results, if we believe the
+ model is good.
+\end{slide}
+
+\begin{slide}
+ \head{Key-exchange \seq: introduction}
+
+ We describe the model of Canetti and Krawczyk [CK01] and their notion of
+ \emph{SK-security}.
+ \begin{itemize}
+ \item Earlier models were either too restrictive ([BR93] \emph{matching
+ conversations}, [Sho99] termination requirement) or flawed ([BR93]
+ again).
+ \item\relax [CK01] shows that SK-security suffices for implementing
+ \emph{secure channels} in a plausible model.
+ \item\relax [CK02] shows that SK-security is equivalent to a (slightly
+ relaxed) notion of security (`implementation of ideal functionality') in
+ Canetti's \emph{Universal Composition} (UC) framework [Can01], and can be
+ used to implement UC-secure channels.
+ \end{itemize}
+\end{slide}
+
+\begin{slide}
+ \head{Key-exchange \seq: model -- basics}
+ \topic{model}
+
+ There are two kinds of \emph{participants}:
+ \begin{itemize}
+ \item a number of \emph{parties}, $P_0$, $P_1$, \ldots, $P_{n-1}$, and
+ \item an \emph{adversary}.
+ \end{itemize}
+ A protocol $\Pi$ defines an \emph{initialization function} $I$, which
+ outputs:
+ \begin{itemize}
+ \item a common input $c$ for all participants (all parties and the
+ adversary), and
+ \item a private input $x_i$ for party $P_i$.
+ \end{itemize}
+\end{slide}
+
+\begin{slide}
+ \head{Key-exchange \seq: model -- sessions}
+
+ Protocol execution occurs in \emph{sessions}. A session $S = (P_i, P_j,
+ s)$ specifies
+ \begin{itemize}
+ \item an \emph{owner} $P_i$ -- the session knows $P_i$'s secret input
+ $x_i$;
+ \item a \emph{partner} $P_j$; and
+ \item a \emph{session-id} $s \in \Bin^{\ell_S}$.
+ \end{itemize}
+ The adversary can create sessions at will, by specifying $(i, j, s)$.
+
+ Restriction: sessions must be \emph{distinct}.
+
+ A pair of sessions $S = (P_i, P_j, s)$ and $T = (P_j, P_i, s)$ are said to
+ be \emph{matching}.
+\end{slide}
+
+\begin{slide}
+ \head{Key-exchange \seq: model -- communication}
+
+ When a session is created, it may output a message to be sent to its
+ matching session.
+
+ The \emph{adversary} is responsible for delivering all messages.
+
+ When a message is delivered to a session, the session may output
+ another message. It may also \emph{terminate}, either successfully
+ (outputting a session-key $K$ -- \emph{completing}), or unsuccessfully
+ (\emph{aborting}).
+
+ A session which has been created and has not terminated is \emph{running}.
+\end{slide}
+
+\begin{slide}
+ \head{Key-exchange \seq: model -- the adversary}
+
+ As well as creating sessions and delivering messages, the adversary has
+ other capabilities.
+ \begin{itemize}
+ \item It can \emph{expire} a completed session, wiping out any record of
+ its key.
+ \item It can \emph{reveal the state} of a running session.
+ \item It can \emph{reveal the key} of a completed, unexpired session.
+ \item It can \emph{corrupt} a party, learning all of its internal state,
+ including its long-term secret, the states of its running sessions, etc.
+ \end{itemize}
+ A session is \emph{locally exposed} if its state or key has been revealed,
+ or if its owner has been corrupted. A session is \emph{exposed} if it is
+ locally exposed, or it has a matching session which is exposed.
+\end{slide}
+
+\begin{slide}
+ \head{Key-exchange \seq: model -- session life cycle}
+
+ \[ \def\node#1{%
+ \vbox{%
+ \def\\{\small\crcr}%
+ \halign{\hfil\ignorespaces##\unskip\hfil\cr#1\crcr}%
+ }}
+ \begin{xy}
+ \xygraph{
+ *+[F]\node{Create session $(i, j, s)$}
+ :[d]
+ *+[F:<6pt>]\node{\bf Running\\(send message $\mu$)} ="run"
+ [d]
+ *+[F]\node{Deliver message $\mu'$} ="msg"
+ "msg" :[dll]
+ *+[F:<6pt>]\node{\bf Complete\\(output key $K$)}
+ :[d]
+ *+[F]\node{Expire session}
+ :[d]
+ *+[F:<6pt>]\node{\bf Expired}
+ "msg" :[drr]
+ *+[F:<6pt>]\node{\bf Aborted\\(no output)}
+ }
+ \ar @{->} @<1em> "run"; "msg"
+ \ar @{->} @<1em> "msg"; "run"
+ \end{xy} \]
+\end{slide}
+
+\begin{slide}
+ \head{Key-exchange \seq: model -- SK-security}
+
+ We play a game with the adversary. At the beginning, we choose a `hidden
+ bit' $b^* \inr \Bin$.
+
+ The adversary can choose a \emph{challenge session} -- any completed,
+ unexposed session. If $b^* = 1$, we give the adversary the session's key,
+ just as if it revealed it. If $b^* = 0$, we give the adversary a
+ freshly-generated random key.
+
+ The adversary may continue to create sessions, deliver messages, etc., but
+ may not \emph{expose} the challenge session. When it finishes, the
+ adversary outputs a \emph{guess} $b$.
+
+ A key-exchange protocol $\Pi$ is \emph{SK-secure} if no adversary can
+ \begin{itemize}
+ \item with non-negligible probability, cause two matching, unexposed
+ sessions to \emph{accept}, but output different keys, or
+ \item with probability non-negligibly greater than $\frac{1}{2}$, guess the
+ \emph{hidden bit}, i.e., $b = b^*$.
+ \end{itemize}
+\end{slide}
+
+\begin{slide}
+ \head{Key-exchange \seq: deniability}
+ \topic{deniability}
+
+ Many key-exchange protocols use digital signatures. For example:
+ \begin{protocol}
+ Alice & Bob \\
+ (Private key $\id{sk}_A$, public key $\id{pk}_A$) &
+ (Private key $\id{sk}_B$, public key $\id{pk}_B$) \\[\medskipamount]
+ $r_A \getsr \Nupto{|G|}$; & $r_B \getsr \Nupto{|G|}$; \\
+ $R_A \gets r_A P$; & $R_B \gets r_B P$; \\
+ \send{->}{R_A}
+ \send{<-}{R_B}
+ $\sigma_A \gets
+ S(\id{sk}_A, \texttt{Alice}, \texttt{Bob}, s, R_A, R_B)$; &
+ \llap{$\sigma_B \gets
+ S(\id{sk}_B, \texttt{Bob}, \texttt{Alice}, s, R_B, R_A)$;} \\
+ \send{->}{\sigma_A}
+ \send{<-}{\sigma_B}
+ Check: $V(\id{pk}_B, \sigma_B, \texttt{Bob}, \texttt{Alice}, s, R_B,
+ R_A)$
+ \\ &
+ \llap{Check: $V(\id{pk}_A, \sigma_A,
+ \texttt{Alice}, \texttt{Bob}, s, R_A, R_B)$}
+ \end{protocol}
+\end{slide}
+
+\begin{slide}
+ \head{Key-exchange \seq: deniability (cont.)}
+
+ Signatures have the property than anyone can verify them. So we could
+ later prove, to a judge maybe, that Alice and Bob were communicating.
+
+ This is \emph{bad} if you don't want to leave evidence that you were
+ communicating with someone.
+
+ A key-exchange protocol is \emph{deniable} if there's a simulator which
+ produces fake transcripts indistinguishable from real conversations
+ \begin{itemize}
+ \item \emph{without} being given the private keys of the parties involved,
+ and
+ \item even if some of the participants are corrupt.
+ \end{itemize}
+\end{slide}
+
+\endinput
+
+%%% Local Variables:
+%%% mode: latex
+%%% TeX-master: "wrslides"
+%%% End: