\xcalways\section{Message authentication codes}\x
\xcalways\subsection{Definitions and security notions}\x
\begin{slide}
\head{Definition of a MAC}
A MAC is a pair of algorithms $\mathcal{M} = (T, V)$:
\begin{itemize}
\item The \emph{tagging} algorithm $T\colon \{0, 1\}^k \times \{0, 1\}^*
\to \{0, 1\}^L$ is a probabilistic algorithm which, given a key and a
string, returns a \emph{tag}. We write $\tau \in T_K(m)$.
\item The \emph{verification} algorithm $V\colon \{0, 1\}^k \times \{0,
1\}^* \times \{0, 1\}^L \to \{0, 1\}$ is a deterministic algorithm which,
given a key, a message and a tag, returns $1$ if the tag is valid, or $0$
otherwise; i.e., we require that $V_K(m, \tau) = 1 \iff \tau \in T_K(m)$.
\end{itemize}
The basic idea is that it's hard for an adversary to \emph{forge} a tag for
a message it's not seen before.
\end{slide}
\begin{slide}
\topic{informal security notion}
\resetseq
\head{Strong MACs, \seq: informal security notion}
Our standard notion of security for MACs is \emph{strong unforgeability
against chosen message attack}, or SUF-CMA, for short
\cite{Abdalla:2001:DHIES, Bellare:2000:AER}. Let $A$ be an adversary which
is attempting to attack the MAC $\mathcal{M} = (T, V)$.
We play a game with the adversary. We invent a key $K \inr \{0, 1\}^k$.
The adversary \emph{wins} if, after requesting tags for some messages of
its choice, and checking some guesses, it can return a pair $(m, \tau)$
such that:
\begin{itemize}
\item the tag is correct, i.e., $V_K(m, \tau) = 1$; and
\item the tag is not one returned by the adversary's tagging oracle for
that message.
\end{itemize}
\end{slide}
\begin{slide}
\topic{strong MACs}
\head{Strong MACs, \seq: the experiment}
We perform the following experiment with the adversary.
\begin{program}
Experiment $\Expt{suf-cma}{\mathcal{M}}(A)$: \+ \\
$K \getsr \{0, 1\}^k$; \\
$\Xid{T}{list} \gets \emptyset$; \\
$(m, \tau) \gets A^{\id{tag}(\cdot), V_K(\cdot, \cdot)}$; \\
\IF $V_K(m, \tau) \land (m, \tau) \notin \Xid{T}{list}$
\THEN \RETURN $1$; \\
\ELSE \RETURN $0$; \- \\[\smallskipamount]
Oracle $\id{tag}(m)$: \+ \\
$\tau \gets T_K(m)$; \\
$\Xid{T}{list} \gets \Xid{T}{list} \cup \{(m, \tau)\}$; \\
\RETURN $\tau$;
\end{program}
\end{slide}
\begin{slide}
\head{Strong MACs, \seq: wrapping up the notation}
The \emph{success probability} of an adversary $A$ against the MAC
$\mathcal{M}$ in the sense of SUF-CMA is
\[ \Succ{suf-cma}{\mathcal{M}}(A) =
\Pr[\Expt{suf-cma}{\mathcal{M}}(A) = 1]. \]%
The \emph{insecurity} of a MAC $\mathcal{M}$ in the SUF-CMA sense is then
\[ \InSec{suf-cma}(\mathcal{M}; t, q_T, q_V) =
\max_A \Succ{suf-cma}{\mathcal{M}}(A) \]%
where the maximum is taken over all adversaries $A$ running in time $t$ and
making $q_T$ tagging and $q_V$ verification queries.
If $\InSec{suf-cma}(\mathcal{M}; t, q_T, q_V) \le \epsilon$ then we say
that $\mathcal{M}$ is a \emph{$(t, q_T, q_V, \epsilon)$-secure MAC in the
SUF-CMA sense}.
\end{slide}
\begin{slide}
\topic{other notions}
\head{Other security notions for MACs}
There are a number of weaker security notions in use:
\begin{itemize}
\item The definition of a \emph{weak MAC} restricts the adversary from
returning any message with which it queried its tagging oracle. The
strong MAC definition considers this OK, as long as the tag is different
from any returned by the oracle for that message.
\item Some definitions of MACs don't equip the adversary with a
verification oracle. Our definition considers these to be $(t, q_T, 0,
\epsilon)$-secure.
\item You can have a MAC with a bounded domain $\{0, 1\}^L$ rather than
$\{0, 1\}^*$ as shown previously.
\item Further quantification is possible, e.g., counting the total number
of bytes queried, or the maximum size of a tagging query.
\end{itemize}
\end{slide}
\xcalways\subsection{Basic results}\x
\begin{slide}
\topic{PRFs are MACs}
\resetseq
\head{PRFs are MACs, \seq}
If $F_K\colon \{0, 1\}^* \to \{0, 1\}^L$ is a $(t, q, \epsilon)$-secure
PRF, then it's also a $(t', q_T, q_V, \epsilon')$-secure MAC, with $q = q_T
+ q_V + 1$, $t = t' + O(q)$, and $\epsilon' \le \epsilon + (q_V + 1)
2^{-L}$. The constant hidden by the $O(\cdot)$ is small and depends on the
model of computation.
Suppose $A$ can break $F$ used as a MAC in time $t'$ and with $q_T$ and
$q_V$ queries to its tagging and verification oracles respectively.
If we can construct an adversary which distinguishes $F_K$ from a random
function using $A$ as an essential component, then we can prove the
result.
\end{slide}
\begin{slide}
\head{PRFs are MACs, \seq: the distinguisher}
\begin{program}
Distinguisher $D^{F(\cdot)}$: \+ \\
$\Xid{T}{list} \gets \emptyset$; \\
$(m, \tau) \gets A^{T_F(\cdot), V_F(\cdot, \cdot)}$; \\
\IF $m \notin \Xid{T}{list} \land \tau = F(m)$
\THEN \RETURN $1$; \\
\ELSE \RETURN $0$; \- \\[\smallskipamount]
Oracle $T_F(m)$: \+ \\
$\Xid{T}{list} \gets \Xid{T}{list} \cup \{m\}$; \\
\RETURN $F(m)$; \- \\[\smallskipamount]
Oracle $V_F(m, \tau)$: \+ \\
\IF $\tau = F(m)$ \THEN \RETURN $1$; \\
\ELSE \RETURN $0$;
\end{program}
\end{slide}
\begin{slide}
\head{PRFs are MACs, \seq: wrapping up}
The distinguisher simulates the tagging and verification oracles for the
MAC forger, using its supplied oracle. If the forger succeeds, then the
distinguisher returns 1; otherwise it returns zero.
The probability that the distinguisher returns 1 given an instance $F_K$ of
the PRF is precisely $\Succ{suf-cma}{F}(A)$.
The probability that it returns 1 given a random function depends on what
$A$ does when it's given a random function. But we know that the
probability of it successfully guessing the MAC for a message for which it
didn't query $T$ can be at most $(q_V + 1) 2^{-L}$. So
\[ \Adv{prf}{F}(D) \ge \Succ{suf-cma}{F}(A) - (q_V + 1) 2^{-L}. \]
Let $q = q_T + q_V + 1$; then counting, rearranging, maximizing yields
\[ \InSec{suf-cma}(F; t, q_T, q_V) \le
\InSec{prf}(F; t + O(q), q) + (q_V + 1)2^{-L}. \]%
\end{slide}
\begin{slide}
\head{PRFs are MACs, \seq: MACs aren't PRFs}
The converse of our result is not true. Suppose $\mathcal{M} = (T, V)$ is
a deterministic MAC. Choose some integer $n$. Then define $\mathcal{M}' =
(T', V')$, as follows:
\[ T'_K(x) = 0^n \cat T_K(x); \qquad
V'_K(x, \tau) = \begin{cases}
1 & if $T'_K(x) = \tau$ \\
0 & otherwise
\end{cases}.
\]
$T'$ is obviously not a PRF: an adversary checking for the string of $n$
zero bits on the output will succeed with advantage $1 - 2^{-qn}$.
However, $\mathcal{M}'$ is a secure MAC. Suppose $A'$ attacks
$\mathcal{M}'$.
\begin{program}
Adversary $A^{T(\cdot), V(\cdot, \cdot)}$: \+ \\
$(m, \tau') \gets A'^{\id{tag}(\cdot), \id{verify}(\cdot, \cdot)}$; \\
\PARSE $\tau'$ \AS $n\colon z, \tau$; \\
\RETURN $(m, \tau)$;
\next
Oracle $\id{tag}(m)$: \+ \\
\RETURN $0^n \cat T(m)$; \- \\[\smallskipamount]
Oracle $\id{verify}(m, \tau')$: \+ \\
\PARSE $\tau'$ \AS $n\colon z, \tau$; \\
\IF $z \ne 0^n$ \THEN \RETURN $0$; \\
\ELSE \RETURN $V(m, \tau)$;
\end{program}
\end{slide}
\begin{exercise}
\begin{parenum}
\item Suppose that $F\colon \{0, 1\}^k \times \{0, 1\}^* \to \{0, 1\}^L$ is
a $(t, q, \epsilon)$-secure PRF. Let $T^{(\ell)}_K(x)$ be the leftmost
$\ell$~bits of $F_K(x)$ for $\ell \le L$. Demonstrate the security of
$T^{(\ell)}(\cdot)$ as a MAC.
\item Discuss the merits of truncating MAC tags in practical situations.
\end{parenum}
\answer%
\begin{parenum}
\item This follows exactly the same pattern as the `PRFs are MACs' proof in
the slides: $T^{(\ell)}$ is a $(t, q_T, q_V, \epsilon + (q_V +
1)2^{-\ell})$-secure MAC, where $q_T + q_V = q$.
\item Obviously, truncating tags saves bandwidth. There is a trade-off
between tag size and security, as the $2^{-\ell}$ term shows. Note that
this term represents the probability of the adversary guessing the
correct tag when it's actually attacking a random function, and
therefore, when this occurs, the adversary has one `by accident'.
Including sequence numbers in packets ensures that replay of accidental
forgery (or honest messages) will be detected. Hence, for some
applications, setting $\ell = 32$ or even lower is of no particular harm.
Perhaps more significantly, if the PRF isn't actually as good as it ought
to be, and (say) leaks key material very slowly, then truncating its
output can actually improve security.
\end{parenum}
\end{exercise}
\begin{exercise}
A traditional MAC construction is the \emph{CBC-MAC}: it works like this.
Suppose $F\colon \{0, 1\}^k \times \{0, 1\}^l \to \{0, 1\}^l$ is a PRF.
Split a message~$x$ into $l$-bit blocks $x_0, x_1, \ldots, x_{n-1}$
(applying some sort of padding if you need to). Then we define the CBC-MAC
as $F^{(n)}_K(x)$, where
\[ F^{(1)}_K(x) = F_K(x);
\qquad F^{(i+1)}_K(x) = F_K(x_i \xor F^{(i)}_K(x)). \]%
In \cite{Bellare:1994:SCB}, Mihir Bellare, Joe Kilian and Phil Rogaway
introduced the world to the concrete-security approach and, almost
incidentally, proved that the CBC-MAC is a PRF (and therefore a MAC) for
any \emph{fixed sized} input.
Show that the CBC-MAC is easily broken if you're allowed to choose messages
of different lengths.
\answer%
Request tags $\tau$ for the message $x = x_0, x_1, \ldots, x_{n-1}$ and
$\tau'$ for $x' = x'_0 \xor \tau, x'_1, \ldots, x'_{n'-1}$. Let $y = x_0,
x_1, \ldots, x_{n-1}, x'_0 , x'_1, \ldots, x'_{n'-1}$. Note that
$F^{(n)}_K(y) = \tau$, and $F^{(n+1)}_K(y) = F_K(x'_0 \xor F^{(n)}_K(x)) =
F^{(1)}_K(x')$. Hence, $F^{(n+n')}_K(y) = \tau'$, and we have a correct
forgery.
\end{exercise}
\begin{slide}
\topic{verification oracles}
\head{Verification oracles}
We can leave verification oracles out of our analysis. This simplifies
analysis, but produces slightly less satisfactory quantitative results.
Suppose that $\mathcal{M} = (T, V)$ is a $(t, q_T, 0, \epsilon)$-secure
MAC. Then, for any $q_V$,
\[ \InSec{suf-cma}(\mathcal{M}; t, q_T, q_V) \le
(q_V + 1)\InSec{suf-cma}(\mathcal{M}; t, q_T, 0). \]%
This bound is \emph{tight}: it's not possible for a general result like
this to do better.
\end{slide}
\begin{proof}
Consider an adversary $A$ which uses $q_V$ verification queries. We assume
the following properties of $A$'s behaviour:
\begin{itemize}
\item No verification query contains a message and a tag for that message
received from the tagging oracle.
\item If a verification query succeeds, the message is not given in a query
to the tagging oracle.
\item Once a verification query succeeds, all subsequent verification
queries also succeed and the adversary returns a correct forgery (e.g.,
by simply repeating the successful query).
\end{itemize}
It's clear that any adversary can be transformed into one which has these
properties and succeeds with probability at least as high.
Let $V$ be the event that at least one verification query succeeds, and let
$S$ be the event that $A$ succeeds. Then
\begin{eqnarray*}[rl]
\Succ{suf-cma}{\mathcal{M}}(A)
&= \Pr[S \mid V] \Pr[V] + \Pr[S \mid \lnot V] \Pr[\lnot V] \\
&= \Pr[V] + \Pr[S \mid \lnot V] \Pr[\lnot V].
\end{eqnarray*}
Now consider these two adversaries:
\begin{program}
Adversary $A'^{T(\cdot), V(\cdot, \cdot)}$: \+ \\
$i \gets 0$; \\
$(m, \tau) \gets A^{T(\cdot), \Xid{V}{sim}(\cdot, \cdot)}$; \\
\RETURN $(m^*, \tau^*)$; \- \\[\smallskipamount]
Oracle $\Xid{V}{sim}(m, \tau)$: \+ \\
$i \gets i + 1$; \\
\IF $i < q_V$ \THEN \RETURN $V(m, \tau)$; \\
$(m^*, \tau^*) \gets (m, \tau)$; \\
\RETURN $1$;
\next
Adversary $Z^{T(\cdot), V(\cdot, \cdot)}$: \+ \\
\\
$(m, \tau) \gets A^{T(\cdot), \Xid{V}{zero}(\cdot, \cdot)}$; \\
\RETURN $(m, \tau)$; \- \\[\smallskipamount]
Oracle $\Xid{V}{zero}(m, \tau)$: \+ \\
\RETURN $0$;
\end{program}
The adversary $A'$ uses $q_V - 1$ verification queries. It ignores the
output of $A$, returning instead $A$'s $q_V$-th verification query. Thus,
by our assumptions on the behaviour of $A$, we have that $A'$ succeeds
precisely whenever one of $A$'s verification queries succeeds. Thus:
\[ \Pr[V] = \Succ{suf-cma}{\mathcal{M}}(A')
\le \InSec{suf-cma}(\mathcal{M}; t, q_T, q_V - 1). \]%
Similarly, the adversary $Z$ succeeds with precisely the same probability
as $A$, given that all of its verification queries failed; i.e.,
\[ \Pr[S \mid \lnot V] \Pr[\lnot V] = \Succ{suf-cma}{\mathcal{M}}(Z)
\le \InSec{suf-cma}(\mathcal{M}; t, q_T, 0). \]%
Because $A$ was chosen arbitrarily, we can maximize:
\begin{eqnarray*}[rl]
\InSec{suf-cma}(\mathcal{M}; t, q_T, q_V)
& \le \InSec{suf-cma}(\mathcal{M}; t, q_T, q_V - 1) +
\InSec{suf-cma}(\mathcal{M}; t, q_T, 0) \\
& \le (q_V + 1)\InSec{suf-cma}(\mathcal{M}; t, q_T, 0)
\end{eqnarray*}
as required.
To show that the bound is tight, consider a random function $F$ used as a
MAC, with $L$-bit output. Then we claim that $\InSec{suf-cma}(F; t, q_T,
q_V) = (q_V + 1)/2^L$. To save typing, let $e_q = \InSec{suf-cma}(F; t,
q_T, q)$. We prove the claim by induction on $q$. Firstly, note that if
$q = 0$ then necessarily $e_q = 1/2^L$. Now suppose that $e_{q-1} =
q/2^L$. We're now allowed an extra query, so rather than returning the
result, we feed it to the verification oracle. If it answers `yes' then we
return it; otherwise we guess randomly from the remaining $2^L - q$
possibilities. Now
\begin{eqnarray*}[rl]
e_q &= e_{q-1} + \frac{1 - e_{q-1}}{2^L - q} \\
&= \frac{q}{2^L} + \frac{2^L - q}{2^L} \cdot \frac{1}{2^L - q} \\
&= \frac{q + 1}{2^L}
\end{eqnarray*}
as claimed.
\end{proof}
\xcalways\subsection{The HMAC construction}\x
\begin{slide}
\resetseq
\head{The HMAC construction \cite{Bellare:1996:KHF}, \seq: motivation}
It ought to be possible to construct a decent MAC using a hash function.
Many attempts have failed, however. For example, these constructions are
weak if used with standard one-pass Merkle-Damg\aa{}rd iterated hashes.
\begin{itemize}
\item Secret prefix: $T_K(m) = H(K \cat m)$. Given $H(K \cat m)$, it's
easy to compute $H(K \cat m \cat p \cat m')$ for a padding string $p$ and
arbitrary suffix $m'$.
\item Secret suffix: $T_K(m) = H(m \cat K)$. Finding a collision $H(m) =
H(m')$ yields $H(m \cat K) = H(m' \cat K)$. We saw earlier that
adversaries which know collisions \emph{exist} even if we don't know how
to describe them.
\end{itemize}
It would be nice to have a construction whose security was provably related
to some plausible property of the underlying hash function.
\end{slide}
\begin{slide}
\head{The HMAC construction, \seq: definition of NMAC}
Let $H\colon \{0, 1\}^* \to \{0, 1\}^k$ be an iterated hash, constructed
from the compression function $F\colon \{0, 1\}^k \times \{0, 1\}^L \to
\{0, 1\}^k$. We define a keyed version of $H$. Let $K \in \{0, 1\}^k$;
then we compute $H_K(x)$ as follows:
\begin{enumerate}
\item Pad and split $x$ into the $L$-bit blocks $x_0$, $x_1$, \ldots,
$x_{n-1}$ as before.
\item Set $I_0 = K$. Let $I_{i+1} = F(I_i \cat x_i)$ for $0 \le i < n$.
\item The result $H_K(x) = I_n$.
\end{enumerate}
The NMAC (nested-MAC) construction requires two independent $k$-bit keys
$K_0$ and $K_1$. The construction itself is simply:
\[ \Xid{T}{NMAC}^H_{K_0, K_1}(x) = H_{K_0}(H_{K_1}(x)). \]
NMAC is deterministic, so verification consists of computing the tag and
comparing.
\end{slide}
\begin{slide}
\head{The HMAC construction, \seq: security of NMAC}
Consider a function $F\colon \{0, 1\}^k \times \{0, 1\}^* \to \{0, 1\}^k$.
We say that $F$ is \emph{$(t, q, \epsilon)$-weakly collision resistant} if,
for any adversary $A$ constrained to run in time $t$ and permitted $q$
oracle queries,
\[ \Pr[K \getsr \{0, 1\}^k;
(x, y) \gets A^{F_K(\cdot)} :
x \ne y \land F_K(x) = F_K(y)] \le \epsilon \]%
If $H_K$ is a $(t, q_T, q_V, \epsilon)$-secure MAC on $k$-bit messages, and
moreover $(t, q_T + q_V, \epsilon')$-weakly collision resistant, then
$\Xid{T}{NMAC}^H$ is a $(t, q_T, q_V, \epsilon + \epsilon')$-secure MAC.
\end{slide}
\begin{slide}
\head{The HMAC construction, \seq: NMAC security proof}
Let $A$ be an adversary which forges a $\Xid{T}{NMAC}^H$ tag in time $t$,
using $q_T$ tagging queries and $q_V$ verification queries with probability
$\epsilon$. We construct an adversary $A'$ which forges an $H$-tag for a
$k$-bit message in essentially the same time.
\begin{program}
Adversary $A'^{T(\cdot), V(\cdot, \cdot)}$ \+ \\
$K \getsr \{0, 1\}^k$; \\
$(m, \tau) \gets A^{T(H_K(\cdot)), V(H_K(\cdot), \cdot)}$; \\
\RETURN $(H_K(m), \tau)$;
\end{program}
$A'$ might fail even though $A$ succeeded only if the message it returns,
$H_K(m)$, collides with one of its tagging queries. But since $H_K$ is
$(t, q_T + q_V, \epsilon')$-weakly collision resistant, this happens with
at most probability $\epsilon'$. Hence, $A'$ succeeds with probability at
least $\epsilon - \epsilon'$. Rearrangement yields the required result.
\end{slide}
\begin{slide}
\head{The HMAC construction, \seq: from NMAC to HMAC}
Implementing NMAC involves using strange initialization vectors and
generally messing about with your hash function. HMAC is an attempt to
capture the provable security properties using a plain ol' hash function.
Suppose $H$ is an iterated hash function with a $k$-bit output and $L$-bit
blocks (with $L \ge k$). We set $\id{ipad}$ to be the byte $\hex{36}$
repeated $L/8$ times, and $\id{opad}$ to be the byte $\hex{5C}$ repeated
$L/8$ times. Select a key $K$ of $L$ bits: if your key is shorter, pad it
by appending zero bits; if it's longer, hash it with $H$ and then pad.
The HMAC tagging function is then defined as
\[ \Xid{T}{HMAC}^H_K(m) =
H(K \xor \id{opad} \cat H(K \xor \id{ipad} \cat m)). \]%
\end{slide}
\begin{slide}
\head{The HMAC construction, \seq: comparison with NMAC}
Comparing the two constructions, we see that
\[ \Xid{T}{HMAC}^H_K =
\Xid{T}{NMAC}^{H'}_{F(I \cat K \xor \id{opad}),
F(I \cat K \xor \id{ipad})}. \]%
Here, $I$ is $H$'s initialization vector, $F$ is the compression function;
$H'$ denotes a keyed hash function that is `like' $H$ but performs padding
as if there were an extra initial block of message data for each message.
The proof of NMAC assumes that the two keys are random and independent.
This isn't the case in HMAC, but a little handwaving about pseudorandomness
of the compression function makes the problem go away.
\end{slide}
\begin{exercise}
Suppose that $F\colon \{0, 1\}^k \times \{0, 1\}^{t+\ell} \to \{0,
1\}^t$ is a PRF. Let $x \in \{0, 1\}^*$ be a message. We define the
function $H_K(x)$ as follows:
\begin{itemize}
\item Pad $x$ to a multiple of $\ell$ bits using some injective
mapping. Break the image of $x$ under this mapping into $\ell$-bit
blocks $x_0, x_1, \ldots, x_{n-1}$.
\item For $0 \le i \le n$, define $H^{(i)}_K(x)$ by
\[ H^{(0)}_K(x) = I; \qquad
H^{(i+1)}_K(x) = F_K(H^{(i)}_K(x) \cat x_i) \]%
where $I$ is some fixed $t$-bit string (e.g., $I = 0^t$).
\item Then set $H_K(x) = H^{(n)}_K(x)$.
\end{itemize}
We define two (deterministic) MACs $\mathcal{M}^i = (T^i, V^i)$ (for
$i \in \{0, 1\}$) using the $H_K$ construction. Verification in each
case consists of computing the tag and comparing to the one offered.
\begin{eqlines*}
T^0_K(x) = H_K(x); \qquad T^1_K(x) = H_K(x \cat K); \\
V^i_K(x, \tau) = \begin{cases}
1 & if $\tau = T^i_K(x)$ \\
0 & otherwise
\end{cases}.
\end{eqlines*}
Decide whether each of these constructions is secure. A full proof is
rather hard: an informal justification would be good.
\answer%
$\mathcal{M}^0$ is secure; $\mathcal{M}^1$ isn't, under the sole
assumption that $F$ is a PRF.
To see that $\mathcal{M}^0$ is secure, it suffices to show that $T^0$
is a PRF. This is actually quite involved. Given an adversary $A$
attacking $T^0$ as a PRF, we construct an adversary $B$ attacking $F$,
which simply computes $H$ as required, using the oracle supplied. To
complete the proof, we need to show a bound on the
information-theoretic security of $H$ when instantiated using a random
function $F$. For the sake of simplicity, we allow the adversary $A$
to query on \emph{padded} messages, rather than the raw unpadded
messages. We count the number $q'$ of individual message blocks.
As the game with $A$ progresses, we can construct a directed \emph{graph}
of the query results so far. We start with a node labelled $I$. When
processing an $H$-query, each time we compute $t' = F(t \cat x_i)$, we add
a node $t'$, and an edge $x_i$ from $t$ to $t'$. The `bad' event occurs
whenever we add an edge to a previously existing node. We must show,
firstly, that the adversary cannot distinguish $H$ from a random function
unless the bad event occurs; and, secondly, that the bad event doesn't
occur very often.
The latter is easier: our standard collision bound shows that the bad
event occurs during the game with probability at most $q'(q' - 1)/2^{t+1}$.
The former is trickier. This needs a lot more work to make it really
rigorous, but we show the idea. Assume that the bad event has not
occurred. Consider a query $x_0, x_1, \ldots, x_{n-1}$. If it's the same
as an earlier query, then $A$ learns nothing (because it could have
remembered the answer from last time). If it's a \emph{prefix} of some
earlier query, then the answer is the value of some internal node which
hasn't been revealed before; however, the value of that internal node was
chosen uniformly at random (we claim). Finally, if the query is not a
prefix of any previous query, then we add a new edge to our graph. If the
bad event doesn't occur, we must add a new node for the result, and the
value at that node will be uniformly random, because $F$ is a random
function being evaluated at a new point -- this is the only time we add new
nodes to the graph, justifying the claim made earlier.
At the end of all of this, we see that
\[ \InSec{prf}(T^0; t, q) \le
\InSec{prf}(F; t, q') + \frac{q'(q' - 1)}{2^{t+1}} \]%
and hence
\[ \InSec{suf-cma}(\mathcal{M}^0; t, q) \le
\InSec{prf}(F; t, q') + \frac{2 q'(q' - 1) + 1}{2^{t+1}}. \]%
Now we turn our attention to $T^1$. It's clear that we can't simulate
$T^1$ very easily using an oracle for $F$, since we don't know $K$
(and indeed there might not be a key $K$). The intuitive reason why
$T^1$ is insecure is that $F$ might leak useful information if
its input matches its key. This doesn't affect the strength of $F$ as
a PRF because you have to know the key before you can exploit this
leakage; but $T^1$ already knows the key, and this can be exploited to
break the MAC.
To show that this is insecure formally, let $F'$ be defined as
follows:
\[ F'_K(x) = \begin{cases}
K & if $x = p \cat K \cat q$ where
$|p| = t$ and $|q| = \ell - k$ \\
F_K(x) & otherwise
\end{cases}. \]%
We choose a simple injective padding scheme: if $x$ is a message then
we form $x' = x \cat 1 \cat 0^n$, where $0 \le n < \ell$ and $|x'|$ is
a multiple of $\ell$. If $T^1$ is instantiated with this PRF then it
is insecure as a MAC: submitting a tagging query for the empty string
$\emptystring$ reveals the key $K$, which can be used to construct a
forgery.
To complete the proof, we must show that $F'$ is a PRF. Let $A$ be an
adversary attacking $F'$. We consider a series of games; for each
$\G{i}$, let $S_i$ be the event that $A$ returns $1$ in that game.
Game~$\G0$ is the standard attack game with $A$ given an oracle for a
random function; game~$\G1$ is the same, except that $A$ is given an
oracle for $F'_K$ for some $K \inr \{0, 1\}^k$. Then
$\Adv{prf}{F'}(A) = \Pr[S_1] - \Pr[S_0]$. Let game~$\G2$ be the same
as $\G1$, except that if $A$ makes any query of the form $p \cat K
\cat q$ with $|p| = t$ and $|q| = \ell - k$ then the game halts
immediately, and let $F_2$ be the event that this occurs. By
Lemma~\ref{lem:shoup} (slide~\pageref{lem:shoup}), then, $|{\Pr[S_2]} -
\Pr[S_1]| \le \Pr[F_2]$. Let game~$\G3$ be the same as $\G2$ except
that we give $A$ an oracle for $F_K$ rather than $F'_K$. Since $F$
and $F'$ differ only on queries of the form $p \cat K \cat q$, we have
$\Pr[S_3] = \Pr[S_2]$.  But $\Pr[S_3] - \Pr[S_0] = \Adv{prf}{F}(A) \le
\InSec{prf}(F; t, q)$.  Hence, $\Adv{prf}{F'}(A) \le \InSec{prf}(F; t, q)
+ \Pr[F_2]$.
Finally, we bound the probability of $F_2$. Fix an integer $n$.
Consider an adversary $B$ attacking $F$ which runs as follows. It
initially requests $F(0), F(1), \ldots, F(n - 1)$ from its oracle. It
then runs $A$, except that, for each oracle query $x$, it parses $x$
as $p \cat K' \cat q$ with $|p| = t$, $|K'| = k$ and $|q| = \ell - k$;
then, if $F_{K'}(0) = F(0) \land F_{K'}(1) = F(1) \land \cdots \land
F_{K'}(n - 1) = F(n - 1)$, $B$ immediately returns $1$, claiming that
its oracle $F$ is the function $F_{K'}$; if this never occurs, $B$
returns $0$. Clearly, if $B$ is given an instance $F_K$ of $F$ then
it succeeds with probability $\Pr[F_2]$; however, if $F$ is a random
function then $B$ returns $1$ with probability at most $q 2^{-nt}$.
Hence, $\Adv{prf}{F}(B) \ge \Pr[F_2] - q 2^{-nt}$.  $B$ issues $q + n$
queries, and takes time $t + O(n q)$. Wrapping everything up, we get
\[ \InSec{prf}(F'; t, q) \le
2\cdot\InSec{prf}(F; t + O(q n), q + n) + \frac{q}{2^{nt}}. \]%
This completes the proof of generic insecurity for $\mathcal{M}^1$.
\end{exercise}
\xcalways\subsection{Universal hashing}\x
\begin{slide}
\topic{almost-universal hash functions}
\resetseq
\head{Universal hashing, \seq: definition}
Consider a family of hash functions $H\colon \keys H \times \dom H \to
\ran H$. We define
\[ \InSec{uh}(H) =
\max_{x \ne y} \Pr[K \getsr \keys H : H_K(x) = H_K(y)]. \]%
If $\InSec{uh}(H) \le \epsilon$ then we say that $H$ is
\emph{$\epsilon$-almost universal}. Note that the concept of
almost-universality is not quantified by running times.
If $H$ is $1/|{\ran H}|$-almost universal, then we say that $H$ is
\emph{universal}. Sometimes it's said that this is the best possible
insecurity: this isn't true.
\end{slide}
\begin{proof}[Counterexample]
Here's a function $H\colon \{0, 1, 2\} \times \{0, 1, 2, 3\} \to \{0, 1\}$
which is $\frac{1}{3}$-almost universal, though $|{\ran H}| = 2$:
\begin{quote} \item
\begin{tabular}[C]{c|cccc}
& 0 & 1 & 2 & 3 \\ \hlx{vhv}
0 & 0 & 1 & 0 & 1 \\
1 & 0 & 0 & 1 & 1 \\
2 & 0 & 1 & 1 & 0
\end{tabular}
\end{quote}
\end{proof}
\begin{slide}
\topic{dynamic view}
\head{Universal hashing, \seq: a dynamic view}
Suppose that $H$ is $\epsilon$-almost universal. Consider this experiment:
\begin{program}
Experiment $\Expt{uh}{H}(A)$: \+ \\
$(x, y) \gets A$; \\
$K \getsr \keys H$; \\
\IF $x \ne y \land H_K(x) = H_K(y)$ \THEN \RETURN $1$; \\
\ELSE \RETURN $0$;
\end{program}
The adversary may make random decisions before outputting its selection
$x$, $y$.  We show that $\Pr[\Expt{uh}{H}(A) = 1] \le \InSec{uh}(H) \le
\epsilon$.
Let $\rho \in \{0, 1\}^*$ be $A$'s coin tosses: $A$ chooses $x$ and $y$ as
functions of $\rho$. For some \emph{fixed} $\rho$,
\[ \Pr[K \getsr \keys H : H_K(x(\rho)) = H_K(y(\rho))] \le
\InSec{uh}(H). \]%
\end{slide}
\begin{slide}
\head{Universal hashing, \seq: the dynamic view (cont.)}
Now we treat $\rho$ as a random variable, selected from some distribution
$P$ on the set $\{0, 1\}^*$. We see that
\begin{eqnarray*}[Ll]
\Pr[\rho \getsr P; K \getsr \keys H : H_K(x(\rho)) = H_K(y(\rho))] \\
&= \sum_{\rho \in \{0, 1\}^*}
P(\rho) \cdot \Pr[K \getsr \keys H : H_K(x(\rho)) = H_K(y(\rho))] \\
&\le \sum_{\rho \in \{0, 1\}^*} P(\rho) \cdot \InSec{uh}(H)
= \InSec{uh}(H).
\end{eqnarray*}
Thus, no adversary can succeed in producing a collision in an
$\epsilon$-almost universal hash with probability better than $\epsilon$.
But obviously the adversary can ignore its coin tosses and simply return
the best colliding pair. Hence the two notions are completely equivalent.
\end{slide}
\begin{slide}
\topic{composition}
\head{Universal hashing, \seq: composition}
Suppose that $G$ is $\epsilon$-almost universal, and $G'$ is
$\epsilon'$-almost universal, and $\dom G = \ran G'$. We define the
composition $G \compose G'$ to be the family $H\colon (\keys G \times
\keys G') \times \dom G' \to \ran G$ by $H_{k, k'}(m) =
G_k(G'_{k'}(m))$.
Then $H$ is $(\epsilon + \epsilon')$-almost universal. To see this, fix $x
\ne y$, and choose $K = (k, k') \inr \keys G \times \keys G'$. Let $x' =
G'_{k'}(x)$ and $y' = G'_{k'}(y)$. Following our previous result, we see:
\begin{eqnarray*}[rl]
\Pr[H_K(x) = H_K(y)]
&= \Pr[G_k(G'_{k'}(x)) = G_k(G'_{k'}(y))] \\
&= \Pr[G_k(x') = G_k(y')] \\
&\le \Pr[G_k(x') = G_k(y') \mid x' \ne y'] + \Pr[G'_{k'}(x) = G'_{k'}(y)] \\
&\le \epsilon + \epsilon'.
\end{eqnarray*}
\end{slide}
\begin{slide}
\topic{the collision game}
\head{Universal hashing, \seq: the collision game}
Suppose that, instead of merely a pair $(x, y)$, our adversary was allowed
to return a \emph{set} $Y$ of $q$ elements, and measure the probability
that $H_K(x) = H_K(y)$ for some $x \ne y$ with $x, y \in Y$, and for $K
\inr \keys H$.
Let $\InSec{uh-set}(H; q)$ be maximum probability achievable for sets $Y$
with $|Y| \le q$. Then
\[ \InSec{uh-set}(H; q) \le \frac{q(q - 1)}{2} \cdot \InSec{uh}(H) .\]
\end{slide}
\begin{proof}
This is rather tedious.  We use the dynamic view.  Suppose $A$ returns a
set $Y$ with $|Y| = q$, and succeeds with probability $\epsilon$.  Consider
\begin{program}
Adversary $A'$: \+ \\
$Y \gets A$; \\
$y \getsr Y$; \\
\RETURN $Y \setminus \{y\}$;
\end{program}
The worst that can happen is that $A'$ accidentally removes the one
colliding element from $Y$. This occurs with probability $2/q$. So
\[ \Succ{uh-set}{H}(A') \ge \frac{q - 2}{q} \Succ{uh-set}{H}(A). \]
Rearranging and maximizing gives
\[ \InSec{uh-set}(H; q) \le
\frac{q}{q - 2} \cdot \InSec{uh-set}(H; q - 1). \]
Note that $\InSec{uh-set}(H; 2) = \InSec{uh}(H)$ is our initial notion. A
simple inductive argument completes the proof.
\end{proof}
\begin{slide}
\topic{a MAC}
\head{Universal hashing, \seq: a MAC}
Suppose that $H\colon \{0, 1\}^k \times \{0, 1\}^* \to \{0, 1\}^l$ is an
almost universal hash function, and $F\colon \{0, 1\}^{k'} \times \{0,
1\}^l \to \{0, 1\}^L$ is a PRF\@. Define a MAC $\Xid{\mathcal{M}}{UH}^{H,
F} = (\Xid{T}{UH}^{H, F}, \Xid{V}{UH}^{H, F})$ where:
\begin{eqnarray*}[rl]
\Xid{T}{UH}^{H, F}_{K, K'}(m) &= F_{K'}(H_K(m)) \\
\Xid{V}{UH}^{H, F}_{K, K'}(m, \tau) &= \begin{cases}
1 & if $\tau = F_{K'}(H_K(m))$ \\
0 & otherwise
\end{cases}.
\end{eqnarray*}
We have
\begin{eqnarray*}[Ll]
\InSec{suf-cma}(\Xid{\mathcal{M}}{UH}^{H, F}; t, q_T, q_V) \\
& \le
(q_V + 1) \biggl(\InSec{prf}(F; t, q_T + 1) + \frac{1}{2^L} +
\frac{q_T(q_T - 1)}{2} \cdot \InSec{uh}(H)\biggr).
\end{eqnarray*}
\end{slide}
\begin{proof}
We shall prove the result for $q_V = 0$ and $q_T = q$, and appeal to the
earlier result on verification oracles.
Suppose $A$ attacks the scheme $\Xid{\mathcal{M}}{UH}^{H, F}$ in time $t$,
issuing $q$ tagging queries. Consider a distinguisher $D$, constructed
from a forger $A$:
\begin{program}
Distinguisher $D^{F(\cdot)}$: \+ \\
$K \getsr \{0, 1\}^k$; \\
$\Xid{T}{list} \gets \emptyset$; \\
$(m, \tau) \gets A^{\id{tag}(K, \cdot)}$; \\
\IF $m \notin \Xid{T}{list} \land \tau = F(H_K(m))$
\THEN \RETURN $1$; \\
\ELSE \RETURN $0$; \- \\[\smallskipamount]
Oracle $\id{tag}(K, m)$: \+ \\
$\Xid{T}{list} \gets \Xid{T}{list} \cup \{m\}$; \\
\RETURN $F(H_K(m))$; \- \\[\smallskipamount]
\end{program}
Note that $A$ isn't provided with a verification oracle: that's because we
didn't allow it any verification queries.
We can see immediately that
\[ \Pr[K \getsr \{0, 1\}^{k'} : D^{F_K(\cdot)} = 1] =
\Succ{suf-cma}{\Xid{\mathcal{M}}{UH}^{H, F}}(A). \]%
We must now find an upper bound for $\Pr[F \getsr \Func{l}{L} :
D^{F(\cdot)} = 1]$. Suppose that the adversary returns the pair $(m^*,
\tau^*)$, and that its tagging oracle queries and their answers are $(m_i,
\tau_i)$ for $0 \le i < q$. Consider the event $C$ that $H_K(m) =
H_K(m')$ for some $m \ne m'$, with $m, m' \in \{m^*\} \cup \{\,m_i \mid 0
\le i < q\,\}$.
If $C$ doesn't occur, then $F$ has not been queried before at $H_K(m)$, but
there's a $2^{-L}$ probability that the adversary guesses right anyway. If
$C$ does occur, then we just assume that the adversary wins, even though it
might not have guessed the right tag.
By our result on the collision game, $\Pr[C] \le \frac{q(q - 1)}{2} \cdot
\InSec{uh}(H)$.
Then
\[ \Succ{prf}{F}(D) \ge
\Succ{suf-cma}{\Xid{\mathcal{M}}{UH}^{H, F}}(A) -
\frac{1}{2^L} - \frac{q(q - 1)}{2} \cdot \InSec{uh}(H). \]%
The result follows.
\end{proof}
\begin{slide}
\topic{almost XOR-universality}
\resetseq
\head{Almost XOR-universality, \seq: definition}
Consider a family of hash functions $H\colon \keys H \times \dom H \to
\{0, 1\}^L$. Define
\[ \InSec{xuh}(H) =
\max_{x \ne y, \delta}
\Pr[K \getsr \keys H : H_K(x) \xor H_K(y) = \delta]. \]%
If $\InSec{xuh}(H) < \epsilon$ then we say that $H$ is
\emph{$\epsilon$-almost XOR-universal}, or \emph{AXU}. Setting $\delta =
0$ shows that
\begin{eqnarray*}[rl]
\InSec{xuh}(H)
& \ge \max_{x \ne y} \Pr[K \getsr \keys H : H_K(x) \xor H_K(y) = 0] \\
& = \InSec{uh}(H).
\end{eqnarray*}
We can take a dynamic view of almost XOR-universality using the same
technique as for almost universal functions.
If $H$ is $2^{-L}$-almost XOR universal then we say that $H$ is
\emph{XOR-universal}. This is the best achievable.
\end{slide}
\begin{proof}
Fix some pair $x \ne y$. Choose $\delta \inr \{0, 1\}^L$. Then for any
\emph{fixed} $K \in \keys H$, $H_K(x)$ and $H_K(y)$ are fixed, so $H_K(x)
\xor H_K(y)$ is fixed, and $\Pr[H_K(x) \xor H_K(y) = \delta] = 2^{-L}$.
Using the same trick as for proving the dynamic view:
\begin{eqnarray*}[Ll]
\Pr[\delta \getsr \{0, 1\}^L; K \getsr \keys H :
H_K(x) \xor H_K(y) = \delta] \\
& = \sum_{K \in \keys H} \frac{1}{|{\keys H}|}
\Pr[\delta \getsr \{0, 1\}^L; K \getsr \keys H :
H_K(x) \xor H_K(y) = \delta] \\
& = \sum_{K \in \keys H} \frac{1}{|{\keys H}|} 2^{-L} = 2^{-L}.
\end{eqnarray*}
Since $H$ is arbitrary, this proves the lower bound on the almost
XOR-universality.
\end{proof}
\begin{slide}
\topic{composition}
\head{Almost XOR-universality, \seq: composition}
We extend our result about composition of almost-universal functions.
Suppose that $G$ is $\epsilon$-almost XOR universal, and $G'$ is
$\epsilon'$-almost universal (it doesn't have to be almost XOR-universal),
and $\dom G = \ran G'$.
Then the composition $H = G \compose G'$ is $(\epsilon + \epsilon')$-almost
XOR-universal. The proof is simple, and very similar to the
almost-universal case.
\end{slide}
\begin{slide}
\topic{a better MAC}
\head{Almost XOR-universality, \seq: a better MAC}
The security result for the UH-based MAC contains a factor $q_T$, which
it'd be nice to remove. Our new scheme uses an AXU hash $H\colon \keys H
\times \{0, 1\}^* \to \{0, 1\}^l$ and a PRF $F\colon \keys F \times \{0,
1\}^l \to \{0, 1\}^L$.
We first present a stateful version $\Xid{\mathcal{M}}{XUH}^{H, F}$.
Choose $(K, K') \inr \keys H \times \keys F$, and initialize a counter $i
\gets 0$. The tagging and verification algorithms are then:
\begin{program}
Algorithm $\Xid{T}{XUH}^{H, F}_{K, K'}(m)$: \+ \\
$\tau \gets (i, H_K(m) \xor F_{K'}(i))$; \\
$i \gets i + 1$; \\
\RETURN $\tau$;
\next
Algorithm $\Xid{V}{XUH}^{H, F}_{K, K'}(m, \tau)$: \+ \\
$(s, \sigma) \gets \tau$; \\
\IF $\sigma = H_K(m) \xor F_{K'}(s)$ \THEN \RETURN $1$; \\
\ELSE \RETURN $0$;
\end{program}
Note that verification is stateless.
\end{slide}
\begin{slide}
\head{Almost XOR-universality, \seq: security of AXU-based MACs}
For the stateful scheme presented earlier, provided $q_T \le 2^l$, we have
\begin{eqnarray*}[Ll]
\InSec{suf-cma}(\Xid{\mathcal{M}}{XUH}^{H, F}; t, q_T, q_V) \\
& \le (q_V + 1)(\InSec{prf}(F; t, q_T + 1) + \InSec{xuh}(H) + 2^{-L}).
\end{eqnarray*}
\end{slide}
\begin{slide}
\head{Almost XOR-universality, \seq: randomized AXU-based MAC}
We can avoid statefulness by using randomization. This new scheme is
$\Xid{\mathcal{M}}{XUH$\$$}^{H, F} = (\Xid{T}{XUH$\$$}^{H, F},
\Xid{V}{XUH$\$$}^{H, F})$:
\begin{program}
Algorithm $\Xid{T}{XUH$\$$}^{H, F}_{K, K'}(m)$: \+ \\
$s \getsr \{0, 1\}^l$; \\
$\tau \gets (s, H_K(m) \xor F_{K'}(s))$; \\
\RETURN $\tau$;
\next
Algorithm $\Xid{V}{XUH$\$$}^{H, F}_{K, K'}(m, \tau)$: \+ \\
$(s, \sigma) \gets \tau$; \\
\IF $\sigma = H_K(m) \xor F_{K'}(s)$ \THEN \RETURN $1$; \\
\ELSE \RETURN $0$;
\end{program}
\begin{eqnarray*}[Ll]
\InSec{suf-cma}(\Xid{\mathcal{M}}{XUH$\$$}^{H, F}; t, q_T, q_V) \\
& \le (q_V + 1)
\Bigl(\InSec{prf}(F; t, q_T + 1) + \InSec{xuh}(H) + 2^{-L} +
\frac{q_T(q_T - 1)}{2^{l+1}}\Bigr).
\end{eqnarray*}
\end{slide}
\begin{proof}
We prove the result with $q_V = 0$ and $q_T = q$, and appeal to the result
on verification oracles. Let $m_i$ be the message specified in the $i$-th
tagging query ($0 \le i < q$), and let $(s_i, \sigma_i) = (s_i, H_K(m_i)
\xor F_{K'}(s_i))$ be the tag returned. We call the $s_i$ the \emph{nonces}.
We prove the result for the stateful scheme. The bound $q \le 2^l$
ensures that the nonces are all distinct (we have $s_i = i$). The security
bound for the randomized version merely has an extra term: an upper bound
for the probability of a nonce collision.
Let $A$ be an adversary attacking the MAC in time $t$, and using $q$
tagging queries. Then we present the following pair of adversaries:
\begin{program}
Distinguisher $D^{F(\cdot)}$: \+ \\
$i \gets 0$; \\
$\Xid{T}{list} \gets \emptyset$; \\
$K \getsr \keys H$; \\
$(m, \tau) \gets A^{\id{tag}}$; \\
$(s, \sigma) \gets \tau$; \\
\IF $(m, \tau) \notin \Xid{T}{list} \land
\sigma = H_K(m) \xor F(s)$
\THEN \RETURN $1$; \\
\ELSE \RETURN $0$; \- \\[\smallskipamount]
Oracle $\id{tag}(m)$: \+ \\
$\tau \gets (i, H_K(m) \xor F(i))$; \\
$\Xid{T}{list} \gets \Xid{T}{list} \cup \{(m, \tau)\}$; \\
$i \gets i + 1$; \\
\RETURN $\tau$;
\next
Collision-finder $C$: \+ \\
$i \gets 0$; \\
$(m, \tau) \gets A^{\id{tag}}$; \\
$(s, \sigma) \gets \tau$; \\
\IF $s \ge i \lor m = m_s$ \THEN \ABORT; \\
\RETURN $(m, m_s, \sigma \xor \sigma_s)$; \- \\[\smallskipamount]
Oracle $\id{tag}(m)$: \+ \\
$m_i \gets m$; \\
$\sigma_i \getsr \{0, 1\}^L$; \\
$\tau \gets (i, \sigma_i)$; \\
$i \gets i + 1$; \\
\RETURN $\tau$;
\end{program}
We need to find a lower bound on the advantage of $D$. If $F$ is chosen
from the PRF then $D$ returns $1$ precisely when $A$ finds a valid
forgery. We now examine the setting in which $F$ is a random function.
Let $S$ be the event that $A$ succeeds in returning a valid forgery when
$F$ is random, and let $N$ be the event that the nonce $s$ returned by $A$
is not equal to any nonce $s_i$ returned by the tagging oracle. Suppose
$N$ occurs: then the random function $F$ has never been queried before at
$s$, and $\Pr[F(s) = \sigma \xor H_K(m)]$ is precisely $2^{-L}$.
So suppose instead that $N$ doesn't occur. Then, since the $s_i$ are
distinct, there is a unique $i$ such that $s = s_i$. For $A$ to win, we
must have $m \ne m_i$ (for if $m = m_i$ then the only valid tag is
$H_K(m_i) \xor F(s_i) = \sigma_i$, which was already returned by the
tagging oracle). If $A$'s forgery is successful then
\[ \sigma = H_K(m) \xor F(s)
\qquad \text{and} \qquad
\sigma_i = H_K(m_i) \xor F(s_i) \]%
but $s = s_i$, whence
\[ H_K(m_i) \xor H_K(m) = \sigma \xor \sigma_i. \]
Since the $s_i$ are distinct and $F$ is a random function, the $\sigma_i$
are independent uniformly-distributed random strings from $\{0, 1\}^L$.
Hence the collision-finder $C$ succeeds with probability $\Pr[S \land
\lnot N] \le \InSec{xuh}(H)$.
Wrapping up, we have
\begin{eqnarray*}[rl]
\Adv{prf}{F}(D)
& \ge \Succ{suf-cma}{\Xid{\mathcal{M}}{XUH}^{H, F}}(A) -
(\Pr[S \mid N] \Pr[N] + \Pr[S \mid \lnot N] \Pr[\lnot N]) \\
& \ge \Succ{suf-cma}{\Xid{\mathcal{M}}{XUH}^{H, F}}(A) -
(2^{-L} + \InSec{xuh}(H)).
\end{eqnarray*}
Maximizing and rearranging yields the required result.
\end{proof}
\begin{remark*}
Note that our bound has a $2^{-L}$ term in it that's missing from
\cite{Goldwasser:1999:LNC}. We believe that their proof is wrong in its
handling of the XOR-collision probability.
\end{remark*}
\endinput
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "ips"
%%% End: