wrestlers.tex: Change citation keys to match `mdw-crypto.bib' changes.
[doc/wrestlers] / wrestlers.tex
CommitLineData
a6e375a6
MW
1%%% -*-latex-*-
2%%%
3%%% The Wrestlers Protocol: secure, deniable key-exchange
4%%%
5%%% (c) 2006 Mark Wooding
6%%%
7
b12a993f
MW
8\makeatletter
9\def\@doit#1{#1}
10\ifx\iffancystyle\xxundefined\expandafter\@doit\else\expandafter\@gobble\fi
11{\newif\iffancystyle\fancystyletrue}
12\ifx\ifshort\xxundefined\expandafter\@doit\else\expandafter\@gobble\fi
13{\newif\ifshort\shortfalse}
14\iffancystyle\expandafter\@gobble\else\expandafter\@doit\fi{\newif\ifpdfing}
15\makeatother
32609df3
MW
16
17\typeout{Options:}
18\typeout{ Fancy style: \iffancystyle ON\else OFF\fi}
19\typeout{ Short version: \ifshort ON\else OFF\fi}
20
a6e375a6
MW
21\errorcontextlines=\maxdimen
22\showboxdepth=\maxdimen
23\showboxbreadth=\maxdimen
24
25\iffancystyle
32609df3
MW
26 \documentclass{strayman}
27 \parskip=0pt plus 1pt \parindent=1.2em
a6e375a6
MW
28 \usepackage[T1]{fontenc}
29 \usepackage[palatino, helvetica, courier, maths=cmr]{mdwfonts}
30 \usepackage[within = subsection, mdwmargin]{mdwthm}
31 \usepackage{mdwlist}
32 \usepackage{sverb}
32609df3 33 \ifpdfing\else
d8139f2c
MW
34 \PassOptionsToPackage{dvips}{xy}
35 \fi
a6e375a6 36\else
32609df3
MW
37 \PassOptionsToClass{runningheads}{llncs}
38 \documentclass{llncs}
a6e375a6
MW
39\fi
40
41\PassOptionsToPackage{show}{slowbox}
42%\PassOptionsToPackage{hide}{slowbox}
32609df3 43\usepackage{mdwtab, mdwmath, crypto}
a6e375a6
MW
44\usepackage{slowbox}
45\usepackage{amssymb, amstext}
46\usepackage{url, multicol}
47\usepackage{tabularx}
48\DeclareUrlCommand\email{\urlstyle{tt}}
49\ifslowboxshow
50 \usepackage[all]{xy}
51 \turnradius{4pt}
52\fi
32609df3 53\usepackage{mathenv}
a6e375a6 54
32609df3 55\newcommand{\Nupto}[1]{\{0, 1, \ldots, #1 - 1\}}
a6e375a6 56\iffancystyle
32609df3 57 \let\next\title
a6e375a6 58\else
32609df3 59 \def\next[#1]{\titlerunning{#1}\title}
a6e375a6
MW
60\fi
61\next
32609df3
MW
62 [The Wrestlers Protocol]
63 {The Wrestlers Protocol%
64 \ifshort\thanks{This is an extended abstract; the full version
081da1b1 65 \cite{wooding-2006:wrestlers} is available from
32609df3 66 \texttt{http://eprint.iacr.org/2006/386/}.}\fi \\
a6e375a6
MW
67 A simple, practical, secure, deniable protocol for key-exchange}
68\iffancystyle
69 \author{Mark Wooding \\ \email{mdw@distorted.org.uk}}
70\else
71 \author{Mark Wooding}
72 \institute{\email{mdw@distorted.org.uk}}
73\fi
74
75\iffancystyle
76 \bibliographystyle{mdwalpha}
77 \let\epsilon\varepsilon
78 \let\emptyset\varnothing
79 \let\le\leqslant\let\leq\le
80 \let\ge\geqslant\let\geq\ge
81 \numberwithin{table}{section}
82 \numberwithin{figure}{section}
83 \numberwithin{equation}{subsection}
84 \let\random\$
85\else
32609df3 86 \bibliographystyle{splncs}
a6e375a6
MW
87 \expandafter\let\csname claim*\endcsname\claim
88 \expandafter\let\csname endclaim*\endcsname\endclaim
89\fi
90
a6e375a6
MW
%% General notation.
%% \Bin is the binary alphabet {0, 1} (typeset as \Sigma); \emptystring
%% is the empty string (typeset as \lambda).  \Pr is patched so that its
%% subscripts are set limits-off.  \bitsto is the range operator used in
%% bit-string slices.  \E and \M are calligraphic E and M -- NOTE(review):
%% presumably an encryption scheme and a message space; confirm against
%% later usage.
91\let\Bin\Sigma
92\let\emptystring\lambda
93\edef\Pr{\expandafter\noexpand\Pr\nolimits}
94\newcommand{\bitsto}{\mathbin{..}}
95\newcommand{\E}{{\mathcal{E}}}
96\newcommand{\M}{{\mathcal{M}}}
%% In the fancy style, redefine description lists via mdwlist's
%% \basedescript: italic labels, multi-line label style, 1in label width.
97\iffancystyle
98 \def\description{%
99 \basedescript{%
100 \let\makelabel\textit%
101 \desclabelstyle\multilinelabel%
102 \desclabelwidth{1in}%
103 }%
104 }
105\fi
%% \fixme{text}: a marginal FIXME flag plus an inline bracketed note.
106\def\fixme#1{\marginpar{FIXME}[FIXME: #1]}
%% \hex{digits}: a hexadecimal constant -- typewriter digits with a
%% subscript x (math mode only, because of the subscript).
107\def\hex#1{\texttt{#1}_{x}}
108
32609df3
MW
%% longproof: a proof included only in the full version of the paper.
%% Usage: \begin{longproof}{<text>} ... \end{longproof}.  In the short
%% version (\ifshort true), the <text> argument (e.g., a pointer to the
%% full version) is typeset and the proof body is discarded via \ignore;
%% otherwise the body is wrapped in a standard proof environment.
109\newenvironment{longproof}[1]{%
110 \ifshort#1\expandafter\ignore
111 \else\proof\fi
112}{%
113 \ifshort\else\endproof\fi
114}
115
a6e375a6
MW
%% \dbox{...}: typeset the argument as a top-aligned (\vtop) stack of
%% \hbox lines; \\ inside the argument ends the current line and opens a
%% new one.  Each line carries a \strut for even baseline spacing.
116\def\dbox#1{%
117 \vtop{%
118 \def\\{\unskip\egroup\hbox\bgroup\strut\ignorespaces}%
119 \hbox{\strut#1}%
120 }%
121}
122
%% Names for the paper's protocols, via the \Xid subscript-naming helper:
%% W_ident (identification), W_kx (key-exchange), W_dkx (deniable
%% key-exchange).
123\def\Wident{\Xid{W}{ident}}
124\def\Wkx{\Xid{W}{kx}}
125\def\Wdkx{\Xid{W}{dkx}}
%% \Func{D}{R}: the set of all functions with domain D and range R.
126\def\Func#1#2{\mathcal{F}[#1\to#2]}
%% \diff{a}{b}: a Delta quantity indexed by a and b -- NOTE(review):
%% presumably a statistical difference; confirm against usage.
127\def\diff#1#2{\Delta_{#1, #2}}
%% \Hid: the hash H_{ID} -- NOTE(review): presumably hashes identities;
%% confirm against usage.
128\def\Hid{H_{\textit{ID}}}
129
130%% protocol run diagrams
%% Building blocks for xymatrix pictures of protocol runs.  Each row
%% pairs an action (left column, with an \ar[r] arrow pointing into the
%% right column) with the session's resulting output or state.
131\def\PRaction#1{#1\ar[r]}
132\def\PRcreatex#1{\PRaction{\textsf{Create session\space}#1}}
133\def\PRcreate#1#2#3{\PRcreatex{(\text{#1},\text{#2},#3)}}
134\def\PRreceivex#1{\PRaction{\textsf{Receive\space}#1}}
135\def\PRreceive#1#2{\PRreceivex{\msg{#1}{#2}}}
136\def\PRsession#1{\relax\mathstrut#1\ar[r]}
%% \msg{tag}{body}: a protocol message, shown as a (cookie, body) pair.
137\def\msg#1#2{(\cookie{#1},#2)}
138\def\PRresult#1{#1}
139\def\PRsendx#1{\PRresult{\textsf{Send\space}#1}}
140\def\PRsend#1#2{\PRsendx{\msg{#1}{#2}}}
141\def\PRcomplete#1{\textsf{Complete:\space}#1}
142\def\PRstate#1{\textsf{State:\space}#1}
143\def\PRignore{\textsf{(ignored)}}
144\def\PRreveal{\textsf{Session-state reveal}\ar[r]}
%% \protocolrun{rows}: wrap the rows in a tightly-spaced (row sep 0pt,
%% column sep 2em) displayed xymatrix.
145\def\protocolrun#1{\[\xymatrix @R=0pt @C=2em {#1}\]}
146
32609df3
MW
%% \protocol ... \endprotocol: a full-width two-column tabular* for
%% message-flow diagrams -- the two parties' computations go in the left
%% and right columns, with rubber space between them.
%% \send{arrow-style}{message}: drawn between rows (\noalign), a centred
%% xy arrow of the given style, labelled with the message and spanning
%% half the line width.
147\def\protocol{%
148 \unskip\bigskip
149 \begin{tabular*}{\linewidth}%
150 {@{\qquad}l@{\extracolsep{0ptplus1fil}}r@{\qquad}}}
151\def\endprotocol{\end{tabular*}}
152\def\send#1#2{\noalign{%
153 \centerline{\xy\ar @{#1}|*+{\mathstrut#2}<.5\linewidth, 0pt>\endxy}}}
154
a6e375a6
MW
155%% class-ids for proof of extractor lemma
%% Single-symbol aliases used to classify objects in the extractor-lemma
%% proof; kept as \let aliases so the proof text can refer to them by
%% meaningful macro names while the symbols stay easy to change.
156\let\Cid=\Lambda
157\let\Csession=S
158\let\Cindex=r
159\let\Cquery=Q
160\let\Chash=H
161\let\Ccheck=c
162\let\Ccheckset=V
163\let\Ccount=\nu
164
dff0fad2
MW
%% \HG{i}: boldface H with subscript i -- NOTE(review): presumably a
%% hash-game/hybrid label in a sequence-of-games proof; confirm against
%% later usage.
165\def\HG#1{\mathbf{H}_{#1}}
166
32609df3
MW
167\iffancystyle\else
168 \let\xsssec\subsubsection\def\subsubsection#1{\xsssec[#1]{#1.}}
169\fi
170
a6e375a6
MW
171\begin{document}
172
173%%%--------------------------------------------------------------------------
174
175\maketitle
d8139f2c 176\iffancystyle \thispagestyle{empty} \fi
a6e375a6
MW
177
178\begin{abstract}
179 We describe and prove (in the random-oracle model) the security of a simple
180 but efficient zero-knowledge identification scheme, whose security is based
181 on the computational Diffie-Hellman problem. Unlike other recent proposals
182 for efficient identification protocols, we don't need any additional
183 assumptions, such as the Knowledge of Exponent assumption.
184
185 From this beginning, we build a simple key-exchange protocol, and prove
186 that it achieves `SK-security' -- and hence security in Canetti's Universal
187 Composability framework.
188
189 Finally, we show how to turn the simple key-exchange protocol into a
190 slightly more complex one which provides a number of valuable `real-life'
191 properties, without damaging its security.
192\end{abstract}
193
194\iffancystyle
195\newpage
d8139f2c 196\thispagestyle{empty}
a6e375a6
MW
197\columnsep=2em \columnseprule=0pt
198\tableofcontents[\begin{multicols}{2}\raggedright][\end{multicols}]
199%%\listoffigures[\begin{multicols}{2}\raggedright][\end{multicols}]
200%% \listoftables[\begin{multicols}{2}\raggedright][\end{multicols}]
201\newpage
202\fi
203
204%%%--------------------------------------------------------------------------
205
206\section{Introduction}
207
dff0fad2
MW
208This paper proposes protocols for \emph{identification} and
209\emph{authenticated key-exchange}.
a6e375a6 210
dff0fad2
MW
211An identification protocol allows one party, say Bob, to be sure that he's
212really talking to another party, say Alice. It assumes that Bob has some way
213of recognising Alice; for instance, he might know her public key. Our
214protocol requires only two messages -- a challenge and a response -- and has
215a number of useful properties. It is very similar to, though designed
32609df3 216independently of, a recent protocol by Stinson and Wu
081da1b1
MW
217\cite{stinson-wu-2006:two-flow-zero-knowledge}; we discuss their protocol and
218compare it to ours in \ifshort the full version of this paper. \else
32609df3 219section~\ref{sec:stinson-ident}. \fi
a6e375a6 220
dff0fad2 221Identification protocols are typically less useful than they sound. As Shoup
081da1b1
MW
222\cite{shoup-1999:formal-model-key-exchange} points out, such a protocol provides a `secure
223ping', by which Bob can know that Alice is `there', but provides no guarantee
224that any other communication was sent to or reached her. However, there are
4331bf84
MW
225situations where there is an authentic channel between two entities -- e.g., a
226device and a smartcard -- where a simple identification protocol can still be
227useful.
a6e375a6 228
dff0fad2
MW
229An authenticated key-exchange protocol lets Alice and Bob agree on a shared
230secret, known to them alone, even if there is an enemy who can read and
231intercept all of their messages, and substitute messages of her own. Once
232they have agreed on their shared secret, of course, they can use standard
233symmetric cryptography techniques to ensure the privacy and authenticity of
234their messages.
a6e375a6
MW
235
236
237\subsection{Desirable properties of our protocols}
238
239Our identification protocol has a number of desirable properties.
240\begin{itemize}
241\item It is \emph{simple} to understand and implement. In particular, it
242 requires only two messages.
243\item It is fairly \emph{efficient}, requiring two scalar multiplications by
244 each of the prover and verifier.
245\item It is provably \emph{secure} (in the random oracle model), assuming the
246 intractability of the computational Diffie-Hellman problem.
247\end{itemize}
248
dff0fad2 249Our key-exchange protocol also has a number of desirable
d8139f2c 250properties.
a6e375a6
MW
251\begin{itemize}
252\item It is fairly \emph{simple} to understand and implement, though there
253 are a few subtleties. In particular, it is \emph{symmetrical}. We have
254 implemented a virtual private network system based on this protocol.
255\item It is \emph{efficient}, requiring four scalar multiplications by each
256 participant. The communication can be reduced to three messages by
257 breaking the protocol's symmetry.
258\item It is provably \emph{secure} (again, in the random oracle model),
259 assuming the intractability of the computational Diffie-Hellman problem,
260 and the security of a symmetric encryption scheme.
261\item It provides \emph{perfect forward secrecy}. That is, even if a user's
262 long-term secrets are compromised, past sessions remain secure.
263\item It is \emph{deniable}. It is possible to construct simulated
264 transcripts of protocol executions between any number of parties without
265 knowing any of their private keys. The simulated transcripts are (almost)
266 indistinguishable from real protocol transcripts. Hence, a transcript
267 does not provide useful evidence that a given party was really involved in
268 a given protocol execution.
269\end{itemize}
270
32609df3 271\ifshort\else
dff0fad2 272\subsection{Asymptotic and concrete security results}
a6e375a6 273
dff0fad2
MW
274Most security definitions for identification (particularly zero-knowledge)
275and key-exchange in the literature are \emph{asymptotic}. That is, they
276consider a family of related protocols, indexed by a \emph{security
277parameter}~$k$; they require that any \emph{polynomially-bounded} adversary has only
278\emph{negligible} advantage. A polynomially-bounded adversary is one whose
279running time is bounded by some polynomial~$t(k)$. The security definition
280requires that, for any such polynomially-bounded adversary, and any
281polynomial $p(k)$, the adversary's advantage is less than $1/p(k)$ for all
282sufficiently large values of $k$.
283
284Such asymptotic notions are theoretically interesting, and have obvious
285connections to complexity theory. Unfortunately, such an asymptotic result
286tells us nothing at all about the security of a particular instance of a
287protocol, or what parameter sizes one needs to choose for a given level of
288security against a particular kind of adversary. Koblitz and Menezes
081da1b1
MW
289\cite{koblitz-menezes-2006:another-look-provable-security-ii} (among other
290useful observations) give examples of protocols, proven to meet asymptotic
291notions of security, whose security proofs guarantee nothing at all for the
292kinds of parameters typically used in practice.
dff0fad2
MW
293
294Since, above all, we're interested in analysing a practical and implemented
295protocol, we follow here the `practice-oriented provable security' approach
296largely inspired by Bellare and Rogaway, and exemplified by
081da1b1
MW
297\cite{bellare-1994:security-cbc,bellare-1995:xor-macs,%
298bellare-rogaway-1995:oaep,bellare-rogaway-1996:exact-security-sigs,%
299bellare-1996:hmac,bellare-1997:concrete-symmetric}; see also
300\cite{bellare-1999:practice-oriented-provable-security}.
dff0fad2
MW
301Rather than attempting to say, formally, whether or not a protocol is
302`secure', we associate with each protocol an `insecurity function' which
303gives an upper bound on the advantage of any adversary attacking the protocol
304within given resource bounds.
32609df3 305\fi
a6e375a6 306
dff0fad2 307\subsection{Formal models for key-exchange}
a6e375a6 308
32609df3
MW
309\ifshort
310
311The first model for studying the \emph{computational} security of
312key-exchange protocols (rather than using protocol-analysis logics like that
081da1b1
MW
313of \cite{burrows-1989:logic-authn}) was given by Bellare and Rogaway
314\cite{bellare-rogaway-1994:entity-authn-key-distrib}; the model has since
315been enhanced, both by the original authors and others, in
316\cite{bellare-rogaway-1995:session-key-distrib,%
317 blake-wilson-1997:key-agreement,%
318 blake-wilson-menezes-1998:asymm-key-transport}.
319The model defines security in terms of a game: key-exchange protocols are
320secure if an adversary can't distinguish the key agreed by a chosen
321`challenge session' from a key chosen independently at random. Other models
322for key-exchange have been proposed in
323\cite{bellare-1998:modular-key-exchange} and
324\cite{shoup-1999:formal-model-key-exchange}; these use a different notion of
325security, involving implementation of an ideal functionality.
32609df3
MW
326
327\else
328
dff0fad2
MW
329Many proposed key-exchange protocols have turned out to have subtle security
330flaws. The idea of using formal methods to analyse key-exchange protocols
081da1b1
MW
331begins with the logic of Burrows, Abadi and Needham
332\cite{burrows-1989:logic-authn}. Their approach requires a `formalising'
333step, in which one expresses in the logic the contents of the message flows,
334and the \emph{beliefs} of the participants.
335
336Bellare and Rogaway \cite{bellare-rogaway-1994:entity-authn-key-distrib}
337describe a model for studying the computational security of authentication
338and key-exchange protocols in a concurrent setting, i.e., where multiple
339parties are running several instances of a protocol simultaneously. They
340define a notion of security in this setting, and show that several simple
341protocols achieve this notion. Their original paper dealt with pairs of
342parties using symmetric cryptography; they extended their definitions in
343\cite{bellare-rogaway-1995:session-key-distrib} to study three-party
344protocols involving a trusted key-distribution centre.
345
346Blake-Wilson, Johnson and Menezes \cite{blake-wilson-1997:key-agreement}
347applied the model of \cite{bellare-rogaway-1994:entity-authn-key-distrib} to
348key-exchange protocols using asymmetric cryptography, and Blake-Wilson and
349Menezes \cite{blake-wilson-menezes-1998:asymm-key-transport} applied it to
350protocols based on the Diffie-Hellman protocol.
351
352The security notion of \cite{bellare-rogaway-1994:entity-authn-key-distrib}
353is based on a \emph{game}, in which an adversary nominates a \emph{challenge
354session}, and is given either the key agreed by the participants of the
355challenge session, or a random value independently sampled from an
356appropriate distribution. The adversary's advantage -- and hence the
357insecurity of the protocol -- is measured by its success probability in
358guessing whether the value it was given is really the challenge key. This
359challenge-session notion was also used by the subsequent papers described
360above.
361
362Bellare, Canetti and Krawczyk \cite{bellare-1998:modular-key-exchange}
363described a pair of models which they called the \textsc{am} (for
364`authenticated links model') and \textsc{um} (`unauthenticated links model').
365They propose a modular approach to the design of key-exchange protocols,
366whereby one first designs a protocol and proves its security in the
367\textsc{am}, and then applies an authenticating `compiler' to the protocol
368which they prove yields a protocol secure in the realistic \textsc{um}.
369Their security notion is new. They define an `ideal model', in which an
370adversary is limited to assigning sessions fresh, random and unknown keys, or
371matching up one session with another, so that both have the same key. They
372define a protocol to be secure if, for any adversary~$A$ in the \textsc{am}
373or \textsc{um}, there is an ideal adversary~$I$, such that the outputs of $A$
374and $I$ are computationally indistinguishable.
375
376In \cite{shoup-1999:formal-model-key-exchange}, Shoup presents a new model
377for key-exchange, also based on the idea of simulation. He analyses the
378previous models, particularly
379\cite{bellare-rogaway-1994:entity-authn-key-distrib} and
380\cite{bellare-1998:modular-key-exchange}, and highlights some of their inadequacies.
dff0fad2 381
32609df3
MW
382\fi
383
081da1b1
MW
384Canetti and Krawczyk
385\cite{canetti-krawczyk-2001:secure-channels-eprint,%
386 canetti-krawczyk-2001:secure-channels}
387describe a new notion of security in the model of
388\cite{bellare-1998:modular-key-exchange}, based on the challenge-session
389notion of \cite{bellare-rogaway-1994:entity-authn-key-distrib}. The security
390notion, called `SK-security', seems weaker in various ways than those of
391earlier works such as \cite{bellare-rogaway-1994:entity-authn-key-distrib} or
392\cite{shoup-1999:formal-model-key-exchange}. However, the authors show that
393their notion suffices for constructing `secure channel' protocols, which they
394also define.
395
396\ifshort\else In \cite{canetti-2001:uc-security-eprint,%
397 canetti-2001:uc-security},
398Canetti describes the `universal composition' framework. Here, security
399notions are simulation-based: one defines security notions by presenting an
400`ideal functionality'. A protocol securely implements a particular
401functionality if, for any adversary interacting with parties who use the
402protocol, there is an adversary which interacts with parties using the ideal
403functionality such that no `environment' can distinguish the two. The
404environment is allowed to interact freely with the adversary throughout,
405differentiating this approach from that of
406\cite{bellare-1998:modular-key-exchange} and
407\cite{shoup-1999:formal-model-key-exchange}, where the distinguisher was
408given only transcripts of the adversary's interaction with the parties. With
409security defined in this way, it's possible to prove a `universal composition
410theorem': one can construct a protocol, based upon various ideal
411functionalities, and then `plug in' secure implementations of the ideal
412functionalities and appeal to the theorem to prove the security of the entire
413protocol. The UC framework gives rise to very strong notions of security,
414due to the interactive nature of the `environment' distinguisher. \fi
415
416Canetti and Krawczyk \cite{canetti-krawczyk-2002:uc-key-exchange} show that
417the SK-security notion of \cite{canetti-krawczyk-2001:secure-channels} is
418\emph{equivalent} to a `relaxed' notion of key-exchange security in the UC
419framework\ifshort\space of \cite{canetti-2001:uc-security}\fi, and suffices
420for the construction of UC secure channels.
421
422The result of \cite{canetti-krawczyk-2002:uc-key-exchange} gives us
423confidence that SK-security is the `right' notion of security for
424key-exchange protocols. Accordingly, SK-security is the standard against
425which we analyse our key-exchange protocol.
a6e375a6
MW
426
427
428\subsection{Outline of the paper}
429
430The remaining sections of this paper are as follows.
431\begin{itemize}
432\item Section \ref{sec:prelim} provides the essential groundwork for the rest
433 of the paper. It introduces important notation, and describes security
434 notions and intractability assumptions.
435\item Section \ref{sec:zk-ident} describes our zero-knowledge identification
436 protocol and proves its security.
437\item Section \ref{sec:kx} describes the simple version of our key-exchange
438 protocol, and proves its security and deniability. It also describes some
439 minor modifications which bring practical benefits without damaging
32609df3 440 security.
dff0fad2 441\item Finally, section \ref{sec:conc} presents our conclusions.
a6e375a6
MW
442\end{itemize}
443
32609df3
MW
444\ifshort
445The full version of this paper describes how to make our protocols
446identity-based by using bilinear pairings using the techniques introduced in
081da1b1
MW
447\cite{boneh-franklin-2003:ibe-weil-pairing}. It also contains proofs of the
448various theorems stated here.
32609df3
MW
449\fi
450
a6e375a6
MW
451%%%--------------------------------------------------------------------------
452
453\section{Preliminaries}
454\label{sec:prelim}
455
32609df3
MW
456\ifshort
457\subsection{Basics}
458\let\prelimsec\subsubsection
459\else
460\let\prelimsec\subsection
461\fi
462
463\prelimsec{Miscellaneous notation}
a6e375a6
MW
464
465We write $\Func{D}{R}$ for the set of all functions with domain $D$ and range
466$R$.
a6e375a6 467
32609df3 468\prelimsec{Groups}
a6e375a6
MW
469
470Let $(G, +)$ be a cyclic group\footnote{
471 We find that additive group notation is easier to read. In particular, in
472 multiplicative groups, one ends up with many interesting things tucked away
473 in little superscripts.}%
474of prime order $q$, and generated by an element $P$. We shall write the
475identity of $G$ as $0_G$, or simply as $0$ when no ambiguity is likely to
476arise. Thus, we have $\langle P \rangle = G$ and $q P = 0$. Any $X \in G$
32609df3 477can be written as $X = x P$ for some $x \in \{0, 1, \ldots, q - 1\}$.
a6e375a6 478
32609df3
MW
479We consider a cyclic group of order $n$ as a $\Z/n\Z$-module, and in
480particular our group $G$ can be seen as a vector space over $\gf{q}$. This
481makes the notation slightly more convenient.
a6e375a6 482
32609df3 483\prelimsec{Bit strings and encodings}
a6e375a6
MW
484\label{sec:bitenc}
485
486Let $\Bin = \{0, 1\}$ be the set of binary digits. Then $\Bin^n$ is the set
487of $n$-bit strings, and $\Bin^*$ the set of all (finite) bit strings. If $x
488\in \Bin^n$ is a bit string, we write its length as $|x| = n$. For a bit
489string $x \in \Bin^n$, and for $0 \le i < n$, we write $x[i]$ as the $i$th
490bit of $x$. The empty string is denoted $\emptystring$.
491
492Let $x$ and $y$ be two bit strings. If $|x| = |y| = n$, we write $x \xor y$
32609df3
MW
493to mean the bitwise exclusive-or of $x$ and $y$\ifshort\else: if $z = x \xor
494y$ then $|z| = n$, and $z[i] = (x[i] + y[i]) \bmod 2$ for $0 \le i < n$\fi.
495We write $x \cat y$ to mean the concatenation of $x$ and $y$\ifshort\else: if
496$z = x \cat y$ then $|z| = |x| + |y|$ and $z[i] = x[i]$ if $0 \le i < |x|$
497and $z[i] = y[i - |x|]$ if $|x| \le i < |x| + |y|$\fi.
a6e375a6
MW
498
499Finally, we let $\bot$ be a value distinct from any bit string.
500
501We shall want to encode group elements $X \in G$ and indices $x \in I =
32609df3
MW
502\gf{q}$ as bit strings.
503\ifshort
504To this end, we shall assume the existence of efficient, unambiguous
505encodings of group elements as $\ell_G$-bit strings, and indices as
506$\ell_I$-bit strings. To reduce clutter, we shall leave encoding and
507decoding as implicit operations.
508\else
509To this end, we shall assume the existence of
a6e375a6
MW
510integers $\ell_G, \ell_I > 0$ and functions
511\[
512 e_S\colon S \to \Bin^{\ell_S}
513 \quad \textrm{and} \quad
514 d_S\colon \Bin^{\ell_S} \to S \cup \{ \bot \}
515 \qquad
32609df3 516 \textrm{for } S \in \{ G, \F \}.
a6e375a6
MW
517\]
518with the following properties.
519\begin{itemize}
520\item The functions are \emph{unique} and \emph{unambiguous}, i.e., for any
521 $t \in \Bin^{\ell_S}$, we have
522 \[ d_S(t) = \begin{cases}
523 s & if there is some $s \in S$ such that $t = e_S(s)$, or \\
524 \bot & if no such $s$ exists.
525 \end{cases}
526 \]
527\item The functions should be \emph{efficient} to compute. Indeed, we shall
528 be assuming that the time taken for encoding and decoding is essentially
529 trivial.
530\end{itemize}
531Note that, as we have defined them, all encodings of group elements are the
532same length, and similarly for encodings of indices. This is necessary for
533the security of our protocols.
534
535We shall frequently abuse notation by omitting the encoding and decoding
536functions where it is obvious that they are required.
32609df3 537\fi
a6e375a6 538
32609df3
MW
539\ifshort\else
540\prelimsec{Games, adversaries, and oracles}
a6e375a6
MW
541\label{sec:games}
542
543Many of the security definitions and results given here make use of
544\emph{games}, played with an \emph{adversary}. An adversary is a
545probabilistic algorithm. In some games, the adversary is additionally
546equipped with \emph{oracles}, which perform computations with values chosen
547by the adversary and secrets chosen by the game but not revealed to the
548adversary. We impose limits on the adversary's resource usage: in
549particular, the total time it takes, and the number of queries it makes to
550its various oracles. Throughout, we include the size of the adversary's
551program as part of its `time', in order to model adversaries which contain
552large precomputed tables.
553
554The games provide models of someone trying to attack a construction or
555protocol. For security, we will either define a notion of `winning' the
556game, and require that all adversaries have only a very small probability of
557winning, or we consider two different games and require that no adversary can
32609df3 558distinguish between the two except with very small probability.
a6e375a6
MW
559
560Our proofs make frequent use of sequences of games; see
081da1b1
MW
561\cite{bellare-rogaway-2006:triple-enc,shoup-2004:sequences-of-games}.
562The presentation owes much to Shoup \cite{shoup-2004:sequences-of-games}. We
563begin with a game $\G0$ based directly on a relevant security definition, and
564construct a sequence of games $\G1$, $\G2$, \dots, each slightly different
565from the last. We define all of the games in a sequence over the same
566underlying probability space -- the random coins tossed by the algorithms
567involved -- though different games may have slightly differently-defined
568events and random variables. Our goal in doing this is to bound the
569probability of the adversary winning the initial game $\G0$ by simultaneously
570(a) relating the probability of this event to that of corresponding events in
571subsequent games, and (b) simplifying the game until the probability of the
572corresponding event can be computed directly.
573
574The following simple lemma from \cite{shoup-2001:oaep-reconsidered} will be
575frequently useful.
a6e375a6
MW
576\begin{lemma}[Difference Lemma]
577 \label{lem:shoup}
32609df3
MW
578 Let $S$, $T$, $F$ be events. Suppose $\Pr[S \mid \bar F] =
579 \Pr[T \mid \bar F]$. Then $|\Pr[S] - \Pr[T]| \le \Pr[F]$.
a6e375a6
MW
580\end{lemma}
581\begin{proof}
582 A simple calculation:
583 \begin{eqnarray*}[rl]
584 |\Pr[S] - \Pr[T]|
32609df3
MW
585 & = |(\Pr[S \mid F]\Pr[F] + \Pr[S \mid \bar F]\Pr[\bar F]) -
586 (\Pr[T \mid F]\Pr[F] + \Pr[T \mid \bar F]\Pr[\bar F])| \\
587 & = \Pr[F] \cdot |\Pr[S \mid F] - \Pr[T \mid F]| \\
a6e375a6
MW
588 & \le \Pr[F]
589 \end{eqnarray*}
590 and we're done!
591\end{proof}
32609df3 592\fi
a6e375a6
MW
593
594
32609df3 595\prelimsec{The random oracle model}
a6e375a6
MW
596\label{sec:ro}
597
32609df3 598\ifshort\else
a6e375a6 599In particular, most of our results will make use of the \emph{random oracle}
081da1b1
MW
600model \cite{bellare-rogaway-1993:random-oracles}, in which all the
601participants, including the adversary, have access to a number of `random
602oracles'. A random oracle with domain $D$ and range $R$ is an oracle which
603computes a function chosen uniformly at random from the set of all such
604functions. (In the original paper
605\cite{bellare-rogaway-1993:random-oracles}, random oracles are considered
606having domain $\Bin^*$ and range $\Bin^\omega$; we use finite random oracles
607here, because they're easier to work with.)
a6e375a6
MW
608
609Given a protocol proven secure in the random oracle model, we can instantiate
610each random oracle by a supposedly-secure hash function and be fairly
611confident that either our protocol will be similarly secure, or one of the
612hash functions we chose has some unfortunate property.
613
614Proofs in the random oracle must be interpreted carefully. For example,
081da1b1
MW
615Canetti, Goldreich and Halevi \cite{canetti-2004:rand-oracle-revisit} show
616that there are schemes which can be proven secure in the random oracle model
617but provably have no secure instantiation in the standard model.
32609df3 618\fi
a6e375a6 619
081da1b1
MW
620The random oracle model \ifshort\cite{bellare-rogaway-1993:random-oracles}
621\fi is useful for constructing reductions and simulators for two main
622reasons.
a6e375a6
MW
623\begin{enumerate}
624\item One can use the transcript of an adversary's queries to random oracles
625 in order to extract knowledge from it.
626\item One can `program' a random oracle so as to avoid being bound by prior
627 `commitments', or to guide an adversary towards solving a selected instance
628 of some problem.
629\end{enumerate}
630Our proofs only make use of the first feature. This becomes particularly
631important when we consider issues of zero-knowledge and deniability in a
632concurrent setting, because we want to be able to claim that we retain these
633features when the random oracle is instantiated using a cryptographic hash
634function, and hash functions definitely aren't `programmable' in this way!
635The former property seems rather more defensible -- one would indeed hope
636that the only sensible way of working out (anything about) the hash of a
637particular string is to actually compute the hash function, and the random
638oracle model is, we hope, just giving us a `hook' into this process.
639
32609df3 640\ifshort\else
a6e375a6
MW
641(Our protocols can be modified to make use of bilinear pairings so as to
642provide identity-based identification and key-exchange, using the techniques
081da1b1
MW
643of \cite{boneh-franklin-2003:ibe-weil-pairing}. Proving the security of the
644modifications we discuss would involve `programming' random oracles, but this
645doesn't affect the zero-knowledge or deniability of the resulting protocols.)
32609df3 646\fi
a6e375a6
MW
647
648
32609df3
MW
649\ifshort\else
650\prelimsec{Notation for algorithms}
a6e375a6
MW
651
652We shall have occasion to describe algorithms by means of a pseudocode. Our
653choice of pseudocode is unlikely to be particularly controversial. We let $x
654\gets y$ denote the action of setting $x$ to the value $y$; similarly, $x
655\getsr Y$ denotes the action of sampling $x$ from the set $Y$ uniformly at
656random.
657
658The expression $a \gets A^{O(\cdot, x)}(y)$ means `assign to $a$ the value
659output by algorithm $A$ on input $y$, and with oracle access to the algorithm
660which, given input $z$, computes $O(z, x)$'.
661
662We make use of conditional (\IF-\ELSE) and looping (\FOR-\DO and \WHILE-\DO)
663constructions; in order to reduce the amount of space taken up, the bodies of
664such constructions are shown by indentation only.
665
666We don't declare the types of our variables explicitly, assuming that these
667will be obvious by inspection; also, we don't describe our variables' scopes
668explicitly, leaving the reader to determine these from context.
669
670Finally, the notation $\Pr[\textit{algorithm} : \textit{condition}]$ denotes
671the probability that \textit{condition} is true after running the given
672\textit{algorithm}.
32609df3 673\fi
a6e375a6 674
32609df3 675\prelimsec{Diffie-Hellman problems}
a6e375a6
MW
676\label{sec:dhp}
677
678The security of our protocols is related to the hardness of the
679computational, decisional, and gap Diffie-Hellman problems in the group $G$.
680We define these problems and what it means for them to be `hard' here.
681
682The \emph{computational} Diffie-Hellman problem (CDH) is as follows: given
683two group elements $X = x P$ and $Y = y P$, find $Z = x y P$.
32609df3 684\ifshort\else
a6e375a6
MW
685\begin{definition}[The computational Diffie-Hellman problem]
686 Let $(G, +)$ be a cyclic group generated by $P$. For any adversary $A$, we
687 say that $A$'s \emph{success probability} at solving the computational
688 Diffie-Hellman problem in $G$ is
  \[ \Succ{cdh}{G}(A) =
       \Pr[ x \getsr \Z/\#G\Z; y \getsr \Z/\#G\Z : A(x P, y P) = x y P ]
  \]
692 where the probability is taken over the random choices of $x$ and $y$ and
693 any random decisions made by $A$. We say that the \emph{CDH insecurity
694 function} of $G$ is
695 \[ \InSec{cdh}(G; t) = \max_A \Succ{cdh}{G}(A) \]
696 where the maximum is taken over adversaries which complete in time $t$.
697\end{definition}
698Certainly, if one can compute discrete logarithms in the group $G$ (i.e.,
699given $x P$, find $x$), then one can solve the computational Diffie-Hellman
081da1b1
MW
problem.  The converse is not clear, though.  Shoup
\cite{shoup-1997:dh-lower-bounds} gives us some confidence in the difficulty
of the problem by showing that a \emph{generic} adversary -- i.e., one which
makes no use of the specific structure of a group -- making at most $q$
group-oracle queries has success probability no greater than $q^2/\#G$.
a6e375a6
MW
705
706This isn't quite sufficient for our purposes. Our proofs will be able to
707come up with (possibly) a large number of guesses for the correct answer, and
708at most one of them will be correct. Unfortunately, working out which one is
709right seems, in general, to be difficult. This is the \emph{decision}
081da1b1
MW
710Diffie-Hellman problem (DDH), which \cite{shoup-1997:dh-lower-bounds} shows,
711in the generic group model, is about as hard as CDH. (See
712\cite{boneh-1998:ddh} for a survey of the decision Diffie-Hellman problem.)
32609df3 713\par\fi
a6e375a6
MW
714Our reference problem will be a `multiple-guess computational Diffie-Hellman
715problem' (MCDH), which is captured by a game as follows. An adversary is
716given a pair of group elements $(x P, y P)$, and an oracle $V(\cdot)$ which
717accepts group elements as input. The adversary wins the game if it queries
718$V(x y P)$.
a6e375a6
MW
719
720\begin{figure}
721 \begin{program}
722 $\Game{mcdh}{G}(A)$: \+ \\
723 $w \gets 0$; \\
32609df3 724 $x \getsr \Z/\#G\Z$; $y \getsr \Z/\#G\Z$; \\
a6e375a6
MW
725 $A^{V(\cdot)}(x P, y P)$; \\
726 \RETURN $w$;
727 \next
728 Function $V(Z)$: \+ \\
729 \IF $Z = x y P$ \THEN \\ \ind
730 $w \gets 1$; \\
731 \RETURN $1$; \- \\
732 \RETURN $0$;
733 \end{program}
734
735 \caption{The multiple-guess computational Diffie-Hellman problem:
736 $\Game{mcdh}{G}(A)$}
737 \label{fig:mcdh}
738\end{figure}
739
32609df3
MW
740\begin{definition}[The multiple-guess computational Diffie-Hellman problem]
741 \label{def:mcdh}
742 Let $(G, +)$ be a cyclic group generated by $P$. For some adversary $A$,
743 we say that $A$'s \emph{success probability} at solving the multiple-guess
744 computational Diffie-Hellman problem in $G$ is
745 \[ \Succ{mcdh}{G}(A) = \Pr[\Game{mcdh}{G}(A) = 1] \]
746 where $\Game{mcdh}{G}(A)$ is shown in figure~\ref{fig:mcdh}. We say that
747 the \emph{MCDH insecurity function of $G$} is
748 \[ \InSec{mcdh}(G; t, q_V) = \max_A \Succ{mcdh}{G}(A) \]
749 where the maximum is taken over adversaries which complete in time $t$ and
  make at most $q_V$ oracle queries.
751\end{definition}
752\ifshort
753We can (loosely) relate the difficulty of MCDH to the difficulty of
754the standard CDH problem, in which the adversary is allowed only a single
755guess.
756\else
a6e375a6
MW
757Note that our MCDH problem is not quite the `gap Diffie-Hellman problem'
(GDH).  The gap problem measures the intractability of solving CDH even with
759the assistance of an oracle for solving (restricted) decision Diffie-Hellman
760problems in the group. Specifically, the adversary is given $(X, Y) = (x P,
761y P)$ and tries to find $Z = x y P$, as for CDH, but now it has access to an
762oracle $D(R, S)$ which answers $1$ if $S = x R$ and $0$ otherwise.
763
764Clearly MCDH is at least as hard as GDH, since our simple verification oracle
765$V(Z)$ can be simulated with the gap problem's DDH oracle, as $D(Y, Z)$.
766However, we can (loosely) relate the difficulty of MCDH to the difficulty of
767CDH.
32609df3 768\fi
a6e375a6
MW
769\begin{proposition}[Comparison of MCDH and CDH security]
770 For any cyclic group $(G, +)$,
32609df3
MW
771 \[ \InSec{mcdh}(G; t, q_V) \le
772 \ifshort q_V\,\InSec{mcdh}(G; t + O(q_V), 1) \else
773 q_V\,\InSec{cdh}(G; t + O(q_V)) \fi.
774 \]
a6e375a6 775\end{proposition}
32609df3
MW
776\begin{longproof}{The proof of this proposition may be found in the full
777 version of this paper.}
a6e375a6
MW
778 Let $A$ be an adversary attacking the multiple-guess computational
779 Diffie-Hellman problem in $G$, and suppose that it runs in time $t$ and
780 issues $q_V$ queries to its verification oracle.
781
782 We use a sequence of games. Game $\G0$ is the original MCDH attack game.
783 In each game $\G{i}$, we let the event $S_i$ be the probability that the
784 adversary wins the game.
785
786 Game $\G1$ is the same as $\G0$, except that we change the behaviour of the
787 verification oracle. Specifically, we make the oracle always return $0$.
788 We claim that this doesn't affect the adversary's probability of winning,
789 i.e., $\Pr[S_1] = \Pr[S_0]$. To see this, note that if none of the
790 adversary's $V(\cdot)$ queries was correct, then there is no change in the
791 game; conversely, if any query was correct, then the adversary will have
792 won regardless of its subsequent behaviour (which may differ arbitrarily
793 between the two games).
794
795 We are now ready to construct from $A$ an adversary $B$ attacking the
796 standard computational Diffie-Hellman problem.
797 \begin{program}
798 Adversary $B(X, Y)$: \+ \\
799 $n \gets 0$; \\
800 \FOR $i \in \Nupto{q_V}$ \DO $Q_i \gets 0$; \\
    $A^{D(\cdot)}(X, Y)$; \\
802 $r \getsr \Nupto{n}$; \\
803 \RETURN $Q_r$;
804 \next
805 Function $D(Z')$: \+ \\
806 $Q_n \gets Z'$; \\
807 $n \gets n + 1$; \\
808 \RETURN $0$;
809 \end{program}
810 Observe that $B$ provides $A$ with an accurate simulation of game $\G1$.
32609df3
MW
811 Moreover, at the end of the algorithm, we have $0 < n \le q_V$, and the
812 values $Q_0$, $Q_1$, \dots, $Q_{n-1}$ are the values of $A$'s oracle
  queries.  Hence, with probability $\Pr[S_1]$, at least one of the $Q_i$
814 is the correct answer to the CDH problem. Let $\epsilon = \Pr[S_1] =
815 \Pr[S_0]$; we claim that $B$'s probability of success is at least
816 $\epsilon/q_V$. The proposition follows directly from this claim and that,
817 because $A$ was chosen arbitrarily, we can maximize and count resources.
a6e375a6
MW
818
819 We now prove the above claim. For $0 \le i < q_V$, let $W_i$ be the
820 event that $Q_i = x y P$, i.e., that $Q_i$ is the correct response. A
821 simple union bound shows that
822 \[ \sum_{0\le i<j} \Pr[W_i \mid n = j] \ge \epsilon. \]
823 We now perform a calculation:
824 \begin{eqnarray*}[rl]
825 \Succ{cdh}{G}(B)
826 & = \sum_{0\le i<q_V} \Pr[W_i \land r = i] \\
827 & = \sum_{0<j\le q_V} \Pr[n = j]
828 \biggl( \sum_{0\le i<j} \Pr[W_i \land r = i \mid n = j] \biggr) \\
829 & = \sum_{0<j\le q_V} \Pr[n = j]
830 \biggl( \frac{1}{j} \sum_{0\le i<j} \Pr[W_i \mid n = j] \biggr) \\
831 &\ge \sum_{0<j\le q_V} \Pr[n = j] \frac{\epsilon}{j} \\
832 &\ge \frac{\epsilon}{q_V} \sum_{0<j\le q_V} \Pr[n = j] \\
833 & = \frac{\epsilon}{q_V}.
834 \end{eqnarray*}
  This completes the proof.
32609df3 836\end{longproof}
a6e375a6 837
32609df3
MW
838\ifshort\else
839\prelimsec{Example groups and encodings}
a6e375a6
MW
840
841For nonnegative integers $0 \le n < 2^\ell$, there is a natural binary
842encoding $N_\ell\colon \Nupto{2^\ell} \to \Bin^\ell$ which we can define
843recursively as follows.
844\[ N_0(0) = \emptystring \qquad
845 N_\ell(n) = \begin{cases}
846 N_{\ell-1}(n) \cat 0 & if $0 \le n < 2^{\ell-1}$ \\
847 N_{\ell-1}(n - 2^{\ell-1}) \cat 1 & if $2^{\ell-1} \le n < 2^\ell$.
848 \end{cases}
849\]
850Given an encoding $a = N_\ell(n)$ we can recover $n$ as
851\[ n = \sum_{0\le i<\ell} a[i] 2^i. \]
852Hence, given some limit $L \le 2^\ell$, we can encode elements of $\Nupto{L}$
853using the functions $(e, d)$:
854\[ e(L, \ell, n) = N_\ell(n) \qquad
855 d(L, \ell, a) = \begin{cases}
     N_\ell^{-1}(a) & if $N_\ell^{-1}(a) < L$ \\
857 \bot & otherwise
858 \end{cases}
859\]
860The reader can verify that the functions $e(L, \ell, \cdot)$ and $d(L, \ell,
861\cdot)$ satisfy the requirements of section~\ref{sec:bitenc}.
862
32609df3
MW
863Given some $q$ with $q < 2^{\ell_I}$, then, we can define an encoding
864$(e_\F, d_\F)$ by $e_\F(n) = e(q, \ell_I, n)$ and $d_\F(a) = d(q, \ell_I,
865a)$.
866
867Let $p$ and $q$ be primes, with $q \mid (p - 1)$. Then there is an order-$q$
868subgroup of $\F_p^*$. In practice, an order-$q$ element can be found easily
by taking elements $h \in \F_p^*$ at random and computing $g = h^{(p-1)/q}$
870until $g \ne 1$; then $G = \langle g \rangle$ is a group of $q$ elements.
871Assuming that $p$ and $q$ are sufficiently large, the Diffie-Hellman problems
872seem to be difficult in $G$. Some texts recommend additional restrictions on
873$p$, in particular that $(p - 1)/2q$ be either prime or the product of large
874primes. Primes of this form protect against small-subgroup attacks; but our
875protocols are naturally immune to these attacks, so such precautions are
876unnecessary here. Elements of $G$ can be encoded readily, since each element
877$n + p\Z$ of $\F_p = \Z/p\Z$ has an obvious `representative' integer $n$ such
878that $0 \le n < p$, and given $2^{\ell_G} > p$, we can encode $n$ as $e(p,
879\ell_G, n)$, as above.
a6e375a6
MW
880
881Alternatively, let $\F = \gf{p^f}$ be a finite field, and $E$ be an elliptic
882curve defined over $\F$ such that the group $E(\F)$ of $\F$-rational points
883of $E$ has a prime-order cyclic subgroup $G$. Elements of $G$ can be
884represented as pairs of elements of $\F$. If $f = 1$, i.e., $\F = \Z/p\Z$
885then field elements can be encoded as above. If $p = 2$, we can represent
the field as $\F_2[x]/(p(x))$ for some irreducible polynomial $p(x) \in \F_2[x]$
887of degree $f$. An element $r \in \F$ can then be represented by a polynomial
888$r(x)$ with degree less than $f$, and coefficients $c_i \in \{0, 1\}$, i.e.,
889\[ r(x) = \sum_{0\le i<f} c_i x^i \]
890and hence we can uniquely encode $r$ as an $f$-bit string $a$ such that $a[i]
891= c_i$.
32609df3 892\fi
a6e375a6
MW
893
894
32609df3 895\prelimsec{Symmetric encryption}
a6e375a6
MW
896\label{sec:sym-enc}
897
898Our key-exchange protocol requires a symmetric encryption scheme. Our
899definition is fairly standard, except that, rather than specifying a
900key-generation algorithm, we assume that key generation simply involves
901selecting a string of a given length uniformly at random.
902\begin{definition}[Symmetric encryption schemes]
903 A \emph{symmetric encryption scheme} $\E = (\kappa, E, D)$ consists of:
904 \begin{itemize}
905 \item an integer $\kappa \ge 0$,
906 \item a randomized \emph{encryption algorithm} $E$ which, on input $K \in
907 \Bin^\kappa$ and $p \in \Bin^*$ outputs some $c \in \Bin^*$, written $c
908 \gets E_K(p)$;
909 \item a \emph{decryption algorithm} $D$ which, on input $K \in \Bin^\kappa$
910 and $c \in \Bin^*$ outputs some $p' \in \Bin^* \cup \{\bot\}$, written
911 $p' \gets D_K(c)$.
912 \end{itemize}
913 Furthermore, a symmetric encryption scheme must be \emph{sound}: that is,
914 if $c \gets E_K(p)$ for some $K \in \Bin^\kappa$ and $p \in \Bin^*$, and
915 $p' \gets D_K(c)$ then $p = p'$.
916\end{definition}
917Our security notion for symmetric encryption is the standard notion of
918left-or-right indistinguishability of ciphertexts under chosen-ciphertext
919attack.
32609df3 920\begin{definition}[IND-CCA]
dff0fad2 921 \label{def:ind-cca}
a6e375a6
MW
922 Let $\E = (\kappa, E, D)$ be a symmetric encryption scheme, and $A$ be an
923 adversary. Let $\id{lr}_b(x_0, x_1) = x_b$ for $b \in \{0, 1\}$. Let
924 \[ P_b =
dff0fad2
MW
925 \Pr[K \getsr \Bin^\kappa;
926 b \gets A^{E_K(\id{lr}_b(\cdot, \cdot)), D_K(\cdot)}() :
927 b = 1]
a6e375a6
MW
928 \]
929 An adversary is \emph{valid} if
930 \begin{itemize}
931 \item for any query to its encryption oracle $E_K(\id{lr}_b(x_0, x_1))$ we
932 have $|x_0| = |x_1|$, and
933 \item no query to the decryption oracle $D_K(\cdot)$ is equal to any reply
934 from an encryption query.
935 \end{itemize}
936 If $A$ is valid, then we define its \emph{advantage} in attacking the
937 security of $\E$ as follows
  \[ \Adv{ind-cca}{\E}(A) = P_1 - P_0. \]
939 Further, we define the \emph{IND-CCA insecurity function of $\E$} to be
940 \[ \InSec{ind-cca}(\E; t, q_E, q_D) = \max_A \Adv{ind-cca}{\E}(A) \]
941 where the maximum is taken over all valid adversaries $A$ which run in time
942 $t$, and issue at most $q_E$ encryption and $q_D$ decryption queries.
943\end{definition}
944
945
946\subsection{Simulations}
947\label{sec:sim}
948
949In section~\ref{sec:zk-ident}, we shall prove that our identification
950protocol is zero-knowledge; in section~\ref{sec:denial}, we show that our
951key-exchange protocol is deniable. In both of these proofs, we shall need to
32609df3
MW
952demonstrate \emph{simulatability}.
953
954\ifshort
955
956We consider an adversary~$A$ interacting with a `world'~$W$; we model both as
957probabilistic algorithms. Both $A$ and~$W$ are given a common input~$c$; the
958world is additionally given a private input~$w$; these are chosen by a
959randomized initialization function $I$. The adversary is additionally given
960an auxiliary input~$u$ computed from $w$ by a randomized algorithm~$U$. All
961these algorithms -- the adversary and the world, but also the initialization
962and auxiliary-input algorithms $I$ and~$U$ -- have access to a number of
963random oracles $\mathcal{H} = (H_0, H_1, \ldots, H_{n-1})$. The adversary
964eventually decides to stop interacting, and produces an output~$a$.
965
966A \emph{simulator} for $A$'s interaction with $W$ is an algorithm $S^A$ which
967attempts to produce a similar output distribution, but without interacting
968with $W$. The simulator is given the same inputs $(c, u)$ as we gave
969to~$A$, and $S$ is also allowed to query the random oracles~$\mathcal{H}$.
970
971To measure the effectiveness of a simulator, we consider a distinguisher~$D$
972which is given $(c, u, a)$, and access to $\mathcal{H}$, and returns a bit
973$b$ representing its verdict as to whether the output $a$ was produced by the
974adversary or the simulator.
975
976\else
a6e375a6
MW
977
978\subsubsection{General framework}
979Consider a game in which an adversary~$A$ interacts with some `world'~$W$,
980which we shall represent as a probabilistic algorithm. The world may in fact
981represent a number of honest parties communicating in a concurrent fashion,
982but we can consider them as a single algorithm for our present purposes.
983
984Initially the world and the adversary are both given the same \emph{common
985input}~$c$; in addition, the world is given a \emph{private input}~$w$.
986Both $c$ and~$w$ are computed by an \emph{initialization function}~$I$, which
987is considered to be part of the definition of the game. Finally, the
988adversary decides somehow that it has finished interacting, and outputs a
989value~$a$. All this we notate as
990\[ (w, c) \gets I(); a \gets A^{W(w, c)}(c). \]
991This game is \emph{simulatable} if there is an algorithm~$S$ -- the
992\emph{simulator} -- which can compute the same things as~$A$, but all by
993itself without interacting with the world. That is, we run the simulator on
994the common input~$c$, allowing it to interact in some way with the
995adversary~$A$, and finally giving us an output~$s$.
996\[ (w, c) \gets I(); s \gets S^A(c). \]
997We shall say that the simulator is \emph{effective} if it's difficult to tell
998whether a given string was output by the adversary after interacting with the
999world, or by the simulator running by itself. That is, for any algorithm~$D$
1000-- a \emph{distinguisher} -- running in some bounded amount of time, its
1001advantage
1002\begin{spliteqn*}
1003 \Pr[(w, c) \gets I(); a \gets A^{W(w, c)}(c);
1004 b \gets D(c, a) : b = 1] - {} \\
1005 \Pr[(w, c) \gets I(); s \gets S^A(c); b \gets D(c, s) : b = 1]
1006\end{spliteqn*}
1007is small. (Note that we gave the distinguisher the common input as well as
1008the output of the adversary or the simulator.)
1009
1010It's usual to study \emph{transcripts} of interactions in these kinds of
1011settings. We are considering arbitrary adversarial outputs here, so this
1012certainly includes adversaries which output a transcript of their
1013interactions. Indeed, for any adversary~$A$, we could construct an
1014adversary~$A_T$ which performs the same computation, and outputs the same
1015result, but also includes a complete transcript of $A$'s interaction with the
1016world. Therefore we're just providing additional generality.
1017
1018\subsubsection{Random oracles}
1019We shall be considering interactions in which all the parties have access to
1020several random oracles. We could simply say that the random oracles are part
1021of the world~$W$. In the setting described above, only the adversary
1022actually interacts with the world (and therefore would be able to query
1023random oracles). The simulator would be forced to `make up' its own random
1024oracle, and the distinguisher would have to study the distributions of the
1025random-oracle queries and their responses to make up its mind about which it
1026was given.
1027
1028However, this would be a poor model for the real world, since once we
1029instantiate the random oracle with a hash function, we know that everyone
would actually be able to compute the hash function for themselves.  Thus
1031a distinguisher in the real world would be able to tell the difference
1032immediately between a real interaction and the simulated transcript, since
1033the `random oracle' queries recorded in the latter would be wrong!
1034
1035Therefore we decide not to include the random oracles as part of the world,
1036but instead allow all the participants -- adversary, simulator and
1037distinguisher -- access to them. If we denote by~$\mathcal{H} = (H_0, H_1,
1038\ldots, H_{n-1})$ the collection of random oracles under consideration, the
1039expression for the distinguisher's advantage becomes
1040\begin{spliteqn*}
1041 \Pr[(w, c) \gets I(); a \gets A^{W(w, c), \mathcal{H}}(c);
1042 b \gets D^{\mathcal{H}}(c, a) : b = 1] - {} \\
1043 \Pr[(w, c) \gets I(); s \gets S^{A, \mathcal{H}}(c);
1044 b \gets D^{\mathcal{H}}(c, s) : b = 1].
1045\end{spliteqn*}
1046
1047\subsubsection{Auxiliary inputs}
1048If an adversary's output can be effectively simulated, then we can
1049confidently state that the adversary `learnt' very little of substance from
1050its interaction, and certainly very little it can \emph{prove} to anyone
1051else. However, as we have described the setting so far, we fix an adversary
1052before we choose inputs to the world, so our model says little about what an
1053adversary which has already acquired some knowledge might learn beyond that.
1054For example, an adversary might overhear some other conversation between
1055honest parties and be able to use this to its advantage.
1056
1057To this end, we give the adversary an \emph{auxiliary input}~$u$, computed by
1058an algorithm~$U$. We give $U$ both $c$ and $w$, in order to allow the
1059adversary to gain some (possibly partial) knowledge of the secrets of the
1060other parties. We also allow $U$ access to the random oracles~$\mathcal{H}$,
1061because clearly in the `real world' it would be ridiculous to forbid such an
1062algorithm from computing a publicly-known hash function.
1063
1064The simulator and distinguisher are also given the auxiliary input. The
1065simulator is meant to represent the adversary's ability to compute things on
1066its own, without interacting with the world, and since the adversary is given
1067the auxiliary input, the simulator must be too. The distinguisher must be
32609df3
MW
1068given the auxiliary input because otherwise the simulator could just `make
1069up' plausible-looking inputs.
1070
1071\fi
1072
1073\ifshort
1074Because we're interested in a concrete, quantitative analysis, we must
1075constrain the resource usage of the various algorithms described above.
1076Specifically, we shall be interested in
1077\else
a6e375a6
MW
1078
1079\subsubsection{Resource limits}
1080We shall not allow our algorithms to perform completely arbitrary
1081computations and interactions. Instead, we impose limits on the amount of
1082time they are allowed to take, the number of random-oracle queries they make,
1083and so on. Specifically, we are interested in
32609df3 1084\fi
a6e375a6
MW
1085\begin{itemize}
1086\item the time $t_A$ taken by the adversary and $t_D$ taken by the
1087 distinguisher,
1088\item the number of oracle queries $\mathcal{Q}_A = (q_{A,0}, q_{A,1},
1089 \ldots, q_{A,n-1})$ made by the adversary, and $\mathcal{Q}_D$ made by the
1090 distinguisher,
1091\item a number of resource bounds $\mathcal{R}$ on the adversary's
1092 interaction with the world (e.g., number of messages of various kinds sent
1093 and received), and
1094\item a number of bounds $\mathcal{U}$ on the contents of the adversary's
1095 auxiliary input~$u$.
1096\end{itemize}
1097Sometimes we shall not be interested in proving simulatability of adversaries
1098with auxiliary inputs. We write $\mathcal{U} = 0$ to indicate that auxiliary
32609df3
MW
1099input is not allowed.
1100
1101\ifshort\else
a6e375a6
MW
1102
1103\subsubsection{World syntax}
1104It will be worth our while being more precise about what a `world' actually
1105is, syntactically. We define a world to be a single, randomized algorithm
1106taking inputs $(\iota, \sigma, \tau, \mu) \in (\Bin^*)^4$; the algorithm's
1107output is a pair $(\sigma', \rho) \in (\Bin^*)^2$. We show how the
1108adversary's interaction is mapped on to this world algorithm in
1109figure~\ref{fig:sim-world}.
1110\begin{itemize}
1111\item The `input' $\iota$ is the result of the initialization function~$I$.
1112 That is, it is the pair $(w, c)$ of the world's private input and the
1113 common input.
1114\item The `state' $\sigma$ is empty on the world's first invocation; on each
1115 subsequent call, the value of the world's output $\sigma'$ is passed back.
1116 In this way, the world can maintain state.
\item The `type' $\tau$ is a token giving the type of invocation this is.
a6e375a6
MW
1118\item The `message' $\mu$ is any other information passed in; its form will
1119 typically depend on the type~$\tau$ of the invocation.
1120\item The `new state' $\sigma'$ is the value of $\sigma$ to pass to the next
1121 invocation of the world.
\item The `reply' $\rho$ is the actual output of the invocation.
1123\end{itemize}
1124There are two special invocation types. The adversary is \emph{forbidden}
1125from making special invocations.
1126\begin{itemize}
1127\item The special invocation type $\cookie{init}$ is used to allow the world to
1128 prepare an initial state. The world is invoked as
1129 \[ W^{\mathcal{H}}(\iota, \emptystring, \cookie{init}, \emptystring) \]
1130 and should output an initial state $\sigma'$. The world's reply $\rho$ is
1131 ignored. (Recall that $\emptystring$ represents the empty string.)
1132\item The special invocation type $\cookie{random}$ is used to inform the
1133 world that the adversary has issued a random oracle query. The world is
1134 invoked as
1135 \[ W^{\mathcal{H}}(\iota, \sigma, \cookie{random}, (i, x, h)) \]
1136 to indicate that the adversary has queried its random oracle $H_i(\cdot)$
1137 on the input $x$, giving output~$h$. The world may output an updated state
1138 $\sigma'$; its reply $\rho$ is ignored.
1139\end{itemize}
1140The latter special query is a technical device used to allow the `fake-world'
1141simulators we define below to be aware of the adversary's random oracle
1142queries without being able to `program' the random oracle. Including it here
1143does little harm, and simplifies the overall exposition.
1144
1145\begin{figure}
1146 \begin{program}
1147 Interaction $A^{W(w, c), \mathcal{H}}(c, u)$: \+ \\
1148 $(\sigma, \rho) \gets
1149 W((w, c), \emptystring, \cookie{init}, \emptystring)$; \\
1150 $a \gets A^{\id{world}(\cdot, \cdot),
1151 \id{random}(\cdot, \cdot)}(c, u)$; \\
1152 \RETURN $a$;
1153 \newline
1154 Function $\id{world}(\tau, \mu)$: \+ \\
1155 \IF $\tau \in \{\cookie{init}, \cookie{random}\}$ \THEN
1156 \RETURN $\bot$; \\
1157 $(\sigma, \rho) \gets W((w, c), \sigma, \tau, \mu)$; \\
1158 \RETURN $\rho$; \-
1159 \next
1160 Function $\id{random}(i, x)$: \+ \\
1161 $h \gets H_i(x)$; \\
1162 $(\sigma, \rho) \gets
1163 W((w, c), \sigma, \cookie{random}, (i, x, h))$; \\
1164 \RETURN $h$;
1165 \end{program}
1166
1167 \caption{Interacting with a world: Interaction $A^{W, \mathcal{H}}$}
1168 \label{fig:sim-world}
1169\end{figure}
1170
1171\subsubsection{Definitions}
1172We are now ready to begin making definitions.
32609df3
MW
1173\fi
1174
a6e375a6
MW
1175\begin{definition}[Simulation security]
1176 \label{def:sim}
1177 Consider the game described above, with the initialization function~$I$,
1178 and the world~$W$: let $A$ be an adversary, and let~$U$ be an
1179 auxiliary-input function; let $S$ be a simulator, and let $D$ be a
1180 distinguisher. We define $D$'s \emph{advantage against $S$'s simulation of
1181 $A$'s interaction with~$W$ with auxiliary inputs provided by~$U$} to be
1182 \[ \Adv{sim}{W, I, S}(A, U, D) =
1183 \Pr[\Game{real}{W, I, S}(A, U, D) = 1] -
1184 \Pr[\Game{sim}{W, I, S}(A, U, D)= 1]
1185 \]
1186 where the games are as shown in figure~\ref{fig:sim}.
1187 Furthermore, we define the \emph{simulator's insecurity function} to be
1188 \[ \InSec{sim}(W, I, S;
1189 t_D, t_A, \mathcal{Q}_D, \mathcal{Q}_A, \mathcal{R}, \mathcal{U}) =
1190 \max_{D, A, U} \Adv{sim}{W, I, S}(A, U, D)
1191 \]
1192 where the maximum is taken over all distinguishers~$D$ running in
1193 time~$t_D$ and making at most $\mathcal{Q}_D$ random-oracle queries, and
1194 all adversaries~$A$ running in time~$t_A$, making at most $\mathcal{Q}_A$
1195 random-oracle queries, not exceeding the other stated resource
1196 bounds~$\mathcal{R}$ on its interaction with~$W$, and auxiliary-input
1197 functions producing output not exceeding the stated bounds~$\mathcal{U}$.
1198\end{definition}
1199\begin{remark}
1200 The usual definitions of zero-knowledge, for example, require the simulator
1201 to work for all choices of inputs (common, private and auxiliary), rather
1202 than for random choices. Our definition therefore looks weaker. Our proof
1203 of zero-knowledge actually carries through to the traditional
1204 stronger-looking definition. Critically, however, the standard
1205 universal quantification over inputs fails to capture deniability in the
1206 random oracle model, since the inputs can't therefore depend on the random
1207 oracle. Our formulation therefore actually gives \emph{stronger}
32609df3 1208 deniability than the usual one.
a6e375a6
MW
1209\end{remark}
1210
1211\begin{figure}
1212 \begin{program}
1213 $\Game{real}{W, I, S}(A, U, D)$: \+ \\
1214 $(w, c) \gets I()$; \\
1215 $u \gets U^{\mathcal{H}}(w, c)$; \\
1216 $a \gets A^{W(w, c), \mathcal{H}}(c, u)$; \\
1217 $b \gets D^{\mathcal{H}}(c, u, a)$; \\
1218 \RETURN $b$;
1219 \next
1220 $\Game{sim}{W, I, S}(A, U, D)$: \+ \\
1221 $(w, c) \gets I()$; \\
1222 $u \gets U^{\mathcal{H}}(w, c)$; \\
1223 $s \gets S^{A, \mathcal{H}}(c, u)$; \\
1224 $b \gets D^{\mathcal{H}}(c, u, s)$; \\
1225 \RETURN $b$;
1226 \end{program}
1227
1228 \caption{Games for simulation: $\Game{real}{W, I}$ and $\Game{sim}{W, I}$}
1229 \label{fig:sim}
1230\end{figure}
1231
32609df3 1232\ifshort\else
a6e375a6
MW
1233\subsubsection{Fake-world simulators}
1234The simulators we shall be considering in the present paper are of a specific
1235type which we call `fake-world simulators'. They work by running the
1236adversary in a fake `cardboard cut-out' world, and attempting to extract
1237enough information from the adversary's previous interactions and random
1238oracle queries to maintain a convincing illusion.
1239
1240That is, the behaviour of a fake-world simulator~$S$ is simply to allow the
1241adversary to interact with a `fake world'~$W'$, which was not given the world
1242private input. That is, there is some world $W'$ such that
1243\[ S^{A, \mathcal{H}}(c, u) \equiv A^{W'(u, c), \mathcal{H}}(c, u) \]
1244Fake-world simulators are convenient because they allow us to remove from
1245consideration the distinguisher~$D$ as the following definition shows.
1246\begin{definition}[Fake-world simulation security]
1247 \label{def:fakesim}
1248 Let $I$, $W$ and $U$ be as in definition~\ref{def:sim}. Let $A$ be an
1249 adversary which outputs a single bit. Let $S$ be a fake-world simulator.
1250 We define $A$'s \emph{advantage against $S$'s fake-world simulation of $W$
1251 with auxiliary inputs provided by~$U$} to be
1252 \begin{spliteqn*}
1253 \Adv{fw}{W, I, S}(A, U) =
1254 \Pr[(w, c) \gets I(); u \gets U^{\mathcal{H}}(w, c);
1255 b \gets A^{W(w, c), \mathcal{H}}(c, u) : b = 1] - {} \\
1256 \Pr[(w, c) \gets I(); u \gets U^{\mathcal{H}}(w, c);
1257 b \gets S^{A, \mathcal{H}}(c, u) : b = 1]
1258 \end{spliteqn*}
  Furthermore, we define the \emph{simulator's insecurity function} to be
  \[ \InSec{fw}(W, I, S;
	 t, \mathcal{Q}, \mathcal{R}, \mathcal{U}) =
       \max_{A, U} \Adv{fw}{W, I, S}(A, U)
  \]
1264 where the maximum is taken over all adversaries~$A$ running in time~$t$,
1265 making at most $\mathcal{Q}$ random-oracle queries, not exceeding the other
1266 stated resource bounds~$\mathcal{R}$ on its interaction with~$W$, and
1267 auxiliary-input functions producing output not exceeding the stated
1268 bounds~$\mathcal{U}$.
1269\end{definition}
1270It remains for us to demonstrate that this is a valid way of analysing
1271simulators; the following simple proposition shows that this is indeed the
1272case.
1273\begin{proposition}[Fake-world simulation]
1274 \label{prop:fakesim}
1275 Let $I$ be an initialization function and let $W$ be a world. Then, for
1276 any fake-world simulator~$S$,
1277 \[ \InSec{sim}(W, I, S; t_D, t_A, \mathcal{Q}_D, \mathcal{Q}_A,
1278 \mathcal{R}, \mathcal{U}) \le
1279 \InSec{fw}(W, I, S; t_A + t_D, \mathcal{Q}_D + \mathcal{Q}_A,
1280 \mathcal{R}, \mathcal{U})
1281 \]
1282 (where addition of query bounds $\mathcal{Q}$ is done elementwise).
1283\end{proposition}
1284\begin{proof}
1285 Let $W$ and $I$ as in the proposition statement be given; also let a
1286 distinguisher~$D$ running in time~$t_D$ and making $\mathcal{Q}_D$
1287 random-oracle queries, an adversary~$A$ running in time~$t_A$ and making
1288 $\mathcal{Q}_A$ random-oracle queries and interacting with its world within
1289 the stated bounds~$\mathcal{R}$, an auxiliary-input function~$U$ satisfying
1290 the constraints~$\mathcal{U}$ on its output, and a fake-world simulator~$S$
1291 all be given.
1292
1293 We construct an adversary~$B$ outputting a single bit as follows
1294 \begin{program}
1295 Adversary $B^{W, \mathcal{H}}(c, u)$: \+ \\
1296 $a \gets A^{W, \mathcal{H}}(c, u)$; \\
1297 $b \gets D^{\mathcal{H}}(c, u, a)$; \\
1298 \RETURN $b$;
1299 \end{program}
1300 A glance at definitions \ref{def:sim} and~\ref{def:fakesim} and the
1301 resources used by $B$ shows that
1302 \[ \Adv{sim}{W, I, S}(A, U) = \Adv{fw}{W, I, S}(B, U)
1303 \le \InSec{fw}(W, I, S; t_D + t_A, \mathcal{Q}_D + \mathcal{Q}_A,
1304 \mathcal{R}, \mathcal{U})
1305 \]
1306 as required.
1307\end{proof}
32609df3 1308\fi
a6e375a6
MW
1309
1310%%%--------------------------------------------------------------------------
1311
1312\section{A zero-knowledge identification scheme}
1313\label{sec:zk-ident}
1314
1315
1316\subsection{Description}
1317
1318Here we present a simple zero-knowledge identification scheme. Fix some
32609df3
MW
1319group $G$ with prime order $q = \#G$. Suppose Alice chooses a private key $x
1320\inr \gf{q}$, and publishes the corresponding public key $X = x P$. Let
1321$H_I\colon G^2 \to \Bin^{\ell_I}$ be a secure hash function. Here's a simple
1322protocol which lets her prove her identity to Bob.
a6e375a6 1323\begin{enumerate}
32609df3
MW
1324\item Bob selects a random $r \inr \gf{q}$, and computes $R = r P$, $Y = r X$,
1325 and $c = r \xor H_I(R, Y)$. He sends the pair $(R, c)$ to Alice as his
1326 \emph{challenge}.
a6e375a6
MW
1327\item Alice receives $(R, c)$. She computes $Y' = x R$ and $r' = c \xor
1328 H_I(R, Y')$, and checks that $R = r' P$. If so, she sends $Y'$ as her
1329 \emph{response}; otherwise she sends $\bot$.
1330\item Bob receives $Y'$ from Alice. He checks that $Y' = Y$. If so, he
1331 accepts that he's talking to Alice; otherwise he becomes suspicious.
1332\end{enumerate}
1333We name this the Wrestlers Identification Protocol in~$G$, $\Wident^G$ (we
1334drop the superscript to refer to the protocol in general, or when no
1335ambiguity is likely to result). A summary is shown in
1336figure~\ref{fig:wident}.
1337
1338\begin{figure}
1339 \begin{description}
32609df3 1340 \item[Setup] Group $G = \langle P \rangle$; $\#G = q$ is prime.
a6e375a6 1341 $H_I(\cdot, \cdot)$ is a secure hash.
32609df3 1342 \item[Private key] $x \inr \gf{q}$.
a6e375a6 1343 \item[Public key] $X = x P$.
32609df3 1344 \item[Challenge] $(R, c)$ where $r \inr \gf{q}$, $R = r P$, $c = r \xor
a6e375a6
MW
1345 H_I(R, r X)$.
1346 \item[Response] $x R = r X$ if $R = (c \xor H_I(R, x R)) P$; otherwise
1347 $\bot$.
1348 \end{description}
1349
1350 \caption{Summary of the Wrestlers Identification Protocol, $\Wident$}
1351 \label{fig:wident}
1352\end{figure}
1353
1354
1355\subsection{Security}
1356
1357In order to evaluate the security of our protocol, we present a formal
1358description of the algorithms involved in figure~\ref{fig:wident}. Here, the
1359hash function $H_I(\cdot, \cdot)$ is modelled as a random oracle.
1360
1361\begin{figure}
1362 \begin{program}
1363 Function $\id{setup}()$: \+ \\
32609df3 1364 $x \getsr \gf{q}$; \\
a6e375a6
MW
1365 $X \gets x P$; \\
1366 \RETURN $(x, X)$;
32609df3 1367 \ifshort\newline\else\next\fi
a6e375a6 1368 Function $\id{challenge}^{H_I(\cdot, \cdot)}(X)$: \+ \\
32609df3 1369 $r \getsr \gf{q}$; \\
a6e375a6
MW
1370 $R \gets r P$; $Y \gets r X$; \\
1371 $h \gets H_I(R, Y)$; $c \gets r \xor h$; \\
1372 \RETURN $(Y, R, c)$; \- \\[\medskipamount]
1373 Function $\id{verify}(Y, Y')$: \+ \\
1374 \IF $Y' = Y$ \THEN \RETURN $1$; \\
1375 \RETURN $0$;
1376 \next
1377 Function $\id{response}^{H_I(\cdot, \cdot)}(R, c, x)$: \+ \\
1378 $Y' \gets x R$; \\
1379 $h \gets H_I(R, Y')$; $r' \gets c \xor h$; \\
1380 \IF $R \ne r' P$ \THEN \RETURN $\bot$; \\
1381 \RETURN $Y'$;
1382 \end{program}
1383
1384 \caption{Functions implementing $\Wident$ in the random oracle model}
1385 \label{fig:wident-ro}
1386\end{figure}
1387
1388\subsubsection{Completeness}
1389Suppose that Bob really is talking to Alice. Note that $Y' = x R = x (r P) =
1390 r (x P) = r X = Y$. Hence $r' = c \xor H_I(R, Y') = c \xor H_I(R, Y) = r$,
1391so $r' P = r P = R$, so Alice returns $Y' = Y$ to Bob. Therefore $\Wident$
1392is \emph{complete}: if Bob really is communicating with Alice then he
1393accepts.
1394
1395\subsubsection{Soundness}
1396We next show that impersonating Alice is difficult. The natural way to prove
1397this would be to give an adversary a challenge and prove that its probability
1398of giving a correct response is very small. However, we prove a stronger
1399result: we show that if the adversary can respond correctly to any of a large
1400collection of challenges then it can solve the MCDH problem.
1401
1402Consider the game $\Game{imp}{\Wident}$ shown in
1403figure~\ref{fig:wident-sound}. An adversary's probability of successfully
1404impersonating Alice in our protocol, by correctly responding to any one of
1405$n$ challenges, is exactly its probability of winning the game (i.e., causing
1406it to return $1$).
1407
1408\begin{figure}
1409 \begin{program}
1410 $\Game{imp-$n$}{\Wident}(A)$: \+ \\
1411 $H_I \getsr \Func{G^2}{\Bin^{\ell_I}}$; \\
1412 $(x, X) \gets \id{setup}()$; \\
1413 $\id{win} \gets 0$; \\
1414 $\Xid{R}{map} \gets \emptyset$; \\
1415 $\mathbf{c} \gets \id{challenges}(n)$; \\
1416 $(R', Y') \gets A^{H_I(\cdot, \cdot), \id{check}(\cdot, \cdot)}
1417 (X, \mathbf{c})$; \\
1418 \RETURN $\id{win}$;
1419 \newline
1420 Function $\id{challenges}(n)$: \+ \\
1421 \FOR $i \in \Nupto{n}$ \DO \\ \ind
1422 $(Y, R, c) \gets \id{challenge}^{H_I(\cdot, \cdot)}(X)$; \\
1423 $\Xid{R}{map} \gets \Xid{R}{map} \cup \{ R \mapsto Y \}$; \\
1424 $\mathbf{c}[i] \gets (R, c)$; \- \\
1425 \RETURN $\mathbf{c}$; \next
1426 Function $\id{check}(R', Y')$: \\
1427 \IF $R' \notin \dom \Xid{R}{map}$ \THEN \RETURN $0$; \\
1428 $Y \gets \Xid{R}{map}(R')$; \\
1429 \IF $\id{verify}(Y, Y')$ \THEN \\ \ind
1430 $\id{win} \gets 1$; \\
1431 \RETURN $1$; \- \\
1432 \RETURN $0$;
1433 \end{program}
1434
1435 \caption{Soundness of $\Wident$: $\Game{imp-$n$}{\Wident}(A)$}
1436 \label{fig:wident-sound}
1437\end{figure}
1438
1439\begin{theorem}[Soundness of $\Wident$]
1440 \label{thm:wident-sound}
1441 Let $A$ be any adversary running in time $t$ and making $q_I$ queries to
1442 its random oracle, and $q_V$ queries to its verification oracle. Let $G$
1443 be a cyclic group. Then
1444 \[ \Pr[\Game{imp-$n$}{\Wident^G}(A) = 1] \le
1445 \InSec{mcdh}(G; t', q_I + q_V)
1446 \]
1447 where $t' = t + O(q_I) + O(q_V)$.
1448\end{theorem}
1449\begin{remark}
1450 Note that the security bound here is \emph{independent} of the value of
1451 $n$.
1452\end{remark}
32609df3
MW
1453\begin{longproof}{The proof of this theorem can be found in the full version
1454 of the paper.}
a6e375a6
MW
1455 We prove this by defining a sequence of games $\G{i}$. The first will be
1456 the same as the attack game $\Game{imp-$n$}{\Wident}(A)$ and the others
1457 will differ from it in minor ways. In each game $\G{i}$, let $S_i$ be the
1458 event that $A$ wins the game -- i.e., that it successfully impersonates the
1459 holder of the private key~$x$.
32609df3 1460
a6e375a6
MW
1461 Let game $\G0$ be the attack game $\Game{imp-$n$}{\Wident}(A)$, and let $(R',
1462 Y')$ be the output of $A$ in the game.
1463
1464 We define a new game $\G1$ which is the same as $\G0$, except that we query
1465 the random oracle $H_I$ at $(R', Y')$ whenever the adversary queries
1466 $\id{check}(R', Y')$. (We don't charge the adversary for this.) This
1467 obviously doesn't affect the adversary's probability of winning, so
1468 $\Pr[S_1] = \Pr[S_0]$.
1469
1470 Game $\G2$ is like $\G1$, except that we change the way we generate
1471 challenges and check their responses. Specifically, we introduce new
1472 functions $\id{challenges}_2$ and $\id{check}_2$, as shown in
1473 figure~\ref{fig:wident-sound-2}.
1474
1475 \begin{figure}
1476 \begin{program}
1477 Function $\id{challenges}_2(n)$: \+ \\
1478 $r^* \getsr I$; $R^* \gets r^* P$; $Y^* \gets r^* X$; \\
1479 \FOR $i \in \Nupto{n}$ \DO \\ \ind
1480 $r \getsr I$; $R \gets r R^*$; $Y \gets r Y^*$; \\
1481 $h \gets H_I(R, Y)$; $c \gets r \xor h$; \\
1482 $\Xid{R}{map} \gets \Xid{R}{map} \cup \{ R \mapsto r \}$; \\
1483 $\mathbf{c}[i] \gets (R, c)$; \- \\
1484 \RETURN $\mathbf{c}$;
1485 \next
1486 Function $\id{check}_2(R', Y')$: \+ \\
1487 \IF $R' \notin \dom \Xid{R}{map}$ \THEN \RETURN $0$; \\
1488 $r \gets \Xid{R}{map}(R')$; \\
1489 \IF $\id{verify}(Y^*, Y'/r)$ \THEN \\ \ind
1490 $\id{win} \gets 1$; \\
1491 \RETURN $1$; \- \\
1492 \RETURN $0$;
1493 \end{program}
1494
1495 \caption{Soundness of $\Wident$: $\id{challenges}_2$ and $\id{check}_2$}
1496 \label{fig:wident-sound-2}
1497 \end{figure}
1498
1499 While we're generating and checking challenges in a more complicated way
1500 here, we're not actually changing the distribution of the challenges, or
1501 changing the winning condition. Hence $\Pr[S_2] = \Pr[S_1]$.
1502
1503 Now we change the rules again. Let $\G3$ be the same as $\G2$ except that
1504 we change the winning condition. Instead, we say that the adversary wins
1505 if any of the queries to its random oracle $H_I(R', Y')$ would be a correct
1506 response -- i.e., $\id{check}_2(R', Y')$ would return $1$. Since we query
1507 the oracle on $(R', Y')$ on its behalf at the end of the game, no adversary
1508 can do worse in this game than it does in $\G2$, so we have $\Pr[S_3] \ge
1509 \Pr[S_2]$. (It's not hard to see that this only helps quite stupid
1510 adversaries. We can transform any adversary into one for which equality
1511 holds here.)
1512
1513 Finally, let $\G4$ be the same as $\G3$ except that we change the way we
1514 generate challenges again: rather than computing $h$ and setting $c \gets h
1515 \xor r$, we just choose $c$ at random. Specifically, we use the new
1516 function, $\id{challenges}_4$, shown in figure~\ref{fig:wident-sound-4}.
1517
1518 \begin{figure}
1519 \begin{program}
1520 Function $\id{challenges}_4(n)$: \+ \\
1521 $r^* \getsr I$; $R^* \gets r^* P$; $Y^* \gets r^* X$; \\
1522 \FOR $i \in \Nupto{n}$ \DO \\ \ind
1523 $r \getsr I$; $R \gets r R^*$; \\
1524 $c \getsr \Bin^{\ell_I}$; \\
1525 $\Xid{R}{map} \gets \Xid{R}{map} \cup \{ R \mapsto r \}$; \\
1526 $\mathbf{c}[i] \gets (R, c)$; \- \\
1527 \RETURN $\mathbf{c}$;
1528 \end{program}
1529
1530 \caption{Soundness of $\Wident$: $\id{challenges}_4$}
1531 \label{fig:wident-sound-4}
1532 \end{figure}
1533
1534 Since $H_I(\cdot, \cdot)$ is a random function, the adversary can only
1535 distinguish $\G4$ from $\G3$ if it queries its random oracle at some $(R, r
1536 Y^*)$. But if it does this, then by the rule introduced in $\G3$ it has
1537 already won. Therefore we must have $\Pr[S_4] = \Pr[S_3]$.
1538
1539 Our $\id{challenges}_4$ function is interesting, since it doesn't actually
1540 make use of $r^*$ or $Y^*$ when generating its challenges. This gives us
1541 the clue we need to bound $\Pr[S_4]$: we can use adversary $A$ to solve the
1542 multiple-guess Diffie-Hellman problem in $G$ by simulating the game $\G4$.
1543 Specifically, we define the adversary $B$ as shown in
1544 figure~\ref{fig:wident-sound-cdh}. That is, for each query $A$ makes to
1545 its random oracle at a new pair $(R', Y')$, we see whether this gives us
1546 the answer we're looking for. We have $\Pr[S_0] \le \Pr[S_4] =
1547 \Succ{mcdh}{G}(B) \le \InSec{mcdh}(G; t', q_I + q_V)$ as required.
1548
1549 \begin{figure}
1550 \begin{program}
1551 Adversary $B^{V(\cdot)}(X, R^*)$: \+ \\
1552 $F \gets \emptyset$; $\Xid{R}{map} \gets \emptyset$; \\
1553 \FOR $i \in \Nupto{n}$ \DO \\ \ind
1554 $r \getsr I$; $R \gets r R^*$; $c \getsr \Bin^{\ell_I}$; \\
1555 $\Xid{R}{map} \gets \Xid{R}{map} \cup \{ R \mapsto r \}$; \\
1556 $\mathbf{c}[i] \gets (R, c)$; \- \\
1557 $(R', Y') \gets A^{H_I(\cdot, \cdot), \id{check}(\cdot, \cdot)}
1558 (X, \mathbf{c})$; \\
1559 \IF $Y' \neq \bot$ \THEN $H_I(R', Y')$;
1560 \next
1561 Oracle $H_I(R', Y')$: \+ \\
1562 \IF $(R', Y') \in \dom F$ \THEN \\ \quad
1563 $h \gets F(R', Y')$; \\
1564 \ELSE \\ \ind
1565 $\id{check}(R', Y')$; \\
1566 $h \getsr \Bin^{\ell_I}$;
1567 $F \gets F \cup \{ (R', Y') \mapsto h \}$; \- \\
1568 \RETURN $h$;
1569 \- \\[\medskipamount]
1570 Oracle $\id{check}(R', Y')$: \+ \\
1571 \IF $R' \in \dom \Xid{R}{map}$ \THEN
1572 $V(Y'/\Xid{R}{map}(R'))$;
1573 \end{program}
1574
1575 \caption{Soundness of $\Wident$: reduction from MCDH}
1576 \label{fig:wident-sound-cdh}
1577 \end{figure}
32609df3 1578\end{longproof}
a6e375a6
MW
1579
1580\subsubsection{Zero-knowledge}
1581Finally we must prove that $\Wident$ is (statistical) zero-knowledge -- i.e.,
1582that, except with very small probability, Bob learns nothing of use to him
1583except that he's interacting with Alice. To do this, we show that, for any
1584algorithm $B$ which Bob might use to produce his challenge to the real Alice,
1585there exists a simulator $S$ which produces transcripts distributed very
1586similarly to transcripts of real conversations between $B$ and Alice, the
1587difference being that $S$ doesn't know Alice's key. We shall show that the
1588statistical difference between the two distributions is $2^{-\ell_I}$.
1589
1590The intuition here is that Bob ought to know what answer Alice is going to
1591give him when he constructs his challenge. This is certainly true if he's
1592honest: his challenge is $R = r P$ for some $r$ he knows, so he won't learn
1593anything useful when Alice responds with $x R = r X$. However, if Bob sends
1594a challenge $R$ when he doesn't know the corresponding $r$, he learns
1595something potentially useful. The accompanying check value $c = r \xor
1596H_I(R, r X)$ keeps him honest.
1597
1598To show this, we present an \emph{extractor} which, given any challenge $(R,
1599c)$ Bob can construct, and his history of random-oracle queries, either
1600returns a pair $(r, Y)$ such that $R = r P$ and $Y = r X$, or $\bot$;
1601moreover, the probability that Alice returns a response $Y' \ne \bot$ given
1602 the challenge $(R, c)$ is $2^{-\ell_I}$. We can, of course, readily convert
1603this extractor into a simulator to prove the zero-knowledge property of our
1604protocol.
1605
1606We shall actually consider a slightly more complex setting. We grant Bob
1607access to an oracle which produces random, correctly-formed challenges. We
1608require this to model the legitimate challenges of other parties when we
32609df3 1609analyse the security of our key exchange protocol.
a6e375a6
MW
1610
1611\begin{definition}[Discrete-log extractors]
1612 Let $T$, $B$ be randomized algorithms. Define the game
1613 $\Game{dl-ext}{G}(T, B)$ as shown in figure~\ref{fig:dlext}. The
1614 \emph{success probability of $T$ as a discrete-log extractor against $B$}
1615 is defined as
1616 \[ \Succ{dl-ext}{G}(T, B) = \Pr[\Game{dl-ext}{G}(T, B) = 1]. \]
1617\end{definition}
a6e375a6
MW
1618
1619\begin{figure}
1620 \begin{program}
1621 $\Game{dl-ext}{G}(T, B):$ \+ \\
1622 $H_I \getsr \Func{G^2}{\Bin^{\ell_I}}$;
1623 $Q_H \gets \emptyset$; $Q_C \gets \emptyset$; \\
1624 $(x, X) \gets \id{setup}()$; \\
1625 $(R, c) \gets B^{\Xid{H_I}{trap}(\cdot, \cdot), C()}(x, X)$; \\
1626 $(r, Y) \gets T(R, c, Q_H)$; \\
1627 $Y' \gets x R$; $h' \gets H_I(R, Y')$; $r' \gets c \xor h'$; \\
1628 \IF $r \ne \bot$ \THEN \\ \quad
1629 \IF $Y = \bot \lor R \ne r P \lor Y \ne Y'$ \THEN \RETURN $0$; \\
32609df3 1630 \IF $R = r' P$ \THEN $(r^*, Y^*) \gets (r', Y')$; \\
a6e375a6
MW
1631 \ELSE $(r^*, Y^*) \gets (\bot, \bot)$; \\
1632 \IF $(R, c) \in Q_C$ \THEN \RETURN $1$; \\
1633 \IF $(r, Y) = (r', Y')$ \THEN \RETURN $1$; \\
1634 \RETURN $0$;
1635 \next
1636 Oracle $\Xid{H_I}{trap}(R', Y')$: \+ \\
1637 $h \gets H_I(R', Y')$; \\
1638 $Q_H \gets Q_H \cup \{(R', Y', h)\}$; \\
1639 \RETURN $h$; \- \\[\medskipamount]
1640 Oracle $C()$: \+ \\
32609df3 1641 $r \getsr \gf{q}$; \\
a6e375a6
MW
1642 $R \gets r P$; $c \gets r \xor H_I(R, r X)$; \\
1643 $Q_C \gets Q_C \cup \{(R, c)\}$; \\
32609df3 1644 \RETURN $(R, c)$
a6e375a6
MW
1645 \end{program}
1646
1647 \caption{Discrete log extraction game: $\Game{dl-ext}{G}(T, B)$}
1648 \label{fig:dlext}
1649\end{figure}
1650
32609df3
MW
1651Let's unpack this definition slightly. We make the following demands of our
1652extractor.
1653\begin{itemize}
1654\item It is given a bare `transcript' of $B$'s execution. In particular, it
1655 is given only its output and a list of $B$'s random-oracle queries in no
1656 particular order.
1657\item While the extractor is not given the private key~$x$, the adversary~$B$
1658 is given the private key.
1659\item We require that, if the extractor produces values $r, Y \ne \bot$ then
1660 $r$ and $Y$ are \emph{correct}; i.e., that $R = r P$ and $Y = x R$.
1661\item The extractor is explicitly \emph{not} given the outputs of the
1662 challenge-generation oracle $C()$, nor of the random-oracle queries issued
1663 by $C()$. However, we allow the extractor to fail (i.e., return $\bot$) if
1664 $B$ simply parrots one of its $C$-outputs.
1665\item The extractor is allowed -- indeed \emph{required} -- to fail if the
1666 challenge $(R, c)$ is \emph{invalid} (i.e., Alice would return $\bot$ given
1667 the challenge).
1668\end{itemize}
1669The resulting definition bears a striking similarity to the concept of
081da1b1 1670\emph{plaintext awareness} in \cite{bellare-1998:pub-enc-notions}.
32609df3 1671
a6e375a6
MW
1672Such an extractor indeed exists, as the following lemma states.
1673\begin{lemma}[Effectiveness of extractor $T_\Wident$]
1674 \label{lem:dlext}
32609df3
MW
1675 There exists a \emph{universal discrete-log extractor} $T_\Wident$, shown
1676 in figure~\ref{fig:twident}, such that, for any algorithm $B$,
a6e375a6
MW
1677 \[ \Succ{dl-ext}{G}(T_\Wident, B) \ge 1 - \frac{1}{2^{\ell_I}}. \]
1678 Moreover, if $B$ issues at most $q_H$ random-oracle queries, then the
1679 running time of $T_\Wident$ is $O(q_H)$.
1680\end{lemma}
32609df3
MW
1681\ifshort
1682The proof of this lemma is given in the full version of this paper.
1683\else
a6e375a6
MW
1684We prove this result at the end of the section. For now, let us see how to
1685prove that $\Wident$ is zero-knowledge.
32609df3
MW
1686\fi
1687
1688\begin{figure}
1689 \begin{program}
1690 Extractor $T_\Wident(R, c, Q_H)$: \+ \\
1691 \FOR $(R', Y', h)$ \IN $Q_H$ \DO \\ \ind
1692 $r \gets h \xor c$; \\
1693 \IF $R = R' = r P \land Y' = r X$ \THEN \RETURN $(r, Y')$; \- \\
1694 \RETURN $(\bot, \bot)$;
1695 \end{program}
1696
1697 \caption{The discrete-log extractor $T_\Wident$}
1698 \label{fig:twident}
1699\end{figure}
a6e375a6
MW
1700
1701We use the set-up described in section~\ref{sec:sim}. Our initialization
32609df3 1702function~$I_\Wident$ just chooses a random $x \in \gf{q}$ as the world
a6e375a6
MW
1703private input and sets $X = x P$ as the common input. In the `real world',
1704the adversary is allowed to submit a (single) challenge to the prover; it is
1705given the prover's response, and must then compute its output. This is shown
1706on the left hand side of figure~\ref{fig:wident-sim}.
1707
1708The zero-knowledge property of the scheme is described by the following
1709theorem.
1710\begin{theorem}[Statistical zero-knowledge of $\Wident$]
1711 \label{thm:wident-zk}
1712 Let $I_\Wident$, $W_\Wident$ and $S_\Wident$ be the real-prover world and
1713 simulator shown in figure~\ref{fig:wident-sim}. Then, for any~$t$,
1714 $q_I$ and $q_C$,
1715 \[ \InSec{sim}(W_\Wident, I_\Wident, S_\Wident; t, q_I, q_C, 0) \le
1716 \frac{q_C}{2^{\ell_I}}.
1717 \]
1718 where $q_C$ is the maximum number of challenges allowed by the adversary.
1719\end{theorem}
32609df3 1720\begin{longproof}{}
a6e375a6
MW
1721 The simulator simply uses the extractor~$T_\Wident$ to extract the answer
1722 from the adversary's history of random oracle queries. Observe that
1723 $S_\Wident$ is a fake-world simulator. By lemma~\ref{lem:dlext}, the
1724 extractor fails with probability only $2^{-\ell_I}$. The theorem follows
1725 by a simple union bound and proposition~\ref{prop:fakesim}.
32609df3 1726\end{longproof}
a6e375a6 1727
32609df3 1728%\ifshort\else
a6e375a6
MW
1729\begin{figure}
1730 \begin{program}
1731 Initialization function $I_\Wident()$: \+ \\
32609df3 1732 $x \getsr \gf{q}$; \\
a6e375a6
MW
1733 $X \gets x P$; \\
1734 \RETURN $(x, X)$;
1735 \- \\[\medskipamount]
1736 Real-prover world $W_\Wident^{H_I(\cdot, \cdot)}
1737 ((x, X), \sigma, \tau, \mu)$: \+ \\
1738 \IF $\tau = \cookie{challenge}$ \THEN \\ \ind
1739 $(R, c) \gets \mu$; \\
1740 $Y \gets \id{response}^{H_I(\cdot, \cdot)}(R, c, x)$; \\
1741 \RETURN $(1, Y)$; \- \\
1742 \ELSE \\ \ind
1743 \RETURN $(\sigma, \bot)$;
1744 \next
1745 Simulator $S_\Wident$'s fake world \\
1746 \hspace{1in} $W_{\text{sim}}^{H_I(\cdot, \cdot)}
1747 ((X, u), \sigma, \tau, \mu)$: \+ \\
1748 \IF $\tau = \cookie{init}$ \THEN \\ \ind
1749 \RETURN $(\emptyset, \emptystring)$; \- \\
1750 $Q_H \gets \sigma$; \\
1751 \IF $\tau = \cookie{challenge}$ \THEN \\ \ind
1752 $(R, c) \gets \mu$; \\
1753 $(r, Y) \gets T_\Wident(R, c, Q_H)$; \\
1754 \RETURN $(Q_H, Y)$; \- \\
1755 \ELSE \IF $\tau = \cookie{random}$ \THEN \\ \ind
1756 $(i, (R', Y'), h) \gets \mu$; \\
1757 $Q_H \gets Q_H \cup \{(R', Y', h)\}$; \\
1758 \RETURN $(Q_H, \emptystring)$; \- \\
1759 \ELSE \\ \ind
1760 \RETURN $(\sigma, \bot)$;
1761 \end{program}
1762
1763 \caption{Real-prover and simulator for zero-knowledge of $\Wident$}
1764 \label{fig:wident-sim}
1765\end{figure}
32609df3 1766%\fi
a6e375a6 1767
32609df3
MW
1768\ifshort\else
1769We now return to proving that the extractor $T_\Wident$ functions as claimed.
dff0fad2 1770The following two trivial lemmata will be useful, both now and later.
a6e375a6
MW
1771\begin{lemma}[Uniqueness of discrete-logs]
1772 \label{lem:unique-dl}
1773 Let $G = \langle P \rangle$ be a cyclic group. For any $X \in G$ there is
32609df3 1774 a unique $x \in \gf{q}$ where $X = x P$.
a6e375a6
MW
1775\end{lemma}
1776\begin{proof}
1777 Certainly such an $x$ exists, since $G$ is cyclic and finite. Suppose $X =
1778 x P = x' P$: then $0 = x P - x' P = (x - x') P$. Hence $(x - x')$ is a
32609df3 1779 multiple of $q$, i.e., $x = x'$.
a6e375a6
MW
1780\end{proof}
1781\begin{lemma}[Uniqueness of check values]
1782 \label{lem:unique-c}
32609df3
MW
1783 Let $G = \langle P \rangle$ be a cyclic group of prime order $q$; let $H_I$
1784 be a function $H_I\colon \Bin^{2\ell_G} \to \Bin^{\ell_I}$. Fix some $x
1785 \in \gf{q}$ and define the set
a6e375a6
MW
1786 \[ V_x = \bigl\{\, (R, c) \in G \times \Bin^{\ell_I} \bigm|
1787 R = \bigl( c \xor H_I(R, x R) \bigr) P \,\bigr\}.
1788 \]
1789 Then, for any $R$, $c$, $c'$, if $(R, c) \in V_x$ and $(R, c') \in V_x$
32609df3 1790 then $c = c'$.
a6e375a6
MW
1791\end{lemma}
1792\begin{proof}
32609df3
MW
1793 From lemma~\ref{lem:unique-dl}, we see that there is a unique $r \in \gf{q}$
1794 for which $R = r P$. Now, if $(R, c) \in V_x$, we must have $r = c \xor
1795 H_I(R, x R)$. It follows that $c = r \xor H_I(R, x R)$.
a6e375a6
MW
1796\end{proof}
1797
1798\begin{proof}[Proof of lemma~\ref{lem:dlext}]
a6e375a6
MW
1799 Let $B$ be any randomized algorithm, and let $(R, c, Q_H)$ be as given to
1800 the extractor by $\Game{dl-ext}{G}(T_\Wident, B)$. Let the quantities
1801 $H_I$, $Q_C$, $r$, $r'$, $x$ and $X$ be as in that game.
1802
1803 Suppose that the extractor returns values $(r, Y) \ne (\bot, \bot)$. Let
1804 $h = r \xor c$; then there must be a query $(R, Y, h) \in Q_H$, and we have
1805 $R = r P$ and $Y = r X = r (x P) = x (r P) = x R = Y'$, so the extractor's
1806 output must be correct unless it fails.
1807
1808 Furthermore, in the case where the extractor did not fail, we have $h =
1809 H_I(R, Y) = H_I(R, Y')$ and $c = r \xor h$, so the challenge was valid.
1810 Therefore, if the challenge was invalid, the extractor will fail.
1811
1812 We now deal with the challenge-generation oracle. Suppose that $(R, c')
1813 \in Q_C$ for some $c'$. Now, if $c = c'$ then $(R, c')$ is a repeat of
1814 some challenge from the challenge-generation oracle, and the extractor is
1815 permitted to fail. On the other hand, suppose $c \ne c'$; then, the
1816 challenge $(R, c)$ must be invalid by lemma~\ref{lem:unique-c}, so the
1817 extractor is required to fail, and we have established that indeed it will.
1818 From now on, suppose that $R$ is distinct from all the $R$-values returned
1819 by $C()$.
1820
1821 Let $Y = x R$. Suppose that $B$ queried its random oracle at $(R, Y)$.
1822 Let $h = H_I(R, Y)$, so $r' = c \xor h$. If the challenge is valid then $R =
1823 r' P$; therefore $Y = x R = x r' P = r' X$, so we have $(R, Y, h) \in Q_H$
1824 with $R = r P$ and $Y = r X$. Hence the extractor returns $r = r'$ as
1825 required.
1826
1827 It remains to deal with the case where there is no random-oracle query at
1828 $(R, Y)$. But then $h = H_I(R, Y)$ is uniformly distributed, and
1829 independent of the entire game up to this point. Let $r$ be the correct
1830 discrete log of $R$; by lemma~\ref{lem:unique-dl} there is only one
1831 possible value. The extractor always fails under these circumstances, but
1832 a correct responder would reply with probability
1833 \[ \Pr[h = c \xor r] = \frac{1}{2^{\ell_I}}. \]
1834 This concludes the proof.
1835\end{proof}
1836\begin{remark}
1837 Note that the fact that the algorithm~$B$ was given the private key is
1838 irrelevant to the above argument. However, we shall need this property
1839 when we come to prove deniability for the key-exchange protocol.
1840\end{remark}
1841\begin{remark}
1842 It's easy to see from the above proof that the extractor works flawlessly
1843 on the `honest verifier' algorithm $\id{challenge}$ shown in
1844 figure~\ref{fig:wident-ro}. This shows that $\Wident$ is perfect
1845 zero-knowledge against honest verifiers. We're much more interested in
1846 dishonest verifiers, though.
1847\end{remark}
32609df3 1848\fi
a6e375a6
MW
1849
1850
32609df3 1851\ifshort\else
a6e375a6
MW
1852\subsection{An identity-based identification scheme}
1853\label{sec:wident-id}
1854
081da1b1
MW
1855Boneh and Franklin \cite{boneh-franklin-2003:ibe-weil-pairing} showed how to
1856construct an identity-based encryption scheme using bilinear pairings. The
1857resulting encryption scheme looks somewhat like a pairing-based version of
1858ElGamal's encryption scheme \cite{elgamal-1985:dlog-enc-sign}. We can
1859easily apply their techniques to our identification protocol, and thereby
1860obtain an identity-based identification scheme. Providing the necessary
1861formalisms to prove theorems analogous to our theorems~\ref{thm:wident-sound}
a6e375a6
MW
1862and~\ref{thm:wident-zk} would take us too far from our objectives; but given
1863appropriate security notions, we can readily adapt our existing proofs to the
1864new setting.
1865
1866\subsubsection{Bilinear pairings}
1867Before we describe the necessary modifications to the protocol, we first give
1868a (very brief!) summary of cryptographic pairings. (The Boneh-Franklin paper
081da1b1
MW
1869\cite{boneh-franklin-2003:ibe-weil-pairing} gives more detail; also
1870\cite{menezes-2005:intro-pairing-crypto} provides a useful introduction to
1871the topic.)
a6e375a6
MW
1872
1873Let $(G, +)$, $(G', +)$ and $(G_T, \times)$ be cyclic groups with prime order
1874$q$; let $P \in G$ and $P' \in G'$ be elements of order $q$ in $G$ and $G'$
1875respectively. We say that a mapping $\hat{e}\colon G \times G' \to G_T$ is a
1876\emph{non-degenerate bilinear pairing} if it satisfies the following
1877properties.
1878\begin{description}
1879\item[Bilinearity] For all $R \in G$ and $S', T' \in G'$, we have $\hat{e}(R,
dff0fad2
MW
1880 S' + T') = \hat{e}(R, S')\,\hat{e}(R, T')$; and for all $R, S \in G$ and $T'
1881 \in G'$ we have $\hat{e}(R + S, T') = \hat{e}(R, T')\,\hat{e}(S, T')$.
a6e375a6
MW
1882\item[Non-degeneracy] $\hat{e}(P, P') \ne 1$.
1883\end{description}
1884For practical use, we also want $\hat{e}(\cdot, \cdot)$ to be efficient to
1885compute. The reader can verify that $\hat{e}(a P, b P') = \hat{e}(P,
1886P')^{ab}$. It is permitted for the two groups $G$ and $G'$ to be equal.
1887
1888We require a different intractability assumption, specifically that the
1889\emph{bilinear} Diffie-Hellman problem (BDH) -- given $(a P, b P, a P', c P')
1890\in G^2 \times G'^2$, find $\hat{e}(P, P')^{abc} \in G_T$ -- is difficult.
1891This implies the difficulty of the computational Diffie-Hellman problem in
1892all three of $G$, $G'$ and~$G_T$.
1893
1894\subsubsection{The identity-based scheme}
081da1b1
MW
1895We need a trusted authority; following \cite{schneier-1996:applied-crypto} we
1896shall call him Trent. Trent's private key is $t \in \gf{q}$; his public key
1897is $T = t P$.
a6e375a6
MW
1898
1899Finally, we need cryptographic hash functions $H_I\colon G \times G_T \to
1900\Bin^{\ell_I}$ and $\Hid\colon \Bin^* \to G'$; a formal security analysis
1901would model these as random oracles.
1902
1903Alice's public key is $A = \Hid(\texttt{Alice}) \in G'$. Her private key is
1904$K_A = t A \in G'$ -- she needs Trent to give this to her. Bob can interact
1905with Alice in order to verify her identity as follows.
1906\begin{enumerate}
1907\item Bob computes $\gamma_A = \hat{e}(T, A) \in G_T$. (He can do this once
1908 and store the result if he wants, but it's not that onerous to work it out
1909 each time.)
32609df3 1910\item Bob chooses $r \inr \gf{q}$, and sets $R = r P$. He also computes
a6e375a6
MW
1911 $\psi = \gamma_A^r$, $h = H_I(R, \psi)$ and $c = r \xor h$. He sends his
1912 challenge $(R, c)$ to Alice.
1913\item Alice receives $(R', c')$. She computes $\psi' = \hat{e}(R', K_A)$, $h'
1914 = H_I(R', \psi')$, and $r' = c' \xor h'$. She checks that $R' = r' P$; if
1915 so, she sends $\psi'$ back to Bob; otherwise she refuses to talk to him.
1916\item Bob receives $\psi'$. If $\psi = \psi'$ then he accepts that he's
1917 talking to Alice.
1918\end{enumerate}
1919This works because $\psi = \gamma_A^r = \hat{e}(T, A)^r = \hat{e}(t P, A)^r =
1920\hat{e}(r P, A)^t = \hat{e}(R, t A) = \psi'$.
1921
1922\subsubsection{Informal analysis}
1923An analogue to lemma~\ref{lem:dlext} can be proven to show how to extract $r$
1924from a verifier's random-oracle queries; statistical zero knowledge would
1925then follow easily, as in theorem~\ref{thm:wident-zk}. Soundness is
1926intuitively clear, since an adversary must compute $\psi = \hat{e}(P,
1927P')^{art}$ given $A = a P'$, $R = r P$ and $T = t P$, which is an instance of
1928the BDH problem. An analogue of theorem~\ref{thm:wident-sound} would have to
1929prove this for an adversary capable of making identity requests as well as
1930obtaining challenges. Finally, our key-exchange protocol can be constructed
1931out of this identity-based identification scheme, yielding an identity-based
1932authenticated key-exchange protocol. We leave it to the reader to work
1933through the details.
32609df3 1934\fi
a6e375a6
MW
1935
1936
32609df3 1937\ifshort\else
a6e375a6
MW
1938\subsection{Comparison with the protocol of Stinson and Wu}
1939\label{sec:stinson-ident}
1940
1941Our protocol is similar to a recent proposal by Stinson and Wu
081da1b1
MW
1942\cite{stinson-wu-2006:two-flow-zero-knowledge}. They restrict their
1943attention to Schnorr groups $G \subset \F_p^*$. Let $\gamma$ be an element
1944of order $q = \#G$. The prover's private key is $a \inr \gf{q}$ and her
1945public key is $\alpha = \gamma^a$. In their protocol, the challenger chooses
4331bf84
MW
1946$r \inr \gf{q}$, computes $\rho = \gamma^r$ and $\psi = \alpha^r$, and sends
1947a challenge $(\rho, H(\psi))$. The prover checks that $\rho^q \ne 1$,
1948computes $\psi = \rho^a$, checks the hash, and sends $\psi$ back by way of
1949response. They prove their protocol's security in the random-oracle model.
a6e375a6
MW
1950
1951Both the Wrestlers protocol and Stinson-Wu require both prover and verifier
1952to compute two exponentiations (or scalar multiplications) each. The
32609df3 1953sizes of the messages used by the two protocols are also identical.
a6e375a6
MW
1954
1955(An earlier version of the Stinson-Wu protocol used a cofactor
1956exponentiation: if we set $f = (p - 1)/q$, then we use $\psi = \alpha^{rf}) =
1957\rho^{af} = \gamma^{afr}$. This is more efficient in typical elliptic curve
1958subgroups, since the cofactor of such subgroups is usually small: indeed,
081da1b1
MW
1959\cite{certicom-2000:sec1} recommends rejecting groups with cofactor $f > 4$.
1960However, in the Schnorr groups used by Stinson and Wu, the cofactor is much
1961larger than $q$, and their new variant is more efficient.)
a6e375a6
MW
1962
1963We note that the zero-knowledge property of the Stinson-Wu protocol requires
1964the Diffie-Hellman knowledge of exponent assumption (KEA). Very briefly:
1965suppose $A$ is a randomized algorithm which takes as input $X \in G$ and
1966outputs a pair $(r P, r X)$; intuitively, the KEA asserts $A$ must have done
1967this by choosing $r$ somehow and then computing its output from it.
1968Formally, it asserts the existence of an `extractor' algorithm which takes as
1969input the element $X$ and the random coins used by $A$ and outputs $r$ with
1970high probability. This is a very strong assumption, and one which is
1971unnecessary for our protocol, since we can present an \emph{explicit}
1972extractor.
1973
081da1b1
MW
1974The KEA assumption as stated in
1975\cite{stinson-wu-2006:two-flow-zero-knowledge} allows the extractor to fail
1976with some negligible probability, over and above the probability that a
1977dishonest verifier managed to guess the correct $h = H(\psi)$ without making
1978this random-oracle query.  Not only does our protocol achieve zero-knowledge
1979without the KEA, our extractor is, in this sense, `perfect'.
a6e375a6
MW
1980
1981Our protocol is just as strong as Stinson-Wu under attack from active
1982intruders: see table~\ref{tab:wident-active} for a very brief sketch of the
1983case-analysis which would be the basis of a proof of this.
1984
1985\begin{table}
1986 \begin{tabular}[C]{|*{3}{c|}p{8cm}|}
1987 \hlx{hv[1]}
1988 \multicolumn{2}{|c|}{\textbf{Challenge}} &
1989 \textbf{Response} &
1990 \textbf{Security}
1991 \\ \hlx{v[1]hv}
1992 %% unpleasant hacking to make the R and c columns the same width :-(
1993 \settowidth{\dimen0}{\textbf{Challenge}}%
32609df3 1994 \dimen0=.5\dimen0
a6e375a6
MW
1995 \advance\dimen0by-\tabcolsep
1996 \advance\dimen0by-.5\arrayrulewidth
1997 \hbox to\dimen0{\hfil$R$\hfil}
1998 & $c$ & $Y$ & Nothing to prove. \\ \hlx{v}
1999 $R$ & $c'$ & --- & Prover rejects by lemma~\ref{lem:unique-c};
2000 $Y'$ probably wrong by
2001 theorem~\ref{thm:wident-sound}. \\ \hlx{v}
2002 $R$ & $c$ & $Y'$ & Response is incorrect. \\ \hlx{v}
2003 $R'$ & --- & $Y$ & Response is incorrect. \\ \hlx{v}
2004 $R'$ & $c$ & $Y'$ & Prover rejects with probability $1 - 2^{-\ell_I}$;
2005 $Y'$ probably wrong by
2006 theorem~\ref{thm:wident-sound}. \\ \hlx{v}
2007 $R'$ & $c'$ & $Y'$ & Simulate prover using extractor
2008 (lemma~\ref{lem:dlext}); $Y'$ probably wrong by
2009 theorem~\ref{thm:wident-sound}. \\ \hlx{vh}
2010 \end{tabular}
32609df3 2011
a6e375a6
MW
2012 \caption{Security of $\Wident$ against active intruders (summary)}
2013 \label{tab:wident-active}
2014\end{table}
2015
2016An identity-based analogue of Stinson-Wu can be defined using a bilinear
2017pairing, just as we did in section~\ref{sec:wident-id}. However, to prove
2018the zero-knowledge property, one needs to make a bilinear analogue of the
2019knowledge of exponent assumption.
2020
2021We suspect that a key-exchange protocol like ours can be constructed using
2022Stinson-Wu rather than the Wrestlers identification scheme. We haven't,
2023however, gone through all the details, since we believe our protocol is just
2024as efficient and is based on much more conservative assumptions.
32609df3 2025\fi
a6e375a6
MW
2026
2027%%%--------------------------------------------------------------------------
2028
2029\section{A simple key-exchange protocol}
2030\label{sec:kx}
2031
2032In this section, we describe a simple key-exchange protocol built out of the
2033identification protocol shown previously.
2034
2035The key-exchange protocol arises from the following observation. If Bob
2036sends a challenge, presumably to Alice, and gets a correct response, then not
2037only did he really send the challenge to Alice but he knows that she received
2038it correctly.
2039
2040So, if Alice and Bob authenticate each other, by the end of it, they should
2041each have chosen a random private value, sent the corresponding public value
2042to the other, and been convinced that it arrived safely.
2043
2044Unfortunately, life isn't quite this kind, and we have to do some more work
2045to make this scheme secure.
2046
2047
2048Our key exchange protocol essentially consists of two parallel instances of
2049the identification protocol. If Alice receives a correct response to her
2050challenge, she will know that Bob received her challenge correctly, and
2051\emph{vice versa}. If we let Alice's challenge be $R_0 = r_0 P$ and Bob's
2052challenge be $R_1 = r_1 P$ then each can compute a shared secret $Z = r_0 R_1
2053= r_0 r_1 P = r_1 R_0$ unknown to an adversary. There are, unfortunately, a
2054few subtleties involved in turning this intuition into a secure key-exchange
2055protocol, which we now describe.
2056
2057
2058\subsection{Overview}
2059\label{sec:kx-overview}
2060
2061We present a quick, informal description of our basic key-exchange protocol.
2062In addition to our group $G$, we shall also need a secure symmetric
2063encryption scheme $\E = (\kappa, E, D)$, and two secure hash functions
2064$H_I\colon \Bin^{2\ell_G} \to \Bin^{\ell_I}$ and $H_K\colon \Bin^{\ell_G+1}
2065\to \Bin^\kappa$.
2066
2067Suppose that Alice's and Bob's private keys are $a$ and $b$ respectively, and
2068their public keys are $A = a P$ and $B = b P$.
2069\begin{enumerate}
32609df3
MW
2070\item Alice chooses a random index $r \inr \gf{q}$. She computes $R = r P$ and
2071 $c = r \xor H_I(R, r B)$. She sends the pair $(R, c)$ to Bob.
2072\item Similarly, Bob chooses a random $s \inr \gf{q}$. He computes $S = s P$
2073 and $d = s \xor H_I(S, s A)$. He sends $(S, d)$ to Alice.
a6e375a6
MW
2074\item Alice receives $(S', d')$ from Bob. She computes $s' = d' \xor H_I(S',
2075 a S')$, and verifies that $S' = s' P$. If so, she computes $K_A = H_K(0
2076 \cat r S')$, and sends $R, E_{K_A}(a S')$ to Bob.
2077\item Similarly, Bob receives $(R', c')$ from Alice. He verifies that $R' =
2078 \bigl( c' \xor H_I(R', b R') \bigr) P$. If so, he computes $K_B = H_K(0
2079  \cat s R')$ and sends $S, E_{K_B}(b R')$ to Alice.
2080\item Alice receives a ciphertext $(S'', \chi_B)$ from Bob. She checks that
2081 $S'' = S'$, decrypts $\chi_B$, and checks that $D_{K_A}(\chi_B) = r B$. If
2082 so, she uses $H_K(1 \cat r S')$ as her shared secret.
2083\item Similarly, Bob receives $(R'', \chi_A)$ from Alice, and checks $R'' =
2084 R'$ and $D_{K_B}(\chi_A) = s A$. If so, he uses $H_K(1 \cat s R')$ as his
2085 shared secret.
2086\end{enumerate}
2087This is the Wrestlers Key Exchange protocol, $\Wkx^{G, \E}$ (again, we omit
2088the superscripts when referring to the general protocol, or when confusion is
2089unlikely). A diagrammatic summary of the protocol is shown in
2090figure~\ref{fig:wkx}.
2091
2092\begin{figure}
2093 \begin{description}
32609df3 2094 \item[Setup] Group $G = \langle P \rangle$; $\#G = q$ is prime.
a6e375a6
MW
2095 $H_I(\cdot, \cdot)$ and $H_K(\cdot)$ are secure hashes. $\E = (\kappa,
2096 E, D)$ is an IND-CCA2 symmetric encryption scheme.
2097 \item[Parties] $U_i$ for $0 \le i < n$.
32609df3 2098 \item[Private keys] $x_i \inr \gf{q}$.
a6e375a6
MW
2099 \item[Public keys] $X_i = x_i P$.
2100 \end{description}
32609df3
MW
2101 \begin{protocol}
2102 $r_i \getsr I$; $R_i \gets r_i P$; &
2103 $r_j \getsr I$; $R_j \gets r_j P$; \\
2104 $c_i \gets r_i \xor H_I(R_i, r_i X_j)$; &
2105 $c_j \gets r_j \xor H_I(R_j, r_j X_i)$; \\
2106 \send{->}{(R_i, c_i)}
2107 \send{<-}{(R_j, c_j)}
2108    Check $R_j = \bigl(c_j \xor H_I(R_j, x_i R_j)\bigr) P$; &
2109    Check $R_i = \bigl(c_i \xor H_I(R_i, x_j R_i)\bigr) P$; \\
2110 $Z \gets r_i R_j$; $(K_0, K_1) \gets H_K(Z)$; &
2111 $Z \gets r_j R_i$; $(K_0, K_1) \gets H_K(Z)$; \\
2112 $\chi_i \gets E_{K_0}(x_i R_j)$; &
2113 $\chi_j \gets E_{K_0}(x_j R_i)$; \\
2114 \send{->}{(R_i, \chi_i)}
2115 \send{<-}{(R_j, \chi_j)}
2116 Check $D_{K_0}(\chi_j) = r_i X_j$; &
2117 Check $D_{K_0}(\chi_i) = r_j X_i$; \\
2118 Shared key is $K_1$. & Shared key is $K_1$.
2119 \end{protocol}
a6e375a6
MW
2120
2121 \caption{Summary of the Wrestlers Key Exchange protocol, $\Wkx$}
2122 \label{fig:wkx}
2123\end{figure}
2124
2125Assume, for the moment, that Alice and Bob's messages are relayed honestly.
2126Then:
2127\begin{itemize}
2128\item $a S' = a S = a (s P) = s (a P) = s A$, so $s' = d' \xor H_I(S', a S') =
2129 d \xor H_I(S, s A) = s$, and $S' = S = s P = s' P$, and therefore Alice
2130 responds to Bob's message;
2131\item similarly $b R' = r B$, so $r' = r$ and $R' = r' P$, and therefore Bob
2132 responds to Alice's message;
2133\item $b R' = b R = b (r P) = r (b P) = r B$, and $a S' = a S = a (s P) = s
2134 (a P) = s A$, and therefore both parties compute their responses correctly;
32609df3 2135 and
a6e375a6
MW
2136\item $r S' = r S = r (s P) = s (r P) = s R = s R'$, so $K_A = K_B$, and
2137 therefore they can decrypt each others' responses, and agree the same
2138 shared secret.
2139\end{itemize}
2140This shows that the protocol is basically valid, but says little about its
2141security. The remainder of this section will describe our protocol in more
2142formal detail, and prove its security in a model with multiple parties and an
2143adversary who controls the network.
2144
2145Observe that the protocol as we've presented here is \emph{symmetrical}.
2146There's no notion of `initiator' or `responder'. There are a total of four
2147messages which must be sent before both parties accept. However, this can be
2148reduced to three by breaking the symmetry of the protocol and combining one
2149or other party's challenge and response messages. We choose to analyse the
2150symmetrical version, since to do so, it suffices to consider only the two
2151different kinds of messages. Since our security model allows the messages to
2152be adversarially delayed and reordered, it is easy to show that the security
2153of an optimized, asymmetrical protocol is no worse than the symmetrical
2154version we present here.
2155
2156
2157\subsection{Security model and security definition}
2158\label{sec:um}
2159
2160Our model is very similar to that of Canetti and Krawczyk
081da1b1
MW
2161\cite{canetti-krawczyk-2001:secure-channels}, though we have modified it in
2162two ways.
a6e375a6
MW
2163\begin{enumerate}
2164\item We allow all the participants (including the adversary) in the protocol
2165 access to the various random oracles required to implement it.
2166\item Since we want to analyse a specific, practical scheme, asymptotic
2167 results are useless. We measure the adversary's resource usage carefully,
2168 and produce a quantitative bound on the adversary's advantage in the
2169 SK-security game.
2170\end{enumerate}
2171
32609df3
MW
2172\ifshort
2173
2174Readers interested in the details of the model should see Canetti and
081da1b1
MW
2175Krawczyk's paper \cite{canetti-krawczyk-2001:secure-channels}, or the full
2176version of this paper.
32609df3
MW
2177
2178\else
2179
a6e375a6
MW
2180\subsubsection{Overview}
2181We briefly describe our modified model, pointing out the changes we have
2182made, and how they apply to our protocol. Much of Canetti and Krawczyk's
2183model (for example, the local and global outputs) is useful for proving more
2184general security properties such as demonstrating that SK-security suffices
2185for constructing secure channels, and we shall not concern ourselves with
2186such details. Other parts deal with issues such as security parameters and
2187ensuring that all the computation is polynomially bounded, which are
2188irrelevant since we are dealing with a single concrete protocol rather than a
2189family of them.
2190
2191The entities in the model are the \emph{adversary}~$A$, and a (fixed) number
2192of \emph{parties}~$P_i$, for $0 \le i < n$. If the protocol under
2193consideration makes use of random oracles, then all the participants -- the
2194adversary and the parties -- are all allowed access to the random oracles.
2195
2196The parties and the adversary play a `game'. At the beginning of the game,
2197the participants are given some inputs computed by a randomized
2198\emph{initialization procedure}~$\id{init}$. This produces as output a pair
2199$(i_U, \mathbf{i})$; the value $i_U$ is the \emph{global input}, and is given
2200to all the participants including the adversary. The vector $\mathbf{i}$ has
2201$n$ components, and party $P_i$ is given $(i_U, \mathbf{i}[i])$ as input.
2202
2203\subsubsection{Sessions}
2204Parties don't act directly. Instead, each party runs a number of
32609df3
MW
2205\emph{sessions}. A session is represented by a triple $S = (P_i, P_j, s)$,
2206where $i, j \in \Nupto{n}$ identify the owning party and a \emph{partner},
2207and $s \in \Bin^{\ell_S}$ is a \emph{session-id}. (The original model
2208includes a r\^ole, for distinguishing between initiators and responders. Our
2209protocol is symmetrical, so this distinction isn't useful.) If $P_i$ runs a
2210session $S = (P_i, P_j, s)$ and $P_j$ runs a session $S' = (P_j, P_i, s)$
2211then we say that $S$ and $S'$ are \emph{matching}, and that $P_j$ is $P_i$'s
a6e375a6
MW
2212\emph{partner} for the session.
2213
2214At most one participant in the game is \emph{active} at any given time.
2215Initially the adversary is active. The adversary may \emph{activate} a
2216session in one of two ways.
2217\begin{enumerate}
2218\item It may \emph{create a session} of a party~$P_i$, by selecting a
2219 session-id~$s \in \Bin^{\ell_S}$ and a partner $j$. There is no
2220 requirement that $P_j$ ever have a matching session. However, all sessions
2221 of a party must be distinct, i.e., sessions with the same partner must have
2222 different session-ids.
2223\item It may \emph{deliver a message}~$\mu \in \Bin^*$, from party~$P_j$, to
2224 an existing session~$S = (P_i, P_j, s)$. There is no requirement that any
2225 party previously sent $\mu$: the adversary is free to make up messages as
2226 it sees fit.
2227\end{enumerate}
2228The adversary becomes inactive, and the session becomes active. The session
2229performs some computation, according to its protocol, and may request a
2230message~$\mu$ be delivered to the matching session running in its partner
2231(which may not exist). The session may also \emph{terminate}. In the case
2232we are interested in, of key-exchange protocols, a session~$S = (P_i, P_j,
2233s)$ may terminate in one of two ways:
2234\begin{enumerate}
2235\item it may \emph{complete}, outputting $(i, j, s, K)$, for some
2236 \emph{session key}~$K$, or
2237\item it may \emph{abort}, outputting $(i, j, s, \bot)$.
2238\end{enumerate}
2239Once it has performed these actions, the session deactivates and the
2240adversary becomes active again. The adversary is given the message~$\mu$, if
2241any, and informed of whether the session completed or aborted, but, in the
2242case of completion, not of the value of the key~$K$. A session is
2243\emph{running} if it has been created and has not yet terminated.
2244
2245\subsubsection{Other adversarial actions}
2246As well as activating sessions, the adversary has other capabilities, as
2247follows.
2248\begin{itemize}
2249\item It may \emph{expire} any session~$S$, causing the owning party to
2250 `forget' the session key output by that session.
2251\item It may \emph{corrupt} any party~$P_i$, at will: the adversary learns
2252 the entire state of the corrupted party, including its initial
2253 input~$\mathbf{i}[i]$, the state of any sessions it was running at the
2254 time, and the session keys of any completed but unexpired sessions. Once
2255 corrupted, a party can no longer be activated. Of course, the adversary
2256 can continue to send messages allegedly from the corrupted party.
2257\item It may \emph{reveal the state} of a running session~$S$, learning any
2258 interesting values specific to that session, but \emph{not} the owning
2259 party's long-term secrets.
2260\item It may \emph{reveal the session-key} of a completed session~$S$.
2261\item It may elect to be \emph{challenged} with a completed session~$S$,
2262  provided it is unexposed.  Challenge sessions form part of the security
2263 key-exchange protocols. See below for more details.
2264\end{itemize}
2265We say that a session $S = (P_i, P_j, s)$ is \emph{locally exposed} if
2266\begin{itemize}
2267\item it has had its state revealed,
2268\item it has had its session-key revealed, or
2269\item $P_i$ has been corrupted, and $S$ had not been expired when this
2270 happened.
2271\end{itemize}
2272A session is \emph{exposed} if it is locally exposed, or if its matching
2273session exists and has been locally exposed.
2274
2275At the beginning of the game, a bit $b^*$ is chosen at random. The adversary
2276may choose to be \emph{challenged} with any completed, unexposed
2277session;\footnote{%
2278 The original Canetti-Krawczyk definition restricts the adversary to a
2279 single challenge session, but our proof works independent of the number of
2280 challenge sessions, so we get a stronger result by relaxing the requirement
2281  here.}
2282the adversary is then given either the session's key -- if $b^* = 1$ -- or a
2283string chosen at random and independently of the game so far from a
2284protocol-specific distribution -- if $b^* = 0$. At the end of the game, the
2285adversary outputs a single bit~$b$.
2286
2287\subsubsection{SK-security}
2288We've now described the game; it is time to explain the adversary's goal in
2289it. The adversary \emph{wins} the game if either
2290\begin{enumerate}
2291\item two unexposed, matching sessions complete, but output different
2292 keys,\footnote{%
2293 The original Canetti-Krawczyk definition differs slightly here. It
2294 requires that `if two \emph{uncorrupted} parties complete matching
2295 sessions then they both output the same key' [original emphasis]. This
2296 can't be taken at face value, since none of the protocols they claim to
2297 be secure actually meet this requirement: they meet only the weaker
2298 requirement that parties completing matching sessions output different
2299 keys with negligible probability. We assume here that this is what they
2300 meant.}
2301 or
2302\item the adversary correctly guesses the hidden bit~$b^*$.
2303\end{enumerate}
2304More formally, we make the following definition.
32609df3 2305\fi
a6e375a6
MW
2306\begin{definition}[SK-security]
2307 \label{def:sk}
2308 Let $\Pi^{H_0(\cdot), H_1(\cdot), \ldots}$ be a key-exchange protocol
2309 which makes use of random oracles $H_0(\cdot)$, $H_1(\cdot)$, \dots, and
32609df3 2310 let $A$ be an adversary playing the game described \ifshort in
081da1b1 2311 \cite{canetti-krawczyk-2001:secure-channels}\else previously\fi, where $n$
a6e375a6
MW
2312 parties run the protocol~$\Pi$. Let $V$ be the event that any pair of
2313 matching, unexposed sessions completed, but output different session keys.
2314 Let $W$ be the event that the adversary's output bit matches the game's
2315 hidden bit~$b^*$. We define the adversary's \emph{advantage against the
2316 SK-security of the protocol~$\Pi$} to be
2317 \[ \Adv{sk}{\Pi}(A, n) = \max(\Pr[V], 2\Pr[W] - 1). \]
2318 Furthermore, we define the \emph{SK insecurity function of the
2319 protocol~$\Pi$} to be
2320 \[ \InSec{sk}(\Pi; t, n, q_S, q_M, q_{H_0}, q_{H_1}, \dots) =
2321 \max_A \Adv{sk}{\Pi}(A, n)
2322 \]
2323 where the maximum is taken over all adversaries~$A$ with total running
2324 time~$t$ (not including time taken by the parties), create at most $q_S$
2325 sessions, deliver at most $q_M$~messages, and (if applicable) make at most
2326 $q_{H_i}$ random-oracle queries to each random oracle $H_i(\cdot)$.
2327\end{definition}
2328
2329
2330\subsection{Security}
2331
2332In order to analyse our protocol $\Wkx^{G, \E}$ properly, we must describe
32609df3 2333exactly how it fits into our formal model.
a6e375a6
MW
2334
2335\subsubsection{Sessions and session-ids}
2336Our formal model introduced the concept of sessions, which the informal
2337description of section~\ref{sec:kx-overview} neglected to do. (One could
2338argue that we described a single session only.) As we shall show in
2339section~\ref{sec:kx-insecure}, our protocol is \emph{insecure} unless we
2340carefully modify it to distinguish between multiple sessions.
2341
2342In the model, distinct key-exchange sessions are given distinct partners and
2343session-ids. In order to prevent sessions interfering with each other, we
2344shall make explicit use of the session-ids.
2345
2346Suppose the session-ids are $\ell_S$-bit strings. We expand the domain of
2347the random oracle $H_I$ so that it's now
2348\[ H_I\colon G \times \Bin^{\ell_S} \times G \times G \to \Bin^{\ell_I}. \]
2349
2350\subsubsection{Messages}
2351We split the messages of our protocol into two parts: a \emph{type}~$\tau$ and
2352a \emph{body}~$\mu$. We assume some convenient, unambiguous encoding of
2353pairs $(\tau, \mu)$ as bit-strings. For readability, we present message
2354types as text strings, e.g., `\cookie{challenge}', though in practice one
2355could use numerical codes instead.
2356
2357The message body itself may be a tuple of values, which, again, we assume are
2358encoded as bit-strings in some convenient and unambiguous fashion. We shall
2359abuse the notation for the sake of readability by dropping a layer of nesting
2360in this case: for example, we write $(\cookie{hello}, x, y, z)$ rather than
2361$\bigl(\cookie{hello}, (x, y, z)\bigr)$.
2362
2363\subsubsection{The protocol}
2364Our protocol is represented by three functions, shown in
2365figure~\ref{fig:wkx-formal}.
2366\begin{itemize}
2367\item $\id{init}(n)$ is the initialization function, as described in
2368 section~\ref{sec:um}. It outputs a pair $(\mathbf{p}, \mathbf{i})$, where
2369 $\mathbf{i}[i]$ is the private key of party~$P_i$ and $\mathbf{p}[i]$ is
2370 the corresponding public key. Only $P_i$ is given $\mathbf{i}[i]$, whereas
2371 all parties and the adversary are given $\mathbf{p}$.
2372\item $\id{new-session}^{H_I(\cdot, \cdot, \cdot, \cdot), H_K(\cdot)}
2373 (\mathbf{p}, x, i, j, s)$ is the new-session function. This is executed by
2374 party~$P_i$ when the adversary decides to create a new session~$S = (P_i,
2375 P_j, s)$. It is also given the relevant outputs of $\id{init}$, and
2376 allowed access to the random oracles $H_I$ and $H_K$.
2377\item $\id{message}^{H_I(\cdot, \cdot, \cdot, \cdot), H_K(\cdot)} (\tau,
2378 \mu)$ is the incoming-message function. This is executed by a session when
2379 the adversary decides to deliver a message $(\tau, \mu)$ to it. It makes
2380 use of the subsidiary functions $\id{msg-challenge}$ and
2381 $\id{msg-response}$ to handle the messages.
2382\end{itemize}
2383We observe that the protocol never aborts. We could make it do so if it
2384receives an invalid message, but we prefer to ignore invalid messages for the
2385sake of robustness.\footnote{%
2386 Note that this protocol would therefore require modification before it was
081da1b1
MW
2387 acceptable in the simulation-based model of
2388 \cite{shoup-1999:formal-model-key-exchange}. There it is required that a
2389 key-exchange protocol terminate after a polynomially-bounded number of
2390 messages are delivered to it.}
a6e375a6
MW
2391
2392\begin{figure}
2393 \begin{program}
2394 Function $\id{init}(n)$: \+ \\
2395 \FOR $i \in \Nupto{n}$ \DO \\ \ind
32609df3 2396 $x \getsr \gf{q}$; \\
a6e375a6
MW
2397 $\mathbf{i}[i] \gets x$; \\
2398 $\mathbf{p}[i] \gets x P$; \- \\
2399 \RETURN $(\mathbf{p}, \mathbf{i})$;
2400 \- \\[\medskipamount]
2401 Function $\id{new-session}^{H_I(\cdot, \cdot, \cdot, \cdot), H_K(\cdot)}
2402 (\mathbf{p}, x, i, j, s)$: \+ \\
2403 $X \gets \mathbf{p}[i]$;
2404 $X' \gets \mathbf{p}[j]$;
2405 $C \gets \emptyset$; \\
32609df3 2406 $r \getsr \gf{q}$;
a6e375a6
MW
2407 $R \gets r P$;
2408 $Y \gets r X'$; \\
2409 $h \gets H_I(X, s, R, Y)$;
2410 $c \gets r \xor h$; \\
2411 \SEND $(\cookie{challenge}, R, c)$;
2412 \- \\[\medskipamount]
2413 Function $\id{message}^{H_I(\cdot, \cdot, \cdot, \cdot), H_K(\cdot)}
2414 (\tau, \mu)$: \+ \\
2415 \IF $\tau = \cookie{challenge}$ \THEN $\id{msg-challenge}(\mu)$; \\
2416 \ELSE \IF $\tau = \cookie{response}$ \THEN $\id{msg-response}(\mu)$;
2417 \next
2418 Function $\id{msg-challenge}(\mu)$: \+ \\
2419 $(R', c') \gets \mu$; \\
2420 $Y' \gets x R'$; \\
2421 $h' \gets H_I(X', s, R', Y')$; \\
2422 $r' \gets c' \xor h'$; \\
2423 \IF $R' \ne r' P$ \THEN \RETURN; \\
2424 $C \gets C \cup \{R\}$; \\
2425 $Z \gets r R'$; \\
2426 $(K_0, K_1) \gets H_K(Z)$; \\
2427 $\chi \gets E_{K_0}(Y')$; \\
2428 \SEND $(\cookie{response}, R, \chi)$;
2429 \- \\[\medskipamount]
2430 Function $\id{msg-response}(\mu)$: \+ \\
2431 $(R', \chi') \gets \mu$; \\
2432 \IF $R' \notin C$ \THEN \RETURN; \\
2433 $Z \gets r R'$; \\
2434 $(K_0, K_1) \gets H_K(Z)$; \\
2435 $Y' \gets D_{K_0}(\chi')$; \\
2436 \IF $Y' \ne Y$ \THEN \RETURN; \\
2437 \OUTPUT $K_1$;
2438 \STOP;
2439 \end{program}
32609df3 2440
a6e375a6
MW
2441 \caption{Formalization of $\Wkx$}
2442 \label{fig:wkx-formal}
2443\end{figure}
2444
2445\subsubsection{Session states}
2446We must specify what the adversary obtains when it chooses to reveal a
2447session's state. Given the program in figure~\ref{fig:wkx-formal}, we can
2448see that the session state consists of the variables $(x, X, X', r, R, Y,
2449C)$.
2450
2451However, $x$ is the owning party's long-term secret, and it seems
2452unreasonable to disclose this to an adversary who stops short of total
2453corruption.
2454
2455The public keys $X$ and $X'$ are part of the adversary's input anyway, so
2456revealing them doesn't help. Similarly, the set $C$ of valid challenges
2457could have been computed easily by the adversary, since a group element $R'
2458\in C$ if and only if the session $S$ responded to some message
2459$(\cookie{challenge}, R', c')$.
2460
2461The value $R = r P$ is easily computed given $r$, and besides is sent in the
2462clear in the session's first message. The expected response $Y = r X'$ is
2463also easily computed from $r$. The converse is also true, since $r$ can be
2464recovered from $R$ and $c$ in the session's challenge message and the value
2465$Y$. Besides, $r$ is necessary for computing $Z$ in response to incoming
2466challenges.
2467
2468We conclude that the only `interesting' session state is $r$.
2469
2470\subsubsection{Security}
2471Having formally presented the protocol, we can now state our main theorem
32609df3
MW
2472about its security. The proof is given in \ifshort the full version of the
2473paper\else appendix~\ref{sec:sk-proof}\fi.
a6e375a6
MW
2474\begin{theorem}[SK-security of $\Wkx$]
2475 \label{thm:sk}
2476 Let $G$ be a cyclic group. Let $\E = (\kappa, E, D)$ be a symmetric
2477 encryption scheme. Then
2478 \begin{spliteqn*}
2479 \InSec{sk}(\Wkx^{G, \E}; t, n, q_S, q_M, q_I, q_K) \le
dff0fad2
MW
2480 2 q_S \bigl( \InSec{ind-cca}(\E; t', q_M, q_M) + {} \\
2481 \InSec{mcdh}(G; t', q_K) +
2482 n \,\InSec{mcdh}(G; t', q_M + q_I) \bigr) +
32609df3 2483 \frac{n (n - 1)}{q} +
dff0fad2 2484 \frac{2 q_M}{2^{\ell_I}}.
a6e375a6
MW
2485 \end{spliteqn*}
2486 where $t' = t + O(n) + O(q_S) + O(q_M q_I) + O(q_K)$.
2487\end{theorem}
2488
2489
32609df3 2490\ifshort\else
a6e375a6
MW
2491\subsection{Insecure protocol variants}
2492\label{sec:kx-insecure}
2493
2494It's important to feed the session-id and verifier's public key to the random
2495oracle $H_I$ when constructing the check-value~$c$. Without these, the
2496protocol is vulnerable to attack. In this section, we consider some variants
2497of our protocol which hash less information, and show attacks against them.
2498
2499To simplify the presentation, we shall consider Alice and Bob, and a third
2500character Carol. We shall be attacking a pair of matching sessions $A$
2501and~$B$, run by Alice and Bob respectively. Let Alice and Bob's private keys
2502be $x_A$ and~$x_B$, and let their public keys be $X_A = x_A P$ and $X_B = x_B
2503P$ respectively.
2504
2505\subsubsection{Protocol diagram notation}
2506In order to keep the presentation as clear as possible, we use a simple
2507diagrammatic notation for describing protocol runs. A line of the form
2508\protocolrun{\textit{action} \ar[r] & \PRsession{S} & \textit{result}}
2509states that the adversary performs the given \textit{action} on session~$S$,
2510with the stated \textit{result}. The \textit{action} may be
2511\begin{itemize}
2512\item \textsf{Create session $(P_i, P_j, s)$}: the session is created,
2513 running in party~$P_i$, with partner~$P_j$ and session-id~$s$.
2514\item \textsf{Receive $\mu$}: the session is activated with an incoming message~$\mu$.
2515\item \textsf{Session-state reveal}: The adversary requests the session's
2516 internal state.
2517\end{itemize}
2518The \textit{result} may be
2519\begin{itemize}
2520\item \textsf{Send $\mu'$}: the session requests the delivery of the
2521 message~$\mu'$.
2522\item \textsf{Complete: $K$}: the session completes, outputting the key~$K$.
2523\item \textsf{State: $\sigma$}: the session's state is revealed to
2524 be~$\sigma$.
2525\item \textsf{(Ignore)}: the result of the action is unimportant.
2526\end{itemize}
2527
2528\subsubsection{Omitting the session-id}
2529Consider a protocol variant where session $S$ sets $h_S = H_I(X_N, R_S,
2530Y_S)$, where $N$ is the session's partner. That is, we've omitted the
2531session-id from the hash. An adversary can cross over two sessions between
2532Alice and Bob. Here's how.
2533
2534The attack assumes that Alice and Bob set up two pairs of matching sessions
2535with each other, thus.
2536\protocolrun{
2537 \PRcreate{Alice}{Bob}{s} & \PRsession{A} &
2538 \PRsend{challenge}{R_A, c_A} \\
2539 \PRcreate{Bob}{Alice}{s} & \PRsession{B} &
2540 \PRsend{challenge}{R_B, c_B} \\
2541 \PRcreate{Alice}{Bob}{s'} & \PRsession{A'} &
2542 \PRsend{challenge}{R_{A'}, c_{A'}} \\
2543 \PRcreate{Bob}{Alice}{s'} & \PRsession{B'} &
2544 \PRsend{challenge}{R_{B'}, c_{B'}}
2545}
2546Observe that the session pairs use distinct session-ids $s \ne s'$, so this
2547is allowed. Now the adversary crosses over the challenges, using the second
2548pair of sessions to provide responses to the challenges issued by the first
2549pair. By revealing the state in the second pair of sessions, the adversary
2550can work out the (probably different) session keys accepted by the first
2551pair.
2552\protocolrun{
2553 \PRreceive{challenge}{R_B, c_B} & \PRsession{A'} &
2554 \PRsend{response}{R_{A'}, E_{K_{A'B,0}}(x_A R_B)} \\
2555 \PRreceive{challenge}{R_A, c_A} & \PRsession{B'} &
2556 \PRsend{response}{R_{B'}, E_{K_{B'A,0}}(x_B R_A)} \\
2557 \PRreceive{challenge}{R_{A'}, c_{A'}} & \PRsession{A} & \PRignore \\
2558 \PRreceive{challenge}{R_{B'}, c_{B'}} & \PRsession{B} & \PRignore \\
2559 \PRreveal & \PRsession{A'} & r_{A'} \\
2560 \PRreveal & \PRsession{B'} & r_{B'} \\
2561 \PRreceive{response}{R_{B'}, E_{K_{B'A,0}}(x_B R_A)} & \PRsession{A} &
2562 \PRcomplete{K_{AB',1}} \\
2563 \PRreceive{response}{R_{A'}, E_{K_{A'B,0}}(x_A R_B)} & \PRsession{B} &
2564 \PRcomplete{K_{BA',1}} \\
2565}
2566The adversary can now compute $K_{AB'} = H_K(r_{B'} R_A)$ and $K_{B'A} =
2567H_K(r_{A'} R_B)$. Safely in possession of both keys, the adversary can now
2568read and impersonate freely.
2569
2570\subsubsection{Omitting the partner's public key}
2571Now consider a protocol variant where session $S$ sets $h_S = H_I(s, R_S,
2572Y_S)$, where $s$ is the session-id.  An adversary can use sessions with
2573Carol to attack a session between Alice and Bob. Here's how the sessions are
2574set up.
2575\protocolrun{
2576 \PRcreate{Alice}{Bob}{s} & \PRsession{A} &
2577 \PRsend{challenge}{R_A, c_A} \\
2578 \PRcreate{Bob}{Alice}{s} & \PRsession{B} &
2579 \PRsend{challenge}{R_B, c_B} \\
2580 \PRcreate{Alice}{Carol}{s} & \PRsession{A'} &
2581 \PRsend{challenge}{R_{A'}, c_{A'}} \\
2582 \PRcreate{Bob}{Carol}{s} & \PRsession{B'} &
2583 \PRsend{challenge}{R_{B'}, c_{B'}}
2584}
Although each of Alice and Bob has two sessions with session-id~$s$, this is
2586allowed, since they are with different partners. The rest of the attack in
2587fact proceeds identically to the previous case.
32609df3 2588\fi
a6e375a6
MW
2589
2590\subsection{Deniability}
2591\label{sec:denial}
2592
2593We have claimed that the Wrestlers key-exchange protocol is \emph{deniable}.
2594In this section, we define what we mean, explain the limits of the
deniability of the protocol as currently presented, fix the protocol with an
2596additional pass (which can be collapsed to a single message flow by breaking
2597the protocol's symmetry), and prove the deniability of the resulting
2598protocol.
2599
2600Our notion of deniability is taken from Di~Raimondo, Gennaro and Krawczyk
081da1b1
MW
2601\cite{raimondo-2006:deniable-authn-key-exchange}, except that, as usual, we
2602opt for a concrete security approach.
a6e375a6
MW
2603
2604\subsubsection{Discussion}
2605Our definition for deniability is that, for any adversary interacting with
2606participants in the protocol, a simulator exists which can compute the same
2607things as the adversary. In particular, since an adversary could output a
2608transcript of the interactions between itself and the parties, it would
2609follow that a simulator could do this too. If the simulator is effective,
2610its output is indistinguishable from that of the real adversary, and hence no
2611`judge' (distinguisher) should be persuaded by evidence presented by someone
2612who claims to have witnessed or participated in an interaction.
2613
We work again in the model described in~\ref{sec:um}.  That is, our adversary
2615has complete control over the ordering and delivery of all messages. The
2616adversary is also able, at any time, to reveal the state of any session.
2617However, deniability is obviously impossible against an adversary who can
2618\emph{corrupt} other parties, since simulating such an adversary's actions
2619would necessarily require the simulator to compute private keys corresponding
2620to the known public keys, and this is (we believe) difficult, because an
2621efficient algorithm for doing this could easily attack our protocol, which we
2622already proved secure. Therefore, we forbid the adversary from corrupting
2623parties.
2624
2625In order to allow the adversary to participate in the protocol, rather than
2626merely observing it, we need to give it one or more private keys. We could
2627modify the initialization function \id{init} from figure~\ref{fig:wkx-formal}
2628to give the adversary a private key, but there's an easier way: we can just
2629give the adversary a number of private keys in its auxiliary input.
2630
2631\subsubsection{Definitions}
2632Let $\Pi$ be a key-exchange protocol, in the model described in
2633section~\ref{sec:um}. We use the simulation framework of
2634section~\ref{sec:sim}. We define the initialization function $I_\Pi$ to be
2635the initialization function of $\Pi$, as before, and the corresponding
2636world~$W_\Pi(\iota, \sigma, \tau, \mu)$ is a fairly straightforward mapping
2637of the adversary's possible actions to the simulation model:
2638\begin{itemize}
2639\item The invocation $\cookie{new-session}$ with $\mu = (i, j, s)$ creates a
2640 new session on party~$P_i$, with partner~$P_j$ and session-id~$s$. The
2641 reply $\rho = (\delta, m)$ is a \emph{decision} $\delta \in
2642 \{\cookie{continue}, \cookie{abort}, \cookie{complete}\}$ and an output
2643 message $m \in \Bin^* \cup \{\bot\}$. If $m \ne \bot$ then $m$ is a
2644 message to be sent to the matching session (if any).
2645\item The invocation $\cookie{deliver}$ with $\mu = (i, j, s, m)$ delivers
2646 message $m$ to the session $S = (P_i, P_j, s)$. The response $\rho$ is as
2647 for $\cookie{new-session}$ invocations.
2648\item The invocation $\cookie{reveal-session-state}$ with $\mu = (i, j, s)$
2649 reveals to the adversary the state of the running session $S = (P_i, P_j,
2650 s)$. The response $\rho$ is the session's state if $S$ is indeed a running
2651 session, or $\bot$ otherwise.
2652\item The invocation $\cookie{reveal-session-key}$ with $\mu = (i, j, s)$
2653 reveals to the adversary the session-key of the completed session~$S =
2654 (P_i, P_j, s)$. The response $\rho$ is the session key~$K$ if the session
2655 is indeed complete, or $\bot$ otherwise.
2656\end{itemize}
2657There are no invocations corresponding to the adversary actions of corrupting
parties (since deniability against a corrupting adversary is impossible, as
2659discussed earlier), or session expiry or challenging (since they are useless
2660in this context).
2661
2662We measure the deniability of a protocol~$\Pi$, using a given simulator~$S$,
2663by the insecurity function $\InSec{sim}(W_\Pi, I_\Pi, S; t_D, t_A,
2664\mathcal{Q}_D, \mathcal{Q}_A, \mathcal{R}, \mathcal{U})$ of
2665definition~\ref{def:sim}. The interaction bounds $\mathcal{R} = (q_S, q_M)$
we place on the adversary are on the number $q_S$ of \cookie{new-session}
invocations and the number $q_M$ of \cookie{deliver} invocations it makes.
2668
2669We shall (informally) say that a protocol~$\Pi$ is deniable if there is a
2670simulator~$S_\Pi$ for which the insecurity function is small for appropriate
2671resource bounds on the adversary and distinguisher.
2672
2673\subsubsection{The current protocol}
2674As it stands, $\Wkx$ isn't deniable, according to our definition, for
2675arbitrary auxiliary inputs. Let's see why.
2676
2677Suppose that Bob is an informant for the secret police, and wants to convince
2678a judge that Alice is involved in subversive activities in cyberspace.
2679Unfortunately, Bob's job is difficult, because of the strong zero-knowledge
2680nature of the Wrestlers identification protocol. However, Bob can work with
2681the judge to bring him the evidence necessary to convict Alice. Here's how.
2682
2683Alice's public key is $A$, and Bob's public key is $B$. The judge chooses
32609df3 2684some session-id $s$, and $r \inr \gf{q}$. He computes $R = r P$ and $c =
a6e375a6
MW
2685r \xor H_I(B, s, R, r A)$, and gives Bob the triple $(s, R, c)$, keeping $r$
2686secret. Bob can now persuade Alice to enter into a key-exchange with him,
2687with session-id $s$. He uses $(R, c)$ as his challenge message. When Alice
2688sends back her response $(R', \chi)$ (because Bob's challenge is correctly
2689formed), Bob aborts and shows $(R', \chi)$ to the judge. The judge knows $r$
2690and can therefore decrypt $\chi$ and check the response. If it's wrong, then
2691Bob was cheating and gets demoted -- he can't get the right answer by himself
2692because that would require him to impersonate Alice. If it's right, Alice is
2693really a subversive element and `disappears' in the night.
2694
2695We shall show in theorem~\ref{thm:denial} below that this is basically the
2696only attack against the deniability of the protocol. However, we can do
2697better.
2698
2699\subsubsection{Fixing deniability}
2700We can fix the protocol to remove even the attack discussed above. The
2701change is simple: we feed \emph{both} parties' challenges to the hash
2702function~$H_I$ rather than just the sender's. We use a five-argument hash
2703function (random oracle) $H_I\colon G^2 \times \Bin^{\ell_S} \times G^2 \to
2704\Bin^{\ell_I}$. We introduce a new message pass at the beginning of the
2705protocol: each session simply sends its challenge point $R = r P$ in the
clear as a `pre-challenge'.  The actual challenge is $R$ and $c = r \xor
H_I(R', X, s, R, r X')$, where $R'$ is the pre-challenge of the matching
session and $X'$ is the partner's public key.
2708
2709By breaking symmetry, we can reduce the communication complexity of this
2710variant to four messages. As before, we analyse the symmetrical version.
2711The extra flow might seem a high price to pay, but we shall see that it has
2712additional benefits beyond deniability.
2713
2714A summary of the new protocol is shown in figure~\ref{fig:wdkx}, and the
2715formal description is shown in figure~\ref{fig:wdkx-formal}.
2716
2717\begin{figure}
2718 \begin{description}
32609df3 2719 \item[Setup] Group $G = \langle P \rangle$; $\#G = q$ is prime.
a6e375a6
MW
    $H_I(\cdot, \cdot, \cdot, \cdot, \cdot)$ and $H_K(\cdot)$ are secure
2721 hashes. $\E = (\kappa, E, D)$ is an IND-CCA2 symmetric encryption
2722 scheme.
2723 \item[Parties] $U_i$ for $0 \le i < n$.
32609df3 2724 \item[Private keys] $x_i \inr \gf{q}$.
a6e375a6
MW
2725 \item[Public keys] $X_i = x_i P$.
2726 \end{description}
32609df3
MW
2727
2728 \begin{protocol}
2729 $r_i \getsr I$; $R_i \gets r_i P$; &
2730 $r_j \getsr I$; $R_j \gets r_j P$; \\
2731 \send{->}{R_i}
2732 \send{<-}{R_j}
2733 $c_i \gets r_i \xor H_I(R_j, X_i, s, R_i, r_i X_j)$; &
2734 $c_j \gets r_j \xor H_I(R_i, X_j, s, R_j, r_j X_i)$; \\
2735 \send{->}{(R_i, c_i)}
2736 \send{<-}{(R_j, c_j)}
    Check $R_j = \bigl(c_j \xor H_I(R_i, X_j, s, R_j, x_i R_j)\bigr) P$; &
    Check $R_i = \bigl(c_i \xor H_I(R_j, X_i, s, R_i, x_j R_i)\bigr) P$; \\
2739 $Z \gets r_i R_j$; $(K_0, K_1) \gets H_K(Z)$; &
2740 $Z \gets r_j R_i$; $(K_0, K_1) \gets H_K(Z)$; \\
2741 $\chi_i \gets E_{K_0}(x_i R_j)$; &
2742 $\chi_j \gets E_{K_0}(x_j R_i)$; \\
2743 \send{->}{(R_i, \chi_i)}
2744 \send{<-}{(R_j, \chi_j)}
2745 Check $D_{K_0}(\chi_j) = r_i X_j$; &
2746 Check $D_{K_0}(\chi_i) = r_j X_i$; \\
2747 Shared key is $K_1$. & Shared key is $K_1$.
2748 \end{protocol}
a6e375a6
MW
2749
2750 \caption{Summary of the Deniable Wrestlers Key Exchange protocol, $\Wdkx$}
2751 \label{fig:wdkx}
2752\end{figure}
2753
2754\begin{figure}
2755 \begin{program}
2756 Function $\id{init}(n)$: \+ \\
2757 \FOR $i \in \Nupto{n}$ \DO \\ \ind
32609df3 2758 $x \getsr \gf{q}$; \\
a6e375a6
MW
2759 $\mathbf{i}[i] \gets x$; \\
2760 $\mathbf{p}[i] \gets x P$; \- \\
2761 \RETURN $(\mathbf{p}, \mathbf{i})$;
2762 \- \\[\medskipamount]
2763 Function $\id{new-session}^{H_I(\cdot, \cdot, \cdot, \cdot, \cdot), H_K(\cdot)}
2764 (\mathbf{p}, x, i, j, s)$: \+ \\
2765 $X \gets \mathbf{p}[i]$;
2766 $X' \gets \mathbf{p}[j]$;
2767 $C \gets \emptyset$; \\
32609df3 2768 $r \getsr \gf{q}$;
a6e375a6
MW
2769 $R \gets r P$;
2770 $Y \gets r X'$; \\
      \SEND $(\cookie{pre-challenge}, R)$;
2772 \- \\[\medskipamount]
2773 Function $\id{message}^{H_I(\cdot, \cdot, \cdot, \cdot, \cdot), H_K(\cdot)}
2774 (\tau, \mu)$: \+ \\
2775 \IF $\tau = \cookie{pre-challenge}$ \THEN
2776 $\id{msg-pre-challenge}(\mu)$; \\
2777 \ELSE \IF $\tau = \cookie{challenge}$ \THEN
2778 $\id{msg-challenge}(\mu)$; \\
2779 \ELSE \IF $\tau = \cookie{response}$ \THEN $\id{msg-response}(\mu)$;
2780 \next
2781 Function $\id{msg-pre-challenge}(\mu)$: \+ \\
2782 $R' \gets \mu$; \\
      $h \gets H_I(R', X, s, R, Y)$; \\
      $c \gets r \xor h$; \\
      \SEND $(\cookie{challenge}, R, c)$;
2786 \- \\[\medskipamount]
2787 Function $\id{msg-challenge}(\mu)$: \+ \\
2788 $(R', c') \gets \mu$; \\
2789 $Y' \gets x R'$; \\
2790 $h' \gets H_I(R, X', s, R', Y')$; \\
2791 $r' \gets c' \xor h'$; \\
2792 \IF $R' \ne r' P$ \THEN \RETURN; \\
      $C \gets C \cup \{R'\}$; \\
2794 $Z \gets r R'$; \\
2795 $(K_0, K_1) \gets H_K(Z)$; \\
2796 $\chi \gets E_{K_0}(Y')$; \\
2797 \SEND $(\cookie{response}, R, \chi)$;
2798 \- \\[\medskipamount]
2799 Function $\id{msg-response}(\mu)$: \+ \\
2800 $(R', \chi') \gets \mu$; \\
2801 \IF $R' \notin C$ \THEN \RETURN; \\
2802 $Z \gets r R'$; \\
2803 $(K_0, K_1) \gets H_K(Z)$; \\
2804 $Y' \gets D_{K_0}(\chi')$; \\
2805 \IF $Y' \ne Y$ \THEN \RETURN; \\
2806 \OUTPUT $K_1$;
2807 \STOP;
2808 \end{program}
32609df3 2809
a6e375a6
MW
2810 \caption{Deniable key-exchange: formalization of $\Wdkx$}
2811 \label{fig:wdkx-formal}
2812\end{figure}
2813
2814The security of this variant is given by the following theorem, whose proof
32609df3
MW
2815is \ifshort given in the full version of this paper\else in
2816appendix~\ref{sec:sk2-proof}\fi.
a6e375a6
MW
2817\begin{theorem}[SK-security of $\Wdkx$]
2818 \label{thm:sk2}
2819 Let $G$ be a cyclic group. Let $\E = (\kappa, E, D)$ be a symmetric
2820 encryption scheme. Then
2821 \[ \InSec{sk}(\Wdkx^{G, \E}; t, n, q_S, q_M, q_I, q_K) =
2822 \InSec{sk}(\Wkx^{G, \E}; t, n, q_S, q_M, q_I, q_K)
2823 \]
2824\end{theorem}
2825
2826\subsubsection{Deniability of the Wrestlers protocols}
2827In order to quantify the level of deniability our protocols provide, we shall
2828impose a limit on the auxiliary input to the adversary. In particular, we
2829shall use $\mathcal{U}$ of definition~\ref{def:sim} to count the number of
2830\emph{challenges} in the auxiliary input. That is, $\mathcal{U} = n_C$ is
2831the number of tuples $(i, j, s, R', R, c)$ for which there is an $r$ such
2832that $R = r P$ and $c = r \xor H_I(R', X_j, s, R, r X_i)$ (or without the
2833$R'$ for $\Wkx$).
2834
2835With this restriction in place, we can state the following theorem about the
2836deniability of our protocols.
2837\begin{theorem}[Deniability of $\Wkx$ and $\Wdkx$]
2838 \label{thm:denial}
  There exist simulators $S_\Wkx$ and $S_\Wdkx$ such that
2840 \[ \InSec{sim}(W_{\Wkx^{G, \E}}, I_{\Wkx^{G, \E}}, S_{\Wkx^{G, \E}};
2841 t_D, t_A, \mathcal{Q}_D, \mathcal{Q}_A, (q_S, q_M), 0) \le
2842 \frac{q_M}{2^{\ell_I}}
2843 \]
2844 and
32609df3
MW
2845 \iffancystyle\[\else\begin{spliteqn*}\fi
2846 \InSec{sim}(W_{\Wdkx^{G, \E}}, I_{\Wdkx^{G, \E}}, S_{\Wdkx^{G, \E}};
2847 t_D, t_A, \mathcal{Q}_D, \mathcal{Q}_A, (q_S, q_M), n_C) \le
2848 \iffancystyle\else\\\fi
2849 \frac{n_C q_S}{\#G} +
a6e375a6 2850 \frac{q_M}{2^{\ell_I}}.
32609df3 2851 \iffancystyle\]\else\end{spliteqn*}\fi
a6e375a6
MW
2852 The running time of the simulators is $O(t_A) + O(\mathcal{Q}_A q_M)$.
2853\end{theorem}
32609df3
MW
2854\begin{longproof}{The proof of this theorem can be found in the full version
2855 of this paper.}
a6e375a6
MW
2856 The simulators $S_\Wkx$ and $S_\Wdkx$ are very similar. We describe both
2857 here. Both are fake-world simulators, working as follows.
2858 \begin{enumerate}
2859 \item Initially, it constructs simulated parties $P_i$, for $0 \le i < n$,
2860 giving each the public key $X_i$ from the common input.
2861 \item Suppose the adversary requests creation of a new session $S = (P_i,
2862 P_j, s)$. Then the simulator creates a new session, including a random
32609df3 2863 value $r_S \inr \gf{q}$, and computes $R_S = r_S P$, and $Y_S = r_S
a6e375a6
MW
2864 X_j$. For $\Wdkx$, it sends the message $(\cookie{pre-challenge}, R_S)$;
2865 for $\Wkx$, it additionally computes $h = H_I(X_i, s, R_S, Y_S)$ and
2866 sends $(\cookie{challenge}, R_S, r_S \xor h)$.
2867 \item Suppose, for $\Wdkx$, the adversary sends a message
2868 $(\cookie{pre-challenge}, R')$ to a session~$S = (P_i, P_j, s)$. The
2869 simulator computes $h = H_I(R', X_i, s, R_S, Y_S)$, and sends
2870 $(\cookie{challenge}, R_S, r_S \xor h)$.
2871 \item Suppose the adversary sends a message $(\cookie{challenge}, R', c')$
2872 to session $S = (P_i, P_j, s)$. The simulator doesn't know $x_i$.
2873 \begin{enumerate}
2874 \item If $R' = R_T$ for some other simulated session $T$, then the
2875 simulator knows $r_T$ such that $R_T = r_T P$. Let $Y' = r_T X_i$.
2876 The simulator computes $h = H_I(X_j, s, R', Y')$ (resp.\ $h = H_I(R_S,
2877 X_j, s, R', Y')$) for $\Wkx$ (resp.\ $\Wdkx$) and checks that $r_T = c'
2878 \xor h$. If not, the simulator discards the message. Otherwise, it
2879 computes $(K_0, K_1) = H_K(r_S R')$, and sends the message
    $(\cookie{response}, R_S, E_{K_0}(Y'))$.
2881 \item \label{en:simextract} Otherwise the simulator runs the extractor
2882 $T_\Wident$ on the adversary's history of queries $H_I(X_j, s, R',
2883 \cdot)$ (resp.\ $H_I(R_S, X_j, s, R', \cdot)$) for $\Wkx$ (resp.\
2884 $\Wdkx$). The extractor returns $(r', Y')$. If $Y' = \bot$ then the
2885 simulator ignores the message. Otherwise, the simulator computes
    $(K_0, K_1) = H_K(r_S R')$ and sends back $(\cookie{response}, R_S,
    E_{K_0}(Y'))$.
2888 \end{enumerate}
2889 \item Suppose the adversary sends a message $(\cookie{response}, R', \chi)$
2890 to session $S = (P_i, P_j, s)$. The simulator computes $(K_0, K_1) =
2891 H_K(r_S R')$, and decrypts $Y' = D_{K_0}(\chi)$. If $Y' \ne Y_S$ then
2892 the simulator discards the message. Otherwise, it makes the simulated
2893 session complete, and outputs key $K_1$.
2894 \item Finally, if the adversary reveals a session's state, the simulator
2895 reveals $r_S$ as required; if the adversary reveals a session-key, the
2896 simulator reveals the $K_1$ it output.
2897 \end{enumerate}
2898 The only point where the simulation fails to be perfect is in
2899 \ref{en:simextract}. Let $R'$ and $c'$ be the values from an incoming
2900 challenge message to session $S = (P_i, P_j, s)$. Let $r'$ be such that
2901 $R' = r' P$ and let $Y' = r' X_i$. If a random-oracle query $H_I(X_j, s,
2902 R', Y')$ (or $H_I(R_S, X_j, s, R', Y')$ for $\Wdkx$) has been issued, then
2903 there are a number of possibilities. Let $h'$ be the result of this query.
2904 \begin{itemize}
2905 \item The adversary made this query. Then the extractor will find it and
2906 return $Y'$ if $c' = h' \xor r'$, or $\bot$ otherwise.
2907 \item Some simulated session $U = (P_{i'}, P_{j'}, s')$ made this query.
2908 But simulated sessions only make $H_I$-queries when constructing
2909 challenges, so $R' = R_U$ for some session~$U$. But the simulator does
2910 something different in that case.
2911 \item In $\Wdkx$, the quadruple $(s, R_S, R', c')$ came from the
2912 adversary's auxiliary input. In this case the simulator must fail. But
2913 $R_S = r_S P$, and $r_S$ was chosen uniformly at random. If there are at
2914 most $n_C$ challenge sets in the auxiliary input then this happens with
32609df3 2915 probability at most $n_C/\#G$ for any given session.
a6e375a6
MW
2916 \end{itemize}
2917 We conclude that the simulator fails with probability
32609df3 2918 \[ \frac{q_M}{2^{\ell_I}} + \frac{q_S n_C}{\#G}. \]
a6e375a6
MW
2919 (Note that we only consider $n_C = 0$ for $\Wkx$.) No adversary can
2920 distinguish the simulator from a real interaction unless the simulator
2921 fails, and the simulator is a fake-world simulator. We therefore apply
2922 proposition~\ref{prop:fakesim}; the theorem follows.
32609df3 2923\end{longproof}
a6e375a6 2924
32609df3 2925\ifshort\else
a6e375a6
MW
2926\subsection{Practical issues}
2927\label{sec:practice}
2928
2929\subsubsection{Denial of service from spoofers}
2930The adversary we considered in~\ref{sec:um} is very powerful. Proving
2931security against such a powerful adversary is good and useful. However,
2932there are other useful security properties we might like against weaker
2933adversaries.
2934
2935Eavesdropping on the Internet is actually nontrivial. One needs control of
2936one of the intermediate routers between two communicating parties. (There
2937are tricks one can play involving subversion of DNS servers, but this is also
2938nontrivial.) However, injecting packets with bogus source addresses is very
2939easy.
2940
4331bf84 2941Layering the protocol over TCP \cite{rfc793} ameliorates this problem because
a6e375a6
MW
2942an adversary needs to guess or eavesdrop in order to obtain the correct
2943sequence numbers for a spoofed packet; but the Wrestlers protocol is
2944sufficiently simple that we'd like to be able to run it over UDP
4331bf84 2945\cite{rfc768}, for which spoofing is trivial.
a6e375a6
MW
2946
2947Therefore, it's possible for anyone on the 'net to send Alice a spurious
2948challenge message $(R, c)$. She will then compute $Y = a R$, recover $r' = c
\xor H_I(\ldots, R, Y)$, check that $R = r' P$, and so on.  That's at least two
2950scalar multiplications to respond to a spoofed packet, and even with very
2951efficient group operations, coping with this kind of simple denial-of-service
2952attack might be difficult.
2953
2954A straightforward solution is to use the Deniable variant of the protocol,
2955and require a challenge to quote its matching session's challenge $R'$ in its
2956challenge. That is, upon receiving a $(\cookie{pre-challenge}, R')$, the
2957session sends $(\cookie{challenge}, R', R, c)$. Alice then rejects any
2958\emph{incoming} challenge message which doesn't quote her current challenge
2959value. Now only eavesdroppers can force her to perform expensive
2960computations.
2961
2962Indeed, one need not quote the entire challenge $R'$: it suffices to send
2963some short hard-to-guess hash of it, maybe just the bottom 128 bits or so.
2964
2965This can't reduce security. Consider any adversary attacking this protocol
2966variant. We can construct an adversary which attacks the original protocol
2967just as efficiently. The new adversary attaches fake $R'$ values to
2968challenges output by other parties, and strips them off on delivery,
2969discarding messages with incorrect $R'$ values.
2970
a6e375a6
MW
2971\subsubsection{Key confirmation}
2972Consider an application which uses the Wrestlers protocol to re-exchange keys
2973periodically. The application can be willing to \emph{receive} incoming
2974messages using the new key as soon as the key exchange completes
2975successfully; however, it should refrain from \emph{sending} messages under
2976the new key until it knows that its partner has also completed. The partner
2977may not have received the final response message, and therefore be unwilling
2978to accept a new key; it will therefore (presumably) reject incoming messages
2979under this new key.
2980
2981While key confirmation is unnecessary for \emph{security}, it has
2982\emph{practical} value, since it solves the above problem. If the
2983application sends a \cookie{switch} message when it `completes', it can
2984signal its partner that it is indeed ready to accept messages under the new
dff0fad2
MW
2985key. Our implementation sends $(\cookie{switch-rq}, E_{K_0}(H_S(0, R, R')))$
2986as its switch message; the exact contents aren't important. Our
2987retransmission policy (below) makes use of an additional message
2988\cookie{switch-ok}, which can be defined similarly.
2989
2990It's not hard to show that this doesn't adversely affect the security of the
2991protocol, since the encrypted message is computed only from public values.
2992In the security proof, we modify the generation of \cookie{response}
2993messages, so that the plaintexts are a constant string rather than the true
2994responses, guaranteeing that the messages give no information about the
2995actual response. To show this is unlikely to matter, we present an adversary
2996attacking the encryption scheme by encrypting either genuine responses or
2997fake constant strings. Since the adversary can't distinguish which is being
2998encrypted (by the definition of IND-CCA security,
definition~\ref{def:ind-cca}), the change goes unnoticed.  In order to
incorporate our switch messages, we need only modify this adversary, to
3001implement the modified protocol. This is certainly possible, since the
3002messages contain (hashes of) public values. We omit the details.
3003
d8139f2c
MW
3004However, while the extra message doesn't affect the security of our protocol,
3005it would be annoying if an adversary could forge the switch request message,
3006since this would be a denial of service. In the strong adversarial model,
3007this doesn't matter, since the adversary can deny service anyway, but it's a
3008concern against less powerful adversaries. Most IND-CCA symmetric encryption
081da1b1
MW
3009schemes also provide integrity of plaintexts
3010\cite{bellare-namprempre-2000:authn-enc-notions} (e.g., the
3011encrypt-then-MAC generic composition approach
3012\cite{bellare-namprempre-2000:authn-enc-notions,%
3013 krawczyk-2001:order-enc-authn},
3014and the authenticated-encryption modes of
3015\cite{rogaway-2001:ocb,bellare-2004:eax,%
3016 mcgrew-viega-2004:gcm-security-performance}),
3017so this isn't a great imposition.
d8139f2c 3018
dff0fad2
MW
3019\subsubsection{Optimization and piggybacking}
3020We can optimize the number of messages sent by combining them. Here's one
3021possible collection of combined messages:
3022\begin{description}
3023\item [\cookie{pre-challenge}] $R$
\item [\cookie{challenge}] $R'$, $R$, $c = H_I(R', X, s, R, r X') \xor r$
3025\item [\cookie{response}] $R'$, $R$, $c$, $E_{K_0}(x R')$
3026\item [\cookie{switch}] $R'$, $E_{K_0}(x R', H_S(0, R, R'))$
3027\item [\cookie{switch-ok}] $R'$, $E_{K_0}(H_S(1, R, R'))$
3028\end{description}
3029The combination is safe:
3030\begin{itemize}
3031\item the \cookie{switch} and \cookie{switch-ok} messages are safe by the
3032 argument above; and
3033\item the other recombinations can be performed and undone in a `black box'
3034 way, by an appropriately defined SK-security adversary.
3035\end{itemize}
a6e375a6
MW
3036
3037\subsubsection{Unreliable transports}
4331bf84 3038The Internet UDP \cite{rfc768} is a simple, unreliable protocol for
a6e375a6
MW
3039transmitting datagrams. However, it's very efficient, and rather attractive
3040as a transport for datagram-based applications such as virtual private
dff0fad2
MW
3041networks (VPNs). Since UDP is a best-effort rather than a reliable
3042transport, it can occasionally drop packets. Therefore it is necessary for a
3043UDP application to be able to retransmit messages which appear to have been
3044lost.
a6e375a6 3045
dff0fad2
MW
3046We recommend the following simple retransmission policy for running the
3047Wrestlers protocol over UDP.
3048\begin{itemize}
3049\item Initially, send out the \cookie{pre-challenge} message every minute.
3050\item On receipt of a \cookie{pre-challenge} message, send the corresponding
3051 full \cookie{challenge}, but don't retain any state.
3052\item On receipt of a (valid) \cookie{challenge}, record the challenge value
3053 $R'$ in a table, together with $K = (K_0, K_1)$ and the response $Y' = x
3054 R'$. If the table is full, overwrite an existing entry at random. Send
3055 the corresponding \cookie{response} message, and retransmit it every ten
3056 seconds or so.
3057\item On receipt of a (valid) \cookie{response}, discard any other
3058 challenges, and stop sending \cookie{pre-challenge} and \cookie{response}
3059 retransmits. At this point, the basic protocol described above would
3060 \emph{accept}, so the key $K_1$ is known to be good. Send the
3061 \cookie{switch} message, including its response to the (now known-good)
3062 sender's challenge.
3063\item On receipt of a (valid) \cookie{switch}, send back a \cookie{switch-ok}
3064 message and stop retransmissions. It is now safe to start sending messages
3065 under $K_1$.
3066\item On receipt of a (valid) \cookie{switch-ok}, stop retransmissions. It
3067 is now safe to start sending messages under $K_1$.
3068\end{itemize}
a6e375a6
MW
3069
3070\subsubsection{Key reuse}
dff0fad2
MW
3071Suppose our symmetric encryption scheme $\E$ is not only IND-CCA secure
3072(definition~\ref{def:ind-cca}) but also provides integrity of plaintexts
081da1b1
MW
3073\cite{bellare-namprempre-2000:authn-enc-notions} (or, alternatively,
is an AEAD scheme \cite{rogaway-2002:aead}).  Then we can use it to construct
3075a secure channel, by including message type and sequence number fields in the
3076plaintexts, along with the message body. If we do this, we can actually get
3077away with just the one key $K = H_K(Z)$ rather than both $K_0$ and $K_1$.
dff0fad2
MW
3078
3079To do this, it is essential that the plaintext messages (or additional data)
3080clearly distinguish between the messages sent as part of the key-exchange
3081protocol and messages sent over the `secure channel'. Otherwise, there is a
d8139f2c
MW
3082protocol-interference attack: an adversary can replay key-exchange
3083ciphertexts to insert the corresponding plaintexts into the channel.
dff0fad2
MW
3084
3085We offer a sketch proof of this claim in appendix~\ref{sec:sc-proof}.
32609df3 3086\fi
dff0fad2
MW
3087
3088%%%--------------------------------------------------------------------------
a6e375a6 3089
dff0fad2
MW
3090\section{Conclusions}
3091\label{sec:conc}
3092
3093We have presented new protocols for identification and authenticated
3094key-exchange, and proven their security. We have shown them to be efficient
3095and simple. We have also shown that our key-exchange protocol is deniable.
3096Finally, we have shown how to modify the key-exchange protocol for practical
3097use, and proven that this modification is still secure.
a6e375a6
MW
3098
3099%%%--------------------------------------------------------------------------
3100
3101\section{Acknowledgements}
3102
3103The Wrestlers Protocol is named after the Wrestlers pub in Cambridge where
3104Clive Jones and I worked out the initial design.
3105
3106%%%--------------------------------------------------------------------------
3107
081da1b1 3108\bibliography{mdw-crypto}
a6e375a6
MW
3109
3110%%%--------------------------------------------------------------------------
3111
32609df3 3112\ifshort\def\next{\end{document}}\expandafter\next\fi
a6e375a6
MW
3113\appendix
3114\section{Proofs}
3115
3116\subsection{Proof of theorem~\ref{thm:sk}}
3117\label{sec:sk-proof}
3118
3119Before we embark on the proof proper, let us settle on some notation. Let
3120$P_i$ be a party. Then we write $x_i$ for $P_i$'s private key and $X_i = x_i
3121P$ is $P_i$'s public key. Let $S = (P_i, P_j, s)$ be a session. We write
3122$r_S$ for the random value chosen at the start of the session, and $R_S$,
3123$c_S$ etc.\ are the corresponding derived values in that session.
3124
a6e375a6
MW
3125The proof uses a sequence of games. For each game~$\G{i}$, let $V_i$ be the
3126event that some pair of unexposed, matching sessions both complete but output
3127different keys, and let $W_i$ be the event that the adversary's final output
3128equals the game's hidden bit~$b^*$. To save on repetition, let us write
3129\[ \diff{i}{j} = \max(|\Pr[V_i] - \Pr[V_j]|, |\Pr[W_i] - \Pr[W_j]|). \]
3130Obviously,
3131\[ \diff{i}{j} \le \sum_{i\le k<j} \diff{k}{k + 1}. \]
3132
3133Here's a quick overview of the games we use.
3134\begin{itemize}
3135\item $\G0$ is the original SK-security game.
3136\item In $\G1$, we abort the game unless all parties' public keys are
3137 distinct. Since keys are generated at random, parties are unlikely to be
3138 given the same key by accident.
3139\item In $\G2$, we change the way sessions respond to challenge messages, by
3140 using the extractor to fake up answers to most challenges. Since the
3141 extractor is good, the adversary is unlikely to notice.
3142\item In $\G3$, we abort the game if the adversary ever queries $H_K(\cdot)$
3143 on the Diffie-Hellman secret $r_S r_T P$ shared between two unexposed
3144 matching sessions. We show that this is unlikely to happen if the
3145 Diffie-Hellman problem is hard.
3146\item In $\G4$, we abort the game if any unexposed session \emph{accepts} a
3147 response message which wasn't sent by a matching session.
3148\end{itemize}
3149Finally, we show that the adversary has no advantage in $\G4$. The theorem
3150follows.
3151
3152For ease of comprehension, we postpone the detailed proofs of some of the
3153steps until after we conclude the main proof.
3154
3155Let $A$ be a given adversary which runs in time~$t$, creates at most~$q_S$
3156sessions, delivers at most~$q_M$ messages, and makes at most~$q_I$ queries to
3157its $H_I(\cdot, \cdot, \cdot, \cdot)$ oracle and at most~$q_K$ queries to its
3158$H_K(\cdot)$ oracle. Let $\G0$ be the original SK-security game of
3159definition~\ref{def:sk}, played with adversary~$A$.
3160
3161Game~$\G1$ is the same as game~$\G0$ except, if the initialization function
reports two parties as having the same public key (i.e., we have $X_i =
X_j$ for some $0 \le i < j < n$), we stop the game immediately and without
3164crediting the adversary with a win. This only happens when the corresponding
3165private keys are equal, i.e., $x_i = x_j$, and since the initialization
3166function chooses private keys uniformly at random, this happens with
32609df3 3167probability at most $\binom{n}{2}/\#G$. Since if this doesn't happen, the
a6e375a6
MW
3168game is identical to $\G0$, we can apply lemma~\ref{lem:shoup}, and see that
3169\begin{equation}
3170 \label{eq:sk-g0-g1}
32609df3 3171 \diff{0}{1} \le \frac{1}{\#G} \binom{n}{2} = \frac{n (n - 1)}{2 \#G}.
a6e375a6
MW
3172\end{equation}
3173In game~$\G1$ and onwards, we can assume that public keys for distinct
dff0fad2
MW
3174parties are themselves distinct. Note that the game now takes at most
3175$O(q_I)$ times longer to process each message delivered by the adversary.
3176This is where the $O(q_I q_M)$ term comes from in the theorem statement.
a6e375a6
MW
3177
3178Game~$\G2$ is the same as game~$\G1$, except that we change the way that we
3179make parties respond to \cookie{challenge} messages $(\cookie{challenge}, R,
3180c)$. Specifically, suppose that $S = (P_i, P_j, s)$ is a session.
3181\begin{itemize}
3182\item Suppose $T = (P_j, P_i, s)$ is the matching session of $S$. The game
3183 proceeds as before if $(R, c) = (R_T, c_T)$ is the challenge issued by $T$.
3184\item Otherwise, we run the extractor $T_\Wident$ on the adversary's history
3185 so far of oracle queries $H_I(X_i, s, R, \cdot)$ to determine a pair $(r,
3186 Y)$. If $r = \bot$ then we discard the message. Otherwise, we add $R$ to
3187 the list~$C$, and return a fake response to the adversary by computing $K =
3188 H_K(r R_S)$ and handing the adversary $(\cookie{response}, R_S, E_K(Y))$.
3189\end{itemize}
3190The following lemma shows how this affects the adversary's probabilities of
3191winning.
3192\begin{lemma}
3193 \label{lem:sk-g1-g2}
3194 \begin{equation}
3195 \label{eq:sk-g1-g2}
3196 \diff{1}{2} \le \frac{q_M}{2^{\ell_I}}.
3197 \end{equation}
3198\end{lemma}
3199
3200Let us say that a session $S = (P_i, P_j, s)$ is \emph{ripe} if
3201\begin{itemize}
3202\item there is a matching session $T = (P_j, P_i, s)$, and
3203\item $S$ is unexposed.
3204\end{itemize}
3205Suppose that $S$ is a ripe session, and that it has a matching session~$T$:
3206let $Z_S = Z_T = r_S r_T P$.
3207
3208Game~$\G3$ is the same as $\G2$, except that the game is immediately aborted
3209if ever the adversary queries its random oracle $H_K(\cdot)$ at a value $Z_S$
3210for any ripe session~$S$. The following lemma shows how this affects the
3211adversary's probabilities of winning.
3212\begin{lemma}
3213 \label{lem:sk-g2-g3}
dff0fad2 3214 For some $t'$ within the bounds given in the theorem statement we have
a6e375a6
MW
3215 \begin{equation}
3216 \label{eq:sk-g2-g3}
3217 \diff{2}{3} \le q_S \InSec{mcdh}(G; t', q_K).
3218 \end{equation}
3219\end{lemma}
3220
3221Game~$\G4$ is the same as $\G3$ except that the game is immediately aborted
3222if ever the adversary sends a response message to a ripe session~$S$ which
3223wasn't output by its matching session as a response to $S$'s challenge, with
3224the result that $S$ completes.
3225
3226Let's make this more precise. Let $S$ and $T$ be a pair of matching
3227sessions. Let $C_S = (\cookie{challenge}, R_S, c_S)$ be the challenge
3228message sent by $S$. Let $M_T$ be the set of messages which $T$ has sent
3229upon delivery of $C_S$. Then, in $\G4$, we abort the game if, for any pair $S$
3230and~$T$ of matching, unexposed sessions, $S$ has completed as a result of
3231being sent a message $\mu \notin M_T$. We have the following lemma.
3232\begin{lemma}
3233 \label{lem:sk-g3-g4}
dff0fad2 3234 For a $t'$ within the stated bounds, we have
a6e375a6
MW
3235 \begin{equation}
3236 \label{eq:sk-g3-g4}
3237 \diff{3}{4} \le q_S \bigl( \InSec{ind-cca}(\E; t', q_M, q_M) +
3238 n \cdot \InSec{mcdh}(G; t', q_M + q_I) \bigr)
3239 \end{equation}
3240\end{lemma}
3241
3242Finally, let us consider the state we're in with $\G4$.
3243\begin{itemize}
3244\item No ripe session completes except as a result of the adversary faithfully
3245 delivering messages between it and its matching session.
3246\item The adversary never queries $Z_S$ for any ripe session~$S$. If we set
3247 $K_S = (K_{S, 0}, K_{S, 1}) = H_K(Z_S)$, then $K_{S, 1}$ is the key output
3248 by $S$ when it completes.
3249\item If $S$ and $T$ are matching ripe sessions, then $K_S = K_T$, since $Z_S
3250 = r_S R_T = r_T R_S = Z_T$.
3251\item For any ripe session~$S$, $K_{S, 1}$ is uniformly distributed in
3252 $\Bin^\kappa$ and independent of the adversary's view.
3253\item If $S = (P_i, P_j, s)$ and $T = (P_j, P_i, s)$ are matching ripe
3254 sessions, then $Z_S$ depends only on $r_S$ and $r_T$. Hence, once $S$ and~$T$
3255 complete, and erase their states, $Z_S$ is independent of everything except
3256 the messages sent between the two sessions. In particular, $Z_S$ is
3257 independent of the long-term secrets $x_i$ and~$x_j$, so if either player
3258 is later corrupted, the key $K_{S, 1}$ remains independent of the
3259 adversary's view.
3260\item Hence, the keys output by unexposed sessions are indistinguishable from
3261 freshly-generated random strings, and remain so indefinitely.
3262\end{itemize}
3263We conclude that, for any adversary $A$,
3264\begin{equation}
3265 \label{eq:sk-g4}
3266 \Pr[V_4] = 0 \qquad \text{and} \qquad \Pr[W_4] = \frac{1}{2}.
3267\end{equation}
3268Putting equations~\ref{eq:sk-g0-g1}--\ref{eq:sk-g4} together, we find
32609df3 3269\begingroup \splitright=4em minus 4em
a6e375a6 3270\begin{spliteqn}
dff0fad2
MW
3271 \Adv{sk}{\Wident^{G, \E}}(A) \le
3272 2 q_S \bigl(\InSec{ind-cca}(\E; t', q_M, q_M) + {} \\
3273 \InSec{mcdh}(G; t', q_K) +
3274 n \,\InSec{mcdh}(G; t', q_M + q_I) \bigr) + {}
32609df3 3275 \frac{n (n - 1)}{\#G} +
a6e375a6 3276 \frac{2 q_M}{2^{\ell_I}}.
32609df3 3277\end{spliteqn} \endgroup
a6e375a6
MW
3278The theorem follows, since $A$ was chosen arbitrarily.
3279
3280
3281\begin{proof}[Proof of lemma~\ref{lem:sk-g1-g2}]
3282 The two games $\G1$ and~$\G2$ differ only in whether they accept or reject
3283 particular challenge messages $(\cookie{challenge}, R, c)$.
3284
3285 We claim firstly that no message is \emph{accepted} by $\G2$ which would
3286 have been rejected by $\G1$. To prove the claim, it is sufficient to note
3287 that the extractor's output, if not $\bot$, is always correct, and hence if
3288 $\G2$ accepts a message then $\G1$ would have done so too.
3289
3290 Since $\G2$ also behaves identically when the adversary submits to~$S$ the
3291 challenge from the matching session~$T$, we have nothing to prove in this
3292 case. Let $F$ be the event that the adversary submits a message
3293 $(\cookie{challenge}, R, c)$ to a session~$S$ which $S$ would have accepted
3294 in $\G1$ but would be rejected by the new rule in~$\G2$. By
3295 lemma~\ref{lem:shoup} we have $\diff{1}{2} \le \Pr[F]$. To prove the
3296 current lemma, therefore, we must show that $\Pr[F] \le q_M/2^{\ell_I}$.
3297
3298 Rather than consider individual challenge messages, we consider
3299 \emph{classes} of messages. We shall refer to a quadruple~$\Cid = (i, j,
3300 s, R)$ as a \emph{class-id}, and define some useful functions:
3301 \begin{itemize}
3302 \item the class's \emph{session} $\Csession(\Cid) = (P_i, P_j, s)$;
3303 \item the class's \emph{index} $\Cindex(\Cid)$ is $r \in I$ where $R = r
3304 P$, which is well-defined by lemma~\ref{lem:unique-dl};
3305 \item the class's \emph{query} $\Cquery(\Cid) = (X_j, s, R, x_i R)$;
3306 \item the class's \emph{hash} $\Chash(\Cid) = H_I(\Cquery(\Cid)) = H_I(X_j,
3307 s, R, x_i R)$;
3308 \item the class's \emph{check-value} $\Ccheck(\Cid) = \Chash(\Cid) \xor
3309 \Cindex(\Cid)$;
3310 \item the class's \emph{check-set} $\Ccheckset(\Cid)$ is the set of
3311 check-values $c$ such that a message $(\cookie{challenge}, R, c)$ was
3312 sent to session $S = (P_i, P_j, s)$; and
3313 \item the class's \emph{count} $\Ccount(\Cid) = |\Ccheckset(\Cid)|$.
3314 \end{itemize}
3315
3316 Consider any class-id~$\Cid = (i, j, s, R)$. A natural question which
3317 arises is: which participants have issued $\Cid$'s query, i.e., queried
3318 $H_I$ at $\Cquery(\Cid)$?
3319
3320 We can characterise the $H_I(\cdot, \cdot, \cdot, \cdot)$ queries of a
3321 session $U = (P_{i'}, P_{j'}, s')$ as follows:
3322 \begin{itemize}
3323 \item computing the check-value for the challenge $R_U$ by querying
3324 $H_I(X_{i'}, s', R_U, r_U X_{j'})$, and
3325 \item checking an incoming challenge~$R'$ by querying $H_I(X_{j'}, s', R',
3326 x_{i'} R')$.
3327 \end{itemize}
3328 The class~$\Cid$'s query $\Cquery(\Cid)$ is $U$'s check-value query if
3329 \[ (j, i, s, R) = (i', j', s', R_U) \]
3330 i.e., $U$ is the matching session of $\Csession(\Cid)$, and moreover $R =
3331 R_U$ is the challenge value issued by $U$. For any $c \in
3332 \Ccheckset(\Cid)$, if $c = \Ccheck(\Cid)$ then $(\cookie{challenge}, R, c)$
3333 is precisely the challenge message issued by~$U$ to $\Csession(\Cid)$; the
3334 rules for handling this message didn't change. However, if $c \ne
3335 \Ccheck(\Cid)$ then the message would have been rejected in $\G1$, and we
3336 have already shown that $\G2$ continues to reject all messages rejected by
3337 $\G1$.
3338
3339 Let us say that a class-id~$\Cid = (i, j, s, R)$ is \emph{bad} if
3340 \begin{enumerate}
3341 \item the value $R$ is not the challenge issued by $\Csession(\Cid)$'s
3342 matching session, and
3343 \item the adversary has not issued $\Cid$'s query $\Cquery(\Cid)$,
3344 \emph{but}
3345 \item $\Ccheck(\Cid) \in \Ccheckset(\Cid)$, so one of the check-values
3346 submitted to $S$ was actually correct.
3347 \end{enumerate}
3348 We claim that our extractor will work perfectly unless some class-id is
3349 bad. Certainly, if $R$ was issued by the matching session, there is
3350 nothing to prove; if the adversary has issued the relevant query then the
3351 extractor will recover $\Cindex(\Cid)$ just fine; and if $\Ccheck(\Cid)
3352 \notin \Ccheckset(\Cid)$ then all messages in the class would have been
3353 rejected by $\G1$ anyway.
3354
3355 Let $B(\Cid)$ be the event that the class~$\Cid$ is bad. We claim that
3356 \[ \Pr[B(\Cid)] \le \frac{\Ccount(\Cid)}{2^{\ell_I}}. \]
3357 The present lemma follows, since
3358 \[ \diff{1}{2}
3359 \le \Pr[F]
3360 \le \sum_\Cid \Pr[B(\Cid)]
3361 \le \sum_\Cid \frac{\Ccount(\Cid)}{2^{\ell_I}}
3362 = \frac{1}{2^{\ell_I}} \sum_\Cid \Ccount(\Cid)
3363 \le \frac{q_M}{2^{\ell_I}}
3364 \]
32609df3 3365 as required.
a6e375a6
MW
3366
3367 Now observe that, in $\G2$, sessions don't actually check incoming
3368 challenges in this way any more -- instead we run the extractor. So, to
3369 prove the claim, we consider a class~$\Cid$ where properties~1 and~2 above
3370 hold. The correct hash $\Chash(\Cid)$ is then independent of the rest of
3371 the game, so the probability that $\Ccheck(\Cid) \in \Ccheckset(\Cid)$ is
3372 precisely $\Ccount(\Cid)/2^{\ell_I}$ as required.
3373
3374 This completes the proof of the lemma.
3375\end{proof}
3376
3377\begin{proof}[Proof of lemma~\ref{lem:sk-g2-g3}]
3378 Let $F$ be the event that the adversary makes a query $H_K(Z_S)$ for some
3379 ripe session~$S$. Since $\G3$ proceeds exactly as $\G2$ did unless $F$
3380 occurs, we apply lemma~\ref{lem:shoup}, which tells us that $\diff{2}{3}
3381 \le \Pr[F]$. We must therefore bound this probability.
3382
3383 To do this, we consider a new game~$\G3'$, which is the same as $\G3$,
3384 except that, at the start of the game, we choose a random number $k \inr
3385 \Nupto{q_S}$. For $0 \le i < q_S$, let $S_i$ be the $i$th session created
3386 by the adversary. We define $F'$ to be the event that the adversary
3387 queries $H_K(Z_{S_k})$ when $S_k$ is ripe.
3388
3389 The lemma now follows from these two claims.
3390
3391 \begin{claim}
3392 $\Pr[F] \le q_S \Pr[F']$.
3393 \end{claim}
3394 To see this, for any session~$S$, let $F_S$ be the event that the adversary
3395 queries~$H_K(Z_S)$ when $S$ is ripe. Then
3396 \[ \Pr[F] \le \sum_{0\le i<q_S} \Pr[F_{S_i}]. \]
3397 Hence,
3398 \[ \Pr[F'] = \Pr[F_{S_k}] = \sum_{0\le i<q_S} \Pr[F_{S_i}] \Pr[k = i]
3399 = \frac{1}{q_S} \sum_{0\le i<q_S} \Pr[F_{S_i}]
3400 \ge \frac{\Pr[F]}{q_S}
3401 \]
3402 proving the claim.
3403
3404 \begin{claim}
3405 For some $t' = t + O(n) + O(q_S q_M) + O(q_I) + O(q_K)$, we have
3406 $\Pr[F'] \le \InSec{mcdh}(G; t', q_K).$
3407 \end{claim}
3408 To prove this claim, we construct an adversary~$B$ which solves the MCDH
3409 problem in~$G$. The adversary works as follows.
3410 \begin{enumerate}
3411 \item It is given a pair $(R^*, S^*) = (r^* P, s^* P)$ of group elements;
3412 its objective is to make a verification-oracle query $V(Z^*)$ where $Z^*
3413 = r^* s^* P$.
3414 \item It sets up a simulation of the game~$\G3'$, by running the
3415 $\id{init}$ function, and simulating all of the parties. In particular,
3416 it chooses a random~$k \in \Nupto{q_S}$.
3417 \item It sets up accurate simulations of the random oracles $H_K(\cdot)$
3418 and $H_I(\cdot, \cdot, \cdot, \cdot)$, which choose random outputs for
3419 new, fresh inputs. However, whenever $A$ queries $H_K(\cdot)$ on a group
3420 element $Z$, $B$ also queries $V(Z)$.
3421 \item It runs $A$ in its simulated game. It responds to all of $A$'s
3422 instructions faithfully, until the $k$th session-creation.
3423 \item When creating the $k$th session $S = S_k = (P_i, P_j, s)$, $B$ has
3424 party~$P_i$ choose $R^*$ as its challenge, rather than choosing $r_S$ and
3425 setting $R_S = r_S P$. Because it simulates all the parties, $B$ can
3426 compute $Y_S = x_j R^*$, which is still correct.
3427 \item If $A$ requests the creation of a matching session $T = (P_j, P_i,
3428 s)$ then $B$ has party~$P_j$ choose $S^*$ as its challenge. Again, $B$
3429 computes $Y_T = x_i S^*$.
3430 \item If $A$ ever corrupts the parties $P_i$ or $P_j$, or reveals the
3431 session state of $S$ or $T$ then $B$ stops the simulation abruptly and
3432 halts.
3433 \end{enumerate}
dff0fad2
MW
3434 Adversary $B$'s running time is within the bounds required of $t'$, and $B$
3435 makes at most $q_K$ queries to $V(\cdot)$; we therefore have
a6e375a6
MW
3436 \[ \Pr[F'] \le \Succ{mcdh}{G}(B) \le \InSec{mcdh}(G; t', q_K) \]
3437 as required.
3438\end{proof}
3439
3440\begin{proof}[Proof of lemma~\ref{lem:sk-g3-g4}]
3441 Let $F_4$ be the event under which we abort the game~$\G4$. Clearly, if
3442 $F$ doesn't occur, games $\G3$ and $\G4$ proceed identically, so we can
3443 apply lemma~\ref{lem:shoup} to see that $\diff{3}{4} \le \Pr[F_4]$.
3444 Bounding $\Pr[F_4]$, however, is somewhat complicated. We use a further
3445 sequence of games.
3446
3447 Firstly, let $\G5$ be like $\G4$ with the exception that we choose, at
3448 random, an integer~$k \inr \Nupto{q_S}$. As we did in the proof of
3449 lemma~\ref{lem:sk-g2-g3}, let $S_i$ be the $i$th session created by the
3450 adversary. For each session~$S_i$, let $T_i$ be its matching session, if
3451 there is one. We define $F_5$ to be the event that
3452 \begin{itemize}
3453 \item $S_k$ completes immediately following delivery of a message $\mu
3454 \notin M_{T_k}$, and
3455 \item $S_k$ was ripe at this point.
3456 \end{itemize}
3457 For games~$\G{i}$, for $i > 5$, we define the event $F_i$ to be the event
3458 corresponding to $F_5$ in $\G{i}$. Note that if $S_k$ \emph{is} sent a
3459 message in $M_{T_k}$ then $S_k$ immediately completes.
3460
3461 \begin{claim}
3462 $\Pr[F_4] \le q_S \Pr[F_5]$.
3463 \end{claim}
3464 This claim is proven exactly as we did for claim~1 of
3465 lemma~\ref{lem:sk-g2-g3}.
3466
3467 Let~$\G6$ be the same as $\G5$ except that we change the encrypted
3468 responses of session~$S_k$ and its matching session~$T_k$. Let $K^* =
3469 (K_0^*, K_1^*) = H_K(Z_S)$. Then, rather than sending $(\cookie{response},
3470 R_S, E_{K_0^*}(Y_T))$, session~$S$ sends $(\cookie{response}, R_S,
3471 E_{K_0^*}(1^{\ell_G}))$.
3472 \begin{claim}
3473 $|\Pr[F_6] - \Pr[F_5]| \le \InSec{ind-cca}(\E; t', q_M, q_M).$
3474 \end{claim}
3475 To prove this claim, we construct an adversary~$B$ which attacks the
3476 IND-CCA security of our encryption scheme $\E$. The adversary~$B$ works as
3477 follows.
3478 \begin{enumerate}
3479 \item It is given no input, but a pair of oracles $E(\cdot, \cdot)$ and
3480 $D(\cdot)$; the former encrypts either the left or right input, according
3481 to a hidden bit, and the latter decrypts ciphertexts other than those
3482 returned by $E(\cdot, \cdot)$. Its goal is to guess the hidden bit.
3483 \item It sets up a simulation of the game~$\G5$, by running the $\id{init}$
3484 function, and simulating all of the parties. In particular, it chooses a
3485 random $k \in \Nupto{q_S}$.
3486 \item It sets up accurate simulations of the random oracles $H_K(\cdot)$
3487 and $H_I(\cdot, \cdot, \cdot, \cdot)$.
3488 \item It runs $A$ in its simulated game. It responds to all of $A$'s
3489 instructions faithfully, except for the matching sessions $S_k$ and
3490 $T_k$. Let $S = S_k = (P_i, P_j, s)$, and $T = T_k = (P_j, P_i, s)$.
3491 \item Suppose $T$ is sent the message $C_S = (\cookie{challenge}, R_S,
3492 c_S)$. Rather than computing $K^* = H_K(r_T R_S)$ and performing the
3493 encryption properly, $B$ queries its left-or-right encryption oracle
3494 $E(\cdot, \cdot)$ on $(1^{\ell_G}, x_j R_S)$, and sends the resulting
3495 ciphertext~$\chi$ back to~$S$ as part of a message $(\cookie{response},
3496 R_T, \chi)$. The set $M_T$ is precisely the set of messages constructed
3497 in this fashion. (Recall that challenge messages other than $C_S$ aren't
3498 actually delivered to $T$, since we simulate the responses using the
3499 extractor, as of $\G2$.)
3500 \item Suppose $S$ is sent a message $M = (\cookie{response}, R_T, \chi) \in
3501 M_T$. We immediately stop the simulation, and $B$ returns $0$.
3502 \item Suppose, instead, that $S$ is sent some message $M' =
3503 (\cookie{response}, R, \chi) \notin M_T$. There are two cases to
3504 consider. If $R = R_T$ then we must have $\chi$ distinct from the
3505 ciphertexts returned by the $E(\cdot, \cdot)$ oracle, so we can invoke
3506 the decryption oracle $D(\cdot)$ on $\chi$ to obtain a response $Y$.
3507 Alternatively, if $R \ne R_T$, we can compute the key $K = (K_0, K_1) =
3508 H_K(r_S R)$, and recover $Y = D_{K_0}(\chi)$. In either case, if $Y =
3509 r_S X_j$ then $S$ would complete at this point: $B$ stops the simulation
3510 and returns $1$.
3511 \item If $A$ exposes $S$ (by corrupting $P_i$ or~$P_j$, or revealing $S$ or
3512 $T$) then we stop the simulation and $B$ returns $0$.
3513 \item Finally, if the game stops, either because $A$ halts, or because of
3514 one of the special rules introduced in earlier games, $B$ returns $0$.
3515 \end{enumerate}
3516 It is clear that $B$'s oracle queries are acceptable, since $|x_j R_S| =
3517 \ell_G$ by definition, and $B$ never queries $D(\cdot)$ on a ciphertext
3518 returned by its encryption oracle. By the rules of~$\G3$, we know that the
3519 game stops immediately if $A$ ever queries $Z_S$, so the key $K^*$ is
3520 independent of everything in $A$'s view except the ciphertexts $\chi$
3521 output by $S$ and $T$. Therefore, if the hidden bit of the IND-CCA game is
3522 $1$, $B$ accurately simulates $\G5$, whereas if the bit is $0$ then $B$
dff0fad2
MW
3523 accurately simulates $\G6$. We issue no more than $q_M$ encryption or
3524 decryption queries. Finally, $B$'s running time is within the bounds
3525 allowed for $t'$. Therefore,
a6e375a6
MW
3526 \[ \Adv{ind-cca}{\E}(B) = \Pr[F_5] - \Pr[F_6]
3527 \le \InSec{ind-cca}(\E; t', q_M, q_M). \]
3528 We construct the adversary~$\bar{B}$ which is the same as $B$ above, except
3529 that $\bar{B}$ returns $0$ whenever $B$ returns~$1$, and \emph{vice versa}.
3530 Clearly
3531 \[ \Adv{ind-cca}{\E}(\bar{B})
3532 = (1 - \Pr[F_5]) - (1 - \Pr[F_6])
3533 = \Pr[F_6] - \Pr[F_5]
3534 \le \InSec{ind-cca}(\E; t', q_M, q_M).
3535 \]
3536 This proves the claim.
3537
3538 Let $\G7$ be the same as $\G6$, except that at the start of the game we
3539 choose a random $m \in \Nupto{n}$, and when the adversary creates the
3540 session $S = S_k = (P_i, P_j, s)$, we abort the game unless $j = m$.
3541 Clearly we have $\Pr[F_6] = n \Pr[F_7]$.
32609df3 3542
a6e375a6
MW
3543 Finally, we can explicitly bound $F_7$. In $\G6$, the adversary's view is
3544 independent of the correct response $Y_S = r_S X_j = x_j R_S$ to $S$'s
3545 challenge. Therefore, if $A$ manages to send any message $\mu \notin M_T$
3546 which causes $S$ to complete, then it has impersonated~$P_j$.
3547 \begin{claim}
3548 $\Pr[F_7] \le \InSec{mcdh}(G; t', q_M + q_I)$.
3549 \end{claim}
3550 The present lemma follows from this and the previous claims.
3551
3552 To prove the claim formally, we construct an adversary~$B'$, which behaves
3553 as follows.
3554 \begin{enumerate}
3555 \item It is given as input a public key $X^*$ and a single challenge $(R^*,
3556 c^*)$, a random oracle $H^*_I(\cdot, \cdot)$, and an oracle $V(\cdot,
3557 \cdot)$, which verifies responses $(R, Y)$. Its goal is to invoke
3558 $V(\cdot, \cdot)$ with a correct response to the challenge.
3559 \item It chooses a random $k \in \Nupto{q_S}$ and $m \in \Nupto{n}$. It
3560 sets up a simulation of the game~$\G7$, by running the $\id{init}$
3561 function, and simulating all of the parties, except that it gives party
3562 $P_m$ the public key $X^*$. This makes no difference, since $P_m$
3563 doesn't actually need to give any `honest' responses because of the
3564 change we made in $\G6$.
3565 \item It sets up accurate simulations of the random oracles $H_K(\cdot)$
3566 and $H_I(\cdot, \cdot, \cdot, \cdot)$, with one exception -- see below.
3567 \item It runs $A$ in its simulated game. It responds to all of $A$'s
3568 instructions faithfully, except for the session $S_k$. Let $S = S_k =
3569 (P_i, P_j, s)$, and let $T = T_k = (P_j, P_i, s)$ be its matching
3570 session.
3571 \item When session~$S$ is created, $B'$ checks that $j = m$, and if not
3572 stops the simulation and halts. Otherwise, $B'$ uses the given
3573 challenge $(R^*, c^*)$. Session~$S$ sends $C_S = (\cookie{challenge},
3574 R^*, c^*)$ as its challenge to~$T$.
3575 \item When $A$ makes a query $H_I(X^*, s, R, Y)$, $B'$ answers it by
3576 querying its \emph{own} random oracle $H^*_I(R, Y)$.
3577 \item When $S$ receives a message $(\cookie{response}, R, \chi)$, we
3578 compute $(K_0, K_1) = H_K(r_S R)$, and $Y = D_{K_0}(\chi)$. If $Y \ne \bot$
3579 then $B'$ calls $V(R, Y)$.
3580 \item If $A$ reveals $S$ or corrupts $P_i$ or $P_j$ then $B'$ stops the
3581 simulation immediately and halts.
3582 \end{enumerate}
dff0fad2
MW
3583 The running time of $B'$ is within the bounds required of $t'$; it makes at
3584 most $q_I$ random-oracle and at most $q_M$ verification queries. Clearly
3585 $B'$ succeeds whenever $F_7$ occurs. The claim follows from
3586 theorem~\ref{thm:wident-sound}.
a6e375a6
MW
3587\end{proof}
3588
3589
3590\subsection{Proof of theorem~\ref{thm:sk2}}
3591\label{sec:sk2-proof}
3592
3593The proof is almost identical to the proof of theorem~\ref{thm:sk}, in
3594appendix~\ref{sec:sk-proof}. Unfortunately a black-box reduction doesn't
32609df3 3595seem possible.
a6e375a6
MW
3596
3597We use the games and notation of section~\ref{sec:sk-proof}.
3598
3599The change to the check-value calculation doesn't affect key-generation at
3600all, so the transition to $\G1$ goes through as before.
3601
3602The transition from $\G1$ to $\G2$ -- answering challenges using the
3603extractor -- needs a little care. Let $S = (P_i, P_j, s)$ be a session, and
3604consider an incoming message $(\cookie{challenge}, R, c)$.
3605\begin{itemize}
3606\item If $T = (P_j, P_i, s)$ is the matching session to $S$, and $R = R_T$ is
3607 the public challenge value of $T$, and $c = r_T \xor H_I(R_S, X_j, s, R_T,
3608 r_T X_i)$ is the check-value output by $T$ when it received
3609 $(\cookie{pre-challenge}, R_S)$ as input, then $S$ replies as it did in
3610 $\G1$.
3611\item If the challenge message is any other message, then we use the
3612 extractor.
3613\end{itemize}
3614As in lemma~\ref{lem:sk-g1-g2}, we examine which sessions could have queried
3615$H_I(R_S, X_j, s, R, x_i R)$, and for the same reasons conclude that only the
3616matching session would have done this, and only in response to the
3617pre-challenge $R_S$. It follows that $\diff{1}{2} \le q_M/2^{\ell_I}$ as
3618before.
3619
3620The remaining game steps go through unchanged. In particular, we conclude
3621that a ripe session will only complete if the adversary has transmitted
3622messages from its matching session correctly, and the session key is
3623independent of the adversary's view. The theorem follows.
3624
dff0fad2
MW
3625
3626\subsection{Sketch proof of single-key protocol for secure channels}
3627\label{sec:sc-proof}
3628
3629We want to show that the Wrestlers Key-Exchange protocol, followed by use of
3630the encryption scheme $\E$, with the \emph{same} key $K = K_0$, still
3631provides secure channels.
3632
3633\subsubsection{Secure channels definition}
081da1b1
MW
3634We (very briefly!) recall the \cite{canetti-krawczyk-2001:secure-channels}
3635definition of a secure channels protocol. We again play a game with the
3636adversary. At the beginning, we choose a bit $b^* \inr \{0, 1\}$ at random.
3637We allow the adversary the ability to establish \emph{secure channels}
3638sessions within the parties. Furthermore, for any running session $S = (P_i,
3639P_j, s)$, we allow the adversary to request $S$ to send a message~$\mu$
3640through its secure channel. Finally, the adversary is allowed to choose one
3641ripe \emph{challenge} session, and ask for it to send of one of a \emph{pair}
3642of messages $(\mu_0, \mu_1)$, subject to the restriction that $|\mu_0| =
dff0fad2
MW
3643|\mu_1|$; the session sends message $\mu_{b^*}$. The adversary may not
3644expose the challenge session.
3645
3646The adversary wins if (a)~it can guess the bit $b^*$, or (b)~it can cause a
3647ripe session $S$ (i.e., an unexposed, running session), with a matching
3648session~$T$ to output a message other than one that it requested that $T$
3649send.
3650
3651\subsubsection{Protocol definition}
3652The protocol begins with Wrestlers key-exchange. The encryption in the
3653key-exchange protocol is performed as $E_K(\cookie{kx}, \cdot)$; encryption
3654for secure channels is performed as $E_K(\cookie{sc}, i, o, \cdot)$, where
3655$i$ is a sequence number to prevent replays and $o \in \{S, T\}$ identifies
3656the sender.
3657
3658\subsubsection{Proof sketch}
3659We use the games and notation of appendix~\ref{sec:sk-proof}.
3660
3661The proof goes through as far as the step between $\G5$ and $\G6$ in the
3662proof of lemma~\ref{lem:sk-g3-g4}. Here we make the obvious adjustments to
3663our adversary against the IND-CCA security of $\E$. (Our adversary will need
3664to use the left-or-right oracle for messages sent using the secure channel
3665built on $K^*$. That's OK.)
3666
3667In $\G4$, we know that ripe sessions agree the correct key, and the adversary
3668never queries the random oracle, so the key is independent of the adversary's
3669view.
3670
3671We define a new game $\G8$, which is like $\G4$, except that we stop the game
3672if the adversary ever forges a message sent over the secure channel. That
3673is, if a ripe session $S$ ever announces receipt of a message not sent (at
3674the adversary's request) by its matching session $T$. Let $F_8$ be the event
3675that a forgery occurs. We apply lemma~\ref{lem:shoup}, which tells us that
3676$\diff{4}{8} \le \Pr[F_8]$. To bound $F_8$, we isolate a session at random
3677(as in lemmata \ref{lem:sk-g2-g3} and~\ref{lem:sk-g3-g4}), which tells us
3678that
3679\begin{equation}
3680 \label{eq:sc-g4-g8}
3681 \diff{4}{8} \le q_S \cdot \InSec{int-ptxt}(\E; t', q_M, q_M)
3682\end{equation}
3683Finally, we can bound the adversary's advantage at guessing the hidden bit
3684$b^*$. We isolate (we hope) the challenge session $S$ by choosing a target
32609df3 3685session at random, as before. Let $K^* = H_K(Z_S)$ be the key agreed by the
dff0fad2
MW
3686session (if it becomes ripe). We define an adversary $B$ against the IND-CCA
3687security of $\E$. The adversary $B$ simulates the game. If the adversary
3688exposes the target session, or doesn't choose it as the challenge session,
3689$B$ fails (and exits 0); otherwise it uses the left-or-right encryption
3690oracle to encrypt both of the adversary's message choices, and outputs the
3691adversary's choice. Let $b$ be the adversary's output, and let $\epsilon$ be
3692the advantage of our IND-CCA distinguisher. Then
3693\begin{eqnarray*}[rl]
3694 \Pr[b = b^*]
3695 & = \Pr[b = b^* \wedge b^* = 1] + \Pr[b = b^* \wedge b^* = 0] \\
3696 & = \frac{1}{2}\bigl( \Pr[b = b^* \mid b^* = 1] +
3697 \Pr[b = b^* \mid b^* = 0] \bigr) \\
3698 & = \frac{1}{2}\bigl( \Pr[b = b^* \mid b^* = 1] +
3699 (1 - \Pr[b \ne b^* \mid b^* = 0]) \bigr) \\
3700 & = \frac{1}{2}\bigl( \Pr[b = 1 \mid b^* = 1] -
3701 \Pr[b = 1 \mid b^* = 0] + 1 \bigr) \\
3702 & = \frac{1}{2}\bigl(1 + q_S\,\Adv{ind-cca}{\E}(B) \bigr) \\
3703 & \le \frac{1}{2} \bigl( 1 + q_S\,\InSec{ind-cca}(\E; t', q_M, q_M) \bigr).
3704 \eqnumber
3705\end{eqnarray*}
3706
a6e375a6
MW
3707\end{document}
3708
32609df3 3709%%% Local Variables:
a6e375a6
MW
3710%%% mode: latex
3711%%% TeX-master: t
32609df3 3712%%% End: