wrestlers.tex: Fix citations to catch up with Bibtex database changes.
[doc/wrestlers] / wrestlers.tex
CommitLineData
a6e375a6
MW
1%%% -*-latex-*-
2%%%
3%%% The Wrestlers Protocol: secure, deniable key-exchange
4%%%
5%%% (c) 2006 Mark Wooding
6%%%
7
b12a993f
MW
%% Option parsing: the switches \iffancystyle and \ifshort may be predefined
%% by a driver file (or on the command line) before this file is read.  If a
%% switch is still undefined (\ifx ... \xxundefined), \@doit executes the
%% braced group, which creates it with its default value (fancy = true,
%% short = false); otherwise \@gobble discards the group, preserving the
%% caller's setting.
8\makeatletter
9\def\@doit#1{#1}
10\ifx\iffancystyle\xxundefined\expandafter\@doit\else\expandafter\@gobble\fi
11{\newif\iffancystyle\fancystyletrue}
12\ifx\ifshort\xxundefined\expandafter\@doit\else\expandafter\@gobble\fi
13{\newif\ifshort\shortfalse}
%% In fancy mode \ifpdfing is expected to be supplied externally (it is
%% tested at line 33 below) -- TODO confirm; otherwise define it here,
%% defaulting to false.
14\iffancystyle\expandafter\@gobble\else\expandafter\@doit\fi{\newif\ifpdfing}
15\makeatother
32609df3
MW
16
17\typeout{Options:}
18\typeout{ Fancy style: \iffancystyle ON\else OFF\fi}
19\typeout{ Short version: \ifshort ON\else OFF\fi}
20
a6e375a6
MW
21\errorcontextlines=\maxdimen
22\showboxdepth=\maxdimen
23\showboxbreadth=\maxdimen
24
25\iffancystyle
32609df3
MW
26 \documentclass{strayman}
27 \parskip=0pt plus 1pt \parindent=1.2em
a6e375a6
MW
28 \usepackage[T1]{fontenc}
29 \usepackage[palatino, helvetica, courier, maths=cmr]{mdwfonts}
30 \usepackage[within = subsection, mdwmargin]{mdwthm}
31 \usepackage{mdwlist}
32 \usepackage{sverb}
32609df3 33 \ifpdfing\else
d8139f2c
MW
34 \PassOptionsToPackage{dvips}{xy}
35 \fi
a6e375a6 36\else
32609df3
MW
37 \PassOptionsToClass{runningheads}{llncs}
38 \documentclass{llncs}
a6e375a6
MW
39\fi
40
41\PassOptionsToPackage{show}{slowbox}
42%\PassOptionsToPackage{hide}{slowbox}
32609df3 43\usepackage{mdwtab, mdwmath, crypto}
a6e375a6
MW
44\usepackage{slowbox}
45\usepackage{amssymb, amstext}
46\usepackage{url, multicol}
47\usepackage{tabularx}
48\DeclareUrlCommand\email{\urlstyle{tt}}
49\ifslowboxshow
50 \usepackage[all]{xy}
51 \turnradius{4pt}
52\fi
32609df3 53\usepackage{mathenv}
a6e375a6 54
32609df3 55\newcommand{\Nupto}[1]{\{0, 1, \ldots, #1 - 1\}}
a6e375a6 56\iffancystyle
32609df3 57 \let\next\title
a6e375a6 58\else
32609df3 59 \def\next[#1]{\titlerunning{#1}\title}
a6e375a6
MW
60\fi
61\next
32609df3
MW
62 [The Wrestlers Protocol]
63 {The Wrestlers Protocol%
64 \ifshort\thanks{This is an extended abstract; the full version
65 \cite{Wooding:2006:WP} is available from
66 \texttt{http://eprint.iacr.org/2006/386/}.}\fi \\
a6e375a6
MW
67 A simple, practical, secure, deniable protocol for key-exchange}
68\iffancystyle
69 \author{Mark Wooding \\ \email{mdw@distorted.org.uk}}
70\else
71 \author{Mark Wooding}
72 \institute{\email{mdw@distorted.org.uk}}
73\fi
74
75\iffancystyle
76 \bibliographystyle{mdwalpha}
77 \let\epsilon\varepsilon
78 \let\emptyset\varnothing
79 \let\le\leqslant\let\leq\le
80 \let\ge\geqslant\let\geq\ge
81 \numberwithin{table}{section}
82 \numberwithin{figure}{section}
83 \numberwithin{equation}{subsection}
84 \let\random\$
85\else
32609df3 86 \bibliographystyle{splncs}
a6e375a6
MW
87 \expandafter\let\csname claim*\endcsname\claim
88 \expandafter\let\csname endclaim*\endcsname\endclaim
89\fi
90
a6e375a6
MW
91\let\Bin\Sigma
92\let\emptystring\lambda
93\edef\Pr{\expandafter\noexpand\Pr\nolimits}
94\newcommand{\bitsto}{\mathbin{..}}
95\newcommand{\E}{{\mathcal{E}}}
96\newcommand{\M}{{\mathcal{M}}}
97\iffancystyle
98 \def\description{%
99 \basedescript{%
100 \let\makelabel\textit%
101 \desclabelstyle\multilinelabel%
102 \desclabelwidth{1in}%
103 }%
104 }
105\fi
106\def\fixme#1{\marginpar{FIXME}[FIXME: #1]}
107\def\hex#1{\texttt{#1}_{x}}
108
32609df3
MW
%% longproof: a proof whose body appears only in the full version.  In the
%% short (extended-abstract) version, typeset the alternative text #1
%% (e.g., a pointer to the full paper) and discard the environment body
%% (\ignore is presumably provided by one of the mdw support packages --
%% verify); in the full version, ignore #1 and set the body as an ordinary
%% `proof' environment.
109\newenvironment{longproof}[1]{%
110 \ifshort#1\expandafter\ignore
111 \else\proof\fi
112}{%
113 \ifshort\else\endproof\fi
114}
115
a6e375a6
MW
%% \dbox{TEXT}: set TEXT in a top-aligned box (\vtop) in which \\ may be
%% used to break lines; each line is packaged in its own strutted \hbox so
%% that the lines stack with uniform baseline spacing.  The local
%% redefinition of \\ closes the current \hbox and opens the next one.
116\def\dbox#1{%
117 \vtop{%
118 \def\\{\unskip\egroup\hbox\bgroup\strut\ignorespaces}%
119 \hbox{\strut#1}%
120 }%
121}
122
123\def\Wident{\Xid{W}{ident}}
124\def\Wkx{\Xid{W}{kx}}
125\def\Wdkx{\Xid{W}{dkx}}
126\def\Func#1#2{\mathcal{F}[#1\to#2]}
127\def\diff#1#2{\Delta_{#1, #2}}
128\def\Hid{H_{\textit{ID}}}
129
130%% protocol run diagrams
131\def\PRaction#1{#1\ar[r]}
132\def\PRcreatex#1{\PRaction{\textsf{Create session\space}#1}}
133\def\PRcreate#1#2#3{\PRcreatex{(\text{#1},\text{#2},#3)}}
134\def\PRreceivex#1{\PRaction{\textsf{Receive\space}#1}}
135\def\PRreceive#1#2{\PRreceivex{\msg{#1}{#2}}}
136\def\PRsession#1{\relax\mathstrut#1\ar[r]}
137\def\msg#1#2{(\cookie{#1},#2)}
138\def\PRresult#1{#1}
139\def\PRsendx#1{\PRresult{\textsf{Send\space}#1}}
140\def\PRsend#1#2{\PRsendx{\msg{#1}{#2}}}
141\def\PRcomplete#1{\textsf{Complete:\space}#1}
142\def\PRstate#1{\textsf{State:\space}#1}
143\def\PRignore{\textsf{(ignored)}}
144\def\PRreveal{\textsf{Session-state reveal}\ar[r]}
145\def\protocolrun#1{\[\xymatrix @R=0pt @C=2em {#1}\]}
146
32609df3
MW
%% `protocol' environment: a full-width two-column tabular* for protocol
%% message-flow figures; the left column holds one party's actions, the
%% right column the other's, with rubber space between them.
147\def\protocol{%
148 \unskip\bigskip
149 \begin{tabular*}{\linewidth}%
150 {@{\qquad}l@{\extracolsep{0ptplus1fil}}r@{\qquad}}}
151\def\endprotocol{\end{tabular*}}
%% \send{ARROW}{LABEL}: draw, between two tabular rows (\noalign), an
%% Xy-pic arrow of style ARROW (e.g. ->) half a line wide, labelled with
%% the math-mode message LABEL.
152\def\send#1#2{\noalign{%
153 \centerline{\xy\ar @{#1}|*+{\mathstrut#2}<.5\linewidth, 0pt>\endxy}}}
154
a6e375a6
MW
155%% class-ids for proof of extractor lemma
156\let\Cid=\Lambda
157\let\Csession=S
158\let\Cindex=r
159\let\Cquery=Q
160\let\Chash=H
161\let\Ccheck=c
162\let\Ccheckset=V
163\let\Ccount=\nu
164
dff0fad2
MW
165\def\HG#1{\mathbf{H}_{#1}}
166
32609df3
MW
167\iffancystyle\else
168 \let\xsssec\subsubsection\def\subsubsection#1{\xsssec[#1]{#1.}}
169\fi
170
a6e375a6
MW
171\begin{document}
172
173%%%--------------------------------------------------------------------------
174
175\maketitle
d8139f2c 176\iffancystyle \thispagestyle{empty} \fi
a6e375a6
MW
177
178\begin{abstract}
179 We describe and prove (in the random-oracle model) the security of a simple
180 but efficient zero-knowledge identification scheme, whose security is based
181 on the computational Diffie-Hellman problem. Unlike other recent proposals
182 for efficient identification protocols, we don't need any additional
183 assumptions, such as the Knowledge of Exponent assumption.
184
185 From this beginning, we build a simple key-exchange protocol, and prove
186 that it achieves `SK-security' -- and hence security in Canetti's Universal
187 Composability framework.
188
189 Finally, we show how to turn the simple key-exchange protocol into a
190 slightly more complex one which provides a number of valuable `real-life'
191 properties, without damaging its security.
192\end{abstract}
193
194\iffancystyle
195\newpage
d8139f2c 196\thispagestyle{empty}
a6e375a6
MW
197\columnsep=2em \columnseprule=0pt
198\tableofcontents[\begin{multicols}{2}\raggedright][\end{multicols}]
199%%\listoffigures[\begin{multicols}{2}\raggedright][\end{multicols}]
200%% \listoftables[\begin{multicols}{2}\raggedright][\end{multicols}]
201\newpage
202\fi
203
204%%%--------------------------------------------------------------------------
205
206\section{Introduction}
207
dff0fad2
MW
208This paper proposes protocols for \emph{identification} and
209\emph{authenticated key-exchange}.
a6e375a6 210
dff0fad2
MW
211An identification protocol allows one party, say Bob, to be sure that he's
212really talking to another party, say Alice. It assumes that Bob has some way
213of recognising Alice; for instance, he might know her public key. Our
214protocol requires only two messages -- a challenge and a response -- and has
215a number of useful properties. It is very similar to, though designed
32609df3 216independently of, a recent protocol by Stinson and Wu
4331bf84
MW
217\cite{cryptoeprint:2006:337}; we discuss their protocol and compare it to
218ours in \ifshort the full version of this paper. \else
32609df3 219section~\ref{sec:stinson-ident}. \fi
a6e375a6 220
dff0fad2 221Identification protocols are typically less useful than they sound. As Shoup
4331bf84
MW
222\cite{cryptoeprint:1999:012} points out, such a protocol provides only a
223`secure ping', by which Bob can know that Alice is `there', but provides no
224guarantee that any other communication was sent to or reached her. However,
225there are situations where there is an authentic channel between two
226entities -- e.g., a device and a smartcard -- and a simple identification
227protocol can still be useful.
a6e375a6 228
dff0fad2
MW
229An authenticated key-exchange protocol lets Alice and Bob agree on a shared
230secret, known to them alone, even if there is an enemy who can read and
231intercept all of their messages, and substitute messages of her own. Once
232they have agreed on their shared secret, of course, they can use standard
233symmetric cryptography techniques to ensure the privacy and authenticity of
234their messages.
a6e375a6
MW
235
236
237\subsection{Desirable properties of our protocols}
238
239Our identification protocol has a number of desirable properties.
240\begin{itemize}
241\item It is \emph{simple} to understand and implement. In particular, it
242 requires only two messages.
243\item It is fairly \emph{efficient}, requiring two scalar multiplications by
244 each of the prover and verifier.
245\item It is provably \emph{secure} (in the random oracle model), assuming the
246 intractability of the computational Diffie-Hellman problem.
247\end{itemize}
248
dff0fad2 249Our key-exchange protocol also has a number of desirable
d8139f2c 250properties.
a6e375a6
MW
251\begin{itemize}
252\item It is fairly \emph{simple} to understand and implement, though there
253 are a few subtleties. In particular, it is \emph{symmetrical}. We have
254 implemented a virtual private network system based on this protocol.
255\item It is \emph{efficient}, requiring four scalar multiplications by each
256 participant. The communication can be reduced to three messages by
257 breaking the protocol's symmetry.
258\item It is provably \emph{secure} (again, in the random oracle model),
259 assuming the intractability of the computational Diffie-Hellman problem,
260 and the security of a symmetric encryption scheme.
261\item It provides \emph{perfect forward secrecy}. That is, even if a user's
262 long-term secrets are compromised, past sessions remain secure.
263\item It is \emph{deniable}. It is possible to construct simulated
264 transcripts of protocol executions between any number of parties without
265 knowing any of their private keys. The simulated transcripts are (almost)
266 indistinguishable from real protocol transcripts. Hence, a transcript
267 does not provide useful evidence that a given party was really involved in
268 a given protocol execution.
269\end{itemize}
270
32609df3 271\ifshort\else
dff0fad2 272\subsection{Asymptotic and concrete security results}
a6e375a6 273
dff0fad2
MW
274Most security definitions for identification (particularly zero-knowledge)
275and key-exchange in the literature are \emph{asymptotic}. That is, they
276consider a family of related protocols, indexed by a \emph{security
277parameter}~$k$; they require that any \emph{polynomially-bounded} adversary
278has only \emph{negligible} advantage. A polynomially-bounded adversary is
279one whose running time is bounded by some polynomial~$t(k)$. The security
280definition requires that, for any such polynomially-bounded adversary, and
281any polynomial $p(k)$, the adversary's advantage is less than $1/p(k)$ for
282all sufficiently large values of $k$.
283
284Such asymptotic notions are theoretically interesting, and have obvious
285connections to complexity theory. Unfortunately, such an asymptotic result
286tells us nothing at all about the security of a particular instance of a
287protocol, or what parameter sizes one needs to choose for a given level of
288security against a particular kind of adversary. Koblitz and Menezes
4331bf84
MW
289\cite{cryptoeprint:2006:229} (among other useful observations) give examples
290of protocols, proven to meet asymptotic notions of security, whose security
dff0fad2
MW
291proofs guarantee nothing at all for the kinds of parameters typically used in
292practice.
293
294Since, above all, we're interested in analysing a practical and implemented
295protocol, we follow here the `practice-oriented provable security' approach
296largely inspired by Bellare and Rogaway, and exemplified by
297\cite{Bellare:1994:SCB,Bellare:1995:XMN,Bellare:1995:OAE,Bellare:1996:ESD,%
4331bf84 298Bellare:1996:KHF,Bellare:1997:CST}; see also \cite{Bellare:1999:POP}.
dff0fad2
MW
299Rather than attempting to say, formally, whether or not a protocol is
300`secure', we associate with each protocol an `insecurity function' which
301gives an upper bound on the advantage of any adversary attacking the protocol
302within given resource bounds.
32609df3 303\fi
a6e375a6 304
dff0fad2 305\subsection{Formal models for key-exchange}
a6e375a6 306
32609df3
MW
307\ifshort
308
309The first model for studying the \emph{computational} security of
310key-exchange protocols (rather than using protocol-analysis logics like that
4331bf84 311of \cite{Burrows:1989:LAa}) was given by Bellare and Rogaway
32609df3
MW
312\cite{Bellare:1994:EAK}; the model has since been enhanced, both by the
313original authors and others, in \cite{Bellare:1995:PSS,%
314Blake-Wilson:1997:KAP,Blake-Wilson:1998:EAA}. The model defines security
315in terms of a game: key-exchange protocols are secure if an adversary can't
316distinguish the key agreed by a chosen `challenge session' from a key chosen
317independently at random. Other models for key-exchange have been proposed in
4331bf84
MW
318\cite{Bellare:1998:MAD} and \cite{cryptoeprint:1999:012}; these use a
319different notion of security, involving implementation of an ideal
320functionality.
32609df3
MW
321
322\else
323
dff0fad2
MW
324Many proposed key-exchange protocols have turned out to have subtle security
325flaws. The idea of using formal methods to analyse key-exchange protocols
4331bf84 326begins with the logic of Burrows, Abadi and Needham \cite{Burrows:1989:LAa}.
dff0fad2
MW
327Their approach requires a `formalising' step, in which one expresses in the
328logic the contents of the message flows, and the \emph{beliefs} of the
329participants.
330
331Bellare and Rogaway \cite{Bellare:1994:EAK} describe a model for studying the
332computational security of authentication and key-exchange protocols in a
333concurrent setting, i.e., where multiple parties are running several
334instances of a protocol simultaneously. They define a notion of security in
335this setting, and show that several simple protocols achieve this notion.
336Their original paper dealt with pairs of parties using symmetric
337cryptography; they extended their definitions in \cite{Bellare:1995:PSS} to
338study three-party protocols involving a trusted key-distribution centre.
339
340Blake-Wilson, Johnson and Menezes \cite{Blake-Wilson:1997:KAP} applied the
341model of \cite{Bellare:1994:EAK} to key-exchange protocols using asymmetric
342cryptography, and Blake-Wilson and Menezes \cite{Blake-Wilson:1998:EAA}
343applied it to protocols based on the Diffie-Hellman protocol.
344
345The security notion of \cite{Bellare:1994:EAK} is based on a \emph{game}, in
346which an adversary nominates a \emph{challenge session}, and is given either
347the key agreed by the participants of the challenge session, or a random
348value independently sampled from an appropriate distribution. The
349adversary's advantage -- and hence the insecurity of the protocol -- is
350measured by its success probability in guessing whether the value it was
351given is really the challenge key. This challenge-session notion was also
352used by the subsequent papers described above.
353
354Bellare, Canetti and Krawczyk \cite{Bellare:1998:MAD} described a pair of
355models which they called the \textsc{am} (for `authenticated links model')
356and \textsc{um} (`unauthenticated links model'). They propose a modular
357approach to the design of key-exchange protocols, whereby one first designs a
358protocol and proves its security in the \textsc{am}, and then applies an
359authenticating `compiler' to the protocol which they prove yields a protocol
360secure in the realistic \textsc{um}. Their security notion is new. They
361define an `ideal model', in which an adversary is limited to assigning
362sessions fresh, random and unknown keys, or matching up one session with
363another, so that both have the same key. They define a protocol to be secure
364if, for any adversary~$A$ in the \textsc{am} or \textsc{um}, there is an
365ideal adversary~$I$, such that the outputs of $A$ and $I$ are computationally
366indistinguishable.
367
4331bf84
MW
368In \cite{cryptoeprint:1999:012}, Shoup presents a new model for key-exchange,
369also based on the idea of simulation. He analyses the previous models,
dff0fad2
MW
370particularly \cite{Bellare:1994:EAK} and \cite{Bellare:1998:MAD}, and
371highlights some of their inadequacies.
372
32609df3
MW
373\fi
374
4331bf84
MW
375Canetti and Krawczyk \cite{cryptoeprint:2001:040,Canetti:2001:AKE} describe a
376new notion of security in the model of \cite{Bellare:1998:MAD}, based on the
dff0fad2
MW
377challenge-session notion of \cite{Bellare:1994:EAK}. The security notion,
378called `SK-security', seems weaker in various ways than those of earlier
4331bf84
MW
379works such as \cite{Bellare:1994:EAK} or \cite{cryptoeprint:1999:012}.
380However, the authors show that their notion suffices for constructing `secure
381channel' protocols, which they also define.
dff0fad2 382
32609df3 383\ifshort\else
d8139f2c 384In \cite{Canetti:2001:UCS}, Canetti describes the `universal composition'
dff0fad2
MW
385framework. Here, security notions are simulation-based: one defines security
386notions by presenting an `ideal functionality'. A protocol securely
387implements a particular functionality if, for any adversary interacting with
388parties who use the protocol, there is an adversary which interacts with
389parties using the ideal functionality such that no `environment' can
390distinguish the two. The environment is allowed to interact freely with the
391adversary throughout, differentiating this approach from that of
4331bf84
MW
392\cite{Bellare:1998:MAD} and \cite{cryptoeprint:1999:012}, where the
393distinguisher was given only transcripts of the adversary's interaction with
394the parties. With security defined in this way, it's possible to prove a
395`universal composition theorem': one can construct a protocol, based upon
396various ideal functionalities, and then `plug in' secure implementations of
397the ideal functionalities and appeal to the theorem to prove the security of
398the entire protocol. The UC framework gives rise to very strong notions of
399security, due to the interactive nature of the `environment' distinguisher.
32609df3 400\fi
dff0fad2
MW
401
402Canetti and Krawczyk \cite{Canetti:2002:UCN} show that the SK-security notion
403of \cite{Canetti:2001:AKE} is \emph{equivalent} to a `relaxed' notion of
32609df3
MW
404key-exchange security in the UC framework\ifshort\space of
405\cite{Canetti:2001:UCS}\fi, and suffices for the construction of UC secure
406channels.
dff0fad2
MW
407
408The result of \cite{Canetti:2002:UCN} gives us confidence that SK-security is
409the `right' notion of security for key-exchange protocols. Accordingly,
410SK-security is the standard against which we analyse our key-exchange
411protocol.
a6e375a6
MW
412
413
414\subsection{Outline of the paper}
415
416The remaining sections of this paper are as follows.
417\begin{itemize}
418\item Section \ref{sec:prelim} provides the essential groundwork for the rest
419 of the paper. It introduces important notation, and describes security
420 notions and intractability assumptions.
421\item Section \ref{sec:zk-ident} describes our zero-knowledge identification
422 protocol and proves its security.
423\item Section \ref{sec:kx} describes the simple version of our key-exchange
424 protocol, and proves its security and deniability. It also describes some
425 minor modifications which bring practical benefits without damaging
32609df3 426 security.
dff0fad2 427\item Finally, section \ref{sec:conc} presents our conclusions.
a6e375a6
MW
428\end{itemize}
429
32609df3
MW
430\ifshort
431The full version of this paper describes how to make our protocols
432identity-based by using bilinear pairings, with the techniques introduced in
433\cite{Boneh:2003:IBE}. It also contains proofs of the various theorems
434stated here.
435\fi
436
a6e375a6
MW
437%%%--------------------------------------------------------------------------
438
439\section{Preliminaries}
440\label{sec:prelim}
441
32609df3
MW
442\ifshort
443\subsection{Basics}
444\let\prelimsec\subsubsection
445\else
446\let\prelimsec\subsection
447\fi
448
449\prelimsec{Miscellaneous notation}
a6e375a6
MW
450
451We write $\Func{D}{R}$ for the set of all functions with domain $D$ and range
452$R$.
a6e375a6 453
32609df3 454\prelimsec{Groups}
a6e375a6
MW
455
456Let $(G, +)$ be a cyclic group\footnote{
457 We find that additive group notation is easier to read. In particular, in
458 multiplicative groups, one ends up with many interesting things tucked away
459 in little superscripts.}%
460of prime order $q$, and generated by an element $P$. We shall write the
461identity of $G$ as $0_G$, or simply as $0$ when no ambiguity is likely to
462arise. Thus, we have $\langle P \rangle = G$ and $q P = 0$. Any $X \in G$
32609df3 463can be written as $X = x P$ for some $x \in \{0, 1, \ldots, q - 1\}$.
a6e375a6 464
32609df3
MW
465We consider a cyclic group of order $n$ as a $\Z/n\Z$-module, and in
466particular our group $G$ can be seen as a vector space over $\gf{q}$. This
467makes the notation slightly more convenient.
a6e375a6 468
32609df3 469\prelimsec{Bit strings and encodings}
a6e375a6
MW
470\label{sec:bitenc}
471
472Let $\Bin = \{0, 1\}$ be the set of binary digits. Then $\Bin^n$ is the set
473of $n$-bit strings, and $\Bin^*$ the set of all (finite) bit strings. If $x
474\in \Bin^n$ is a bit string, we write its length as $|x| = n$. For a bit
475string $x \in \Bin^n$, and for $0 \le i < n$, we write $x[i]$ for the $i$th
476bit of $x$. The empty string is denoted $\emptystring$.
477
478Let $x$ and $y$ be two bit strings. If $|x| = |y| = n$, we write $x \xor y$
32609df3
MW
479to mean the bitwise exclusive-or of $x$ and $y$\ifshort\else: if $z = x \xor
480y$ then $|z| = n$, and $z[i] = (x[i] + y[i]) \bmod 2$ for $0 \le i < n$\fi.
481We write $x \cat y$ to mean the concatenation of $x$ and $y$\ifshort\else: if
482$z = x \cat y$ then $|z| = |x| + |y|$ and $z[i] = x[i]$ if $0 \le i < |x|$
483and $z[i] = y[i - |x|]$ if $|x| \le i < |x| + |y|$\fi.
a6e375a6
MW
484
485Finally, we let $\bot$ be a value distinct from any bit string.
486
487We shall want to encode group elements $X \in G$ and indices $x \in I =
32609df3
MW
488\gf{q}$ as bit strings.
489\ifshort
490To this end, we shall assume the existence of efficient, unambiguous
491encodings of group elements as $\ell_G$-bit strings, and indices as
492$\ell_I$-bit strings. To reduce clutter, we shall leave encoding and
493decoding as implicit operations.
494\else
495To this end, we shall assume the existence of
a6e375a6
MW
496integers $\ell_G, \ell_I > 0$ and functions
497\[
498 e_S\colon S \to \Bin^{\ell_S}
499 \quad \textrm{and} \quad
500 d_S\colon \Bin^{\ell_S} \to S \cup \{ \bot \}
501 \qquad
32609df3 502 \textrm{for } S \in \{ G, \F \}.
a6e375a6
MW
503\]
504with the following properties.
505\begin{itemize}
506\item The functions are \emph{unique} and \emph{unambiguous}, i.e., for any
507 $t \in \Bin^{\ell_S}$, we have
508 \[ d_S(t) = \begin{cases}
509 s & if there is some $s \in S$ such that $t = e_S(s)$, or \\
510 \bot & if no such $s$ exists.
511 \end{cases}
512 \]
513\item The functions should be \emph{efficient} to compute. Indeed, we shall
514 be assuming that the time taken for encoding and decoding is essentially
515 trivial.
516\end{itemize}
517Note that, as we have defined them, all encodings of group elements are the
518same length, and similarly for encodings of indices. This is necessary for
519the security of our protocols.
520
521We shall frequently abuse notation by omitting the encoding and decoding
522functions where it is obvious that they are required.
32609df3 523\fi
a6e375a6 524
32609df3
MW
525\ifshort\else
526\prelimsec{Games, adversaries, and oracles}
a6e375a6
MW
527\label{sec:games}
528
529Many of the security definitions and results given here make use of
530\emph{games}, played with an \emph{adversary}. An adversary is a
531probabilistic algorithm. In some games, the adversary is additionally
532equipped with \emph{oracles}, which perform computations with values chosen
533by the adversary and secrets chosen by the game but not revealed to the
534adversary. We impose limits on the adversary's resource usage: in
535particular, the total time it takes, and the number of queries it makes to
536its various oracles. Throughout, we include the size of the adversary's
537program as part of its `time', in order to model adversaries which contain
538large precomputed tables.
539
540The games provide models of someone trying to attack a construction or
541protocol. For security, we will either define a notion of `winning' the
542game, and require that all adversaries have only a very small probability of
543winning, or we consider two different games and require that no adversary can
32609df3 544distinguish between the two except with very small probability.
a6e375a6
MW
545
546Our proofs make frequent use of sequences of games; see
4331bf84
MW
547\cite{cryptoeprint:2004:332,cryptoeprint:2004:331}. The presentation owes
548much to Shoup \cite{cryptoeprint:2004:332}. We begin with a game $\G0$ based
549directly on a relevant security definition, and construct a sequence of games
550$\G1$, $\G2$, \dots, each slightly different from the last. We define all of
551the games in a sequence over the same underlying probability space -- the
552random coins tossed by the algorithms involved -- though different games may
553have slightly differently-defined events and random variables. Our goal in
554doing this is to bound the probability of the adversary winning the initial
555game $\G0$ by simultaneously (a) relating the probability of this event to
556that of corresponding events in subsequent games, and (b) simplifying the
557game until the probability of the corresponding event can be computed
558directly.
a6e375a6
MW
559
560The following simple lemma from \cite{Shoup:2001:OR} will be frequently
561useful.
562\begin{lemma}[Difference Lemma]
563 \label{lem:shoup}
32609df3
MW
564 Let $S$, $T$, $F$ be events. Suppose $\Pr[S \mid \bar F] =
565 \Pr[T \mid \bar F]$. Then $|\Pr[S] - \Pr[T]| \le \Pr[F]$.
a6e375a6
MW
566\end{lemma}
567\begin{proof}
568 A simple calculation:
569 \begin{eqnarray*}[rl]
570 |\Pr[S] - \Pr[T]|
32609df3
MW
571 & = |(\Pr[S \mid F]\Pr[F] + \Pr[S \mid \bar F]\Pr[\bar F]) -
572 (\Pr[T \mid F]\Pr[F] + \Pr[T \mid \bar F]\Pr[\bar F])| \\
573 & = \Pr[F] \cdot |\Pr[S \mid F] - \Pr[T \mid F]| \\
a6e375a6
MW
574 & \le \Pr[F]
575 \end{eqnarray*}
576 and we're done!
577\end{proof}
32609df3 578\fi
a6e375a6
MW
579
580
32609df3 581\prelimsec{The random oracle model}
a6e375a6
MW
582\label{sec:ro}
583
32609df3 584\ifshort\else
a6e375a6
MW
585In particular, most of our results will make use of the \emph{random oracle}
586model \cite{Bellare:1993:ROP}, in which all the participants, including the
587adversary, have access to a number of `random oracles'. A random oracle with
588domain $D$ and range $R$ is an oracle which computes a function chosen
589uniformly at random from the set of all such functions. (In the original
590paper \cite{Bellare:1993:ROP}, random oracles are considered having domain
591$\Bin^*$ and range $\Bin^\omega$; we use finite random oracles here, because
592they're easier to work with.)
593
594Given a protocol proven secure in the random oracle model, we can instantiate
595each random oracle by a supposedly-secure hash function and be fairly
596confident that either our protocol will be similarly secure, or one of the
597hash functions we chose has some unfortunate property.
598
599Proofs in the random oracle must be interpreted carefully. For example,
600Canetti, Goldreich and Halevi \cite{Canetti:2004:ROM} show that there are
601schemes which can be proven secure in the random oracle model but provably
602have no secure instantiation in the standard model.
32609df3 603\fi
a6e375a6 604
32609df3
MW
605The random oracle model \ifshort\cite{Bellare:1993:ROP} \fi is useful for
606constructing reductions and simulators for two main reasons.
a6e375a6
MW
607\begin{enumerate}
608\item One can use the transcript of an adversary's queries to random oracles
609 in order to extract knowledge from it.
610\item One can `program' a random oracle so as to avoid being bound by prior
611 `commitments', or to guide an adversary towards solving a selected instance
612 of some problem.
613\end{enumerate}
614Our proofs only make use of the first feature. This becomes particularly
615important when we consider issues of zero-knowledge and deniability in a
616concurrent setting, because we want to be able to claim that we retain these
617features when the random oracle is instantiated using a cryptographic hash
618function, and hash functions definitely aren't `programmable' in this way!
619The former property seems rather more defensible -- one would indeed hope
620that the only sensible way of working out (anything about) the hash of a
621particular string is to actually compute the hash function, and the random
622oracle model is, we hope, just giving us a `hook' into this process.
623
32609df3 624\ifshort\else
a6e375a6
MW
625(Our protocols can be modified to make use of bilinear pairings so as to
626provide identity-based identification and key-exchange, using the techniques
d8139f2c 627of \cite{Boneh:2003:IBE}. Proving the security of the modifications we
a6e375a6
MW
628discuss would involve `programming' random oracles, but this doesn't affect
629the zero-knowledge or deniability of the resulting protocols.)
32609df3 630\fi
a6e375a6
MW
631
632
32609df3
MW
633\ifshort\else
634\prelimsec{Notation for algorithms}
a6e375a6
MW
635
636We shall have occasion to describe algorithms by means of a pseudocode. Our
637choice of pseudocode is unlikely to be particularly controversial. We let $x
638\gets y$ denote the action of setting $x$ to the value $y$; similarly, $x
639\getsr Y$ denotes the action of sampling $x$ from the set $Y$ uniformly at
640random.
641
642The expression $a \gets A^{O(\cdot, x)}(y)$ means `assign to $a$ the value
643output by algorithm $A$ on input $y$, and with oracle access to the algorithm
644which, given input $z$, computes $O(z, x)$'.
645
646We make use of conditional (\IF-\ELSE) and looping (\FOR-\DO and \WHILE-\DO)
647constructions; in order to reduce the amount of space taken up, the bodies of
648such constructions are shown by indentation only.
649
650We don't declare the types of our variables explicitly, assuming that these
651will be obvious by inspection; also, we don't describe our variables' scopes
652explicitly, leaving the reader to determine these from context.
653
654Finally, the notation $\Pr[\textit{algorithm} : \textit{condition}]$ denotes
655the probability that \textit{condition} is true after running the given
656\textit{algorithm}.
32609df3 657\fi
a6e375a6 658
32609df3 659\prelimsec{Diffie-Hellman problems}
a6e375a6
MW
660\label{sec:dhp}
661
662The security of our protocols is related to the hardness of the
663computational, decisional, and gap Diffie-Hellman problems in the group $G$.
664We define these problems and what it means for them to be `hard' here.
665
666The \emph{computational} Diffie-Hellman problem (CDH) is as follows: given
667two group elements $X = x P$ and $Y = y P$, find $Z = x y P$.
32609df3 668\ifshort\else
a6e375a6
MW
669\begin{definition}[The computational Diffie-Hellman problem]
670 Let $(G, +)$ be a cyclic group generated by $P$. For any adversary $A$, we
671 say that $A$'s \emph{success probability} at solving the computational
672 Diffie-Hellman problem in $G$ is
673 \[ \Succ{cdh}{G}(A) =
32609df3 674 \Pr[ x \getsr I; y \getsr \Z/\#G\Z : A(x P, y P) = x y P ]
a6e375a6
MW
675 \]
676 where the probability is taken over the random choices of $x$ and $y$ and
677 any random decisions made by $A$. We say that the \emph{CDH insecurity
678 function} of $G$ is
679 \[ \InSec{cdh}(G; t) = \max_A \Succ{cdh}{G}(A) \]
680 where the maximum is taken over adversaries which complete in time $t$.
681\end{definition}
682Certainly, if one can compute discrete logarithms in the group $G$ (i.e.,
683given $x P$, find $x$), then one can solve the computational Diffie-Hellman
684problem. The converse is not clear, though. Shoup \cite{Shoup:1997:LBD}
685gives us some confidence in the difficulty of the problem by showing that a
686\emph{generic} adversary -- i.e., one which makes no use of the specific
32609df3 687structure of a group -- has success probability no greater than $q^2/\#G$.
a6e375a6
MW
688
689This isn't quite sufficient for our purposes. Our proofs will be able to
690come up with (possibly) a large number of guesses for the correct answer, and
691at most one of them will be correct. Unfortunately, working out which one is
692right seems, in general, to be difficult. This is the \emph{decision}
693Diffie-Hellman problem (DDH), which \cite{Shoup:1997:LBD} shows, in the
694generic group model, is about as hard as CDH. (See \cite{Boneh:1998:DDP} for
695a survey of the decision Diffie-Hellman problem.)
32609df3 696\par\fi
a6e375a6
MW
697Our reference problem will be a `multiple-guess computational Diffie-Hellman
698problem' (MCDH), which is captured by a game as follows. An adversary is
699given a pair of group elements $(x P, y P)$, and an oracle $V(\cdot)$ which
700accepts group elements as input. The adversary wins the game if it queries
701$V(x y P)$.
a6e375a6
MW
702
703\begin{figure}
704 \begin{program}
705 $\Game{mcdh}{G}(A)$: \+ \\
706 $w \gets 0$; \\
32609df3 707 $x \getsr \Z/\#G\Z$; $y \getsr \Z/\#G\Z$; \\
a6e375a6
MW
708 $A^{V(\cdot)}(x P, y P)$; \\
709 \RETURN $w$;
710 \next
711 Function $V(Z)$: \+ \\
712 \IF $Z = x y P$ \THEN \\ \ind
713 $w \gets 1$; \\
714 \RETURN $1$; \- \\
715 \RETURN $0$;
716 \end{program}
717
718 \caption{The multiple-guess computational Diffie-Hellman problem:
719 $\Game{mcdh}{G}(A)$}
720 \label{fig:mcdh}
721\end{figure}
722
32609df3
MW
723\begin{definition}[The multiple-guess computational Diffie-Hellman problem]
724 \label{def:mcdh}
725 Let $(G, +)$ be a cyclic group generated by $P$. For some adversary $A$,
726 we say that $A$'s \emph{success probability} at solving the multiple-guess
727 computational Diffie-Hellman problem in $G$ is
728 \[ \Succ{mcdh}{G}(A) = \Pr[\Game{mcdh}{G}(A) = 1] \]
729 where $\Game{mcdh}{G}(A)$ is shown in figure~\ref{fig:mcdh}. We say that
730 the \emph{MCDH insecurity function of $G$} is
731 \[ \InSec{mcdh}(G; t, q_V) = \max_A \Succ{mcdh}{G}(A) \]
732 where the maximum is taken over adversaries which complete in time $t$ and
733 make at most $q_V$ oracle queries.
734\end{definition}
735\ifshort
736We can (loosely) relate the difficulty of MCDH to the difficulty of
737the standard CDH problem, in which the adversary is allowed only a single
738guess.
739\else
a6e375a6
MW
740Note that our MCDH problem is not quite the `gap Diffie-Hellman problem'
741 (GDH). The gap problem measures the intractability of solving CDH even with
742the assistance of an oracle for solving (restricted) decision Diffie-Hellman
743problems in the group. Specifically, the adversary is given $(X, Y) = (x P,
744y P)$ and tries to find $Z = x y P$, as for CDH, but now it has access to an
745oracle $D(R, S)$ which answers $1$ if $S = x R$ and $0$ otherwise.
746
747Clearly MCDH is at least as hard as GDH, since our simple verification oracle
748$V(Z)$ can be simulated with the gap problem's DDH oracle, as $D(Y, Z)$.
749However, we can (loosely) relate the difficulty of MCDH to the difficulty of
750CDH.
32609df3 751\fi
a6e375a6
MW
752\begin{proposition}[Comparison of MCDH and CDH security]
753 For any cyclic group $(G, +)$,
32609df3
MW
754 \[ \InSec{mcdh}(G; t, q_V) \le
755 \ifshort q_V\,\InSec{mcdh}(G; t + O(q_V), 1) \else
756 q_V\,\InSec{cdh}(G; t + O(q_V)) \fi.
757 \]
a6e375a6 758\end{proposition}
32609df3
MW
759\begin{longproof}{The proof of this proposition may be found in the full
760 version of this paper.}
a6e375a6
MW
761 Let $A$ be an adversary attacking the multiple-guess computational
762 Diffie-Hellman problem in $G$, and suppose that it runs in time $t$ and
763 issues $q_V$ queries to its verification oracle.
764
765 We use a sequence of games. Game $\G0$ is the original MCDH attack game.
766 In each game $\G{i}$, we let the event $S_i$ be the probability that the
767 adversary wins the game.
768
769 Game $\G1$ is the same as $\G0$, except that we change the behaviour of the
770 verification oracle. Specifically, we make the oracle always return $0$.
771 We claim that this doesn't affect the adversary's probability of winning,
772 i.e., $\Pr[S_1] = \Pr[S_0]$. To see this, note that if none of the
773 adversary's $V(\cdot)$ queries was correct, then there is no change in the
774 game; conversely, if any query was correct, then the adversary will have
775 won regardless of its subsequent behaviour (which may differ arbitrarily
776 between the two games).
777
778 We are now ready to construct from $A$ an adversary $B$ attacking the
779 standard computational Diffie-Hellman problem.
780 \begin{program}
781 Adversary $B(X, Y)$: \+ \\
782 $n \gets 0$; \\
783 \FOR $i \in \Nupto{q_V}$ \DO $Q_i \gets 0$; \\
784 $A^{D(\cdot)}(X, Y)$; \\
785 $r \getsr \Nupto{n}$; \\
786 \RETURN $Q_r$;
787 \next
788 Function $D(Z')$: \+ \\
789 $Q_n \gets Z'$; \\
790 $n \gets n + 1$; \\
791 \RETURN $0$;
792 \end{program}
793 Observe that $B$ provides $A$ with an accurate simulation of game $\G1$.
32609df3
MW
794 Moreover, at the end of the algorithm, we have $0 < n \le q_V$, and the
795 values $Q_0$, $Q_1$, \dots, $Q_{n-1}$ are the values of $A$'s oracle
796 queries. Hence, with probability $\Pr[S_1]$, at least one of the $Q_i$
797 is the correct answer to the CDH problem. Let $\epsilon = \Pr[S_1] =
798 \Pr[S_0]$; we claim that $B$'s probability of success is at least
799 $\epsilon/q_V$. The proposition follows directly from this claim and that,
800 because $A$ was chosen arbitrarily, we can maximize and count resources.
a6e375a6
MW
801
802 We now prove the above claim. For $0 \le i < q_V$, let $W_i$ be the
803 event that $Q_i = x y P$, i.e., that $Q_i$ is the correct response. A
804 simple union bound shows that
805 \[ \sum_{0\le i<j} \Pr[W_i \mid n = j] \ge \epsilon. \]
806 We now perform a calculation:
807 \begin{eqnarray*}[rl]
808 \Succ{cdh}{G}(B)
809 & = \sum_{0\le i<q_V} \Pr[W_i \land r = i] \\
810 & = \sum_{0<j\le q_V} \Pr[n = j]
811 \biggl( \sum_{0\le i<j} \Pr[W_i \land r = i \mid n = j] \biggr) \\
812 & = \sum_{0<j\le q_V} \Pr[n = j]
813 \biggl( \frac{1}{j} \sum_{0\le i<j} \Pr[W_i \mid n = j] \biggr) \\
814 &\ge \sum_{0<j\le q_V} \Pr[n = j] \frac{\epsilon}{j} \\
815 &\ge \frac{\epsilon}{q_V} \sum_{0<j\le q_V} \Pr[n = j] \\
816 & = \frac{\epsilon}{q_V}.
817 \end{eqnarray*}
818 which completes the proof.
32609df3 819\end{longproof}
a6e375a6 820
32609df3
MW
821\ifshort\else
822\prelimsec{Example groups and encodings}
a6e375a6
MW
823
824For nonnegative integers $0 \le n < 2^\ell$, there is a natural binary
825encoding $N_\ell\colon \Nupto{2^\ell} \to \Bin^\ell$ which we can define
826recursively as follows.
827\[ N_0(0) = \emptystring \qquad
828 N_\ell(n) = \begin{cases}
829 N_{\ell-1}(n) \cat 0 & if $0 \le n < 2^{\ell-1}$ \\
830 N_{\ell-1}(n - 2^{\ell-1}) \cat 1 & if $2^{\ell-1} \le n < 2^\ell$.
831 \end{cases}
832\]
833Given an encoding $a = N_\ell(n)$ we can recover $n$ as
834\[ n = \sum_{0\le i<\ell} a[i] 2^i. \]
835Hence, given some limit $L \le 2^\ell$, we can encode elements of $\Nupto{L}$
836using the functions $(e, d)$:
837\[ e(L, \ell, n) = N_\ell(n) \qquad
838 d(L, \ell, a) = \begin{cases}
839 N_\ell^{-1}(a) & if $N_\ell^{-1}(a) < L$ \\
840 \bot & otherwise
841 \end{cases}
842\]
843The reader can verify that the functions $e(L, \ell, \cdot)$ and $d(L, \ell,
844\cdot)$ satisfy the requirements of section~\ref{sec:bitenc}.
845
32609df3
MW
846Given some $q$ with $q < 2^{\ell_I}$, then, we can define an encoding
847$(e_\F, d_\F)$ by $e_\F(n) = e(q, \ell_I, n)$ and $d_\F(a) = d(q, \ell_I,
848a)$.
849
850Let $p$ and $q$ be primes, with $q \mid (p - 1)$. Then there is an order-$q$
851subgroup of $\F_p^*$. In practice, an order-$q$ element can be found easily
852 by taking elements $h \in \F_p^*$ at random and computing $g = h^{(p-1)/q}$
853until $g \ne 1$; then $G = \langle g \rangle$ is a group of $q$ elements.
854Assuming that $p$ and $q$ are sufficiently large, the Diffie-Hellman problems
855seem to be difficult in $G$. Some texts recommend additional restrictions on
856$p$, in particular that $(p - 1)/2q$ be either prime or the product of large
857primes. Primes of this form protect against small-subgroup attacks; but our
858protocols are naturally immune to these attacks, so such precautions are
859unnecessary here. Elements of $G$ can be encoded readily, since each element
860$n + p\Z$ of $\F_p = \Z/p\Z$ has an obvious `representative' integer $n$ such
861that $0 \le n < p$, and given $2^{\ell_G} > p$, we can encode $n$ as $e(p,
862\ell_G, n)$, as above.
a6e375a6
MW
863
864Alternatively, let $\F = \gf{p^f}$ be a finite field, and $E$ be an elliptic
865curve defined over $\F$ such that the group $E(\F)$ of $\F$-rational points
866of $E$ has a prime-order cyclic subgroup $G$. Elements of $G$ can be
867represented as pairs of elements of $\F$. If $f = 1$, i.e., $\F = \Z/p\Z$
868then field elements can be encoded as above. If $p = 2$, we can represent
869 the field as $\F_2[x]/(p(x))$ for some irreducible polynomial $p(x) \in \F_2[x]$
870of degree $f$. An element $r \in \F$ can then be represented by a polynomial
871$r(x)$ with degree less than $f$, and coefficients $c_i \in \{0, 1\}$, i.e.,
872\[ r(x) = \sum_{0\le i<f} c_i x^i \]
873and hence we can uniquely encode $r$ as an $f$-bit string $a$ such that $a[i]
874= c_i$.
32609df3 875\fi
a6e375a6
MW
876
877
32609df3 878\prelimsec{Symmetric encryption}
a6e375a6
MW
879\label{sec:sym-enc}
880
881Our key-exchange protocol requires a symmetric encryption scheme. Our
882definition is fairly standard, except that, rather than specifying a
883key-generation algorithm, we assume that key generation simply involves
884selecting a string of a given length uniformly at random.
885\begin{definition}[Symmetric encryption schemes]
886 A \emph{symmetric encryption scheme} $\E = (\kappa, E, D)$ consists of:
887 \begin{itemize}
888 \item an integer $\kappa \ge 0$,
889 \item a randomized \emph{encryption algorithm} $E$ which, on input $K \in
890 \Bin^\kappa$ and $p \in \Bin^*$ outputs some $c \in \Bin^*$, written $c
891 \gets E_K(p)$;
892 \item a \emph{decryption algorithm} $D$ which, on input $K \in \Bin^\kappa$
893 and $c \in \Bin^*$ outputs some $p' \in \Bin^* \cup \{\bot\}$, written
894 $p' \gets D_K(c)$.
895 \end{itemize}
896 Furthermore, a symmetric encryption scheme must be \emph{sound}: that is,
897 if $c \gets E_K(p)$ for some $K \in \Bin^\kappa$ and $p \in \Bin^*$, and
898 $p' \gets D_K(c)$ then $p = p'$.
899\end{definition}
900Our security notion for symmetric encryption is the standard notion of
901left-or-right indistinguishability of ciphertexts under chosen-ciphertext
902attack.
32609df3 903\begin{definition}[IND-CCA]
dff0fad2 904 \label{def:ind-cca}
a6e375a6
MW
905 Let $\E = (\kappa, E, D)$ be a symmetric encryption scheme, and $A$ be an
906 adversary. Let $\id{lr}_b(x_0, x_1) = x_b$ for $b \in \{0, 1\}$. Let
907 \[ P_b =
dff0fad2
MW
908 \Pr[K \getsr \Bin^\kappa;
909 b' \gets A^{E_K(\id{lr}_b(\cdot, \cdot)), D_K(\cdot)}() :
910 b' = 1]
a6e375a6
MW
911 \]
912 An adversary is \emph{valid} if
913 \begin{itemize}
914 \item for any query to its encryption oracle $E_K(\id{lr}_b(x_0, x_1))$ we
915 have $|x_0| = |x_1|$, and
916 \item no query to the decryption oracle $D_K(\cdot)$ is equal to any reply
917 from an encryption query.
918 \end{itemize}
919 If $A$ is valid, then we define its \emph{advantage} in attacking the
920 security of $\E$ as follows
921 \[ \Adv{ind-cca}{\E}(A) = P_1 - P_0. \]
922 Further, we define the \emph{IND-CCA insecurity function of $\E$} to be
923 \[ \InSec{ind-cca}(\E; t, q_E, q_D) = \max_A \Adv{ind-cca}{\E}(A) \]
924 where the maximum is taken over all valid adversaries $A$ which run in time
925 $t$, and issue at most $q_E$ encryption and $q_D$ decryption queries.
926\end{definition}
927
928
929\subsection{Simulations}
930\label{sec:sim}
931
932In section~\ref{sec:zk-ident}, we shall prove that our identification
933protocol is zero-knowledge; in section~\ref{sec:denial}, we show that our
934key-exchange protocol is deniable. In both of these proofs, we shall need to
32609df3
MW
935demonstrate \emph{simulatability}.
936
937\ifshort
938
939We consider an adversary~$A$ interacting with a `world'~$W$; we model both as
940probabilistic algorithms. Both $A$ and~$W$ are given a common input~$c$; the
941world is additionally given a private input~$w$; these are chosen by a
942randomized initialization function $I$. The adversary is additionally given
943an auxiliary input~$u$ computed from $w$ by a randomized algorithm~$U$. All
944these algorithms -- the adversary and the world, but also the initialization
945and auxiliary-input algorithms $I$ and~$U$ -- have access to a number of
946random oracles $\mathcal{H} = (H_0, H_1, \ldots, H_{n-1})$. The adversary
947eventually decides to stop interacting, and produces an output~$a$.
948
949A \emph{simulator} for $A$'s interaction with $W$ is an algorithm $S^A$ which
950attempts to produce a similar output distribution, but without interacting
951with $W$. The simulator is given the same inputs $(c, u)$ as we gave
952to~$A$, and $S$ is also allowed to query the random oracles~$\mathcal{H}$.
953
954To measure the effectiveness of a simulator, we consider a distinguisher~$D$
955which is given $(c, u, a)$, and access to $\mathcal{H}$, and returns a bit
956$b$ representing its verdict as to whether the output $a$ was produced by the
957adversary or the simulator.
958
959\else
a6e375a6
MW
960
961\subsubsection{General framework}
962Consider a game in which an adversary~$A$ interacts with some `world'~$W$,
963which we shall represent as a probabilistic algorithm. The world may in fact
964represent a number of honest parties communicating in a concurrent fashion,
965but we can consider them as a single algorithm for our present purposes.
966
967Initially the world and the adversary are both given the same \emph{common
968input}~$c$; in addition, the world is given a \emph{private input}~$w$.
969Both $c$ and~$w$ are computed by an \emph{initialization function}~$I$, which
970is considered to be part of the definition of the game. Finally, the
971adversary decides somehow that it has finished interacting, and outputs a
972value~$a$. All this we notate as
973\[ (w, c) \gets I(); a \gets A^{W(w, c)}(c). \]
974This game is \emph{simulatable} if there is an algorithm~$S$ -- the
975\emph{simulator} -- which can compute the same things as~$A$, but all by
976itself without interacting with the world. That is, we run the simulator on
977the common input~$c$, allowing it to interact in some way with the
978adversary~$A$, and finally giving us an output~$s$.
979\[ (w, c) \gets I(); s \gets S^A(c). \]
980We shall say that the simulator is \emph{effective} if it's difficult to tell
981whether a given string was output by the adversary after interacting with the
982world, or by the simulator running by itself. That is, for any algorithm~$D$
983-- a \emph{distinguisher} -- running in some bounded amount of time, its
984advantage
985\begin{spliteqn*}
986 \Pr[(w, c) \gets I(); a \gets A^{W(w, c)}(c);
987 b \gets D(c, a) : b = 1] - {} \\
988 \Pr[(w, c) \gets I(); s \gets S^A(c); b \gets D(c, s) : b = 1]
989\end{spliteqn*}
990is small. (Note that we gave the distinguisher the common input as well as
991the output of the adversary or the simulator.)
992
993It's usual to study \emph{transcripts} of interactions in these kinds of
994settings. We are considering arbitrary adversarial outputs here, so this
995certainly includes adversaries which output a transcript of their
996interactions. Indeed, for any adversary~$A$, we could construct an
997adversary~$A_T$ which performs the same computation, and outputs the same
998result, but also includes a complete transcript of $A$'s interaction with the
999world. Therefore we're just providing additional generality.
1000
1001\subsubsection{Random oracles}
1002We shall be considering interactions in which all the parties have access to
1003several random oracles. We could simply say that the random oracles are part
1004of the world~$W$. In the setting described above, only the adversary
1005actually interacts with the world (and therefore would be able to query
1006random oracles). The simulator would be forced to `make up' its own random
1007oracle, and the distinguisher would have to study the distributions of the
1008random-oracle queries and their responses to make up its mind about which it
1009was given.
1010
1011However, this would be a poor model for the real world, since once we
1012instantiate the random oracle with a hash function, we know that everyone
1013 would actually be able to compute the hash function for themselves. Thus
1014a distinguisher in the real world would be able to tell the difference
1015immediately between a real interaction and the simulated transcript, since
1016the `random oracle' queries recorded in the latter would be wrong!
1017
1018Therefore we decide not to include the random oracles as part of the world,
1019but instead allow all the participants -- adversary, simulator and
1020distinguisher -- access to them. If we denote by~$\mathcal{H} = (H_0, H_1,
1021\ldots, H_{n-1})$ the collection of random oracles under consideration, the
1022expression for the distinguisher's advantage becomes
1023\begin{spliteqn*}
1024 \Pr[(w, c) \gets I(); a \gets A^{W(w, c), \mathcal{H}}(c);
1025 b \gets D^{\mathcal{H}}(c, a) : b = 1] - {} \\
1026 \Pr[(w, c) \gets I(); s \gets S^{A, \mathcal{H}}(c);
1027 b \gets D^{\mathcal{H}}(c, s) : b = 1].
1028\end{spliteqn*}
1029
1030\subsubsection{Auxiliary inputs}
1031If an adversary's output can be effectively simulated, then we can
1032confidently state that the adversary `learnt' very little of substance from
1033its interaction, and certainly very little it can \emph{prove} to anyone
1034else. However, as we have described the setting so far, we fix an adversary
1035before we choose inputs to the world, so our model says little about what an
1036adversary which has already acquired some knowledge might learn beyond that.
1037For example, an adversary might overhear some other conversation between
1038honest parties and be able to use this to its advantage.
1039
1040To this end, we give the adversary an \emph{auxiliary input}~$u$, computed by
1041an algorithm~$U$. We give $U$ both $c$ and $w$, in order to allow the
1042adversary to gain some (possibly partial) knowledge of the secrets of the
1043other parties. We also allow $U$ access to the random oracles~$\mathcal{H}$,
1044because clearly in the `real world' it would be ridiculous to forbid such an
1045algorithm from computing a publicly-known hash function.
1046
1047The simulator and distinguisher are also given the auxiliary input. The
1048simulator is meant to represent the adversary's ability to compute things on
1049its own, without interacting with the world, and since the adversary is given
1050the auxiliary input, the simulator must be too. The distinguisher must be
32609df3
MW
1051given the auxiliary input because otherwise the simulator could just `make
1052up' plausible-looking inputs.
1053
1054\fi
1055
1056\ifshort
1057Because we're interested in a concrete, quantitative analysis, we must
1058constrain the resource usage of the various algorithms described above.
1059Specifically, we shall be interested in
1060\else
a6e375a6
MW
1061
1062\subsubsection{Resource limits}
1063We shall not allow our algorithms to perform completely arbitrary
1064computations and interactions. Instead, we impose limits on the amount of
1065time they are allowed to take, the number of random-oracle queries they make,
1066and so on. Specifically, we are interested in
32609df3 1067\fi
a6e375a6
MW
1068\begin{itemize}
1069\item the time $t_A$ taken by the adversary and $t_D$ taken by the
1070 distinguisher,
1071\item the number of oracle queries $\mathcal{Q}_A = (q_{A,0}, q_{A,1},
1072 \ldots, q_{A,n-1})$ made by the adversary, and $\mathcal{Q}_D$ made by the
1073 distinguisher,
1074\item a number of resource bounds $\mathcal{R}$ on the adversary's
1075 interaction with the world (e.g., number of messages of various kinds sent
1076 and received), and
1077\item a number of bounds $\mathcal{U}$ on the contents of the adversary's
1078 auxiliary input~$u$.
1079\end{itemize}
1080Sometimes we shall not be interested in proving simulatability of adversaries
1081with auxiliary inputs. We write $\mathcal{U} = 0$ to indicate that auxiliary
32609df3
MW
1082input is not allowed.
1083
1084\ifshort\else
a6e375a6
MW
1085
1086\subsubsection{World syntax}
1087It will be worth our while being more precise about what a `world' actually
1088is, syntactically. We define a world to be a single, randomized algorithm
1089taking inputs $(\iota, \sigma, \tau, \mu) \in (\Bin^*)^4$; the algorithm's
1090output is a pair $(\sigma', \rho) \in (\Bin^*)^2$. We show how the
1091adversary's interaction is mapped on to this world algorithm in
1092figure~\ref{fig:sim-world}.
1093\begin{itemize}
1094\item The `input' $\iota$ is the result of the initialization function~$I$.
1095 That is, it is the pair $(w, c)$ of the world's private input and the
1096 common input.
1097\item The `state' $\sigma$ is empty on the world's first invocation; on each
1098 subsequent call, the value of the world's output $\sigma'$ is passed back.
1099 In this way, the world can maintain state.
32609df3 1100\item The `type' $\tau$ is a token giving the type of invocation this is.
a6e375a6
MW
1101\item The `message' $\mu$ is any other information passed in; its form will
1102 typically depend on the type~$\tau$ of the invocation.
1103\item The `new state' $\sigma'$ is the value of $\sigma$ to pass to the next
1104 invocation of the world.
1105\item The `reply' $\rho$ is the actual output of the invocation.
1106\end{itemize}
1107There are two special invocation types. The adversary is \emph{forbidden}
1108from making special invocations.
1109\begin{itemize}
1110\item The special invocation type $\cookie{init}$ is used to allow the world to
1111 prepare an initial state. The world is invoked as
1112 \[ W^{\mathcal{H}}(\iota, \emptystring, \cookie{init}, \emptystring) \]
1113 and should output an initial state $\sigma'$. The world's reply $\rho$ is
1114 ignored. (Recall that $\emptystring$ represents the empty string.)
1115\item The special invocation type $\cookie{random}$ is used to inform the
1116 world that the adversary has issued a random oracle query. The world is
1117 invoked as
1118 \[ W^{\mathcal{H}}(\iota, \sigma, \cookie{random}, (i, x, h)) \]
1119 to indicate that the adversary has queried its random oracle $H_i(\cdot)$
1120 on the input $x$, giving output~$h$. The world may output an updated state
1121 $\sigma'$; its reply $\rho$ is ignored.
1122\end{itemize}
1123The latter special query is a technical device used to allow the `fake-world'
1124simulators we define below to be aware of the adversary's random oracle
1125queries without being able to `program' the random oracle. Including it here
1126does little harm, and simplifies the overall exposition.
1127
1128\begin{figure}
1129 \begin{program}
1130 Interaction $A^{W(w, c), \mathcal{H}}(c, u)$: \+ \\
1131 $(\sigma, \rho) \gets
1132 W((w, c), \emptystring, \cookie{init}, \emptystring)$; \\
1133 $a \gets A^{\id{world}(\cdot, \cdot),
1134 \id{random}(\cdot, \cdot)}(c, u)$; \\
1135 \RETURN $a$;
1136 \newline
1137 Function $\id{world}(\tau, \mu)$: \+ \\
1138 \IF $\tau \in \{\cookie{init}, \cookie{random}\}$ \THEN
1139 \RETURN $\bot$; \\
1140 $(\sigma, \rho) \gets W((w, c), \sigma, \tau, \mu)$; \\
1141 \RETURN $\rho$; \-
1142 \next
1143 Function $\id{random}(i, x)$: \+ \\
1144 $h \gets H_i(x)$; \\
1145 $(\sigma, \rho) \gets
1146 W((w, c), \sigma, \cookie{random}, (i, x, h))$; \\
1147 \RETURN $h$;
1148 \end{program}
1149
1150 \caption{Interacting with a world: Interaction $A^{W, \mathcal{H}}$}
1151 \label{fig:sim-world}
1152\end{figure}
1153
1154\subsubsection{Definitions}
1155We are now ready to begin making definitions.
32609df3
MW
1156\fi
1157
a6e375a6
MW
1158\begin{definition}[Simulation security]
1159 \label{def:sim}
1160 Consider the game described above, with the initialization function~$I$,
1161 and the world~$W$: let $A$ be an adversary, and let~$U$ be an
1162 auxiliary-input function; let $S$ be a simulator, and let $D$ be a
1163 distinguisher. We define $D$'s \emph{advantage against $S$'s simulation of
1164 $A$'s interaction with~$W$ with auxiliary inputs provided by~$U$} to be
1165 \[ \Adv{sim}{W, I, S}(A, U, D) =
1166 \Pr[\Game{real}{W, I, S}(A, U, D) = 1] -
1167 \Pr[\Game{sim}{W, I, S}(A, U, D)= 1]
1168 \]
1169 where the games are as shown in figure~\ref{fig:sim}.
1170 Furthermore, we define the \emph{simulator's insecurity function} to be
1171 \[ \InSec{sim}(W, I, S;
1172 t_D, t_A, \mathcal{Q}_D, \mathcal{Q}_A, \mathcal{R}, \mathcal{U}) =
1173 \max_{D, A, U} \Adv{sim}{W, I, S}(A, U, D)
1174 \]
1175 where the maximum is taken over all distinguishers~$D$ running in
1176 time~$t_D$ and making at most $\mathcal{Q}_D$ random-oracle queries, and
1177 all adversaries~$A$ running in time~$t_A$, making at most $\mathcal{Q}_A$
1178 random-oracle queries, not exceeding the other stated resource
1179 bounds~$\mathcal{R}$ on its interaction with~$W$, and auxiliary-input
1180 functions producing output not exceeding the stated bounds~$\mathcal{U}$.
1181\end{definition}
1182\begin{remark}
1183 The usual definitions of zero-knowledge, for example, require the simulator
1184 to work for all choices of inputs (common, private and auxiliary), rather
1185 than for random choices. Our definition therefore looks weaker. Our proof
1186 of zero-knowledge actually carries through to the traditional
1187 stronger-looking definition. Critically, however, the standard
1188 universal quantification over inputs fails to capture deniability in the
1189 random oracle model, since the inputs can't therefore depend on the random
1190 oracle. Our formulation therefore actually gives \emph{stronger}
32609df3 1191 deniability than the usual one.
a6e375a6
MW
1192\end{remark}
1193
1194\begin{figure}
1195 \begin{program}
1196 $\Game{real}{W, I, S}(A, U, D)$: \+ \\
1197 $(w, c) \gets I()$; \\
1198 $u \gets U^{\mathcal{H}}(w, c)$; \\
1199 $a \gets A^{W(w, c), \mathcal{H}}(c, u)$; \\
1200 $b \gets D^{\mathcal{H}}(c, u, a)$; \\
1201 \RETURN $b$;
1202 \next
1203 $\Game{sim}{W, I, S}(A, U, D)$: \+ \\
1204 $(w, c) \gets I()$; \\
1205 $u \gets U^{\mathcal{H}}(w, c)$; \\
1206 $s \gets S^{A, \mathcal{H}}(c, u)$; \\
1207 $b \gets D^{\mathcal{H}}(c, u, s)$; \\
1208 \RETURN $b$;
1209 \end{program}
1210
1211 \caption{Games for simulation: $\Game{real}{W, I}$ and $\Game{sim}{W, I}$}
1212 \label{fig:sim}
1213\end{figure}
1214
32609df3 1215\ifshort\else
a6e375a6
MW
1216\subsubsection{Fake-world simulators}
1217The simulators we shall be considering in the present paper are of a specific
1218type which we call `fake-world simulators'. They work by running the
1219adversary in a fake `cardboard cut-out' world, and attempting to extract
1220enough information from the adversary's previous interactions and random
1221oracle queries to maintain a convincing illusion.
1222
1223That is, the behaviour of a fake-world simulator~$S$ is simply to allow the
1224adversary to interact with a `fake world'~$W'$, which was not given the world
1225private input. That is, there is some world $W'$ such that
1226\[ S^{A, \mathcal{H}}(c, u) \equiv A^{W'(u, c), \mathcal{H}}(c, u) \]
1227Fake-world simulators are convenient because they allow us to remove from
1228consideration the distinguisher~$D$ as the following definition shows.
1229\begin{definition}[Fake-world simulation security]
1230 \label{def:fakesim}
1231 Let $I$, $W$ and $U$ be as in definition~\ref{def:sim}. Let $A$ be an
1232 adversary which outputs a single bit. Let $S$ be a fake-world simulator.
1233 We define $A$'s \emph{advantage against $S$'s fake-world simulation of $W$
1234 with auxiliary inputs provided by~$U$} to be
1235 \begin{spliteqn*}
1236 \Adv{fw}{W, I, S}(A, U) =
1237 \Pr[(w, c) \gets I(); u \gets U^{\mathcal{H}}(w, c);
1238 b \gets A^{W(w, c), \mathcal{H}}(c, u) : b = 1] - {} \\
1239 \Pr[(w, c) \gets I(); u \gets U^{\mathcal{H}}(w, c);
1240 b \gets S^{A, \mathcal{H}}(c, u) : b = 1]
1241 \end{spliteqn*}
1242 Furthermore, we define the \emph{simulator's insecurity function} to be
1243 \[ \InSec{fw}(W, I, S;
1244 t, \mathcal{Q}, \mathcal{R}, \mathcal{U}) =
1245 \max_{A, U} \Adv{fw}{W, I, S}(A, U)
1246 \]
1247 where the maximum is taken over all adversaries~$A$ running in time~$t$,
1248 making at most $\mathcal{Q}$ random-oracle queries, not exceeding the other
1249 stated resource bounds~$\mathcal{R}$ on its interaction with~$W$, and
1250 auxiliary-input functions producing output not exceeding the stated
1251 bounds~$\mathcal{U}$.
1252\end{definition}
1253It remains for us to demonstrate that this is a valid way of analysing
1254simulators; the following simple proposition shows that this is indeed the
1255case.
1256\begin{proposition}[Fake-world simulation]
1257 \label{prop:fakesim}
1258 Let $I$ be an initialization function and let $W$ be a world. Then, for
1259 any fake-world simulator~$S$,
1260 \[ \InSec{sim}(W, I, S; t_D, t_A, \mathcal{Q}_D, \mathcal{Q}_A,
1261 \mathcal{R}, \mathcal{U}) \le
1262 \InSec{fw}(W, I, S; t_A + t_D, \mathcal{Q}_D + \mathcal{Q}_A,
1263 \mathcal{R}, \mathcal{U})
1264 \]
1265 (where addition of query bounds $\mathcal{Q}$ is done elementwise).
1266\end{proposition}
1267\begin{proof}
1268 Let $W$ and $I$ as in the proposition statement be given; also let a
1269 distinguisher~$D$ running in time~$t_D$ and making $\mathcal{Q}_D$
1270 random-oracle queries, an adversary~$A$ running in time~$t_A$ and making
1271 $\mathcal{Q}_A$ random-oracle queries and interacting with its world within
1272 the stated bounds~$\mathcal{R}$, an auxiliary-input function~$U$ satisfying
1273 the constraints~$\mathcal{U}$ on its output, and a fake-world simulator~$S$
1274 all be given.
1275
1276 We construct an adversary~$B$ outputting a single bit as follows
1277 \begin{program}
1278 Adversary $B^{W, \mathcal{H}}(c, u)$: \+ \\
1279 $a \gets A^{W, \mathcal{H}}(c, u)$; \\
1280 $b \gets D^{\mathcal{H}}(c, u, a)$; \\
1281 \RETURN $b$;
1282 \end{program}
1283 A glance at definitions \ref{def:sim} and~\ref{def:fakesim} and the
1284 resources used by $B$ shows that
1285 \[ \Adv{sim}{W, I, S}(A, U, D) = \Adv{fw}{W, I, S}(B, U)
1286 \le \InSec{fw}(W, I, S; t_D + t_A, \mathcal{Q}_D + \mathcal{Q}_A,
1287 \mathcal{R}, \mathcal{U})
1288 \]
1289 as required.
1290\end{proof}
32609df3 1291\fi
a6e375a6
MW
1292
1293%%%--------------------------------------------------------------------------
1294
1295\section{A zero-knowledge identification scheme}
1296\label{sec:zk-ident}
1297
1298
1299\subsection{Description}
1300
1301Here we present a simple zero-knowledge identification scheme. Fix some
32609df3
MW
1302group $G$ with prime order $q = \#G$. Suppose Alice chooses a private key $x
1303\inr \gf{q}$, and publishes the corresponding public key $X = x P$. Let
1304$H_I\colon G^2 \to \Bin^{\ell_I}$ be a secure hash function. Here's a simple
1305protocol which lets her prove her identity to Bob.
a6e375a6 1306\begin{enumerate}
32609df3
MW
1307\item Bob selects a random $r \inr \gf{q}$, and computes $R = r P$, $Y = r X$,
1308 and $c = r \xor H_I(R, Y)$. He sends the pair $(R, c)$ to Alice as his
1309 \emph{challenge}.
a6e375a6
MW
1310\item Alice receives $(R, c)$. She computes $Y' = x R$ and $r' = c \xor
1311 H_I(R, Y')$, and checks that $R = r' P$. If so, she sends $Y'$ as her
1312 \emph{response}; otherwise she sends $\bot$.
1313\item Bob receives $Y'$ from Alice. He checks that $Y' = Y$. If so, he
1314 accepts that he's talking to Alice; otherwise he becomes suspicious.
1315\end{enumerate}
1316We name this the Wrestlers Identification Protocol in~$G$, $\Wident^G$ (we
1317drop the superscript to refer to the protocol in general, or when no
1318ambiguity is likely to result). A summary is shown in
1319figure~\ref{fig:wident}.
1320
1321\begin{figure}
1322 \begin{description}
32609df3 1323 \item[Setup] Group $G = \langle P \rangle$; $\#G = q$ is prime.
a6e375a6 1324 $H_I(\cdot, \cdot)$ is a secure hash.
32609df3 1325 \item[Private key] $x \inr \gf{q}$.
a6e375a6 1326 \item[Public key] $X = x P$.
32609df3 1327 \item[Challenge] $(R, c)$ where $r \inr \gf{q}$, $R = r P$, $c = r \xor
a6e375a6
MW
1328 H_I(R, r X)$.
1329 \item[Response] $x R = r X$ if $R = (c \xor H_I(R, x R)) P$; otherwise
1330 $\bot$.
1331 \end{description}
1332
1333 \caption{Summary of the Wrestlers Identification Protocol, $\Wident$}
1334 \label{fig:wident}
1335\end{figure}
1336
1337
1338\subsection{Security}
1339
1340In order to evaluate the security of our protocol, we present a formal
1341description of the algorithms involved in figure~\ref{fig:wident}. Here, the
1342hash function $H_I(\cdot, \cdot)$ is modelled as a random oracle.
1343
1344\begin{figure}
1345 \begin{program}
1346 Function $\id{setup}()$: \+ \\
32609df3 1347 $x \getsr \gf{q}$; \\
a6e375a6
MW
1348 $X \gets x P$; \\
1349 \RETURN $(x, X)$;
32609df3 1350 \ifshort\newline\else\next\fi
a6e375a6 1351 Function $\id{challenge}^{H_I(\cdot, \cdot)}(R, c, X)$: \+ \\
32609df3 1352 $r \getsr \gf{q}$; \\
a6e375a6
MW
1353 $R \gets r P$; $Y \gets r X$; \\
1354 $h \gets H_I(R, Y)$; $c \gets r \xor h$; \\
1355 \RETURN $(Y, R, c)$; \- \\[\medskipamount]
1356 Function $\id{verify}(Y, Y')$: \+ \\
1357 \IF $Y' = Y$ \THEN \RETURN $1$; \\
1358 \RETURN $0$;
1359 \next
1360 Function $\id{response}^{H_I(\cdot, \cdot)}(R, c, x)$: \+ \\
1361 $Y' \gets x R$; \\
1362 $h \gets H_I(R, Y')$; $r' \gets c \xor h$; \\
1363 \IF $R \ne r' P$ \THEN \RETURN $\bot$; \\
1364 \RETURN $Y'$;
1365 \end{program}
1366
1367 \caption{Functions implementing $\Wident$ in the random oracle model}
1368 \label{fig:wident-ro}
1369\end{figure}
1370
1371\subsubsection{Completeness}
1372Suppose that Bob really is talking to Alice. Note that $Y' = x R = x (r P) =
1373r (x P) = r X = Y$. Hence $r' = c \xor H_I(R, Y') = c \xor H_I(R, Y) = r$,
1374so $r' P = r P = R$, so Alice returns $Y' = Y$ to Bob. Therefore $\Wident$
1375is \emph{complete}: if Bob really is communicating with Alice then he
1376accepts.
1377
1378\subsubsection{Soundness}
1379We next show that impersonating Alice is difficult. The natural way to prove
1380this would be to give an adversary a challenge and prove that its probability
1381of giving a correct response is very small. However, we prove a stronger
1382result: we show that if the adversary can respond correctly to any of a large
1383collection of challenges then it can solve the MCDH problem.
1384
1385Consider the game $\Game{imp}{\Wident}$ shown in
1386figure~\ref{fig:wident-sound}. An adversary's probability of successfully
1387impersonating Alice in our protocol, by correctly responding to any one of
1388$n$ challenges, is exactly its probability of winning the game (i.e., causing
1389it to return $1$).
1390
1391\begin{figure}
1392 \begin{program}
1393 $\Game{imp-$n$}{\Wident}(A)$: \+ \\
1394 $H_I \getsr \Func{G^2}{\Bin^{\ell_I}}$; \\
1395 $(x, X) \gets \id{setup}()$; \\
1396 $\id{win} \gets 0$; \\
1397 $\Xid{R}{map} \gets \emptyset$; \\
1398 $\mathbf{c} \gets \id{challenges}(n)$; \\
1399 $(R', Y') \gets A^{H_I(\cdot, \cdot), \id{check}(\cdot, \cdot)}
1400 (X, \mathbf{c})$; \\
1401 \RETURN $\id{win}$;
1402 \newline
1403 Function $\id{challenges}(n)$: \+ \\
1404 \FOR $i \in \Nupto{n}$ \DO \\ \ind
1405 $(Y, R, c) \gets \id{challenge}^{H_I(\cdot, \cdot)}$; \\
1406 $\Xid{R}{map} \gets \Xid{R}{map} \cup \{ R \mapsto Y \}$; \\
1407 $\mathbf{c}[i] \gets (R, c)$; \- \\
1408 \RETURN $\mathbf{c}$; \next
1409 Function $\id{check}(R', Y')$: \\
1410 \IF $R' \notin \dom \Xid{R}{map}$ \THEN \RETURN $0$; \\
1411 $Y \gets \Xid{R}{map}(R')$; \\
1412 \IF $\id{verify}(Y, Y')$ \THEN \\ \ind
1413 $\id{win} \gets 1$; \\
1414 \RETURN $1$; \- \\
1415 \RETURN $0$;
1416 \end{program}
1417
1418 \caption{Soundness of $\Wident$: $\Game{imp-$n$}{\Wident}(A)$}
1419 \label{fig:wident-sound}
1420\end{figure}
1421
1422\begin{theorem}[Soundness of $\Wident$]
1423 \label{thm:wident-sound}
1424 Let $A$ be any adversary running in time $t$ and making $q_I$ queries to
1425 its random oracle, and $q_V$ queries to its verification oracle. Let $G$
1426 be a cyclic group. Then
1427 \[ \Pr[\Game{imp-$n$}{\Wident^G}(A) = 1] \le
1428 \InSec{mcdh}(G; t', q_I + q_V)
1429 \]
1430 where $t' = t + O(q_I) + O(q_V)$.
1431\end{theorem}
1432\begin{remark}
1433 Note that the security bound here is \emph{independent} of the value of
1434 $n$.
1435\end{remark}
32609df3
MW
1436\begin{longproof}{The proof of this theorem can be found in the full version
1437 of the paper.}
a6e375a6
MW
1438 We prove this by defining a sequence of games $\G{i}$. The first will be
1439 the same as the attack game $\Game{imp-$n$}{\Wident}(A)$ and the others
1440 will differ from it in minor ways. In each game $\G{i}$, let $S_i$ be the
1441 event that $A$ wins the game -- i.e., that it successfully impersonates the
1442 holder of the private key~$x$.
32609df3 1443
a6e375a6
MW
1444 Let game $\G0$ be the attack game $\Game{imp-$n$}{\Wident}(A)$, and let $(R',
1445 Y')$ be the output of $A$ in the game.
1446
1447 We define a new game $\G1$ which is the same as $\G0$, except that we query
1448 the random oracle $H_I$ at $(R', Y')$ whenever the adversary queries
1449 $\id{check}(R', Y')$. (We don't charge the adversary for this.) This
1450 obviously doesn't affect the adversary's probability of winning, so
1451 $\Pr[S_1] = \Pr[S_0]$.
1452
1453 Game $\G2$ is like $\G1$, except that we change the way we generate
1454 challenges and check their responses. Specifically, we define new functions
1455 $\id{challenges}_2$ and $\id{check}_2$, as shown in
1456 figure~\ref{fig:wident-sound-2}.
1457
1458 \begin{figure}
1459 \begin{program}
1460 Function $\id{challenges}_2(n)$: \+ \\
1461 $r^* \getsr I$; $R^* \gets r^* P$; $Y^* \gets r^* X$; \\
1462 \FOR $i \in \Nupto{n}$ \DO \\ \ind
1463 $r \getsr I$; $R \gets r R^*$; $Y \gets r Y^*$; \\
1464 $h \gets H_I(R, Y)$; $c \gets r \xor h$; \\
1465 $\Xid{R}{map} \gets \Xid{R}{map} \cup \{ R \mapsto r \}$; \\
1466 $\mathbf{c}[i] \gets (R, c)$; \- \\
1467 \RETURN $\mathbf{c}$;
1468 \next
1469 Function $\id{check}_2(R', Y')$: \+ \\
1470 \IF $R' \notin \dom \Xid{R}{map}$ \THEN \RETURN $0$; \\
1471 $r \gets \Xid{R}{map}(R')$; \\
1472 \IF $\id{verify}(Y^*, Y'/r)$ \THEN \\ \ind
1473 $\id{win} \gets 1$; \\
1474 \RETURN $1$; \- \\
1475 \RETURN $0$;
1476 \end{program}
1477
1478 \caption{Soundness of $\Wident$: $\id{challenges}_2$ and $\id{check}_2$}
1479 \label{fig:wident-sound-2}
1480 \end{figure}
1481
1482 While we're generating and checking challenges in a more complicated way
1483 here, we're not actually changing the distribution of the challenges, or
1484 changing the winning condition. Hence $\Pr[S_2] = \Pr[S_1]$.
1485
1486 Now we change the rules again. Let $\G3$ be the same as $\G2$ except that
1487 we change the winning condition. Instead, we say that the adversary wins
1488 if any of the queries to its random oracle $H_I(R', Y')$ would be a correct
1489 response -- i.e., $\id{check}_2(R', Y')$ would return $1$. Since we query
1490 the oracle on $(R', Y')$ on its behalf at the end of the game, no adversary
1491 can do worse in this game than it does in $\G2$, so we have $\Pr[S_3] \ge
1492 \Pr[S_2]$. (It's not hard to see that this only helps quite stupid
1493 adversaries. We can transform any adversary into one for which equality
1494 holds here.)
1495
1496 Finally, let $\G4$ be the same as $\G3$ except that we change the way we
1497 generate challenges again: rather than computing $h$ and setting $c \gets h
1498 \xor r$, we just choose $c$ at random. Specifically, we use the new
1499 function, $\id{challenges}_4$, shown in figure~\ref{fig:wident-sound-4}.
1500
1501 \begin{figure}
1502 \begin{program}
1503 Function $\id{challenges}_4(n)$: \+ \\
1504 $r^* \getsr I$; $R^* \gets r^* P$; $Y^* \gets r^* X$; \\
1505 \FOR $i \in \Nupto{n}$ \DO \\ \ind
1506 $r \getsr I$; $R \gets r R^*$; \\
1507 $c \getsr \Bin^{\ell_I}$; \\
1508 $\Xid{R}{map} \gets \Xid{R}{map} \cup \{ R \mapsto r \}$; \\
1509 $\mathbf{c}[i] \gets (R, c)$; \- \\
1510 \RETURN $\mathbf{c}$;
1511 \end{program}
1512
1513 \caption{Soundness of $\Wident$: $\id{challenges}_4$}
1514 \label{fig:wident-sound-4}
1515 \end{figure}
1516
1517 Since $H_I(\cdot, \cdot)$ is a random function, the adversary can only
1518 distinguish $\G4$ from $\G3$ if it queries its random oracle at some $(R, r
1519 Y^*)$. But if it does this, then by the rule introduced in $\G3$ it has
1520 already won. Therefore we must have $\Pr[S_4] = \Pr[S_3]$.
1521
1522 Our $\id{challenges}_4$ function is interesting, since it doesn't actually
1523 make use of $r^*$ or $Y^*$ when generating its challenges. This gives us
1524 the clue we need to bound $\Pr[S_4]$: we can use adversary $A$ to solve the
1525 multiple-guess Diffie-Hellman problem in $G$ by simulating the game $\G4$.
1526 Specifically, we define the adversary $B$ as shown in
1527 figure~\ref{fig:wident-sound-cdh}. That is, for each query $A$ makes to
1528 its random oracle at a new pair $(R', Y')$, we see whether this gives us
1529 the answer we're looking for. We have $\Pr[S_0] \le \Pr[S_4] =
1530 \Succ{mcdh}{G}(B) \le \InSec{mcdh}(G; t', q_I + q_V)$ as required.
1531
1532 \begin{figure}
1533 \begin{program}
1534 Adversary $B^{V(\cdot)}(X, R^*)$: \+ \\
1535 $F \gets \emptyset$; $\Xid{R}{map} \gets \emptyset$; \\
1536 \FOR $i \in \Nupto{n}$ \DO \\ \ind
1537 $r \getsr I$; $R \gets r R^*$; $c \getsr \Bin^{\ell_I}$; \\
1538 $\Xid{R}{map} \gets \Xid{R}{map} \cup \{ R \mapsto r \}$; \\
1539 $\mathbf{c}[i] \gets (R, c)$; \- \\
1540 $(R', Y') \gets A^{H_I(\cdot, \cdot), \id{check}(\cdot, \cdot)}
1541 (X, \mathbf{c})$; \\
1542 \IF $Y' \neq \bot$ \THEN $H_I(R', Y')$;
1543 \next
1544 Oracle $H_I(R', Y')$: \+ \\
1545 \IF $(R', Y') \in \dom F$ \THEN \\ \quad
1546 $h \gets F(x)$; \\
1547 \ELSE \\ \ind
1548 $\id{check}(R', Y')$; \\
1549 $h \getsr \Bin^{\ell_I}$;
1550 $F \gets F \cup \{ (R', Y') \mapsto h \}$; \- \\
1551 \RETURN $h$;
1552 \- \\[\medskipamount]
1553 Oracle $\id{check}(R', Y')$: \+ \\
1554 \IF $R' \in \dom \Xid{R}{map}$ \THEN
1555 $V(Y'/\Xid{R}{map}(R'))$;
1556 \end{program}
1557
1558 \caption{Soundness of $\Wident$: reduction from MCDH}
1559 \label{fig:wident-sound-cdh}
1560 \end{figure}
32609df3 1561\end{longproof}
a6e375a6
MW
1562
1563\subsubsection{Zero-knowledge}
1564Finally we must prove that $\Wident$ is (statistical) zero-knowledge -- i.e.,
1565that, except with very small probability, Bob learns nothing of use to him
1566except that he's interacting with Alice. To do this, we show that, for any
1567algorithm $B$ which Bob might use to produce his challenge to the real Alice,
1568there exists a simulator $S$ which produces transcripts distributed very
1569similarly to transcripts of real conversations between $B$ and Alice, the
1570difference being that $S$ doesn't know Alice's key. We shall show that the
1571statistical difference between the two distributions is $2^{-\ell_I}$.
1572
1573The intuition here is that Bob ought to know what answer Alice is going to
1574give him when he constructs his challenge. This is certainly true if he's
1575honest: his challenge is $R = r P$ for some $r$ he knows, so he won't learn
1576anything useful when Alice responds with $x R = r X$. However, if Bob sends
1577a challenge $R$ when he doesn't know the corresponding $r$, he learns
1578something potentially useful. The accompanying check value $c = r \xor
1579H_I(R, r X)$ keeps him honest.
1580
1581To show this, we present an \emph{extractor} which, given any challenge $(R,
1582c)$ Bob can construct, and his history of random-oracle queries, either
1583returns a pair $(r, Y)$ such that $R = r P$ and $Y = r X$, or $\bot$;
1584moreover, the probability that Alice returns a response $Y' \ne \bot$ given
1585 the challenge $(R, c)$ is $2^{-\ell_I}$. We can, of course, readily convert
1586this extractor into a simulator to prove the zero-knowledge property of our
1587protocol.
1588
1589We shall actually consider a slightly more complex setting. We grant Bob
1590access to an oracle which produces random, correctly-formed challenges. We
1591require this to model the legitimate challenges of other parties when we
32609df3 1592analyse the security of our key exchange protocol.
a6e375a6
MW
1593
1594\begin{definition}[Discrete-log extractors]
1595 Let $T$, $B$ be randomized algorithms. Define the game
1596 $\Game{dl-ext}{G}(T, B)$ as shown in figure~\ref{fig:dlext}. The
1597 \emph{success probability of $T$ as a discrete-log extractor against $B$}
1598 is defined as
1599 \[ \Succ{dl-ext}{G}(T, B) = \Pr[\Game{dl-ext}{G}(T, B) = 1]. \]
1600\end{definition}
a6e375a6
MW
1601
1602\begin{figure}
1603 \begin{program}
1604 $\Game{dl-ext}{G}(T, B):$ \+ \\
1605 $H_I \getsr \Func{G^2}{\Bin^{\ell_I}}$;
1606 $Q_H \gets \emptyset$; $Q_C \gets \emptyset$; \\
1607 $(x, X) \gets \id{setup}()$; \\
1608 $(R, c) \gets B^{\Xid{H_I}{trap}(\cdot, \cdot), C()}(x, X)$; \\
1609 $(r, Y) \gets T(R, c, Q_H)$; \\
1610 $Y' \gets x R$; $h' \gets H_I(R, Y')$; $r' \gets c \xor h'$; \\
1611 \IF $r \ne \bot$ \THEN \\ \quad
1612 \IF $Y = \bot \lor R \ne r P \lor Y \ne Y'$ \THEN \RETURN $0$; \\
32609df3 1613 \IF $R = r' P$ \THEN $(r^*, Y^*) \gets (r', Y')$; \\
a6e375a6
MW
1614 \ELSE $(r^*, Y^*) \gets (\bot, \bot)$; \\
1615 \IF $(R, c) \in Q_C$ \THEN \RETURN $1$; \\
1616 \IF $(r, Y) = (r', Y')$ \THEN \RETURN $1$; \\
1617 \RETURN $0$;
1618 \next
1619 Oracle $\Xid{H_I}{trap}(R', Y')$: \+ \\
1620 $h \gets H_I(R', Y')$; \\
1621 $Q_H \gets Q_H \cup \{(R', Y', h)\}$; \\
1622 \RETURN $h$; \- \\[\medskipamount]
1623 Oracle $C()$: \+ \\
32609df3 1624 $r \getsr \gf{q}$; \\
a6e375a6
MW
1625 $R \gets r P$; $c \gets r \xor H_I(R, r X)$; \\
1626 $Q_C \gets Q_C \cup \{(R, c)\}$; \\
32609df3 1627 \RETURN $(R, c)$
a6e375a6
MW
1628 \end{program}
1629
1630 \caption{Discrete log extraction game: $\Game{dl-ext}{G}(T, B)$}
1631 \label{fig:dlext}
1632\end{figure}
1633
32609df3
MW
1634Let's unpack this definition slightly. We make the following demands of our
1635extractor.
1636\begin{itemize}
1637\item It is given a bare `transcript' of $B$'s execution. In particular, it
1638 is given only its output and a list of $B$'s random-oracle queries in no
1639 particular order.
1640\item While the extractor is not given the private key~$x$, the adversary~$B$
1641 is given the private key.
1642\item We require that, if the extractor produces values $r, Y \ne \bot$ then
1643 $r$ and $Y$ are \emph{correct}; i.e., that $R = r P$ and $Y = x R$.
1644\item The extractor is explicitly \emph{not} given the outputs of the
1645 challenge-generation oracle $C()$, nor of the random-oracle queries issued
1646 by $C()$. However, we allow the extractor to fail (i.e., return $\bot$) if
1647 $B$ simply parrots one of its $C$-outputs.
1648\item The extractor is allowed -- indeed \emph{required} -- to fail if the
1649 challenge $(R, c)$ is \emph{invalid} (i.e., Alice would return $\bot$ given
1650 the challenge).
1651\end{itemize}
1652The resulting definition bears a striking similarity to the concept of
1653\emph{plaintext awareness} in \cite{Bellare:1998:RAN}.
1654
a6e375a6
MW
1655Such an extractor indeed exists, as the following lemma states.
1656\begin{lemma}[Effectiveness of extractor $T_\Wident$]
1657 \label{lem:dlext}
32609df3
MW
1658 There exists a \emph{universal discrete-log extractor} $T_\Wident$, shown
1659 in figure~\ref{fig:twident}, such that, for any algorithm $B$,
a6e375a6
MW
1660 \[ \Succ{dl-ext}{G}(T_\Wident, B) \ge 1 - \frac{1}{2^{\ell_I}}. \]
1661 Moreover, if $B$ issues at most $q_H$ random-oracle queries, then the
1662 running time of $T_\Wident$ is $O(q_H)$.
1663\end{lemma}
32609df3
MW
1664\ifshort
1665The proof of this lemma is given in the full version of this paper.
1666\else
a6e375a6
MW
1667We prove this result at the end of the section. For now, let us see how to
1668prove that $\Wident$ is zero-knowledge.
32609df3
MW
1669\fi
1670
1671\begin{figure}
1672 \begin{program}
1673 Extractor $T_\Wident(R, c, Q_H)$: \+ \\
1674 \FOR $(R', Y', h)$ \IN $Q_H$ \DO \\ \ind
1675 $r \gets h \xor c$; \\
1676 \IF $R = R' = r P \land Y' = r X$ \THEN \RETURN $(r, Y')$; \- \\
1677 \RETURN $(\bot, \bot)$;
1678 \end{program}
1679
1680 \caption{The discrete-log extractor $T_\Wident$}
1681 \label{fig:twident}
1682\end{figure}
a6e375a6
MW
1683
1684We use the set-up described in section~\ref{sec:sim}. Our initialization
32609df3 1685function~$I_\Wident$ just chooses a random $x \in \gf{q}$ as the world
a6e375a6
MW
1686private input and sets $X = x P$ as the common input. In the `real world',
1687the adversary is allowed to submit a (single) challenge to the prover; it is
1688given the prover's response, and must then compute its output. This is shown
1689on the left hand side of figure~\ref{fig:wident-sim}.
1690
1691The zero-knowledge property of the scheme is described by the following
1692theorem.
1693\begin{theorem}[Statistical zero-knowledge of $\Wident$]
1694 \label{thm:wident-zk}
1695 Let $I_\Wident$, $W_\Wident$ and $S_\Wident$ be the real-prover world and
1696 simulator shown in figure~\ref{fig:wident-sim}. Then, for any~$t$,
1697 $q_I$ and $q_C$,
1698 \[ \InSec{sim}(W_\Wident, I_\Wident, S_\Wident; t, q_I, q_C, 0) \le
1699 \frac{q_C}{2^{\ell_I}}.
1700 \]
1701 where $q_C$ is the maximum number of challenges allowed by the adversary.
1702\end{theorem}
32609df3 1703\begin{longproof}{}
a6e375a6
MW
1704 The simulator simply uses the extractor~$T_\Wident$ to extract the answer
1705 from the adversary's history of random oracle queries. Observe that
1706 $S_\Wident$ is a fake-world simulator. By lemma~\ref{lem:dlext}, the
1707 extractor fails with probability only $2^{-\ell_I}$. The theorem follows
1708 by a simple union bound and proposition~\ref{prop:fakesim}.
32609df3 1709\end{longproof}
a6e375a6 1710
32609df3 1711%\ifshort\else
a6e375a6
MW
1712\begin{figure}
1713 \begin{program}
1714 Initialization function $I_\Wident()$: \+ \\
32609df3 1715 $x \getsr \gf{q}$; \\
a6e375a6
MW
1716 $X \gets x P$; \\
1717 \RETURN $(x, X)$;
1718 \- \\[\medskipamount]
1719 Real-prover world $W_\Wident^{H_I(\cdot, \cdot)}
1720 ((x, X), \sigma, \tau, \mu)$: \+ \\
1721 \IF $\tau = \cookie{challenge}$ \THEN \\ \ind
1722 $(R, c) \gets \mu$; \\
1723 $Y \gets \id{response}^{H_I(\cdot, \cdot)}(R, c, x)$; \\
1724 \RETURN $(1, Y)$; \- \\
1725 \ELSE \\ \ind
1726 \RETURN $(\sigma, \bot)$;
1727 \next
1728 Simulator $S_\Wident$'s fake world \\
1729 \hspace{1in} $W_{\text{sim}}^{H_I(\cdot, \cdot)}
1730 ((X, u), \sigma, \tau, \mu)$: \+ \\
1731 \IF $\tau = \cookie{init}$ \THEN \\ \ind
1732 \RETURN $(\emptyset, \emptystring)$; \- \\
1733 $Q_H \gets \sigma$; \\
1734 \IF $\tau = \cookie{challenge}$ \THEN \\ \ind
1735 $(R, c) \gets \mu$; \\
1736 $(r, Y) \gets T_\Wident(R, c, Q_H)$; \\
1737 \RETURN $(Q_H, Y)$; \- \\
1738 \ELSE \IF $\tau = \cookie{random}$ \THEN \\ \ind
1739 $(i, (R', Y'), h) \gets \mu$; \\
1740 $Q_H \gets Q_H \cup \{(R', Y', h)\}$; \\
1741 \RETURN $(Q_H, \emptystring)$; \- \\
1742 \ELSE \\ \ind
1743 \RETURN $(\sigma, \bot)$;
1744 \end{program}
1745
1746 \caption{Real-prover and simulator for zero-knowledge of $\Wident$}
1747 \label{fig:wident-sim}
1748\end{figure}
32609df3 1749%\fi
a6e375a6 1750
32609df3
MW
1751\ifshort\else
1752We now return to proving that the extractor $T_\Wident$ functions as claimed.
dff0fad2 1753The following two trivial lemmata will be useful, both now and later.
a6e375a6
MW
1754\begin{lemma}[Uniqueness of discrete-logs]
1755 \label{lem:unique-dl}
1756 Let $G = \langle P \rangle$ be a cyclic group. For any $X \in G$ there is
32609df3 1757 a unique $x \in \gf{q}$ where $X = x P$.
a6e375a6
MW
1758\end{lemma}
1759\begin{proof}
1760 Certainly such an $x$ exists, since $G$ is cyclic and finite. Suppose $X =
1761 x P = x' P$: then $0 = x P - x' P = (x - x') P$. Hence $(x - x')$ is a
32609df3 1762 multiple of $q$, i.e., $x = x'$.
a6e375a6
MW
1763\end{proof}
1764\begin{lemma}[Uniqueness of check values]
1765 \label{lem:unique-c}
32609df3
MW
1766 Let $G = \langle P \rangle$ be a cyclic group of prime order $q$; let $H_I$
1767 be a function $H_I\colon \Bin^{2\ell_G} \to \Bin^{\ell_I}$. Fix some $x
1768 \in \gf{q}$ and define the set
a6e375a6
MW
1769 \[ V_x = \bigl\{\, (R, c) \in G \times \Bin^{\ell_I} \bigm|
1770 R = \bigl( c \xor H_I(R, x R) \bigr) P \,\bigr\}.
1771 \]
1772 Then, for any $R$, $c$, $c'$, if $(R, c) \in V_x$ and $(R, c') \in V_x$
32609df3 1773 then $c = c'$.
a6e375a6
MW
1774\end{lemma}
1775\begin{proof}
32609df3
MW
1776 From lemma~\ref{lem:unique-dl}, we see that there is a unique $r \in \gf{q}$
1777 for which $R = r P$. Now, if $(R, c) \in V_x$, we must have $r = c \xor
1778 H_I(R, x R)$. It follows that $c = r \xor H_I(R, x R)$.
a6e375a6
MW
1779\end{proof}
1780
1781\begin{proof}[Proof of lemma~\ref{lem:dlext}]
a6e375a6
MW
1782 Let $B$ be any randomized algorithm, and let $(R, c, Q_H)$ be as given to
1783 the extractor by $\Game{dl-ext}{G}(T_\Wident, B)$. Let the quantities
1784 $H_I$, $Q_C$, $r$, $r'$, $x$ and $X$ be as in that game.
1785
1786 Suppose that the extractor returns values $(r, Y) \ne (\bot, \bot)$. Let
1787 $h = r \xor c$; then there must be a query $(R, Y, h) \in Q_H$, and we have
1788 $R = r P$ and $Y = r X = r (x P) = x (r P) = x R = Y'$, so the extractor's
1789 output must be correct unless it fails.
1790
1791 Furthermore, in the case where the extractor did not fail, we have $h =
1792 H_I(R, Y) = H_I(R, Y')$ and $c = r \xor h$, so the challenge was valid.
1793 Therefore, if the challenge was invalid, the extractor will fail.
1794
1795 We now deal with the challenge-generation oracle. Suppose that $(R, c')
1796 \in Q_C$ for some $c'$. Now, if $c = c'$ then $(R, c')$ is a repeat of
1797 some challenge from the challenge-generation oracle, and the extractor is
1798 permitted to fail. On the other hand, suppose $c \ne c'$; then, the
1799 challenge $(R, c)$ must be invalid by lemma~\ref{lem:unique-c}, so the
1800 extractor is required to fail, and we have established that indeed it will.
1801 From now on, suppose that $R$ is distinct from all the $R$-values returned
1802 by $C()$.
1803
1804 Let $Y = x R$. Suppose that $B$ queried its random oracle at $(R, Y)$.
1805 Let $h = H_I(R, Y)$, so $r' = c \xor h$. If the challenge is valid then $R =
1806 r' P$; therefore $Y = x R = x r' P = r' X$, so we have $(R, Y, h) \in Q_H$
1807 with $R = r P$ and $Y = r X$. Hence the extractor returns $r = r'$ as
1808 required.
1809
1810 It remains to deal with the case where there is no random-oracle query at
1811 $(R, Y)$. But then $h = H_I(R, Y)$ is uniformly distributed, and
1812 independent of the entire game up to this point. Let $r$ be the correct
1813 discrete log of $R$; by lemma~\ref{lem:unique-dl} there is only one
1814 possible value. The extractor always fails under these circumstances, but
1815 a correct responder would reply with probability
1816 \[ \Pr[h = c \xor r] = \frac{1}{2^{\ell_I}}. \]
1817 This concludes the proof.
1818\end{proof}
1819\begin{remark}
1820 Note that the fact that the algorithm~$B$ was given the private key is
1821 irrelevant to the above argument. However, we shall need this property
1822 when we come to prove deniability for the key-exchange protocol.
1823\end{remark}
1824\begin{remark}
1825 It's easy to see from the above proof that the extractor works flawlessly
1826 on the `honest verifier' algorithm $\id{challenge}$ shown in
1827 figure~\ref{fig:wident-ro}. This shows that $\Wident$ is perfect
1828 zero-knowledge against honest verifiers. We're much more interested in
1829 dishonest verifiers, though.
1830\end{remark}
32609df3 1831\fi
a6e375a6
MW
1832
1833
32609df3 1834\ifshort\else
a6e375a6
MW
1835\subsection{An identity-based identification scheme}
1836\label{sec:wident-id}
1837
d8139f2c 1838Boneh and Franklin \cite{Boneh:2003:IBE} showed how to construct an
a6e375a6
MW
1839identity-based encryption scheme using bilinear pairings. The resulting
1840encryption scheme looks somewhat like a pairing-based version of ElGamal's
4331bf84 1841encryption scheme \cite{ElGamal:1985:PKCb}. We can easily apply their
a6e375a6
MW
1842techniques to our identification protocol, and thereby obtain an
1843identity-based identification scheme. Providing the necessary formalisms to
1844prove theorems analogous to our theorems~\ref{thm:wident-sound}
1845and~\ref{thm:wident-zk} would take us too far from our objectives; but given
1846appropriate security notions, we can readily adapt our existing proofs to the
1847new setting.
1848
1849\subsubsection{Bilinear pairings}
1850Before we describe the necessary modifications to the protocol, we first give
1851a (very brief!) summary of cryptographic pairings. (The Boneh-Franklin paper
d8139f2c 1852\cite{Boneh:2003:IBE} gives more detail; also \cite{Menezes:2005:IPB}
a6e375a6
MW
1853provides a useful introduction to the topic.)
1854
1855Let $(G, +)$, $(G', +)$ and $(G_T, \times)$ be cyclic groups with prime order
1856$q$; let $P \in G$ and $P' \in G'$ be elements of order $q$ in $G$ and $G'$
1857respectively. We say that a mapping $\hat{e}\colon G \times G' \to G_T$ is a
1858\emph{non-degenerate bilinear pairing} if it satisfies the following
1859properties.
1860\begin{description}
1861\item[Bilinearity] For all $R \in G$ and $S', T' \in G'$, we have $\hat{e}(R,
dff0fad2
MW
1862 S' + T') = \hat{e}(R, S')\,\hat{e}(R, T')$; and for all $R, S \in G$ and $T'
1863 \in G'$ we have $\hat{e}(R + S, T') = \hat{e}(R, T')\,\hat{e}(S, T')$.
a6e375a6
MW
1864\item[Non-degeneracy] $\hat{e}(P, P') \ne 1$.
1865\end{description}
1866For practical use, we also want $\hat{e}(\cdot, \cdot)$ to be efficient to
1867compute. The reader can verify that $\hat{e}(a P, b P') = \hat{e}(P,
1868P')^{ab}$. It is permitted for the two groups $G$ and $G'$ to be equal.
1869
1870We require a different intractability assumption, specifically that the
1871\emph{bilinear} Diffie-Hellman problem (BDH) -- given $(a P, b P, a P', c P')
1872\in G^2 \times G'^2$, find $\hat{e}(P, P')^{abc} \in G_T$ -- is difficult.
1873This implies the difficulty of the computational Diffie-Hellman problem in
1874all three of $G$, $G'$ and~$G_T$.
1875
1876\subsubsection{The identity-based scheme}
1877We need a trusted authority; following \cite{Schneier:1996:ACP} we shall call
32609df3 1878him Trent. Trent's private key is $t \in \gf{q}$; his public key is $T =
a6e375a6
MW
1879t P$.
1880
1881Finally, we need cryptographic hash functions $H_I\colon G \times G_T \to
1882\Bin^{\ell_I}$ and $\Hid\colon \Bin^* \to G'$; a formal security analysis
1883would model these as random oracles.
1884
1885Alice's public key is $A = \Hid(\texttt{Alice}) \in G'$. Her private key is
1886$K_A = t A \in G'$ -- she needs Trent to give this to her. Bob can interact
1887with Alice in order to verify her identity as follows.
1888\begin{enumerate}
1889\item Bob computes $\gamma_A = \hat{e}(T, A) \in G_T$. (He can do this once
1890 and store the result if he wants, but it's not that onerous to work it out
1891 each time.)
32609df3 1892\item Bob chooses $r \inr \gf{q}$, and sets $R = r P$. He also computes
a6e375a6
MW
1893 $\psi = \gamma_A^r$, $h = H_I(R, \psi)$ and $c = r \xor h$. He sends his
1894 challenge $(R, c)$ to Alice.
1895\item Alice receives $(R', c')$. She computes $\psi' = \hat{e}(R, K_A)$, $h'
1896 = H_I(R', \psi')$, and $r' = c' \xor h'$. She checks that $R' = r' P$; if
1897 so, she sends $\psi'$ back to Bob; otherwise she refuses to talk to him.
1898\item Bob receives $\psi'$. If $\psi = \psi'$ then he accepts that he's
1899 talking to Alice.
1900\end{enumerate}
1901This works because $\psi = \gamma_A^r = \hat{e}(T, A)^r = \hat{e}(t P, A)^r =
1902\hat{e}(r P, A)^t = \hat{e}(R, t A) = \psi'$.
1903
1904\subsubsection{Informal analysis}
1905An analogue to lemma~\ref{lem:dlext} can be proven to show how to extract $r$
1906from a verifier's random-oracle queries; statistical zero knowledge would
1907then follow easily, as in theorem~\ref{thm:wident-zk}. Soundness is
1908intuitively clear, since an adversary must compute $\psi = \hat{e}(P,
1909P')^{art}$ given $A = a P'$, $R = r P$ and $T = t P$, which is an instance of
1910the BDH problem. An analogue of theorem~\ref{thm:wident-sound} would have to
1911prove this for an adversary capable of making identity requests as well as
1912obtaining challenges. Finally, our key-exchange protocol can be constructed
1913out of this identity-based identification scheme, yielding an identity-based
1914authenticated key-exchange protocol. We leave it to the reader to work
1915through the details.
32609df3 1916\fi
a6e375a6
MW
1917
1918
32609df3 1919\ifshort\else
a6e375a6
MW
1920\subsection{Comparison with the protocol of Stinson and Wu}
1921\label{sec:stinson-ident}
1922
1923Our protocol is similar to a recent proposal by Stinson and Wu
4331bf84
MW
1924\cite{cryptoeprint:2006:337}. They restrict their attention to Schnorr
1925groups $G \subset \F_p^*$. Let $\gamma$ be an element of order $q = \#G$.
1926The prover's private key is $a \inr \gf{q}$ and her public key is
1927$\alpha = \gamma^a$. In their protocol, the challenger chooses
1928$r \inr \gf{q}$, computes $\rho = \gamma^r$ and $\psi = \alpha^r$, and sends
1929a challenge $(\rho, H(\psi))$. The prover checks that $\rho^q \ne 1$,
1930computes $\psi = \rho^a$, checks the hash, and sends $\psi$ back by way of
1931response. They prove their protocol's security in the random-oracle model.
a6e375a6
MW
1932
1933Both the Wrestlers protocol and Stinson-Wu require both prover and verifier
1934to compute two exponentiations (or scalar multiplications) each. The
32609df3 1935sizes of the messages used by the two protocols are also identical.
a6e375a6
MW
1936
1937(An earlier version of the Stinson-Wu protocol used a cofactor
1938exponentiation: if we set $f = (p - 1)/q$, then we use $\psi = \alpha^{rf} =
1939\rho^{af} = \gamma^{afr}$. This is more efficient in typical elliptic curve
1940subgroups, since the cofactor of such subgroups is usually small: indeed,
1941\cite{SEC1} recommends rejecting groups with cofactor $f > 4$. However, in
1942the Schnorr groups used by Stinson and Wu, the cofactor is much larger than
1943$q$, and their new variant is more efficient.)
1944
1945We note that the zero-knowledge property of the Stinson-Wu protocol requires
1946the Diffie-Hellman knowledge of exponent assumption (KEA). Very briefly:
1947suppose $A$ is a randomized algorithm which takes as input $X \in G$ and
1948outputs a pair $(r P, r X)$; intuitively, the KEA asserts $A$ must have done
1949this by choosing $r$ somehow and then computing its output from it.
1950Formally, it asserts the existence of an `extractor' algorithm which takes as
1951input the element $X$ and the random coins used by $A$ and outputs $r$ with
1952high probability. This is a very strong assumption, and one which is
1953unnecessary for our protocol, since we can present an \emph{explicit}
1954extractor.
1955
4331bf84
MW
1956The KEA assumption as stated in \cite{cryptoeprint:2006:337} allows the
1957extractor to fail with some negligible probability, over and above the
1958probability that a dishonest verifier managed to guess the correct
1959$h = H(\psi)$ without making this random-oracle query. Not only does our
1960protocol achieve zero-knowledge without the KEA, our extractor is, in this
1961sense, `perfect'.
a6e375a6
MW
1962
1963Our protocol is just as strong as Stinson-Wu under attack from active
1964intruders: see table~\ref{tab:wident-active} for a very brief sketch of the
1965case-analysis which would be the basis of a proof of this.
1966
1967\begin{table}
1968 \begin{tabular}[C]{|*{3}{c|}p{8cm}|}
1969 \hlx{hv[1]}
1970 \multicolumn{2}{|c|}{\textbf{Challenge}} &
1971 \textbf{Response} &
1972 \textbf{Security}
1973 \\ \hlx{v[1]hv}
1974 %% unpleasant hacking to make the R and c columns the same width :-(
1975 \settowidth{\dimen0}{\textbf{Challenge}}%
32609df3 1976 \dimen0=.5\dimen0
a6e375a6
MW
1977 \advance\dimen0by-\tabcolsep
1978 \advance\dimen0by-.5\arrayrulewidth
1979 \hbox to\dimen0{\hfil$R$\hfil}
1980 & $c$ & $Y$ & Nothing to prove. \\ \hlx{v}
1981 $R$ & $c'$ & --- & Prover rejects by lemma~\ref{lem:unique-c};
1982 $Y'$ probably wrong by
1983 theorem~\ref{thm:wident-sound}. \\ \hlx{v}
1984 $R$ & $c$ & $Y'$ & Response is incorrect. \\ \hlx{v}
1985 $R'$ & --- & $Y$ & Response is incorrect. \\ \hlx{v}
1986 $R'$ & $c$ & $Y'$ & Prover rejects with probability $1 - 2^{-\ell_I}$;
1987 $Y'$ probably wrong by
1988 theorem~\ref{thm:wident-sound}. \\ \hlx{v}
1989 $R'$ & $c'$ & $Y'$ & Simulate prover using extractor
1990 (lemma~\ref{lem:dlext}); $Y'$ probably wrong by
1991 theorem~\ref{thm:wident-sound}. \\ \hlx{vh}
1992 \end{tabular}
32609df3 1993
a6e375a6
MW
1994 \caption{Security of $\Wident$ against active intruders (summary)}
1995 \label{tab:wident-active}
1996\end{table}
1997
1998An identity-based analogue of Stinson-Wu can be defined using a bilinear
1999pairing, just as we did in section~\ref{sec:wident-id}. However, to prove
2000the zero-knowledge property, one needs to make a bilinear analogue of the
2001knowledge of exponent assumption.
2002
2003We suspect that a key-exchange protocol like ours can be constructed using
2004Stinson-Wu rather than the Wrestlers identification scheme. We haven't,
2005however, gone through all the details, since we believe our protocol is just
2006as efficient and is based on much more conservative assumptions.
32609df3 2007\fi
a6e375a6
MW
2008
2009%%%--------------------------------------------------------------------------
2010
2011\section{A simple key-exchange protocol}
2012\label{sec:kx}
2013
2014In this section, we describe a simple key-exchange protocol built out of the
2015identification protocol shown previously.
2016
2017The key-exchange protocol arises from the following observation. If Bob
2018sends a challenge, presumably to Alice, and gets a correct response, then not
2019only did he really send the challenge to Alice but he knows that she received
2020it correctly.
2021
2022So, if Alice and Bob authenticate each other, by the end of it, they should
2023each have chosen a random private value, sent the corresponding public value
2024to the other, and been convinced that it arrived safely.
2025
2026Unfortunately, life isn't quite this kind, and we have to do some more work
2027to make this scheme secure.
2028
2029
2030Our key exchange protocol essentially consists of two parallel instances of
2031the identification protocol. If Alice receives a correct response to her
2032challenge, she will know that Bob received her challenge correctly, and
2033\emph{vice versa}. If we let Alice's challenge be $R_0 = r_0 P$ and Bob's
2034challenge be $R_1 = r_1 P$ then each can compute a shared secret $Z = r_0 R_1
2035= r_0 r_1 P = r_1 R_0$ unknown to an adversary. There are, unfortunately, a
2036few subtleties involved in turning this intuition into a secure key-exchange
2037protocol, which we now describe.
2038
2039
2040\subsection{Overview}
2041\label{sec:kx-overview}
2042
2043We present a quick, informal description of our basic key-exchange protocol.
2044In addition to our group $G$, we shall also need a secure symmetric
2045encryption scheme $\E = (\kappa, E, D)$, and two secure hash functions
2046$H_I\colon \Bin^{2\ell_G} \to \Bin^{\ell_I}$ and $H_K\colon \Bin^{\ell_G+1}
2047\to \Bin^\kappa$.
2048
2049Suppose that Alice's and Bob's private keys are $a$ and $b$ respectively, and
2050their public keys are $A = a P$ and $B = b P$.
2051\begin{enumerate}
32609df3
MW
2052\item Alice chooses a random index $r \inr \gf{q}$. She computes $R = r P$ and
2053 $c = r \xor H_I(R, r B)$. She sends the pair $(R, c)$ to Bob.
2054\item Similarly, Bob chooses a random $s \inr \gf{q}$. He computes $S = s P$
2055 and $d = s \xor H_I(S, s A)$. He sends $(S, d)$ to Alice.
a6e375a6
MW
2056\item Alice receives $(S', d')$ from Bob. She computes $s' = d' \xor H_I(S',
2057 a S')$, and verifies that $S' = s' P$. If so, she computes $K_A = H_K(0
2058 \cat r S')$, and sends $R, E_{K_A}(a S')$ to Bob.
2059\item Similarly, Bob receives $(R', c')$ from Alice. He verifies that $R' =
2060 \bigl( c' \xor H_I(R', b R') \bigr) P$. If so, he computes $K_B = H_K(0
2061 \cat s R')$ and sends $S, E_{K_B}(b R')$ to Alice.
2062\item Alice receives a pair $(S'', \chi_B)$ from Bob. She checks that
2063 $S'' = S'$, decrypts $\chi_B$, and checks that $D_{K_A}(\chi_B) = r B$. If
2064 so, she uses $H_K(1 \cat r S')$ as her shared secret.
2065\item Similarly, Bob receives $(R'', \chi_A)$ from Alice, and checks $R'' =
2066 R'$ and $D_{K_B}(\chi_A) = s A$. If so, he uses $H_K(1 \cat s R')$ as his
2067 shared secret.
2068\end{enumerate}
2069This is the Wrestlers Key Exchange protocol, $\Wkx^{G, \E}$ (again, we omit
2070the superscripts when referring to the general protocol, or when confusion is
2071unlikely). A diagrammatic summary of the protocol is shown in
2072figure~\ref{fig:wkx}.
2073
2074\begin{figure}
2075 \begin{description}
32609df3 2076 \item[Setup] Group $G = \langle P \rangle$; $\#G = q$ is prime.
a6e375a6
MW
2077 $H_I(\cdot, \cdot)$ and $H_K(\cdot)$ are secure hashes. $\E = (\kappa,
2078 E, D)$ is an IND-CCA2 symmetric encryption scheme.
2079 \item[Parties] $U_i$ for $0 \le i < n$.
32609df3 2080 \item[Private keys] $x_i \inr \gf{q}$.
a6e375a6
MW
2081 \item[Public keys] $X_i = x_i P$.
2082 \end{description}
32609df3
MW
2083 \begin{protocol}
2084 $r_i \getsr I$; $R_i \gets r_i P$; &
2085 $r_j \getsr I$; $R_j \gets r_j P$; \\
2086 $c_i \gets r_i \xor H_I(R_i, r_i X_j)$; &
2087 $c_j \gets r_j \xor H_I(R_j, r_j X_i)$; \\
2088 \send{->}{(R_i, c_i)}
2089 \send{<-}{(R_j, c_j)}
2090 Check $R_j = \bigl(c_j \xor H_I(R_j, x_i R_j)\bigr) P$; &
2091 Check $R_i = \bigl(c_i \xor H_I(R_i, x_j R_i)\bigr) P$; \\
2092 $Z \gets r_i R_j$; $(K_0, K_1) \gets H_K(Z)$; &
2093 $Z \gets r_j R_i$; $(K_0, K_1) \gets H_K(Z)$; \\
2094 $\chi_i \gets E_{K_0}(x_i R_j)$; &
2095 $\chi_j \gets E_{K_0}(x_j R_i)$; \\
2096 \send{->}{(R_i, \chi_i)}
2097 \send{<-}{(R_j, \chi_j)}
2098 Check $D_{K_0}(\chi_j) = r_i X_j$; &
2099 Check $D_{K_0}(\chi_i) = r_j X_i$; \\
2100 Shared key is $K_1$. & Shared key is $K_1$.
2101 \end{protocol}
a6e375a6
MW
2102
2103 \caption{Summary of the Wrestlers Key Exchange protocol, $\Wkx$}
2104 \label{fig:wkx}
2105\end{figure}
2106
2107Assume, for the moment, that Alice and Bob's messages are relayed honestly.
2108Then:
2109\begin{itemize}
2110\item $a S' = a S = a (s P) = s (a P) = s A$, so $s' = d' \xor H_I(S', a S') =
2111 d \xor H_I(S, s A) = s$, and $S' = S = s P = s' P$, and therefore Alice
2112 responds to Bob's message;
2113\item similarly $b R' = r B$, so $r' = r$ and $R' = r' P$, and therefore Bob
2114 responds to Alice's message;
2115\item $b R' = b R = b (r P) = r (b P) = r B$, and $a S' = a S = a (s P) = s
2116 (a P) = s A$, and therefore both parties compute their responses correctly;
32609df3 2117 and
a6e375a6
MW
2118\item $r S' = r S = r (s P) = s (r P) = s R = s R'$, so $K_A = K_B$, and
2119 therefore they can decrypt each others' responses, and agree the same
2120 shared secret.
2121\end{itemize}
2122This shows that the protocol is basically valid, but says little about its
2123security. The remainder of this section will describe our protocol in more
2124formal detail, and prove its security in a model with multiple parties and an
2125adversary who controls the network.
2126
2127Observe that the protocol as we've presented it here is \emph{symmetrical}.
2128There's no notion of `initiator' or `responder'. There are a total of four
2129messages which must be sent before both parties accept. However, this can be
2130reduced to three by breaking the symmetry of the protocol and combining one
2131or other party's challenge and response messages. We choose to analyse the
2132symmetrical version, since to do so, it suffices to consider only the two
2133different kinds of messages. Since our security model allows the messages to
2134be adversarially delayed and reordered, it is easy to show that the security
2135of an optimized, asymmetrical protocol is no worse than the symmetrical
2136version we present here.
2137
2138
2139\subsection{Security model and security definition}
2140\label{sec:um}
2141
2142Our model is very similar to that of Canetti and Krawczyk
dff0fad2 2143\cite{Canetti:2001:AKE}, though we have modified it in two ways.
a6e375a6
MW
2144\begin{enumerate}
2145\item We allow all the participants (including the adversary) in the protocol
2146 access to the various random oracles required to implement it.
2147\item Since we want to analyse a specific, practical scheme, asymptotic
2148 results are useless. We measure the adversary's resource usage carefully,
2149 and produce a quantitative bound on the adversary's advantage in the
2150 SK-security game.
2151\end{enumerate}
2152
32609df3
MW
2153\ifshort
2154
2155Readers interested in the details of the model should see Canetti and
2156Krawczyk's paper \cite{Canetti:2001:AKE}, or the full version of this paper.
2157
2158\else
2159
a6e375a6
MW
2160\subsubsection{Overview}
2161We briefly describe our modified model, pointing out the changes we have
2162made, and how they apply to our protocol. Much of Canetti and Krawczyk's
2163model (for example, the local and global outputs) is useful for proving more
2164general security properties such as demonstrating that SK-security suffices
2165for constructing secure channels, and we shall not concern ourselves with
2166such details. Other parts deal with issues such as security parameters and
2167ensuring that all the computation is polynomially bounded, which are
2168irrelevant since we are dealing with a single concrete protocol rather than a
2169family of them.
2170
2171The entities in the model are the \emph{adversary}~$A$, and a (fixed) number
2172of \emph{parties}~$P_i$, for $0 \le i < n$. If the protocol under
2173consideration makes use of random oracles, then all the participants -- the
2174adversary and the parties -- are all allowed access to the random oracles.
2175
2176The parties and the adversary play a `game'. At the beginning of the game,
2177the participants are given some inputs computed by a randomized
2178\emph{initialization procedure}~$\id{init}$. This produces as output a pair
2179$(i_U, \mathbf{i})$; the value $i_U$ is the \emph{global input}, and is given
2180to all the participants including the adversary. The vector $\mathbf{i}$ has
2181$n$ components, and party $P_i$ is given $(i_U, \mathbf{i}[i])$ as input.
2182
2183\subsubsection{Sessions}
2184Parties don't act directly. Instead, each party runs a number of
32609df3
MW
2185\emph{sessions}. A session is represented by a triple $S = (P_i, P_j, s)$,
2186where $i, j \in \Nupto{n}$ identify the owning party and a \emph{partner},
2187and $s \in \Bin^{\ell_S}$ is a \emph{session-id}. (The original model
2188includes a r\^ole, for distinguishing between initiators and responders. Our
2189protocol is symmetrical, so this distinction isn't useful.) If $P_i$ runs a
2190session $S = (P_i, P_j, s)$ and $P_j$ runs a session $S' = (P_j, P_i, s)$
2191then we say that $S$ and $S'$ are \emph{matching}, and that $P_j$ is $P_i$'s
a6e375a6
MW
2192\emph{partner} for the session.
2193
2194At most one participant in the game is \emph{active} at any given time.
2195Initially the adversary is active. The adversary may \emph{activate} a
2196session in one of two ways.
2197\begin{enumerate}
2198\item It may \emph{create a session} of a party~$P_i$, by selecting a
2199 session-id~$s \in \Bin^{\ell_S}$ and a partner $j$. There is no
2200 requirement that $P_j$ ever have a matching session. However, all sessions
2201 of a party must be distinct, i.e., sessions with the same partner must have
2202 different session-ids.
2203\item It may \emph{deliver a message}~$\mu \in \Bin^*$, from party~$P_j$, to
2204 an existing session~$S = (P_i, P_j, s)$. There is no requirement that any
2205 party previously sent $\mu$: the adversary is free to make up messages as
2206 it sees fit.
2207\end{enumerate}
2208The adversary becomes inactive, and the session becomes active. The session
2209performs some computation, according to its protocol, and may request a
2210message~$\mu$ be delivered to the matching session running in its partner
2211(which may not exist). The session may also \emph{terminate}. In the case
2212we are interested in, of key-exchange protocols, a session~$S = (P_i, P_j,
2213s)$ may terminate in one of two ways:
2214\begin{enumerate}
2215\item it may \emph{complete}, outputting $(i, j, s, K)$, for some
2216 \emph{session key}~$K$, or
2217\item it may \emph{abort}, outputting $(i, j, s, \bot)$.
2218\end{enumerate}
2219Once it has performed these actions, the session deactivates and the
2220adversary becomes active again. The adversary is given the message~$\mu$, if
2221any, and informed of whether the session completed or aborted, but, in the
2222case of completion, not of the value of the key~$K$. A session is
2223\emph{running} if it has been created and has not yet terminated.
2224
2225\subsubsection{Other adversarial actions}
2226As well as activating sessions, the adversary has other capabilities, as
2227follows.
2228\begin{itemize}
2229\item It may \emph{expire} any session~$S$, causing the owning party to
2230 `forget' the session key output by that session.
2231\item It may \emph{corrupt} any party~$P_i$, at will: the adversary learns
2232 the entire state of the corrupted party, including its initial
2233 input~$\mathbf{i}[i]$, the state of any sessions it was running at the
2234 time, and the session keys of any completed but unexpired sessions. Once
2235 corrupted, a party can no longer be activated. Of course, the adversary
2236 can continue to send messages allegedly from the corrupted party.
2237\item It may \emph{reveal the state} of a running session~$S$, learning any
2238 interesting values specific to that session, but \emph{not} the owning
2239 party's long-term secrets.
2240\item It may \emph{reveal the session-key} of a completed session~$S$.
2241\item It may elect to be \emph{challenged} with a completed session~$S$,
2242 provided it is unexposed. Challenge sessions form part of the security
2242 notion for
2243 key-exchange protocols. See below for more details.
2244\end{itemize}
2245We say that a session $S = (P_i, P_j, s)$ is \emph{locally exposed} if
2246\begin{itemize}
2247\item it has had its state revealed,
2248\item it has had its session-key revealed, or
2249\item $P_i$ has been corrupted, and $S$ had not been expired when this
2250 happened.
2251\end{itemize}
2252A session is \emph{exposed} if it is locally exposed, or if its matching
2253session exists and has been locally exposed.
2254
2255At the beginning of the game, a bit $b^*$ is chosen at random. The adversary
2256may choose to be \emph{challenged} with any completed, unexposed
2257session;\footnote{%
2258 The original Canetti-Krawczyk definition restricts the adversary to a
2259 single challenge session, but our proof works independent of the number of
2260 challenge sessions, so we get a stronger result by relaxing the requirement
2261 here.}
2262the adversary is then given either the session's key -- if $b^* = 1$ -- or a
2263string chosen at random and independently of the game so far from a
2264protocol-specific distribution -- if $b^* = 0$. At the end of the game, the
2265adversary outputs a single bit~$b$.
2266
2267\subsubsection{SK-security}
2268We've now described the game; it is time to explain the adversary's goal in
2269it. The adversary \emph{wins} the game if either
2270\begin{enumerate}
2271\item two unexposed, matching sessions complete, but output different
2272 keys,\footnote{%
2273 The original Canetti-Krawczyk definition differs slightly here. It
2274 requires that `if two \emph{uncorrupted} parties complete matching
2275 sessions then they both output the same key' [original emphasis]. This
2276 can't be taken at face value, since none of the protocols they claim to
2277 be secure actually meet this requirement: they meet only the weaker
2278 requirement that parties completing matching sessions output different
2279 keys with negligible probability. We assume here that this is what they
2280 meant.}
2281 or
2282\item the adversary correctly guesses the hidden bit~$b^*$.
2283\end{enumerate}
2284More formally, we make the following definition.
32609df3 2285\fi
a6e375a6
MW
2286\begin{definition}[SK-security]
2287 \label{def:sk}
2288 Let $\Pi^{H_0(\cdot), H_1(\cdot), \ldots}$ be a key-exchange protocol
2289 which makes use of random oracles $H_0(\cdot)$, $H_1(\cdot)$, \dots, and
32609df3
MW
2290 let $A$ be an adversary playing the game described \ifshort in
2291 \cite{Canetti:2001:AKE}\else previously\fi, where $n$
a6e375a6
MW
2292 parties run the protocol~$\Pi$. Let $V$ be the event that any pair of
2293 matching, unexposed sessions completed, but output different session keys.
2294 Let $W$ be the event that the adversary's output bit matches the game's
2295 hidden bit~$b^*$. We define the adversary's \emph{advantage against the
2296 SK-security of the protocol~$\Pi$} to be
2297 \[ \Adv{sk}{\Pi}(A, n) = \max(\Pr[V], 2\Pr[W] - 1). \]
2298 Furthermore, we define the \emph{SK insecurity function of the
2299 protocol~$\Pi$} to be
2300 \[ \InSec{sk}(\Pi; t, n, q_S, q_M, q_{H_0}, q_{H_1}, \dots) =
2301 \max_A \Adv{sk}{\Pi}(A, n)
2302 \]
2303 where the maximum is taken over all adversaries~$A$ with total running
2304 time~$t$ (not including time taken by the parties), which create at most $q_S$
2305 sessions, deliver at most $q_M$~messages, and (if applicable) make at most
2306 $q_{H_i}$ random-oracle queries to each random oracle $H_i(\cdot)$.
2307\end{definition}
2308
2309
2310\subsection{Security}
2311
2312In order to analyse our protocol $\Wkx^{G, \E}$ properly, we must describe
32609df3 2313exactly how it fits into our formal model.
a6e375a6
MW
2314
2315\subsubsection{Sessions and session-ids}
2316Our formal model introduced the concept of sessions, which the informal
2317description of section~\ref{sec:kx-overview} neglected to do. (One could
2318argue that we described a single session only.) As we shall show in
2319section~\ref{sec:kx-insecure}, our protocol is \emph{insecure} unless we
2320carefully modify it to distinguish between multiple sessions.
2321
2322In the model, distinct key-exchange sessions are given distinct partners and
2323session-ids. In order to prevent sessions interfering with each other, we
2324shall make explicit use of the session-ids.
2325
2326Suppose the session-ids are $\ell_S$-bit strings. We expand the domain of
2327the random oracle $H_I$ so that it's now
2328\[ H_I\colon G \times \Bin^{\ell_S} \times G \times G \to \Bin^{\ell_I}. \]
2329
2330\subsubsection{Messages}
2331We split the messages of our protocol into two parts: a \emph{type}~$\tau$ and
2332a \emph{body}~$\mu$. We assume some convenient, unambiguous encoding of
2333pairs $(\tau, \mu)$ as bit-strings. For readability, we present message
2334types as text strings, e.g., `\cookie{challenge}', though in practice one
2335could use numerical codes instead.
2336
2337The message body itself may be a tuple of values, which, again, we assume are
2338encoded as bit-strings in some convenient and unambiguous fashion. We shall
2339abuse the notation for the sake of readability by dropping a layer of nesting
2340in this case: for example, we write $(\cookie{hello}, x, y, z)$ rather than
2341$\bigl(\cookie{hello}, (x, y, z)\bigr)$.
2342
2343\subsubsection{The protocol}
2344Our protocol is represented by three functions, shown in
2345figure~\ref{fig:wkx-formal}.
2346\begin{itemize}
2347\item $\id{init}(n)$ is the initialization function, as described in
2348 section~\ref{sec:um}. It outputs a pair $(\mathbf{p}, \mathbf{i})$, where
2349 $\mathbf{i}[i]$ is the private key of party~$P_i$ and $\mathbf{p}[i]$ is
2350 the corresponding public key. Only $P_i$ is given $\mathbf{i}[i]$, whereas
2351 all parties and the adversary are given $\mathbf{p}$.
2352\item $\id{new-session}^{H_I(\cdot, \cdot, \cdot, \cdot), H_K(\cdot)}
2353 (\mathbf{p}, x, i, j, s)$ is the new-session function. This is executed by
2354 party~$P_i$ when the adversary decides to create a new session~$S = (P_i,
2355 P_j, s)$. It is also given the relevant outputs of $\id{init}$, and
2356 allowed access to the random oracles $H_I$ and $H_K$.
2357\item $\id{message}^{H_I(\cdot, \cdot, \cdot, \cdot), H_K(\cdot)} (\tau,
2358 \mu)$ is the incoming-message function. This is executed by a session when
2359 the adversary decides to deliver a message $(\tau, \mu)$ to it. It makes
2360 use of the subsidiary functions $\id{msg-challenge}$ and
2361 $\id{msg-response}$ to handle the messages.
2362\end{itemize}
2363We observe that the protocol never aborts. We could make it do so if it
2364receives an invalid message, but we prefer to ignore invalid messages for the
2365sake of robustness.\footnote{%
2366 Note that this protocol would therefore require modification before it was
4331bf84
MW
2367 acceptable in the simulation-based model of \cite{cryptoeprint:1999:012}.
2368 There it is required that a key-exchange protocol terminate after a
a6e375a6
MW
2369 polynomially-bounded number of messages are delivered to it.}
2370
2371\begin{figure}
2372 \begin{program}
2373 Function $\id{init}(n)$: \+ \\
2374 \FOR $i \in \Nupto{n}$ \DO \\ \ind
32609df3 2375 $x \getsr \gf{q}$; \\
a6e375a6
MW
2376 $\mathbf{i}[i] \gets x$; \\
2377 $\mathbf{p}[i] \gets x P$; \- \\
2378 \RETURN $(\mathbf{p}, \mathbf{i})$;
2379 \- \\[\medskipamount]
2380 Function $\id{new-session}^{H_I(\cdot, \cdot, \cdot, \cdot), H_K(\cdot)}
2381 (\mathbf{p}, x, i, j, s)$: \+ \\
2382 $X \gets \mathbf{p}[i]$;
2383 $X' \gets \mathbf{p}[j]$;
2384 $C \gets \emptyset$; \\
32609df3 2385 $r \getsr \gf{q}$;
a6e375a6
MW
2386 $R \gets r P$;
2387 $Y \gets r X'$; \\
2388 $h \gets H_I(X, s, R, Y)$;
2389 $c \gets r \xor h$; \\
2390 \SEND $(\cookie{challenge}, R, c)$;
2391 \- \\[\medskipamount]
2392 Function $\id{message}^{H_I(\cdot, \cdot, \cdot, \cdot), H_K(\cdot)}
2393 (\tau, \mu)$: \+ \\
2394 \IF $\tau = \cookie{challenge}$ \THEN $\id{msg-challenge}(\mu)$; \\
2395 \ELSE \IF $\tau = \cookie{response}$ \THEN $\id{msg-response}(\mu)$;
2396 \next
2397 Function $\id{msg-challenge}(\mu)$: \+ \\
2398 $(R', c') \gets \mu$; \\
2399 $Y' \gets x R'$; \\
2400 $h' \gets H_I(X', s, R', Y')$; \\
2401 $r' \gets c' \xor h'$; \\
2402 \IF $R' \ne r' P$ \THEN \RETURN; \\
2403 $C \gets C \cup \{R\}$; \\
2404 $Z \gets r R'$; \\
2405 $(K_0, K_1) \gets H_K(Z)$; \\
2406 $\chi \gets E_{K_0}(Y')$; \\
2407 \SEND $(\cookie{response}, R, \chi)$;
2408 \- \\[\medskipamount]
2409 Function $\id{msg-response}(\mu)$: \+ \\
2410 $(R', \chi') \gets \mu$; \\
2411 \IF $R' \notin C$ \THEN \RETURN; \\
2412 $Z \gets r R'$; \\
2413 $(K_0, K_1) \gets H_K(Z)$; \\
2414 $Y' \gets D_{K_0}(\chi')$; \\
2415 \IF $Y' \ne Y$ \THEN \RETURN; \\
2416 \OUTPUT $K_1$;
2417 \STOP;
2418 \end{program}
32609df3 2419
a6e375a6
MW
2420 \caption{Formalization of $\Wkx$}
2421 \label{fig:wkx-formal}
2422\end{figure}
2423
2424\subsubsection{Session states}
2425We must specify what the adversary obtains when it chooses to reveal a
2426session's state. Given the program in figure~\ref{fig:wkx-formal}, we can
2427see that the session state consists of the variables $(x, X, X', r, R, Y,
2428C)$.
2429
2430However, $x$ is the owning party's long-term secret, and it seems
2431unreasonable to disclose this to an adversary who stops short of total
2432corruption.
2433
2434The public keys $X$ and $X'$ are part of the adversary's input anyway, so
2435revealing them doesn't help. Similarly, the set $C$ of valid challenges
2436could have been computed easily by the adversary, since a group element $R'
2437\in C$ if and only if the session $S$ responded to some message
2438$(\cookie{challenge}, R', c')$.
2439
2440The value $R = r P$ is easily computed given $r$, and besides is sent in the
2441clear in the session's first message. The expected response $Y = r X'$ is
2442also easily computed from $r$. The converse is also true, since $r$ can be
2443recovered from $R$ and $c$ in the session's challenge message and the value
2444$Y$. Besides, $r$ is necessary for computing $Z$ in response to incoming
2445challenges.
2446
2447We conclude that the only `interesting' session state is $r$.
2448
2449\subsubsection{Security}
2450Having formally presented the protocol, we can now state our main theorem
32609df3
MW
2451about its security. The proof is given in \ifshort the full version of the
2452paper\else appendix~\ref{sec:sk-proof}\fi.
a6e375a6
MW
2453\begin{theorem}[SK-security of $\Wkx$]
2454 \label{thm:sk}
2455 Let $G$ be a cyclic group. Let $\E = (\kappa, E, D)$ be a symmetric
2456 encryption scheme. Then
2457 \begin{spliteqn*}
2458 \InSec{sk}(\Wkx^{G, \E}; t, n, q_S, q_M, q_I, q_K) \le
dff0fad2
MW
2459 2 q_S \bigl( \InSec{ind-cca}(\E; t', q_M, q_M) + {} \\
2460 \InSec{mcdh}(G; t', q_K) +
2461 n \,\InSec{mcdh}(G; t', q_M + q_I) \bigr) +
32609df3 2462 \frac{n (n - 1)}{q} +
dff0fad2 2463 \frac{2 q_M}{2^{\ell_I}}.
a6e375a6
MW
2464 \end{spliteqn*}
2465 where $t' = t + O(n) + O(q_S) + O(q_M q_I) + O(q_K)$.
2466\end{theorem}
2467
2468
32609df3 2469\ifshort\else
a6e375a6
MW
2470\subsection{Insecure protocol variants}
2471\label{sec:kx-insecure}
2472
2473It's important to feed the session-id and verifier's public key to the random
2474oracle $H_I$ when constructing the check-value~$c$. Without these, the
2475protocol is vulnerable to attack. In this section, we consider some variants
2476of our protocol which hash less information, and show attacks against them.
2477
2478To simplify the presentation, we shall consider Alice and Bob, and a third
2479character Carol. We shall be attacking a pair of matching sessions $A$
2480and~$B$, run by Alice and Bob respectively. Let Alice and Bob's private keys
2481be $x_A$ and~$x_B$, and let their public keys be $X_A = x_A P$ and $X_B = x_B
2482P$ respectively.
2483
2484\subsubsection{Protocol diagram notation}
2485In order to keep the presentation as clear as possible, we use a simple
2486diagrammatic notation for describing protocol runs. A line of the form
2487\protocolrun{\textit{action} \ar[r] & \PRsession{S} & \textit{result}}
2488states that the adversary performs the given \textit{action} on session~$S$,
2489with the stated \textit{result}. The \textit{action} may be
2490\begin{itemize}
2491\item \textsf{Create session $(P_i, P_j, s)$}: the session is created,
2492 running in party~$P_i$, with partner~$P_j$ and session-id~$s$.
2493\item \textsf{Receive $\mu$}: the session is activated with an incoming message~$\mu$.
2494\item \textsf{Session-state reveal}: The adversary requests the session's
2495 internal state.
2496\end{itemize}
2497The \textit{result} may be
2498\begin{itemize}
2499\item \textsf{Send $\mu'$}: the session requests the delivery of the
2500 message~$\mu'$.
2501\item \textsf{Complete: $K$}: the session completes, outputting the key~$K$.
2502\item \textsf{State: $\sigma$}: the session's state is revealed to
2503 be~$\sigma$.
2504\item \textsf{(Ignore)}: the result of the action is unimportant.
2505\end{itemize}
2506
2507\subsubsection{Omitting the session-id}
2508Consider a protocol variant where session $S$ sets $h_S = H_I(X_N, R_S,
2509Y_S)$, where $N$ is the session's partner. That is, we've omitted the
2510session-id from the hash. An adversary can cross over two sessions between
2511Alice and Bob. Here's how.
2512
2513The attack assumes that Alice and Bob set up two pairs of matching sessions
2514with each other, thus.
2515\protocolrun{
2516 \PRcreate{Alice}{Bob}{s} & \PRsession{A} &
2517 \PRsend{challenge}{R_A, c_A} \\
2518 \PRcreate{Bob}{Alice}{s} & \PRsession{B} &
2519 \PRsend{challenge}{R_B, c_B} \\
2520 \PRcreate{Alice}{Bob}{s'} & \PRsession{A'} &
2521 \PRsend{challenge}{R_{A'}, c_{A'}} \\
2522 \PRcreate{Bob}{Alice}{s'} & \PRsession{B'} &
2523 \PRsend{challenge}{R_{B'}, c_{B'}}
2524}
2525Observe that the session pairs use distinct session-ids $s \ne s'$, so this
2526is allowed. Now the adversary crosses over the challenges, using the second
2527pair of sessions to provide responses to the challenges issued by the first
2528pair. By revealing the state in the second pair of sessions, the adversary
2529can work out the (probably different) session keys accepted by the first
2530pair.
2531\protocolrun{
2532 \PRreceive{challenge}{R_B, c_B} & \PRsession{A'} &
2533 \PRsend{response}{R_{A'}, E_{K_{A'B,0}}(x_A R_B)} \\
2534 \PRreceive{challenge}{R_A, c_A} & \PRsession{B'} &
2535 \PRsend{response}{R_{B'}, E_{K_{B'A,0}}(x_B R_A)} \\
2536 \PRreceive{challenge}{R_{A'}, c_{A'}} & \PRsession{A} & \PRignore \\
2537 \PRreceive{challenge}{R_{B'}, c_{B'}} & \PRsession{B} & \PRignore \\
2538 \PRreveal & \PRsession{A'} & r_{A'} \\
2539 \PRreveal & \PRsession{B'} & r_{B'} \\
2540 \PRreceive{response}{R_{B'}, E_{K_{B'A,0}}(x_B R_A)} & \PRsession{A} &
2541 \PRcomplete{K_{AB',1}} \\
2542 \PRreceive{response}{R_{A'}, E_{K_{A'B,0}}(x_A R_B)} & \PRsession{B} &
2543 \PRcomplete{K_{BA',1}} \\
2544}
2545The adversary can now compute $K_{AB'} = H_K(r_{B'} R_A)$ and $K_{B'A} =
2546H_K(r_{A'} R_B)$. Safely in possession of both keys, the adversary can now
2547read and impersonate freely.
2548
2549\subsubsection{Omitting the partner's public key}
2550Now consider a protocol variant where session $S$ sets $h_S = H_I(s, R_S,
2551Y_S)$, where $s$ is the session-id. An adversary can use sessions with
2552Carol to attack a session between Alice and Bob. Here's how the sessions are
2553set up.
2554\protocolrun{
2555 \PRcreate{Alice}{Bob}{s} & \PRsession{A} &
2556 \PRsend{challenge}{R_A, c_A} \\
2557 \PRcreate{Bob}{Alice}{s} & \PRsession{B} &
2558 \PRsend{challenge}{R_B, c_B} \\
2559 \PRcreate{Alice}{Carol}{s} & \PRsession{A'} &
2560 \PRsend{challenge}{R_{A'}, c_{A'}} \\
2561 \PRcreate{Bob}{Carol}{s} & \PRsession{B'} &
2562 \PRsend{challenge}{R_{B'}, c_{B'}}
2563}
2564Although each of Alice and Bob have two sessions with session-id~$s$, this is
2565allowed, since they are with different partners. The rest of the attack in
2566fact proceeds identically to the previous case.
32609df3 2567\fi
a6e375a6
MW
2568
2569\subsection{Deniability}
2570\label{sec:denial}
2571
2572We have claimed that the Wrestlers key-exchange protocol is \emph{deniable}.
2573In this section, we define what we mean, explain the limits of the
2574deniablility of the protocol as currently presented, fix the protocol with an
2575additional pass (which can be collapsed to a single message flow by breaking
2576the protocol's symmetry), and prove the deniability of the resulting
2577protocol.
2578
2579Our notion of deniability is taken from Di~Raimondo, Gennaro and Krawczyk
4331bf84 2580\cite{cryptoeprint:2006:280}, except that, as usual, we opt for a concrete
a6e375a6
MW
2581security approach.
2582
2583\subsubsection{Discussion}
2584Our definition for deniability is that, for any adversary interacting with
2585participants in the protocol, a simulator exists which can compute the same
2586things as the adversary. In particular, since an adversary could output a
2587transcript of the interactions between itself and the parties, it would
2588follow that a simulator could do this too. If the simulator is effective,
2589its output is indistinguishable from that of the real adversary, and hence no
2590`judge' (distinguisher) should be persuaded by evidence presented by someone
2591who claims to have witnessed or participated in an interaction.
2592
2593We work again in the model described in~\ref{sec:um}.  That is, our adversary
2594has complete control over the ordering and delivery of all messages. The
2595adversary is also able, at any time, to reveal the state of any session.
2596However, deniability is obviously impossible against an adversary who can
2597\emph{corrupt} other parties, since simulating such an adversary's actions
2598would necessarily require the simulator to compute private keys corresponding
2599to the known public keys, and this is (we believe) difficult, because an
2600efficient algorithm for doing this could easily attack our protocol, which we
2601already proved secure. Therefore, we forbid the adversary from corrupting
2602parties.
2603
2604In order to allow the adversary to participate in the protocol, rather than
2605merely observing it, we need to give it one or more private keys. We could
2606modify the initialization function \id{init} from figure~\ref{fig:wkx-formal}
2607to give the adversary a private key, but there's an easier way: we can just
2608give the adversary a number of private keys in its auxiliary input.
2609
2610\subsubsection{Definitions}
2611Let $\Pi$ be a key-exchange protocol, in the model described in
2612section~\ref{sec:um}. We use the simulation framework of
2613section~\ref{sec:sim}. We define the initialization function $I_\Pi$ to be
2614the initialization function of $\Pi$, as before, and the corresponding
2615world~$W_\Pi(\iota, \sigma, \tau, \mu)$ is a fairly straightforward mapping
2616of the adversary's possible actions to the simulation model:
2617\begin{itemize}
2618\item The invocation $\cookie{new-session}$ with $\mu = (i, j, s)$ creates a
2619 new session on party~$P_i$, with partner~$P_j$ and session-id~$s$. The
2620 reply $\rho = (\delta, m)$ is a \emph{decision} $\delta \in
2621 \{\cookie{continue}, \cookie{abort}, \cookie{complete}\}$ and an output
2622 message $m \in \Bin^* \cup \{\bot\}$. If $m \ne \bot$ then $m$ is a
2623 message to be sent to the matching session (if any).
2624\item The invocation $\cookie{deliver}$ with $\mu = (i, j, s, m)$ delivers
2625 message $m$ to the session $S = (P_i, P_j, s)$. The response $\rho$ is as
2626 for $\cookie{new-session}$ invocations.
2627\item The invocation $\cookie{reveal-session-state}$ with $\mu = (i, j, s)$
2628 reveals to the adversary the state of the running session $S = (P_i, P_j,
2629 s)$. The response $\rho$ is the session's state if $S$ is indeed a running
2630 session, or $\bot$ otherwise.
2631\item The invocation $\cookie{reveal-session-key}$ with $\mu = (i, j, s)$
2632 reveals to the adversary the session-key of the completed session~$S =
2633 (P_i, P_j, s)$. The response $\rho$ is the session key~$K$ if the session
2634 is indeed complete, or $\bot$ otherwise.
2635\end{itemize}
2636There are no invocations corresponding to the adversary actions of corrupting
2637parties (since deniability against a corrupting adversary is impossible, as
2638discussed earlier), or session expiry or challenging (since they are useless
2639in this context).
2640
2641We measure the deniability of a protocol~$\Pi$, using a given simulator~$S$,
2642by the insecurity function $\InSec{sim}(W_\Pi, I_\Pi, S; t_D, t_A,
2643\mathcal{Q}_D, \mathcal{Q}_A, \mathcal{R}, \mathcal{U})$ of
2644definition~\ref{def:sim}. The interaction bounds $\mathcal{R} = (q_S, q_M)$
2645we place on the adversary are on the number ($q_S$) of \cookie{new-session}
2646and ($q_M$) \cookie{deliver} invocations it makes.
2647
2648We shall (informally) say that a protocol~$\Pi$ is deniable if there is a
2649simulator~$S_\Pi$ for which the insecurity function is small for appropriate
2650resource bounds on the adversary and distinguisher.
2651
2652\subsubsection{The current protocol}
2653As it stands, $\Wkx$ isn't deniable, according to our definition, for
2654arbitrary auxiliary inputs. Let's see why.
2655
2656Suppose that Bob is an informant for the secret police, and wants to convince
2657a judge that Alice is involved in subversive activities in cyberspace.
2658Unfortunately, Bob's job is difficult, because of the strong zero-knowledge
2659nature of the Wrestlers identification protocol. However, Bob can work with
2660the judge to bring him the evidence necessary to convict Alice. Here's how.
2661
2662Alice's public key is $A$, and Bob's public key is $B$. The judge chooses
32609df3 2663some session-id $s$, and $r \inr \gf{q}$. He computes $R = r P$ and $c =
a6e375a6
MW
2664r \xor H_I(B, s, R, r A)$, and gives Bob the triple $(s, R, c)$, keeping $r$
2665secret. Bob can now persuade Alice to enter into a key-exchange with him,
2666with session-id $s$. He uses $(R, c)$ as his challenge message. When Alice
2667sends back her response $(R', \chi)$ (because Bob's challenge is correctly
2668formed), Bob aborts and shows $(R', \chi)$ to the judge. The judge knows $r$
2669and can therefore decrypt $\chi$ and check the response. If it's wrong, then
2670Bob was cheating and gets demoted -- he can't get the right answer by himself
2671because that would require him to impersonate Alice. If it's right, Alice is
2672really a subversive element and `disappears' in the night.
2673
2674We shall show in theorem~\ref{thm:denial} below that this is basically the
2675only attack against the deniability of the protocol. However, we can do
2676better.
2677
2678\subsubsection{Fixing deniability}
2679We can fix the protocol to remove even the attack discussed above. The
2680change is simple: we feed \emph{both} parties' challenges to the hash
2681function~$H_I$ rather than just the sender's. We use a five-argument hash
2682function (random oracle) $H_I\colon G^2 \times \Bin^{\ell_S} \times G^2 \to
2683\Bin^{\ell_I}$. We introduce a new message pass at the beginning of the
2684protocol: each session simply sends its challenge point $R = r P$ in the
2685clear as a `pre-challenge'. The actual challenge is $R$ and $c = r \xor
2686H_I(R', X, s, R, r X')$, where $R'$ is the pre-challenge of the matching session.
2687
2688By breaking symmetry, we can reduce the communication complexity of this
2689variant to four messages. As before, we analyse the symmetrical version.
2690The extra flow might seem a high price to pay, but we shall see that it has
2691additional benefits beyond deniability.
2692
2693A summary of the new protocol is shown in figure~\ref{fig:wdkx}, and the
2694formal description is shown in figure~\ref{fig:wdkx-formal}.
2695
2696\begin{figure}
2697 \begin{description}
32609df3 2698 \item[Setup] Group $G = \langle P \rangle$; $\#G = q$ is prime.
a6e375a6
MW
2699   $H_I(\cdot, \cdot, \cdot, \cdot, \cdot)$ and $H_K(\cdot)$ are secure
2700 hashes. $\E = (\kappa, E, D)$ is an IND-CCA2 symmetric encryption
2701 scheme.
2702 \item[Parties] $U_i$ for $0 \le i < n$.
32609df3 2703 \item[Private keys] $x_i \inr \gf{q}$.
a6e375a6
MW
2704 \item[Public keys] $X_i = x_i P$.
2705 \end{description}
32609df3
MW
2706
2707 \begin{protocol}
2708 $r_i \getsr I$; $R_i \gets r_i P$; &
2709 $r_j \getsr I$; $R_j \gets r_j P$; \\
2710 \send{->}{R_i}
2711 \send{<-}{R_j}
2712 $c_i \gets r_i \xor H_I(R_j, X_i, s, R_i, r_i X_j)$; &
2713 $c_j \gets r_j \xor H_I(R_i, X_j, s, R_j, r_j X_i)$; \\
2714 \send{->}{(R_i, c_i)}
2715 \send{<-}{(R_j, c_j)}
2716    Check $R_j = \bigl(c_j \xor H_I(R_i, X_j, s, R_j, x_i R_j)\bigr) P$; &
2717    Check $R_i = \bigl(c_i \xor H_I(R_j, X_i, s, R_i, x_j R_i)\bigr) P$; \\
2718 $Z \gets r_i R_j$; $(K_0, K_1) \gets H_K(Z)$; &
2719 $Z \gets r_j R_i$; $(K_0, K_1) \gets H_K(Z)$; \\
2720 $\chi_i \gets E_{K_0}(x_i R_j)$; &
2721 $\chi_j \gets E_{K_0}(x_j R_i)$; \\
2722 \send{->}{(R_i, \chi_i)}
2723 \send{<-}{(R_j, \chi_j)}
2724 Check $D_{K_0}(\chi_j) = r_i X_j$; &
2725 Check $D_{K_0}(\chi_i) = r_j X_i$; \\
2726 Shared key is $K_1$. & Shared key is $K_1$.
2727 \end{protocol}
a6e375a6
MW
2728
2729 \caption{Summary of the Deniable Wrestlers Key Exchange protocol, $\Wdkx$}
2730 \label{fig:wdkx}
2731\end{figure}
2732
2733\begin{figure}
2734 \begin{program}
2735 Function $\id{init}(n)$: \+ \\
2736 \FOR $i \in \Nupto{n}$ \DO \\ \ind
32609df3 2737 $x \getsr \gf{q}$; \\
a6e375a6
MW
2738 $\mathbf{i}[i] \gets x$; \\
2739 $\mathbf{p}[i] \gets x P$; \- \\
2740 \RETURN $(\mathbf{p}, \mathbf{i})$;
2741 \- \\[\medskipamount]
2742 Function $\id{new-session}^{H_I(\cdot, \cdot, \cdot, \cdot, \cdot), H_K(\cdot)}
2743 (\mathbf{p}, x, i, j, s)$: \+ \\
2744 $X \gets \mathbf{p}[i]$;
2745 $X' \gets \mathbf{p}[j]$;
2746 $C \gets \emptyset$; \\
32609df3 2747 $r \getsr \gf{q}$;
a6e375a6
MW
2748 $R \gets r P$;
2749 $Y \gets r X'$; \\
2750    \SEND $(\cookie{pre-challenge}, R)$;
2751 \- \\[\medskipamount]
2752 Function $\id{message}^{H_I(\cdot, \cdot, \cdot, \cdot, \cdot), H_K(\cdot)}
2753 (\tau, \mu)$: \+ \\
2754 \IF $\tau = \cookie{pre-challenge}$ \THEN
2755 $\id{msg-pre-challenge}(\mu)$; \\
2756 \ELSE \IF $\tau = \cookie{challenge}$ \THEN
2757 $\id{msg-challenge}(\mu)$; \\
2758 \ELSE \IF $\tau = \cookie{response}$ \THEN $\id{msg-response}(\mu)$;
2759 \next
2760 Function $\id{msg-pre-challenge}(\mu)$: \+ \\
2761 $R' \gets \mu$; \\
2762    $h \gets H_I(R', X, s, R, Y)$; \\
2763    $c \gets r \xor h$; \\
2764    \SEND $(\cookie{challenge}, R, c)$;
2765 \- \\[\medskipamount]
2766 Function $\id{msg-challenge}(\mu)$: \+ \\
2767 $(R', c') \gets \mu$; \\
2768 $Y' \gets x R'$; \\
2769 $h' \gets H_I(R, X', s, R', Y')$; \\
2770 $r' \gets c' \xor h'$; \\
2771 \IF $R' \ne r' P$ \THEN \RETURN; \\
2772    $C \gets C \cup \{R'\}$; \\
2773 $Z \gets r R'$; \\
2774 $(K_0, K_1) \gets H_K(Z)$; \\
2775 $\chi \gets E_{K_0}(Y')$; \\
2776 \SEND $(\cookie{response}, R, \chi)$;
2777 \- \\[\medskipamount]
2778 Function $\id{msg-response}(\mu)$: \+ \\
2779 $(R', \chi') \gets \mu$; \\
2780 \IF $R' \notin C$ \THEN \RETURN; \\
2781 $Z \gets r R'$; \\
2782 $(K_0, K_1) \gets H_K(Z)$; \\
2783 $Y' \gets D_{K_0}(\chi')$; \\
2784 \IF $Y' \ne Y$ \THEN \RETURN; \\
2785 \OUTPUT $K_1$;
2786 \STOP;
2787 \end{program}
32609df3 2788
a6e375a6
MW
2789 \caption{Deniable key-exchange: formalization of $\Wdkx$}
2790 \label{fig:wdkx-formal}
2791\end{figure}
2792
2793The security of this variant is given by the following theorem, whose proof
32609df3
MW
2794is \ifshort given in the full version of this paper\else in
2795appendix~\ref{sec:sk2-proof}\fi.
a6e375a6
MW
2796\begin{theorem}[SK-security of $\Wdkx$]
2797 \label{thm:sk2}
2798 Let $G$ be a cyclic group. Let $\E = (\kappa, E, D)$ be a symmetric
2799 encryption scheme. Then
2800 \[ \InSec{sk}(\Wdkx^{G, \E}; t, n, q_S, q_M, q_I, q_K) =
2801 \InSec{sk}(\Wkx^{G, \E}; t, n, q_S, q_M, q_I, q_K)
2802 \]
2803\end{theorem}
2804
2805\subsubsection{Deniability of the Wrestlers protocols}
2806In order to quantify the level of deniability our protocols provide, we shall
2807impose a limit on the auxiliary input to the adversary. In particular, we
2808shall use $\mathcal{U}$ of definition~\ref{def:sim} to count the number of
2809\emph{challenges} in the auxiliary input. That is, $\mathcal{U} = n_C$ is
2810the number of tuples $(i, j, s, R', R, c)$ for which there is an $r$ such
2811that $R = r P$ and $c = r \xor H_I(R', X_j, s, R, r X_i)$ (or without the
2812$R'$ for $\Wkx$).
2813
2814With this restriction in place, we can state the following theorem about the
2815deniability of our protocols.
2816\begin{theorem}[Deniability of $\Wkx$ and $\Wdkx$]
2817 \label{thm:denial}
2818  There exist simulators $S_\Wkx$ and $S_\Wdkx$ such that
2819 \[ \InSec{sim}(W_{\Wkx^{G, \E}}, I_{\Wkx^{G, \E}}, S_{\Wkx^{G, \E}};
2820 t_D, t_A, \mathcal{Q}_D, \mathcal{Q}_A, (q_S, q_M), 0) \le
2821 \frac{q_M}{2^{\ell_I}}
2822 \]
2823 and
32609df3
MW
2824 \iffancystyle\[\else\begin{spliteqn*}\fi
2825 \InSec{sim}(W_{\Wdkx^{G, \E}}, I_{\Wdkx^{G, \E}}, S_{\Wdkx^{G, \E}};
2826 t_D, t_A, \mathcal{Q}_D, \mathcal{Q}_A, (q_S, q_M), n_C) \le
2827 \iffancystyle\else\\\fi
2828 \frac{n_C q_S}{\#G} +
a6e375a6 2829 \frac{q_M}{2^{\ell_I}}.
32609df3 2830 \iffancystyle\]\else\end{spliteqn*}\fi
a6e375a6
MW
2831 The running time of the simulators is $O(t_A) + O(\mathcal{Q}_A q_M)$.
2832\end{theorem}
32609df3
MW
2833\begin{longproof}{The proof of this theorem can be found in the full version
2834 of this paper.}
a6e375a6
MW
2835 The simulators $S_\Wkx$ and $S_\Wdkx$ are very similar. We describe both
2836 here. Both are fake-world simulators, working as follows.
2837 \begin{enumerate}
2838 \item Initially, it constructs simulated parties $P_i$, for $0 \le i < n$,
2839 giving each the public key $X_i$ from the common input.
2840 \item Suppose the adversary requests creation of a new session $S = (P_i,
2841 P_j, s)$. Then the simulator creates a new session, including a random
32609df3 2842 value $r_S \inr \gf{q}$, and computes $R_S = r_S P$, and $Y_S = r_S
a6e375a6
MW
2843 X_j$. For $\Wdkx$, it sends the message $(\cookie{pre-challenge}, R_S)$;
2844 for $\Wkx$, it additionally computes $h = H_I(X_i, s, R_S, Y_S)$ and
2845 sends $(\cookie{challenge}, R_S, r_S \xor h)$.
2846 \item Suppose, for $\Wdkx$, the adversary sends a message
2847 $(\cookie{pre-challenge}, R')$ to a session~$S = (P_i, P_j, s)$. The
2848 simulator computes $h = H_I(R', X_i, s, R_S, Y_S)$, and sends
2849 $(\cookie{challenge}, R_S, r_S \xor h)$.
2850 \item Suppose the adversary sends a message $(\cookie{challenge}, R', c')$
2851 to session $S = (P_i, P_j, s)$. The simulator doesn't know $x_i$.
2852 \begin{enumerate}
2853 \item If $R' = R_T$ for some other simulated session $T$, then the
2854 simulator knows $r_T$ such that $R_T = r_T P$. Let $Y' = r_T X_i$.
2855 The simulator computes $h = H_I(X_j, s, R', Y')$ (resp.\ $h = H_I(R_S,
2856 X_j, s, R', Y')$) for $\Wkx$ (resp.\ $\Wdkx$) and checks that $r_T = c'
2857 \xor h$. If not, the simulator discards the message. Otherwise, it
2858 computes $(K_0, K_1) = H_K(r_S R')$, and sends the message
2859    $(\cookie{response}, R_S, E_{K_0}(Y'))$.
2860 \item \label{en:simextract} Otherwise the simulator runs the extractor
2861 $T_\Wident$ on the adversary's history of queries $H_I(X_j, s, R',
2862 \cdot)$ (resp.\ $H_I(R_S, X_j, s, R', \cdot)$) for $\Wkx$ (resp.\
2863 $\Wdkx$). The extractor returns $(r', Y')$. If $Y' = \bot$ then the
2864 simulator ignores the message. Otherwise, the simulator computes
2865    $(K_0, K_1) = H_K(r_S R')$ and sends back $(\cookie{response}, R_S,
2866 E_{K_0}(Y'))$.
2867 \end{enumerate}
2868 \item Suppose the adversary sends a message $(\cookie{response}, R', \chi)$
2869 to session $S = (P_i, P_j, s)$. The simulator computes $(K_0, K_1) =
2870 H_K(r_S R')$, and decrypts $Y' = D_{K_0}(\chi)$. If $Y' \ne Y_S$ then
2871 the simulator discards the message. Otherwise, it makes the simulated
2872 session complete, and outputs key $K_1$.
2873 \item Finally, if the adversary reveals a session's state, the simulator
2874 reveals $r_S$ as required; if the adversary reveals a session-key, the
2875 simulator reveals the $K_1$ it output.
2876 \end{enumerate}
2877 The only point where the simulation fails to be perfect is in
2878 \ref{en:simextract}. Let $R'$ and $c'$ be the values from an incoming
2879 challenge message to session $S = (P_i, P_j, s)$. Let $r'$ be such that
2880 $R' = r' P$ and let $Y' = r' X_i$. If a random-oracle query $H_I(X_j, s,
2881 R', Y')$ (or $H_I(R_S, X_j, s, R', Y')$ for $\Wdkx$) has been issued, then
2882 there are a number of possibilities. Let $h'$ be the result of this query.
2883 \begin{itemize}
2884 \item The adversary made this query. Then the extractor will find it and
2885 return $Y'$ if $c' = h' \xor r'$, or $\bot$ otherwise.
2886 \item Some simulated session $U = (P_{i'}, P_{j'}, s')$ made this query.
2887 But simulated sessions only make $H_I$-queries when constructing
2888 challenges, so $R' = R_U$ for some session~$U$. But the simulator does
2889 something different in that case.
2890 \item In $\Wdkx$, the quadruple $(s, R_S, R', c')$ came from the
2891 adversary's auxiliary input. In this case the simulator must fail. But
2892 $R_S = r_S P$, and $r_S$ was chosen uniformly at random. If there are at
2893 most $n_C$ challenge sets in the auxiliary input then this happens with
32609df3 2894 probability at most $n_C/\#G$ for any given session.
a6e375a6
MW
2895 \end{itemize}
2896 We conclude that the simulator fails with probability
32609df3 2897 \[ \frac{q_M}{2^{\ell_I}} + \frac{q_S n_C}{\#G}. \]
a6e375a6
MW
2898 (Note that we only consider $n_C = 0$ for $\Wkx$.) No adversary can
2899 distinguish the simulator from a real interaction unless the simulator
2900 fails, and the simulator is a fake-world simulator. We therefore apply
2901 proposition~\ref{prop:fakesim}; the theorem follows.
32609df3 2902\end{longproof}
a6e375a6 2903
32609df3 2904\ifshort\else
a6e375a6
MW
2905\subsection{Practical issues}
2906\label{sec:practice}
2907
2908\subsubsection{Denial of service from spoofers}
2909The adversary we considered in~\ref{sec:um} is very powerful. Proving
2910security against such a powerful adversary is good and useful. However,
2911there are other useful security properties we might like against weaker
2912adversaries.
2913
2914Eavesdropping on the Internet is actually nontrivial. One needs control of
2915one of the intermediate routers between two communicating parties. (There
2916are tricks one can play involving subversion of DNS servers, but this is also
2917nontrivial.) However, injecting packets with bogus source addresses is very
2918easy.
2919
4331bf84 2920Layering the protocol over TCP \cite{rfc793} ameliorates this problem because
a6e375a6
MW
2921an adversary needs to guess or eavesdrop in order to obtain the correct
2922sequence numbers for a spoofed packet; but the Wrestlers protocol is
2923sufficiently simple that we'd like to be able to run it over UDP
4331bf84 2924\cite{rfc768}, for which spoofing is trivial.
a6e375a6
MW
2925
2926Therefore, it's possible for anyone on the 'net to send Alice a spurious
2927challenge message $(R, c)$. She will then compute $Y = a R$, recover $r' = c
2928\xor H_I(\ldots, R, Y)$, check that $R = r' P$, and so on.  That's at least two
2929scalar multiplications to respond to a spoofed packet, and even with very
2930efficient group operations, coping with this kind of simple denial-of-service
2931attack might be difficult.
2932
2933A straightforward solution is to use the Deniable variant of the protocol,
2934and require each challenge message to quote the matching session's challenge
2935value $R'$.  That is, upon receiving a $(\cookie{pre-challenge}, R')$, the
2936session sends $(\cookie{challenge}, R', R, c)$. Alice then rejects any
2937\emph{incoming} challenge message which doesn't quote her current challenge
2938value. Now only eavesdroppers can force her to perform expensive
2939computations.
2940
2941Indeed, one need not quote the entire challenge $R'$: it suffices to send
2942some short hard-to-guess hash of it, maybe just the bottom 128 bits or so.
2943
2944This can't reduce security. Consider any adversary attacking this protocol
2945variant. We can construct an adversary which attacks the original protocol
2946just as efficiently. The new adversary attaches fake $R'$ values to
2947challenges output by other parties, and strips them off on delivery,
2948discarding messages with incorrect $R'$ values.
2949
a6e375a6
MW
2950\subsubsection{Key confirmation}
2951Consider an application which uses the Wrestlers protocol to re-exchange keys
2952periodically. The application can be willing to \emph{receive} incoming
2953messages using the new key as soon as the key exchange completes
2954successfully; however, it should refrain from \emph{sending} messages under
2955the new key until it knows that its partner has also completed. The partner
2956may not have received the final response message, and therefore be unwilling
2957to accept a new key; it will therefore (presumably) reject incoming messages
2958under this new key.
2959
2960While key confirmation is unnecessary for \emph{security}, it has
2961\emph{practical} value, since it solves the above problem. If the
2962application sends a \cookie{switch} message when it `completes', it can
2963signal its partner that it is indeed ready to accept messages under the new
dff0fad2
MW
2964key. Our implementation sends $(\cookie{switch-rq}, E_{K_0}(H_S(0, R, R')))$
2965as its switch message; the exact contents aren't important. Our
2966retransmission policy (below) makes use of an additional message
2967\cookie{switch-ok}, which can be defined similarly.
2968
2969It's not hard to show that this doesn't adversely affect the security of the
2970protocol, since the encrypted message is computed only from public values.
2971In the security proof, we modify the generation of \cookie{response}
2972messages, so that the plaintexts are a constant string rather than the true
2973responses, guaranteeing that the messages give no information about the
2974actual response. To show this is unlikely to matter, we present an adversary
2975attacking the encryption scheme by encrypting either genuine responses or
2976fake constant strings. Since the adversary can't distinguish which is being
2977encrypted (by the definition of IND-CCA security,
2978definition~\ref{def:ind-cca}), the change goes unnoticed.  In order to
2979incorporate our switch messages, we need only modify this adversary to
2980implement the modified protocol. This is certainly possible, since the
2981messages contain (hashes of) public values. We omit the details.
2982
d8139f2c
MW
2983However, while the extra message doesn't affect the security of our protocol,
2984it would be annoying if an adversary could forge the switch request message,
2985since this would be a denial of service. In the strong adversarial model,
2986this doesn't matter, since the adversary can deny service anyway, but it's a
2987concern against less powerful adversaries. Most IND-CCA symmetric encryption
2988schemes also provide integrity of plaintexts \cite{Bellare:2000:AER} (e.g.,
2989the encrypt-then-MAC generic composition approach \cite{Bellare:2000:AER,%
2990Krawczyk:2001:OEA}, and the authenticated-encryption modes of
4331bf84 2991\cite{Rogaway:2003:OBC,Bellare:2004:EAX,McGrew:2004:SPG}), so this isn't a
d8139f2c
MW
2992great imposition.
2993
dff0fad2
MW
2994\subsubsection{Optimization and piggybacking}
2995We can optimize the number of messages sent by combining them. Here's one
2996possible collection of combined messages:
2997\begin{description}
2998\item [\cookie{pre-challenge}] $R$
2999\item [\cookie{challenge}] $R'$, $R$, $c = H_I(R', X, s, R, r X') \xor r$
3000\item [\cookie{response}] $R'$, $R$, $c$, $E_{K_0}(x R')$
3001\item [\cookie{switch}] $R'$, $E_{K_0}(x R', H_S(0, R, R'))$
3002\item [\cookie{switch-ok}] $R'$, $E_{K_0}(H_S(1, R, R'))$
3003\end{description}
3004The combination is safe:
3005\begin{itemize}
3006\item the \cookie{switch} and \cookie{switch-ok} messages are safe by the
3007 argument above; and
3008\item the other recombinations can be performed and undone in a `black box'
3009 way, by an appropriately defined SK-security adversary.
3010\end{itemize}
a6e375a6
MW
3011
3012\subsubsection{Unreliable transports}
4331bf84 3013The Internet UDP \cite{rfc768} is a simple, unreliable protocol for
a6e375a6
MW
3014transmitting datagrams. However, it's very efficient, and rather attractive
3015as a transport for datagram-based applications such as virtual private
dff0fad2
MW
3016networks (VPNs). Since UDP is a best-effort rather than a reliable
3017transport, it can occasionally drop packets. Therefore it is necessary for a
3018UDP application to be able to retransmit messages which appear to have been
3019lost.
a6e375a6 3020
dff0fad2
MW
3021We recommend the following simple retransmission policy for running the
3022Wrestlers protocol over UDP.
3023\begin{itemize}
3024\item Initially, send out the \cookie{pre-challenge} message every minute.
3025\item On receipt of a \cookie{pre-challenge} message, send the corresponding
3026 full \cookie{challenge}, but don't retain any state.
3027\item On receipt of a (valid) \cookie{challenge}, record the challenge value
3028 $R'$ in a table, together with $K = (K_0, K_1)$ and the response $Y' = x
3029 R'$. If the table is full, overwrite an existing entry at random. Send
3030 the corresponding \cookie{response} message, and retransmit it every ten
3031 seconds or so.
3032\item On receipt of a (valid) \cookie{response}, discard any other
3033 challenges, and stop sending \cookie{pre-challenge} and \cookie{response}
3034 retransmits. At this point, the basic protocol described above would
3035 \emph{accept}, so the key $K_1$ is known to be good. Send the
3036 \cookie{switch} message, including its response to the (now known-good)
3037 sender's challenge.
3038\item On receipt of a (valid) \cookie{switch}, send back a \cookie{switch-ok}
3039 message and stop retransmissions. It is now safe to start sending messages
3040 under $K_1$.
3041\item On receipt of a (valid) \cookie{switch-ok}, stop retransmissions. It
3042 is now safe to start sending messages under $K_1$.
3043\end{itemize}
a6e375a6
MW
3044
3045\subsubsection{Key reuse}
dff0fad2
MW
3046Suppose our symmetric encryption scheme $\E$ is not only IND-CCA secure
3047(definition~\ref{def:ind-cca}) but also provides integrity of plaintexts
d8139f2c
MW
3048\cite{Bellare:2000:AER} (or, alternatively, is an AEAD scheme
3049\cite{Rogaway:2002:AEA}).  Then we can use it to construct a secure channel,
3050by including message type and sequence number fields in the plaintexts, along
3051with the message body. If we do this, we can actually get away with just the
3052one key $K = H_K(Z)$ rather than both $K_0$ and $K_1$.
dff0fad2
MW
3053
3054To do this, it is essential that the plaintext messages (or additional data)
3055clearly distinguish between the messages sent as part of the key-exchange
3056protocol and messages sent over the `secure channel'. Otherwise, there is a
d8139f2c
MW
3057protocol-interference attack: an adversary can replay key-exchange
3058ciphertexts to insert the corresponding plaintexts into the channel.
dff0fad2
MW
3059
3060We offer a sketch proof of this claim in appendix~\ref{sec:sc-proof}.
32609df3 3061\fi
dff0fad2
MW
3062
3063%%%--------------------------------------------------------------------------
a6e375a6 3064
dff0fad2
MW
3065\section{Conclusions}
3066\label{sec:conc}
3067
3068We have presented new protocols for identification and authenticated
3069key-exchange, and proven their security. We have shown them to be efficient
3070and simple. We have also shown that our key-exchange protocol is deniable.
3071Finally, we have shown how to modify the key-exchange protocol for practical
3072use, and proven that this modification is still secure.
a6e375a6
MW
3073
3074%%%--------------------------------------------------------------------------
3075
3076\section{Acknowledgements}
3077
3078The Wrestlers Protocol is named after the Wrestlers pub in Cambridge where
3079Clive Jones and I worked out the initial design.
3080
3081%%%--------------------------------------------------------------------------
3082
4331bf84
MW
3083\bibliography{%
3084 mdw-crypto,%
3085 eprint,%
3086 focs1990,stoc1990,tissec,jacm,%
3087 lncs1997a,lncs1997b,lncs1998a,lncs2001a,%
3088 cryptography1990,cryptography2000,cryptography2010,cryptography,%
3089 rfc,std}
a6e375a6
MW
3090
3091%%%--------------------------------------------------------------------------
3092
32609df3 3093\ifshort\def\next{\end{document}}\expandafter\next\fi
a6e375a6
MW
3094\appendix
3095\section{Proofs}
3096
3097\subsection{Proof of theorem~\ref{thm:sk}}
3098\label{sec:sk-proof}
3099
3100Before we embark on the proof proper, let us settle on some notation. Let
3101$P_i$ be a party. Then we write $x_i$ for $P_i$'s private key and $X_i = x_i
3102P$ is $P_i$'s public key. Let $S = (P_i, P_j, s)$ be a session. We write
3103$r_S$ for the random value chosen at the start of the session, and $R_S$,
3104$c_S$ etc.\ are the corresponding derived values in that session.
3105
a6e375a6
MW
3106The proof uses a sequence of games. For each game~$\G{i}$, let $V_i$ be the
3107event that some pair of unexposed, matching sessions both complete but output
3108different keys, and let $W_i$ be the event that the adversary's final output
3109equals the game's hidden bit~$b^*$. To save on repetition, let us write
3110\[ \diff{i}{j} = \max(|\Pr[V_i] - \Pr[V_j]|, |\Pr[W_i] - \Pr[W_j]|). \]
3111Obviously,
3112\[ \diff{i}{j} \le \sum_{i\le k<j} \diff{k}{k + 1}. \]
3113
3114Here's a quick overview of the games we use.
3115\begin{itemize}
3116\item $\G0$ is the original SK-security game.
3117\item In $\G1$, we abort the game unless all parties' public keys are
3118 distinct. Since keys are generated at random, parties are unlikely to be
3119 given the same key by accident.
3120\item In $\G2$, we change the way sessions respond to challenge messages, by
3121 using the extractor to fake up answers to most challenges. Since the
3122 extractor is good, the adversary is unlikely to notice.
3123\item In $\G3$, we abort the game if the adversary ever queries $H_K(\cdot)$
3124 on the Diffie-Hellman secret $r_S r_T P$ shared between two unexposed
3125 matching sessions. We show that this is unlikely to happen if the
3126 Diffie-Hellman problem is hard.
3127\item In $\G4$, we abort the game if any unexposed session \emph{accepts} a
3128 response message which wasn't sent by a matching session.
3129\end{itemize}
3130Finally, we show that the adversary has no advantage in $\G4$. The theorem
3131follows.
3132
3133For ease of comprehension, we postpone the detailed proofs of some of the
3134steps until after we conclude the main proof.
3135
3136Let $A$ be a given adversary which runs in time~$t$, creates at most~$q_S$
3137sessions, delivers at most~$q_M$ messages, and makes at most~$q_I$ queries to
3138its $H_I(\cdot, \cdot, \cdot, \cdot)$ oracle and at most~$q_K$ queries to its
3139$H_K(\cdot)$ oracle. Let $\G0$ be the original SK-security game of
3140definition~\ref{def:sk}, played with adversary~$A$.
3141
3142Game~$\G1$ is the same as game~$\G0$ except, if the initialization function
3143reports two parties as having the same public key (i.e., we have $X_i \ne
3144X_j$ where $0 \le i < j < n$), we stop the game immediately and without
3145crediting the adversary with a win. This only happens when the corresponding
3146private keys are equal, i.e., $x_i = x_j$, and since the initialization
3147function chooses private keys uniformly at random, this happens with
32609df3 3148probability at most $\binom{n}{2}/\#G$. Since if this doesn't happen, the
a6e375a6
MW
3149game is identical to $\G0$, we can apply lemma~\ref{lem:shoup}, and see that
3150\begin{equation}
3151 \label{eq:sk-g0-g1}
32609df3 3152 \diff{0}{1} \le \frac{1}{\#G} \binom{n}{2} = \frac{n (n - 1)}{2 \#G}.
a6e375a6
MW
3153\end{equation}
3154In game~$\G1$ and onwards, we can assume that public keys for distinct
dff0fad2
MW
3155parties are themselves distinct. Note that the game now takes at most
3156$O(q_I)$ times longer to process each message delivered by the adversary.
3157This is where the $O(q_I q_M)$ term comes from in the theorem statement.
a6e375a6
MW
3158
3159Game~$\G2$ is the same as game~$\G1$, except that we change the way that we
3160make parties respond to \cookie{challenge} messages $(\cookie{challenge}, R,
3161c)$. Specifically, suppose that $S = (P_i, P_j, s)$ is a session.
3162\begin{itemize}
3163\item Suppose $T = (P_j, P_i, s)$ is the matching session of $S$. The game
3164 proceeds as before if $(R, c) = (R_T, c_T)$ is the challenge issued by $T$.
3165\item Otherwise, we run the extractor $T_\Wident$ on the adversary's history
3166 so far of oracle queries $H_I(X_i, s, R, \cdot)$ to determine a pair $(r,
3167 Y)$. If $r = \bot$ then we discard the message. Otherwise, we add $R$ to
3168 the list~$C$, and return a fake response to the adversary by computing $K =
3169 H_K(r R_S)$ and handing the adversary $(\cookie{response}, R_S, E_K(Y))$.
3170\end{itemize}
3171The following lemma shows how this affects the adversary's probabilities of
3172winning.
3173\begin{lemma}
3174 \label{lem:sk-g1-g2}
3175 \begin{equation}
3176 \label{eq:sk-g1-g2}
3177 \diff{1}{2} \le \frac{q_M}{2^{\ell_I}}.
3178 \end{equation}
3179\end{lemma}
3180
3181Let us say that a session $S = (P_i, P_j, s)$ is \emph{ripe} if
3182\begin{itemize}
3183\item there is a matching session $T = (P_j, P_i, s)$, and
3184\item $S$ is unexposed.
3185\end{itemize}
3186Suppose that $S$ is a ripe session, and that it has a matching session~$T$:
3187let $Z_S = Z_T = r_S r_T P$.
3188
3189Game~$\G3$ is the same as $\G2$, except that the game is immediately aborted
3190if ever the adversary queries its random oracle $H_K(\cdot)$ at a value $Z_S$
3191for any ripe session~$S$. The following lemma shows how this affects the
3192adversary's probabilities of winning.
3193\begin{lemma}
3194 \label{lem:sk-g2-g3}
dff0fad2 3195 For some $t'$ within the bounds given in the theorem statement we have
a6e375a6
MW
3196 \begin{equation}
3197 \label{eq:sk-g2-g3}
3198 \diff{2}{3} \le q_S \InSec{mcdh}(G; t', q_K).
3199 \end{equation}
3200\end{lemma}
3201
3202Game~$\G4$ is the same as $\G3$ except that the game is immediately aborted
3203if ever the adversary sends a response message to a ripe session~$S$ which
3204wasn't output by its matching session as a response to $S$'s challenge, with
3205the result that $S$ completes.
3206
3207Let's make this more precise. Let $U$ and $V$ be a pair of matching
sessions.  Let $C_U = (\cookie{challenge}, R_U, c_U)$ be the challenge message
sent by $U$.  Let $M_V$ be the set of messages which $V$ has sent upon
delivery of $C_U$.  Then, in $\G4$, we abort the game if, for any pair $S$
3211and~$T$ of matching, unexposed sessions, $S$ has completed as a result of
3212being sent a message $\mu \notin M_T$. We have the following lemma.
3213\begin{lemma}
3214 \label{lem:sk-g3-g4}
dff0fad2 3215 For a $t'$ within the stated bounds, we have
a6e375a6
MW
3216 \begin{equation}
3217 \label{eq:sk-g3-g4}
3218 \diff{3}{4} \le q_S \bigl( \InSec{ind-cca}(\E; t', q_M, q_M) +
3219 n \cdot \InSec{mcdh}(G; t', q_M + q_I) \bigr)
3220 \end{equation}
3221\end{lemma}
3222
3223Finally, let us consider the state we're in with $\G4$.
3224\begin{itemize}
\item No ripe session completes except as a result of the adversary faithfully
3226 delivering messages between it and its matching session.
3227\item The adversary never queries $Z_S$ for any ripe session~$S$. If we set
3228 $K_S = (K_{S, 0}, K_{S, 1}) = H_K(Z_S)$, then $K_{S, 1}$ is the key output
3229 by $S$ when it completes.
3230\item If $S$ and $T$ are matching ripe sessions, then $K_S = K_T$, since $Z_S
3231 = r_S R_T = r_T R_S = Z_T$.
3232\item For any ripe session~$S$, $K_{S, 1}$ is uniformly distributed in
3233 $\Bin^\kappa$ and independent of the adversary's view.
3234\item If $S = (P_i, P_j, s)$ and $T = (P_j, P_i, s)$ are matching ripe
  sessions, then $Z_S$ depends only on $r_S$ and $r_T$.  Hence, once $S$ and~$T$
3236 complete, and erase their states, $Z_S$ is independent of everything except
3237 the messages sent between the two sessions. In particular, $Z_S$ is
3238 independent of the long-term secrets $x_i$ and~$x_j$, so if either player
3239 is later corrupted, the key $K_{S, 1}$ remains independent of the
3240 adversary's view.
3241\item Hence, the keys output by unexposed sessions are indistinguishable from
3242 freshly-generated random strings, and remain so indefinitely.
3243\end{itemize}
3244We conclude that, for any adversary $A$,
3245\begin{equation}
3246 \label{eq:sk-g4}
3247 \Pr[V_4] = 0 \qquad \text{and} \qquad \Pr[W_4] = \frac{1}{2}.
3248\end{equation}
3249Putting equations~\ref{eq:sk-g0-g1}--\ref{eq:sk-g4} together, we find
32609df3 3250\begingroup \splitright=4em minus 4em
\begin{spliteqn}
  \Adv{sk}{\Wident^{G, \E}}(A) \le
3253 2 q_S \bigl(\InSec{ind-cca}(\E; t', q_M, q_M) + {} \\
3254 \InSec{mcdh}(G; t', q_K) +
3255 n \,\InSec{mcdh}(G; t', q_M + q_I) \bigr) + {}
32609df3 3256 \frac{n (n - 1)}{\#G} +
a6e375a6 3257 \frac{2 q_M}{2^{\ell_I}}.
32609df3 3258\end{spliteqn} \endgroup
a6e375a6
MW
3259The theorem follows, since $A$ was chosen arbitrarily.
3260
3261
3262\begin{proof}[Proof of lemma~\ref{lem:sk-g1-g2}]
3263 The two games $\G1$ and~$\G2$ differ only in whether they accept or reject
3264 particular challenge messages $(\cookie{challenge}, R, c)$.
3265
3266 We claim firstly that no message is \emph{accepted} by $\G2$ which would
3267 have been rejected by $\G1$. To prove the claim, it is sufficient to note
3268 that the extractor's output, if not $\bot$, is always correct, and hence if
3269 $\G2$ accepts a message then $\G1$ would have done so too.
3270
3271 Since $\G2$ also behaves identically when the adversary submits to~$S$ the
3272 challenge from the matching session~$T$, we have nothing to prove in this
3273 case. Let $F$ be the event that the adversary submits a message
3274 $(\cookie{challenge}, R, c)$ to a session~$S$ which $S$ would have accepted
3275 in $\G1$ but would be rejected by the new rule in~$\G2$. By
3276 lemma~\ref{lem:shoup} we have $\diff{1}{2} \le \Pr[F]$. To prove the
3277 current lemma, therefore, we must show that $\Pr[F] \le q_M/2^{\ell_I}$.
3278
3279 Rather than consider individual challenge messages, we consider
3280 \emph{classes} of messages. We shall refer to a quadruple~$\Cid = (i, j,
3281 s, R)$ as a \emph{class-id}, and define some useful functions:
3282 \begin{itemize}
3283 \item the class's \emph{session} $\Csession(\Cid) = (P_i, P_j, s)$;
3284 \item the class's \emph{index} $\Cindex(\Cid)$ is $r \in I$ where $R = r
3285 P$, which is well-defined by lemma~\ref{lem:unique-dl};
3286 \item the class's \emph{query} $\Cquery(\Cid) = (X_j, s, R, x_i R)$;
3287 \item the class's \emph{hash} $\Chash(\Cid) = H_I(\Cquery(\Cid)) = H_I(X_j,
3288 s, R, x_i R)$;
3289 \item the class's \emph{check-value} $\Ccheck(\Cid) = \Chash(\Cid) \xor
3290 \Cindex(\Cid)$;
3291 \item the class's \emph{check-set} $\Ccheckset(\Cid)$ is the set of
3292 check-values $c$ such that a message $(\cookie{challenge}, R, c)$ was
3293 sent to session $S = (P_i, P_j, s)$; and
3294 \item the class's \emph{count} $\Ccount(\Cid) = |\Ccheckset(\Cid)|$.
3295 \end{itemize}
3296
3297 Consider any class-id~$\Cid = (i, j, s, R)$. A natural question which
3298 arises is: which participants have issued $\Cid$'s query, i.e., queried
3299 $H_I$ at $\Cquery(\Cid)$?
3300
3301 We can characterise the $H_I(\cdot, \cdot, \cdot, \cdot)$ queries of a
3302 session $U = (P_{i'}, P_{j'}, s')$ as follows:
3303 \begin{itemize}
3304 \item computing the check-value for the challenge $R_U$ by querying
3305 $H_I(X_{i'}, s', R_U, r_U X_{j'})$, and
3306 \item checking an incoming challenge~$R'$ by querying $H_I(X_{j'}, s', R',
3307 x_{i'} R')$.
3308 \end{itemize}
3309 The class~$\Cid$'s query $\Cquery(\Cid)$ is $U$'s check-value query if
3310 \[ (j, i, s, R) = (i', j', s', R_U) \]
3311 i.e., $U$ is the matching session of $\Csession(\Cid)$, and moreover $R =
3312 R_U$ is the challenge value issued by $U$. For any $c \in
3313 \Ccheckset(\Cid)$, if $c = \Ccheck(\Cid)$ then $(\cookie{challenge}, R, c)$
3314 is precisely the challenge message issued by~$U$ to $\Csession(\Cid)$; the
3315 rules for handling this message didn't change. However, if $c \ne
3316 \Ccheck(\Cid)$ then the message would have been rejected in $\G1$, and we
3317 have already shown that $\G2$ continues to reject all messages rejected by
3318 $\G1$.
3319
3320 Let us say that a class-id~$\Cid = (i, j, s, R)$ is \emph{bad} if
3321 \begin{enumerate}
3322 \item the value $R$ is not the challenge issued by $\Csession(\Cid)$'s
3323 matching session, and
3324 \item the adversary has not issued $\Cid$'s query $\Cquery(\Cid)$,
3325 \emph{but}
3326 \item $\Ccheck(\Cid) \in \Ccheckset(\Cid)$, so one of the check-values
3327 submitted to $S$ was actually correct.
3328 \end{enumerate}
3329 We claim that our extractor will work perfectly unless some class-id is
3330 bad. Certainly, if $R$ was issued by the matching session, there is
3331 nothing to prove; if the adversary has issued the relevant query then the
3332 extractor will recover $\Cindex(\Cid)$ just fine; and if $\Ccheck(\Cid)
3333 \notin \Ccheckset(\Cid)$ then all messages in the class would have been
3334 rejected by $\G1$ anyway.
3335
3336 Let $B(\Cid)$ be the event that the class~$\Cid$ is bad. We claim that
3337 \[ \Pr[B(\Cid)] \le \frac{\Ccount(\Cid)}{2^{\ell_I}}. \]
3338 The present lemma follows, since
3339 \[ \diff{1}{2}
3340 \le \Pr[F]
3341 \le \sum_\Cid \Pr[B(\Cid)]
3342 \le \sum_\Cid \frac{\Ccount(\Cid)}{2^{\ell_I}}
3343 = \frac{1}{2^{\ell_I}} \sum_\Cid \Ccount(\Cid)
3344 \le \frac{q_M}{2^{\ell_I}}
3345 \]
32609df3 3346 as required.
a6e375a6
MW
3347
3348 Now observe that, in $\G2$, sessions don't actually check incoming
3349 challenges in this way any more -- instead we run the extractor. So, to
3350 prove the claim, we consider a class~$\Cid$ where properties~1 and~2 above
3351 hold. The correct hash $\Chash(\Cid)$ is then independent of the rest of
3352 the game, so the probability that $\Ccheck(\Cid) \in \Ccheckset(\Cid)$ is
3353 precisely $\Ccount(\Cid)/2^{\ell_I}$ as required.
3354
  This completes the proof of the lemma.
3356\end{proof}
3357
3358\begin{proof}[Proof of lemma~\ref{lem:sk-g2-g3}]
3359 Let $F$ be the event that the adversary makes a query $H_K(Z_S)$ for some
  ripe session~$S$.  Since $\G3$ proceeds exactly as $\G2$ did unless $F$
  occurs, we apply lemma~\ref{lem:shoup}, which tells us that $\diff{2}{3}
  \le \Pr[F]$.  We must therefore bound this probability.
3363
3364 To do this, we consider a new game~$\G3'$, which is the same as $\G3$,
3365 except that, at the start of the game, we choose a random number $k \inr
3366 \Nupto{q_S}$. For $0 \le i < q_S$, let $S_i$ be the $i$th session created
3367 by the adversary. We define $F'$ to be the event that the adversary
3368 queries $H_K(Z_{S_k})$ when $S_k$ is ripe.
3369
3370 The lemma now follows from these two claims.
3371
3372 \begin{claim}
3373 $\Pr[F] \le q_S \Pr[F']$.
3374 \end{claim}
3375 To see this, for any session~$S$, let $F_S$ be the event that the adversary
3376 queries~$H_K(Z_S)$ when $S$ is ripe. Then
3377 \[ \Pr[F] \le \sum_{0\le i<q_S} \Pr[F_{S_i}]. \]
3378 Hence,
3379 \[ \Pr[F'] = \Pr[F_{S_k}] = \sum_{0\le i<q_S} \Pr[F_{S_i}] \Pr[k = i]
3380 = \frac{1}{q_S} \sum_{0\le i<q_S} \Pr[F_{S_i}]
3381 \ge \frac{\Pr[F]}{q_S}
3382 \]
3383 proving the claim.
3384
3385 \begin{claim}
3386 For some $t' = t + O(n) + O(q_S q_M) + O(q_I) + O(q_K)$, we have
3387 $\Pr[F'] \le \InSec{mcdh}(G; t', q_K).$
3388 \end{claim}
3389 To prove this claim, we construct an adversary~$B$ which solves the MCDH
3390 problem in~$G$. The adversary works as follows.
3391 \begin{enumerate}
3392 \item It is given a pair $(R^*, S^*) = (r^* P, s^* P)$ of group elements;
3393 its objective is to make a verification-oracle query $V(Z^*)$ where $Z^*
3394 = r^* s^* P$.
3395 \item It sets up a simulation of the game~$\G3'$, by running the
3396 $\id{init}$ function, and simulating all of the parties. In particular,
3397 it chooses a random~$k \in \Nupto{q_S}$.
3398 \item It sets up accurate simulations of the random oracles $H_K(\cdot)$
3399 and $H_I(\cdot, \cdot, \cdot, \cdot)$, which choose random outputs for
3400 new, fresh inputs. However, whenever $A$ queries $H_K(\cdot)$ on a group
3401 element $Z$, $B$ also queries $V(Z)$.
3402 \item It runs $A$ in its simulated game. It responds to all of $A$'s
3403 instructions faithfully, until the $k$th session-creation.
3404 \item When creating the $k$th session $S = S_k = (P_i, P_j, s)$, $B$ has
3405 party~$P_i$ choose $R^*$ as its challenge, rather than choosing $r_S$ and
3406 setting $R_S = r_S P$. Because it simulates all the parties, $B$ can
    compute $Y_S = x_j R^*$, which is still correct.
3408 \item If $A$ requests the creation of a matching session $T = (P_j, P_i,
3409 s)$ then $B$ has party~$P_j$ choose $S^*$ as its challenge. Again, $B$
3410 computes $Y_T = x_i S^*$.
3411 \item If $A$ ever corrupts the parties $P_i$ or $P_j$, or reveals the
3412 session state of $S$ or $T$ then $B$ stops the simulation abruptly and
3413 halts.
3414 \end{enumerate}
dff0fad2
MW
3415 Adversary $B$'s running time is within the bounds required of $t'$, and $B$
3416 makes at most $q_K$ queries to $V(\cdot)$; we therefore have
a6e375a6
MW
3417 \[ \Pr[F'] \le \Succ{mcdh}{G}(B) \le \InSec{mcdh}(G; t', q_K) \]
3418 as required.
3419\end{proof}
3420
3421\begin{proof}[Proof of lemma~\ref{lem:sk-g3-g4}]
3422 Let $F_4$ be the event under which we abort the game~$\G4$. Clearly, if
  $F_4$ doesn't occur, games $\G3$ and $\G4$ proceed identically, so we can
3424 apply lemma~\ref{lem:shoup} to see that $\diff{3}{4} \le \Pr[F_4]$.
3425 Bounding $\Pr[F_4]$, however, is somewhat complicated. We use a further
3426 sequence of games.
3427
3428 Firstly, let $\G5$ be like $\G4$ with the exception that we choose, at
3429 random, an integer~$k \inr \Nupto{q_S}$. As we did in the proof for
  lemma~\ref{lem:sk-g2-g3}, let $S_i$ be the $i$th session created by the
3431 adversary. For each session~$S_i$, let $T_i$ be its matching session, if
3432 there is one. We define $F_5$ to be the event that
3433 \begin{itemize}
3434 \item $S_k$ completes immediately following delivery of a message $\mu
3435 \notin M_{T_k}$, and
3436 \item $S_k$ was ripe at this point.
3437 \end{itemize}
3438 For games~$\G{i}$, for $i > 5$, we define the event $F_i$ to be the event
3439 corresponding to $F_5$ in $\G{i}$. Note that if $S_k$ \emph{is} sent a
3440 message in $M_{T_k}$ then $S_k$ immediately completes.
3441
3442 \begin{claim}
    $\Pr[F_4] \le q_S \Pr[F_5]$.
3444 \end{claim}
3445 This claim is proven exactly as we did for claim~1 of
3446 lemma~\ref{lem:sk-g2-g3}.
3447
3448 Let~$\G6$ be the same as $\G5$ except that we change the encrypted
3449 responses of session~$S_k$ and its matching session~$T_k$. Let $K^* =
3450 (K_0^*, K_1^*) = H_K(Z_S)$. Then, rather than sending $(\cookie{response},
3451 R_S, E_{K_0^*}(Y_T))$, session~$S$ sends $(\cookie{response}, R_S,
3452 E_{K_0^*}(1^{\ell_G}))$.
3453 \begin{claim}
3454 $|\Pr[F_6] - \Pr[F_5]| \le \InSec{ind-cca}(\E; t', q_M, q_M).$
3455 \end{claim}
3456 To prove this claim, we construct an adversary~$B$ which attacks the
3457 IND-CCA security of our encryption scheme $\E$. The adversary~$B$ works as
3458 follows.
3459 \begin{enumerate}
3460 \item It is given no input, but a pair of oracles $E(\cdot, \cdot)$ and
3461 $D(\cdot)$; the former encrypts either the left or right input, according
3462 to a hidden bit, and the latter decrypts ciphertexts other than those
3463 returned by $E(\cdot, \cdot)$. Its goal is to guess the hidden bit.
3464 \item It sets up a simulation of the game~$\G5$, by running the $\id{init}$
3465 function, and simulating all of the parties. In particular, it chooses a
3466 random $k \in \Nupto{q_S}$.
3467 \item It sets up accurate simulations of the random oracles $H_K(\cdot)$
3468 and $H_I(\cdot, \cdot, \cdot, \cdot)$.
3469 \item It runs $A$ in its simulated game. It responds to all of $A$'s
3470 instructions faithfully, except for the matching sessions $S_k$ and
3471 $T_k$. Let $S = S_k = (P_i, P_j, s)$, and $T = T_k = (P_j, P_i, s)$.
3472 \item Suppose $T$ is sent the message $C_S = (\cookie{challenge}, R_S,
3473 c_S)$. Rather than computing $K^* = H_K(r_T R_S)$ and performing the
3474 encryption properly, $B$ queries its left-or-right encryption oracle
3475 $E(\cdot, \cdot)$ on $E(1^{\ell_G}, x_j R_S)$, and sends the resulting
3476 ciphertext~$\chi$ back to~$S$ as part of a message $(\cookie{response},
3477 R_T, \chi)$. The set $M_T$ is precisely the set of messages constructed
3478 in this fashion. (Recall that challenge messages other than $C_S$ aren't
3479 actually delivered to $T$, since we simulate the responses using the
3480 extractor, as of $\G2$.)
3481 \item Suppose $S$ is sent a message $M = (\cookie{response}, R_T, \chi) \in
3482 M_T$. We immediately stop the simulation, and $B$ returns $0$.
3483 \item Suppose, instead, that $S$ is sent some message $M' =
3484 (\cookie{response}, R, \chi) \notin M_T$. There are two cases to
3485 consider. If $R = R_T$ then we must have $\chi$ distinct from the
3486 ciphertexts returned by the $E(\cdot, \cdot)$ oracle, so we can invoke
3487 the decryption oracle $D(\cdot)$ on $\chi$ to obtain a response $Y$.
3488 Alternatively, if $R \ne R_T$, we can compute the key $K = (K_0, K_1) =
3489 H_K(r_S R)$, and recover $Y = D_{K_0}(\chi)$. In either case, if $Y =
    r_S X_j$ then $S$ would complete at this point: $B$ stops the simulation
3491 and returns $1$.
3492 \item If $A$ exposes $S$ (by corrupting $P_i$ or~$P_j$, or revealing $S$ or
3493 $T$) then we stop the simulation and $B$ returns $0$.
3494 \item Finally, if the game stops, either because $A$ halts, or because of
3495 one of the special rules introduced in earlier games, $B$ returns $0$.
3496 \end{enumerate}
3497 It is clear that $B$'s oracle queries are acceptable, since $|x_j R_S| =
3498 \ell_G$ by definition, and $B$ never queries $D(\cdot)$ on a ciphertext
3499 returned by its encryption oracle. By the rules of~$\G3$, we know that the
3500 game stops immediately if $A$ ever queries $Z_S$, so the key $K^*$ is
3501 independent of everything in $A$'s view except the ciphertexts $\chi$
3502 output by $S$ and $T$. Therefore, if the hidden bit of the IND-CCA game is
3503 $1$, $B$ accurately simulates $\G5$, whereas if the bit is $0$ then $B$
dff0fad2
MW
  accurately simulates $\G6$.  We issue no more than $q_M$ encryption or
3505 decryption queries. Finally, $B$'s running time is within the bounds
3506 allowed for $t'$. Therefore,
a6e375a6
MW
3507 \[ \Adv{ind-cca}{\E}(B) = \Pr[F_5] - \Pr[F_6]
3508 \le \InSec{ind-cca}(\E; t', q_M, q_M). \]
3509 We construct the adversary~$\bar{B}$ which is the same as $B$ above, except
3510 that $\bar{B}$ returns $0$ whenever $B$ returns~$1$, and \emph{vice versa}.
3511 Clearly
3512 \[ \Adv{ind-cca}{\E}(\bar{B})
3513 = (1 - \Pr[F_5]) - (1 - \Pr[F_6])
3514 = \Pr[F_6] - \Pr[F_5]
3515 \le \InSec{ind-cca}(\E; t', q_M, q_M).
3516 \]
3517 This proves the claim.
3518
3519 Let $\G7$ be the same as $\G6$, except that at the start of the game we
3520 choose a random $m \in \Nupto{n}$, and when the adversary creates the
3521 session $S = S_k = (P_i, P_j, s)$, we abort the game unless $j = m$.
3522 Clearly we have $\Pr[F_6] = n \Pr[F_7]$.
32609df3 3523
a6e375a6
MW
3524 Finally, we can explicitly bound $F_6$. In $\G6$, the adversary's view is
  independent of the correct response $Y_S = r_S X_j = x_j R_S$ to $S$'s
3526 challenge. Therefore, if $A$ manages to send any message $\mu \notin M_T$
3527 which causes $S$ to complete, then it has impersonated~$P_j$.
3528 \begin{claim}
3529 $\Pr[F_7] \le \InSec{mcdh}(G; t', q_M + q_I)$.
3530 \end{claim}
3531 The present lemma follows from this and the previous claims.
3532
3533 To prove the claim formally, we construct an adversary~$B'$, which behaves
3534 as follows.
3535 \begin{enumerate}
3536 \item It is given as input a public key $X^*$ and a single challenge $(R^*,
3537 c^*)$, a random oracle $H^*_I(\cdot, \cdot)$, and an oracle $V(\cdot,
3538 \cdot)$, which verifies responses $(R, Y)$. Its goal is to invoke
3539 $V(\cdot, \cdot)$ with a correct response to the challenge.
3540 \item It chooses a random $k \in \Nupto{q_S}$ and $m \in \Nupto{n}$. It
3541 sets up a simulation of the game~$\G7$, by running the $\id{init}$
3542 function, and simulating all of the parties, except that it gives party
3543 $P_m$ the public key $X^*$. This makes no difference, since $P_m$
3544 doesn't actually need to give any `honest' responses because of the
3545 change we made in $\G6$.
3546 \item It sets up accurate simulations of the random oracles $H_K(\cdot)$
3547 and $H_I(\cdot, \cdot, \cdot, \cdot)$, with one exception -- see below.
3548 \item It runs $A$ in its simulated game. It responds to all of $A$'s
3549 instructions faithfully, except for the session $S_k$. Let $S = S_k =
3550 (P_i, P_j, s)$, and let $T = T_k = (P_j, P_i, s)$ be its matching
3551 session.
3552 \item When session~$S$ is created, $B'$ checks that $j = m$, and if not
3553 stops the simulation and halts. Otherwise, $B'$ invokes its oracle~$C()$
3554 to obtain a pair $(R, c)$. Session~$S$ sends $C_S = (\cookie{challenge},
3555 R, c)$ as its challenge to~$T$.
  \item When $A$ makes a query $H_I(X^*, s, R, Y)$, $B'$ answers it by
3557 querying its \emph{own} random oracle $H^*_I(R, Y)$.
3558 \item When $S$ receives a message $(\cookie{response}, R, \chi)$, we
    compute $(K_0, K_1) = H_K(r_S R)$, and $Y = D_{K_0}(\chi)$.  If $Y \ne \bot$
3560 then $B'$ calls $V(R, Y)$.
3561 \item If $A$ reveals $S$ or corrupts $P_i$ or $P_j$ then $B'$ stops the
3562 simulation immediately and halts.
3563 \end{enumerate}
dff0fad2
MW
3564 The running time of $B'$ is within the bounds required of $t'$; it makes at
3565 most $q_I$ random-oracle and at most $q_M$ verification queries. Clearly
3566 $B'$ succeeds whenever $F_7$ occurs. The claim follows from
3567 theorem~\ref{thm:wident-sound}.
a6e375a6
MW
3568\end{proof}
3569
3570
3571\subsection{Proof of theorem~\ref{thm:sk2}}
3572\label{sec:sk2-proof}
3573
3574The proof is almost identical to the proof of theorem~\ref{thm:sk}, in
3575appendix~\ref{sec:sk-proof}. Unfortunately a black-box reduction doesn't
32609df3 3576seem possible.
a6e375a6
MW
3577
3578We use the games and notation of section~\ref{sec:sk-proof}.
3579
3580The change to the check-value calculation doesn't affect key-generation at
3581all, so the transition to $\G1$ goes through as before.
3582
3583The transition from $\G1$ to $\G2$ -- answering challenges using the
3584extractor -- needs a little care. Let $S = (P_i, P_j, s)$ be a session, and
3585consider an incoming message $(\cookie{challenge}, R, c)$.
3586\begin{itemize}
3587\item If $T = (P_j, P_i, s)$ is the matching session to $S$, and $R = R_T$ is
3588 the public challenge value of $T$, and $c = r_T \xor H_I(R_S, X_j, s, R_T,
3589 r_T X_i)$ is the check-value output by $T$ when it received
3590 $(\cookie{pre-challenge}, R_S)$ as input, then $S$ replies as it did in
3591 $\G1$.
3592\item If the challenge message is any other message, then we use the
3593 extractor.
3594\end{itemize}
3595As in lemma~\ref{lem:sk-g1-g2}, we examine which sessions could have queried
3596$H_I(R_S, X_j, s, R, x_i R)$, and for the same reasons conclude that only the
3597matching session would have done this, and only in response to the
3598pre-challenge $R_S$. It follows that $\diff{1}{2} \le q_M/2^{\ell_I}$ as
3599before.
3600
3601The remaining game steps go through unchanged. In particular, we conclude
3602that a ripe session will only complete if the adversary has transmitted
3603messages from its matching session correctly, and the session key is
3604independent of the adversary's view. The theorem follows.
3605
dff0fad2
MW
3606
3607\subsection{Sketch proof of single-key protocol for secure channels}
3608\label{sec:sc-proof}
3609
3610We want to show that the Wrestlers Key-Exchange protocol, followed by use of
3611the encryption scheme $\E$, with the \emph{same} key $K = K_0$, still
3612provides secure channels.
3613
3614\subsubsection{Secure channels definition}
3615We (very briefly!) recall the \cite{Canetti:2001:AKE} definition of a secure
3616channels protocol. We again play a game with the adversary. At the
3617beginning, we choose a bit $b^* \inr \{0, 1\}$ at random. We allow the
3618adversary the ability to establish \emph{secure channels} sessions within the
3619parties. Furthermore, for any running session $S = (P_i, P_j, s)$, we allow
3620the adversary to request $S$ to send a message~$\mu$ through its secure
3621channel. Finally, the adversary is allowed to choose one ripe
\emph{challenge} session, and ask for it to send one of a \emph{pair} of
3623messages $(\mu_0, \mu_1)$, subject to the restriction that $|\mu_0| =
3624|\mu_1|$; the session sends message $\mu_{b^*}$. The adversary may not
3625expose the challenge session.
3626
3627The adversary wins if (a)~it can guess the bit $b^*$, or (b)~it can cause a
3628ripe session $S$ (i.e., an unexposed, running session), with a matching
3629session~$T$ to output a message other than one that it requested that $T$
3630send.
3631
3632\subsubsection{Protocol definition}
3633The protocol begins with Wrestlers key-exchange. The encryption in the
3634key-exchange protocol is performed as $E_K(\cookie{kx}, \cdot)$; encryption
3635for secure channels is performed as $E_K(\cookie{sc}, i, o, \cdot)$, where
3636$i$ is a sequence number to prevent replays and $o \in \{S, T\}$ identifies
3637the sender.
3638
3639\subsubsection{Proof sketch}
3640We use the games and notation of appendix~\ref{sec:sk-proof}.
3641
3642The proof goes through as far as the step between $\G5$ and $\G6$ in the
3643proof of lemma~\ref{lem:sk-g3-g4}. Here we make the obvious adjustments to
3644our adversary against the IND-CCA security of $\E$. (Our adversary will need
3645to use the left-or-right oracle for messages sent using the secure channel
3646built on $K^*$. That's OK.)
3647
3648In $\G4$, we know that ripe sessions agree the correct key, and the adversary
3649never queries the random oracle, so the key is independent of the adversary's
3650view.
3651
3652We define a new game $\G8$, which is like $\G4$, except that we stop the game
3653if the adversary ever forges a message sent over the secure channel. That
3654is, if a ripe session $S$ ever announces receipt of a message not sent (at
3655the adversary's request) by its matching session $T$. Let $F_8$ be the event
3656that a forgery occurs. We apply lemma~\ref{lem:shoup}, which tells us that
3657$\diff{4}{8} \le \Pr[F_8]$. To bound $F_8$, we isolate a session at random
3658(as in lemmata \ref{lem:sk-g2-g3} and~\ref{lem:sk-g3-g4}), which tells us
3659that
3660\begin{equation}
3661 \label{eq:sc-g4-g8}
3662 \diff{4}{8} \le q_S \cdot \InSec{int-ptxt}(\E; t', q_M, q_M)
3663\end{equation}
3664Finally, we can bound the adversary's advantage at guessing the hidden bit
3665$b^*$. We isolate (we hope) the challenge session $S$ by choosing a target
32609df3 3666session at random, as before. Let $K^* = H_K(Z_S)$ be the key agreed by the
dff0fad2
MW
3667session (if it becomes ripe). We define an adversary $B$ against the IND-CCA
3668security of $\E$. The adversary $B$ simulates the game. If the adversary
3669exposes the target session, or doesn't choose it as the challenge session,
3670$B$ fails (and exits 0); otherwise it uses the left-or-right encryption
3671oracle to encrypt both of the adversary's message choices, and outputs the
3672adversary's choice. Let $b$ be the adversary's output, and let $\epsilon$ be
3673the advantage of our IND-CCA distinguisher. Then
3674\begin{eqnarray*}[rl]
3675 \Pr[b = b^*]
3676 & = \Pr[b = b^* \wedge b^* = 1] + \Pr[b = b^* \wedge b^* = 0] \\
3677 & = \frac{1}{2}\bigl( \Pr[b = b^* \mid b^* = 1] +
3678 \Pr[b = b^* \mid b^* = 0] \bigr) \\
3679 & = \frac{1}{2}\bigl( \Pr[b = b^* \mid b^* = 1] +
3680 (1 - \Pr[b \ne b^* \mid b^* = 0]) \bigr) \\
3681 & = \frac{1}{2}\bigl( \Pr[b = 1 \mid b^* = 1] -
3682 \Pr[b = 1 \mid b^* = 0] + 1 \bigr) \\
3683 & = \frac{1}{2}\bigl(1 + q_S\,\Adv{ind-cca}{\E}(B) \bigr) \\
3684 & \le \frac{1}{2} \bigl( 1 + q_S\,\InSec{ind-cca}(\E; t', q_M, q_M) \bigr).
3685 \eqnumber
3686\end{eqnarray*}
3687
a6e375a6
MW
3688\end{document}
3689
32609df3 3690%%% Local Variables:
a6e375a6
MW
3691%%% mode: latex
3692%%% TeX-master: t
32609df3 3693%%% End: